| content (string, lengths 1–1.04M) | input_ids (list, lengths 1–774k) | ratio_char_token (float64, 0.38–22.9) | token_count (int64, 1–774k) |
|---|---|---|---|
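Each row below pairs a source file (`content`) with its tokenization (`input_ids`); `token_count` is the length of that token list and `ratio_char_token` is characters per token. A minimal sketch of how the two derived columns relate to the raw ones, assuming the rows are loadable with the Hugging Face `datasets` library (the dataset id below is a hypothetical placeholder, not the real one):

```python
# Minimal sketch, not the dataset's actual build pipeline: recompute the
# derived columns for a single row. "user/code-dump" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train")  # hypothetical dataset id
row = ds[0]

token_count = len(row["input_ids"])                           # token_count column
ratio_char_token = len(row["content"]) / max(token_count, 1)  # ratio_char_token column

print(token_count, round(ratio_char_token, 2))
```

For example, the first row below lists 50 tokens and a ratio of 3.12, i.e. roughly 156 characters of source.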
import h5py
import numpy as np
import os
import torch as pt
from torch_geometric.data import Data, Dataset, download_url, extract_tar
from tqdm import tqdm
| [11748, 289, 20, 9078, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 28034, 355, 42975, 198, 6738, 28034, 62, 469, 16996, 13, 7890, 1330, 6060, 11, 16092, 292, 316, 11, 4321, 62, 6371, 11, 7925, 62, 18870, 198, 673... | 3.12 | 50 |
import sys
from check_output import *
workdir = sys.argv[1]
src = "keepgoing_same_step.sv"
assert_a = line_ref(workdir, src, "assert(a)")
assert_not_a = line_ref(workdir, src, "assert(!a)")
assert_0 = line_ref(workdir, src, "assert(0)")
assert_false = line_ref(workdir, "extra.smtc", "assert false")
assert_distinct = line_ref(workdir, "extra.smtc", "assert (distinct")
log = open(workdir + "/logfile.txt").read()
log_per_trace = log.split("Writing trace to VCD file")[:-1]
assert len(log_per_trace) == 4
assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_a, log, re.M)
assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_not_a, log, re.M)
assert re.search(r"Assert src/%s failed: false" % assert_false, log_per_trace[0], re.M)
assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[1], re.M)
assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[2], re.M)
assert re.search(r"Assert src/%s failed: \(distinct" % assert_distinct, log_per_trace[3], re.M)
| [11748, 25064, 198, 6738, 2198, 62, 22915, 1330, 1635, 198, 198, 1818, 15908, 796, 25064, 13, 853, 85, 58, 16, 60, 198, 10677, 796, 366, 14894, 5146, 62, 31642, 62, 9662, 13, 21370, 1, 198, 198, 30493, 62, 64, 796, 1627, 62, 5420, ... | 2.492891 | 422 |
import os
from Bio import AlignIO
from Bio.AlignIO import MultipleSeqAlignment
import logging
from sklearn.cluster import KMeans
import numpy as np
import gzip
from make_prg import utils
remove_duplicates = utils.remove_duplicates
contains_only = utils.contains_only
def get_interval_seqs(interval_alignment):
"""Replace - with nothing, remove seqs containing N or other non-allowed letters
and duplicate sequences containing RYKMSW, replacing with AGCT alternatives """
allowed = ['A','C','G','T','R','Y','K','M','S','W']
iupac = {'R': ['G', 'A'], 'Y': ['T', 'C'], 'K': ['G', 'T'], 'M': ['A', 'C'], 'S': ['G', 'C'], 'W': ['A', 'T']}
seqs = []
for s in list(remove_duplicates([str(record.seq).replace('-', '').upper() for record in interval_alignment])):
if contains_only(s, allowed):
new_seqs = [s]
for letter in iupac.keys():
letter_seqs = []
for t in new_seqs:
if letter in t:
letter_seqs.append(t.replace(letter, iupac[letter][0]))
letter_seqs.append(t.replace(letter, iupac[letter][1]))
else:
letter_seqs.append(t)
new_seqs = letter_seqs
seqs.extend(new_seqs)
ret_list = list(set(seqs))
if len(ret_list) == 0:
logging.warning("WARNING: Every sequence must have contained an N in this slice - redo sequence curation because this is nonsense")
logging.warning("Sequences were", " ".join(list(remove_duplicates([str(record.seq).replace('-', '').upper() for record in interval_alignment]))))
logging.warning("Using these sequences anyway, and should be ignored downstream")
seqs = list(remove_duplicates([str(record.seq).replace('-', '').upper() for record in interval_alignment]))
return sorted(list(set(seqs)))
class AlignedSeq(object):
"""
Object based on a set of aligned sequences. Note min_match_length must be strictly greater than max_nesting + 1.
"""
def get_consensus(self):
"""Given a set of aligment records from AlignIO, creates
a consensus string.
Lower and upper case are equivalent
Non AGCT symbols RYKMSW result in non-consensus and are substituted in graph
N results in consensus at that position unless they are all N."""
first_string = str(self.alignment[0].seq)
consensus_string = ''
for i, letter in enumerate(first_string):
consensus = True
for record in self.alignment:
if (record.seq[i].upper() != "N" and letter.upper() != "N") and (record.seq[i].upper() != letter.upper() or record.seq[i].upper() in ['R','Y','K','M','S','W']):
consensus = False
break
if letter.upper() == "N" and record.seq[i].upper() != "N":
letter = record.seq[i].upper()
if consensus and letter.upper() != "N":
consensus_string += letter
else:
consensus_string += '*'
assert(len(first_string)==len(consensus_string))
return consensus_string
@property
def get_match_intervals(self):
"""Return a list of intervals in which we have
consensus sequence longer than min_match_length, and
a list of the non-match intervals left."""
match_intervals = []
non_match_intervals = []
match_count = 0
match_start = 0
non_match_start = 0
logging.debug("consensus: %s" %self.consensus)
if len(self.consensus.replace('-', '')) < self.min_match_length:
# It makes no sense to classify a fully consensus sequence as
# a non-match just because it is too short.
if '*' in self.consensus:
interval_alignment = self.alignment[:, 0:self.length]
interval_seqs = get_interval_seqs(interval_alignment)
if len(interval_seqs) > 1:
logging.debug("add short non-match whole interval [%d,%d]" %(0,self.length - 1))
non_match_intervals.append([0, self.length - 1])
else:
logging.debug("add short match whole interval [%d,%d]" %(0,self.length - 1))
match_intervals.append([0, self.length - 1])
else:
match_intervals.append([0, self.length - 1])
logging.debug("add short match whole interval [%d,%d]" % (0, self.length - 1))
else:
for i in range(self.length):
letter = self.consensus[i]
if letter != '*':
# In a match region.
if match_count == 0:
match_start = i
match_count += 1
elif match_count > 0:
# Have reached a non-match. Check if previous match string is long enough to add to match_regions
match_string = self.consensus[match_start: match_start + match_count].replace('-', '')
match_len = len(match_string)
logging.debug("have match string %s" % match_string)
if match_len >= self.min_match_length:
# if the non_match sequences in the interval are really the same, add a match interval
interval_alignment = self.alignment[:, non_match_start:match_start + 1]
interval_seqs = get_interval_seqs(interval_alignment)
if non_match_start < match_start and len(interval_seqs) > 1:
non_match_intervals.append([non_match_start, match_start - 1])
logging.debug("add non-match interval as have alts [%d,%d]"
% (non_match_start, match_start - 1))
elif non_match_start < match_start:
match_intervals.append([non_match_start, match_start - 1])
logging.debug("add match interval as only one seq [%d,%d]"
% (non_match_start, match_start - 1))
match_intervals.append([match_start, match_start + match_count - 1])
logging.debug("add match interval to complete step [%d,%d]"
% (match_start, match_start + match_count- 1))
non_match_start = i
match_count = 0
match_start = non_match_start
# At end add last intervals
match_string = self.consensus[match_start: match_start + match_count].replace('-', '')
match_len = len(match_string)
logging.debug("at end have match string %s" % match_string)
if 0 < match_len < self.min_match_length:
logging.debug("have short match region at end, so include it in non-match-region before - "
"match count was %d" %match_count)
match_count = 0
match_start = non_match_start
logging.debug("match count is now %d" % match_count)
if match_count > 0:
interval_alignment = self.alignment[:, non_match_start:match_start + 1]
else:
interval_alignment = self.alignment[:, non_match_start:self.length]
interval_seqs = get_interval_seqs(interval_alignment)
if len(interval_seqs) == 1:
match_intervals.append([non_match_start, self.length - 1])
logging.debug("add match interval at end as only one seq [%d,%d]" % (non_match_start, self.length - 1))
elif len(interval_seqs) > 1 and non_match_start < match_start:
non_match_intervals.append([non_match_start, match_start - 1])
logging.debug("add non-match interval at end as have alts [%d,%d]" % (non_match_start, match_start - 1))
match_intervals.append([match_start, self.length - 1])
logging.debug("add match interval at end [%d,%d]" % (match_start, self.length - 1))
else:
non_match_intervals.append([non_match_start, self.length - 1])
logging.debug("add only non-match interval at end as have alts [%d,%d]" % (non_match_start, self.length - 1))
# check all stretches of consensus are in an interval, and intervals don't overlap
for i in range(self.length):
count_match = 0
for interval in match_intervals:
if interval[0] <= i <= interval[1]:
count_match += 1
count_non_match = 0
for interval in non_match_intervals:
if interval[0] <= i <= interval[1]:
count_non_match += 1
assert (count_match | count_non_match), "Failed to correctly identify match intervals: position %d " \
"appeared in both/neither match and non-match intervals" % i
assert (count_match + count_non_match == 1), "Failed to correctly identify match intervals: position " \
"%d appeared in %d intervals" % (
i, count_match + count_non_match)
return match_intervals, non_match_intervals
def check_nonmatch_intervals(self):
"""Goes through non-match intervals and makes sure there is more than one sequence there, else makes it a match
interval."""
for i in reversed(range(len(self.non_match_intervals))):
interval = self.non_match_intervals[i]
interval_alignment = self.alignment[:, interval[0]:interval[1] + 1]
interval_seqs = get_interval_seqs(interval_alignment)
if len(interval_seqs) < 2:
self.match_intervals.append(self.non_match_intervals[i])
self.non_match_intervals.pop(i)
self.match_intervals.sort()
def kmeans_cluster_seqs_in_interval(self, interval): # , kmer_size=self.min_match_length):
"""Divide sequences in interval into subgroups of similar
sequences. Return a list of lists of ids."""
if interval[1] - interval[0] <= self.min_match_length:
logging.info("Small variation site in interval %s \n", interval)
logging.debug("interval[1] - interval[0] <= self.min_match_length: %d <= %d", interval[1] - interval[0],
self.min_match_length)
interval_alignment = self.alignment[:, interval[0]:interval[1] + 1]
interval_seqs = get_interval_seqs(interval_alignment)
assert len(interval_seqs) == len(
list(remove_duplicates(interval_seqs))), "should not have duplicate alternative allele sequences"
return_id_lists = [[record.id for record in self.alignment if
str(record.seq[interval[0]:interval[1] + 1]).replace('-', '') == seq] for seq in
interval_seqs]
else:
logging.debug("Get kmeans partition of interval [%d, %d]", interval[0], interval[1])
interval_alignment = self.alignment[:, interval[0]:interval[1] + 1]
interval_seq_dict = {}
small_interval_seq_dict = {}
seq_dict_keys = []
for record in interval_alignment:
seq = str(record.seq).replace('-', '')
if seq in list(interval_seq_dict.keys()):
interval_seq_dict[seq].append(record.id)
elif seq in list(small_interval_seq_dict.keys()):
small_interval_seq_dict[seq].append(record.id)
elif len(seq) >= self.min_match_length:
interval_seq_dict[seq] = [record.id]
seq_dict_keys.append(seq)
else:
small_interval_seq_dict[seq] = [record.id]
seq_dict_keys.append(seq)
assert len(seq_dict_keys) == len(
list(remove_duplicates(seq_dict_keys))), "error, have duplicate dictionary keys"
assert len([key for key in list(interval_seq_dict.keys()) if
key in list(small_interval_seq_dict.keys())]) == 0, "error, should have no overlap of keys"
assert len([key for key in list(small_interval_seq_dict.keys()) if
key in list(interval_seq_dict.keys())]) == 0, "error, should have no overlap of keys"
logging.debug("Add classes corresponding to %d small sequences" % len(list(small_interval_seq_dict.keys())))
logging.debug("Now add classes corresponding to %d longer sequences" % len(list(interval_seq_dict.keys())))
interval_seqs = list(interval_seq_dict.keys())
big_return_id_lists = []
if len(interval_seqs) > 1:
# first transform sequences into kmer occurance vectors using a dict
logging.debug("First transform sequences into kmer occurance vectors")
# make dict based on number of kmers in all sequences
self.kmer_dict = {}
n = 0
for j, seq in enumerate(interval_seqs):
for i in range(len(seq) - self.min_match_length + 1):
if seq not in list(self.kmer_dict.keys()):
self.kmer_dict[seq[i:i + self.min_match_length]] = n
n += 1
logging.debug("These vectors have length %d" % n)
# transform to vectors using dict
seq_kmer_counts = np.zeros(shape=(len(interval_seqs), n))
for j, seq in enumerate(interval_seqs):
counts = np.zeros(n)
for i in range(len(seq) - self.min_match_length + 1):
counts[self.kmer_dict[seq[i:i + self.min_match_length]]] += 1
seq_kmer_counts[j] = counts
# cluster sequences using kmeans
logging.debug("Now cluster:")
kmeans = KMeans(n_clusters=1, random_state=2).fit(seq_kmer_counts)
pre_cluster_inertia = kmeans.inertia_
if pre_cluster_inertia == 0:
logging.debug("pre_cluster_intertia is 0!")
for key in list(interval_seq_dict.keys()):
logging.debug("seq: %s, num_seqs with this seq: %d", key, len(interval_seq_dict[key]))
cluster_inertia = pre_cluster_inertia
number_of_clusters = 1
logging.debug("number of clusters: %d, inertia: %f", number_of_clusters, cluster_inertia)
while (cluster_inertia > 0
and cluster_inertia > pre_cluster_inertia / 2
and number_of_clusters <= len(interval_seqs)):
number_of_clusters += 1
kmeans = KMeans(n_clusters=number_of_clusters, random_state=2).fit(seq_kmer_counts)
cluster_inertia = kmeans.inertia_
logging.debug("number of clusters: %d, inertia: %f", number_of_clusters, cluster_inertia)
# now extract the equivalence class details from this partition and return
logging.debug("Extract equivalence classes from this partition")
if pre_cluster_inertia > 0:
equiv_class_ids = list(kmeans.predict(seq_kmer_counts))
for i in range(max(equiv_class_ids) + 1):
big_return_id_lists.append([])
for i, val in enumerate(equiv_class_ids):
big_return_id_lists[val].extend(interval_seq_dict[interval_seqs[i]])
else:
logging.debug("default to not clustering")
big_return_id_lists = [interval_seq_dict[key] for key in interval_seq_dict.keys()]
elif len(interval_seqs) == 1:
big_return_id_lists = [interval_seq_dict[interval_seqs[0]]]
# now merge big and small return_id_lists so as to maintain the order of seqs before
logging.debug("Merge return id lists for the partitions")
return_id_lists = []
added_ids = []
big_keys = list(interval_seq_dict.keys())
small_keys = list(small_interval_seq_dict.keys())
for seq in seq_dict_keys:
if seq in small_keys:
logging.debug("add (small) return ids: %s" % small_interval_seq_dict[seq])
return_id_lists.append(small_interval_seq_dict[seq])
elif seq in big_keys:
not_added = [nid for nid in interval_seq_dict[seq] if nid not in added_ids]
if len(not_added) == len(interval_seq_dict[seq]):
logging.debug("want to add (big) return ids: %s" % interval_seq_dict[seq])
for i in range(len(big_return_id_lists)):
if interval_seq_dict[seq][0] in big_return_id_lists[i]:
logging.debug("add (big) return ids %d: %s" % (i, big_return_id_lists[i]))
return_id_lists.append(big_return_id_lists[i])
added_ids.extend(return_id_lists[-1])
break
else:
assert len(
not_added) == 0, "Equivalent sequences should be in same part of partition and are not"
else:
logging.warning("Key %s doesn't seem to be in either big keys or small keys")
assert len(interval_alignment) == sum([len(i) for i in return_id_lists]), \
"I seem to have lost (or gained?) some sequences in the process of clustering"
assert len(return_id_lists) > 1, \
"should have some alternate alleles, not only one sequence, this is a non-match interval"
return return_id_lists
@property
| [11748, 28686, 198, 6738, 16024, 1330, 978, 570, 9399, 198, 6738, 16024, 13, 2348, 570, 9399, 1330, 20401, 4653, 80, 2348, 16747, 198, 11748, 18931, 198, 6738, 1341, 35720, 13, 565, 5819, 1330, 509, 5308, 504, 198, 11748, 299, 32152, 355... | 2.045674 | 8,911 |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import numpy.random as npr
from ConfigParser import *
import os
import cPickle
import scipy.io
import sys
import glob
from numpy.linalg import norm
from scipy import misc
import utils
if __name__=='__main__':
print '...'
| [11748, 11192, 273, 11125, 355, 48700, 198, 11748, 11192, 273, 11125, 13, 3642, 822, 13, 82, 2475, 355, 18862, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 299, 32152, 13, 25120, 355, 299, 1050, 198, 6738, 17056, 46677, 1330, 1635, 19... | 2.95283 | 106 |
#!/usr/bin/python3
from bdl_requests.bdl_request import *
from logger import logger
| [2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 6738, 275, 25404, 62, 8897, 3558, 13, 17457, 75, 62, 25927, 1330, 1635, 198, 6738, 49706, 1330, 49706, 628, 198] | 2.9 | 30 |
from sqlalchemy import Column, Integer, String, Numeric, func, distinct, Boolean
from app import db
| [6738, 44161, 282, 26599, 1330, 29201, 11, 34142, 11, 10903, 11, 399, 39223, 11, 25439, 11, 7310, 11, 41146, 198, 6738, 598, 1330, 20613, 628] | 4.04 | 25 |
from scp_fetcher_bs4.scp_info import SCPInfo, SCPParsingError
import requests
_TEMPLATE = """
Information about SCP-{0}:
\tObject Class: {1}
"""
def fetch_scp_173():
"""
Fetch the information about SCP-173 from http://scp-wiki.wikidot.com
"""
req = requests.get('http://scp-wiki.wikidot.com/scp-173')
text = req.text
req.close()
try:
scp_info = SCPInfo.from_html_page(text)
except SCPParsingError:
raise
print(_TEMPLATE.format(scp_info.id, scp_info.object_class))
if __name__ == '__main__':
fetch_scp_173()
| [6738, 629, 79, 62, 34045, 2044, 62, 1443, 19, 13, 1416, 79, 62, 10951, 1330, 17527, 12360, 11, 6374, 10246, 945, 278, 12331, 198, 11748, 7007, 628, 198, 62, 51, 3620, 6489, 6158, 796, 37227, 198, 21918, 546, 17527, 12, 90, 15, 38362... | 2.307692 | 247 |
# --------------------------------------------------------
#
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Shuhao Fu
# --------------------------------------------------------
"""
Proposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.
"""
import mxnet as mx
import cv2
import numpy as np
import time
from graphcut.comp_max_flow import solve_masks_with_lists
@mx.operator.register('GraphCut')
| [2, 20368, 22369, 201, 198, 2, 201, 198, 2, 15069, 357, 66, 8, 2177, 5413, 201, 198, 2, 49962, 739, 383, 24843, 12, 17, 13, 15, 13789, 685, 3826, 38559, 24290, 329, 3307, 60, 201, 198, 2, 22503, 416, 32344, 23778, 13333, 201, 198, ... | 3.62585 | 147 |
__author__ = 'johnnylee'
import geometery as geo
import random
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import transformations as xforms
import sys
points = np.zeros((4, 2))
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', xlim=(0, 1), ylim=(0, 1))
data_lines, = ax.plot([])
data_pts, = ax.plot([], [], 'ro')
start_pt, = ax.plot([], [], 'go')
plt_text = ax.text(0.05, 0.05, 'text',
horizontalalignment='left',
verticalalignment='bottom',
transform=ax.transAxes)
#runTest()
#cid = fig.canvas.mpl_connect('key_press_event', press)
#plt.show()
m = np.identity(4)
#m[2, 2] = 0
#m[1, 2] = 1
#m[1, 1] = 0
#m[2, 1] = -1
m[2, 2] = 0
m[2, 1] = 1
m[1, 1] = 0
m[1, 2] = -1
print m
quaternion = xforms.quaternion_from_matrix(m)
print quaternion
print xforms.quaternion_matrix(quaternion)
print math.sqrt(2.0)/2.0 | [834, 9800, 834, 796, 705, 30686, 77, 2349, 68, 6, 198, 198, 11748, 4903, 908, 1924, 355, 40087, 198, 11748, 4738, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 48, 83, 19, 46384, 11537, 198, 11748, 2603, 2948... | 2.194245 | 417 |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2019-2021, Saarland University
# Copyright (C) 2019-2021, Maximilian Köhl <koehl@cs.uni-saarland.de>
from __future__ import annotations
import dataclasses as d
import typing as t
import collections
import enum
from . import actions, errors, functions, expressions, types
from .automata import Automaton
from .networks import Network
class ModelType(enum.Enum):
"""
An enum representing different *model types*.
Attributes
----------
full_name: str
The full human-readable name of the model type.
"""
LTS = "Labeled Transition System"
""" Labeled Transition System """
DTMC = "Discrete-Time Markov Chain"
""" Discrete-Time Markov Chain """
CTMC = "Continuous-Time Markov Chain"
""" Continuous-Time Markov Chain """
MDP = "Markov Decision Process"
""" Markov Decision Process """
CTMDP = "Continuous-Time Markov Decision Process"
""" Continuous-Time Markov Decision Process """
MA = "Markov Automaton"
""" Markov Automaton """
TA = "Timed Automaton"
""" Timed Automaton """
PTA = "Probabilistic Timed Automaton"
""" Probabilistic Timed Automaton """
STA = "Stochastic Timed Automaton"
""" Stochastic Timed Automaton """
HA = "Hybrid Automaton"
""" Hybrid Automaton """
PHA = "Probabilistic Hybrid Automaton"
""" Probabilistic Hybrid Automaton """
SHA = "Stochastic Hybrid Automaton"
""" Stochastic Hybrid Automaton """
full_name: str
@property
def uses_clocks(self) -> bool:
"""
Returns :obj:`True` if and only if the respective models use real-value clocks.
"""
return self in _CLOCK_TYPES
@property
def is_untimed(self) -> bool:
"""
Returns :obj:`True` if and only if the model type is *not timed*.
Untimed model types are :code:`LTS`, :code:`DTMC`, and :code:`MDP`.
"""
return self in _UNTIMED_TYPES
_CLOCK_TYPES = {
ModelType.TA,
ModelType.PTA,
ModelType.STA,
ModelType.HA,
ModelType.PHA,
ModelType.SHA,
}
_UNTIMED_TYPES = {ModelType.MDP, ModelType.LTS, ModelType.DTMC}
# XXX: this class should be abstract, however, then it would not type-check
# https://github.com/python/mypy/issues/5374
@d.dataclass(frozen=True)
class IdentifierDeclaration:
"""
Represents a declaration of an identifier.
Attributes
----------
identifier:
The declared identifier.
typ:
The type of the identifier.
comment:
An additional optional comment for the declaration.
"""
identifier: str
typ: types.Type
comment: t.Optional[str] = None
# XXX: this method shall be implemented by all subclasses
def validate(self, scope: Scope) -> None:
"""
Validates that the declaration is valid in the given scope.
Raises :class:`~errors.ModelingError` if the declaration is invalid.
"""
raise NotImplementedError()
@d.dataclass(frozen=True)
class VariableDeclaration(IdentifierDeclaration):
"""
Represents a *variable declaration*.
Attributes
----------
is_transient:
Optional boolean flag indicating whether the variable is *transient*.
initial_value:
Optional :class:`~momba.model.Expression` providing an initial value for the variable.
"""
is_transient: t.Optional[bool] = None
initial_value: t.Optional[expressions.Expression] = None
@d.dataclass(frozen=True)
class ConstantDeclaration(IdentifierDeclaration):
"""
Represents a *constant declaration*.
Attributes
----------
value:
Optional :class:`~momba.model.Expression` specifying the value of the constant.
"""
value: t.Optional[expressions.Expression] = None
@property
def is_parameter(self) -> bool:
"""
Returns :obj:`True` if and only if the constant is a *parameter*.
Parameters are constants without a :attr:`value`.
"""
return self.value is None
@d.dataclass(frozen=True)
class PropertyDefinition:
"""
Represents a *property definition*.
Attributes
----------
name:
The name of the property.
expression:
An :class:`~momba.model.Expression` defining the property.
comment:
An optional comment describing the property.
"""
name: str
expression: expressions.Expression
comment: t.Optional[str] = None
class Scope:
"""
Represents a *scope*.
Attributes
----------
ctx:
The modeling context associated with the scope.
parent:
The parent scope if it exists (:obj:`None` if there is no parent).
"""
ctx: Context
parent: t.Optional[Scope]
_declarations: t.OrderedDict[str, IdentifierDeclaration]
_functions: t.OrderedDict[str, functions.FunctionDefinition]
_type_cache: t.Dict[expressions.Expression, types.Type]
@property
def declarations(self) -> t.AbstractSet[IdentifierDeclaration]:
"""
Variable and constant declarations of the scope.
"""
return frozenset(self._declarations.values())
@property
def variable_declarations(self) -> t.Sequence[VariableDeclaration]:
"""
Variable declarations of the scope.
"""
return tuple(
declaration
for declaration in self._declarations.values()
if isinstance(declaration, VariableDeclaration)
)
@property
def constant_declarations(self) -> t.Sequence[ConstantDeclaration]:
"""
Constant declarations of the scope.
"""
return tuple(
declaration
for declaration in self._declarations.values()
if isinstance(declaration, ConstantDeclaration)
)
@property
def clock_declarations(self) -> t.AbstractSet[VariableDeclaration]:
"""
Variable declarations of clock variables of the scope.
"""
# FIXME: this does not return declarations with a bounded CLOCK type
return frozenset(
declaration
for declaration in self._declarations.values()
if (
isinstance(declaration, VariableDeclaration)
and declaration.typ == types.CLOCK
)
)
def create_child_scope(self) -> Scope:
"""
Creates a child scope.
"""
return Scope(self.ctx, parent=self)
def get_type(self, expr: expressions.Expression) -> types.Type:
"""
Returns the (inferred) type of the given expression in the scope.
"""
if expr not in self._type_cache:
inferred_type = expr.infer_type(self)
inferred_type.validate_in(self)
self._type_cache[expr] = inferred_type
return self._type_cache[expr]
def get_function(self, name: str) -> functions.FunctionDefinition:
"""
Retrieves a :class:`FunctionDefinition` by its name.
Raises :class:`~errors.NotFoundError` if no such definition exists.
"""
try:
return self._functions[name]
except KeyError:
if self.parent is None:
raise errors.NotFoundError(f"no function with name {name} found")
return self.parent.get_function(name)
def is_local(self, identifier: str) -> bool:
"""
Checks whether the identifier is locally declared in the scope.
"""
return identifier in self._declarations
def is_declared(self, identifier: str) -> bool:
"""
Checks whether the identifier is declared in the scope.
"""
if identifier in self._declarations:
return True
if self.parent is not None:
return self.parent.is_declared(identifier)
return False
def get_scope(self, identifier: str) -> Scope:
"""
Retrieves the scope in which the given identifier is declared.
Raises :class:`~errors.NotFoundError` if no such identifier is declared.
"""
if identifier in self._declarations:
return self
else:
if self.parent is None:
raise errors.NotFoundError(
f"identifier {identifier!r} is unbound in scope {self!r}"
)
return self.parent.get_scope(identifier)
def lookup(self, identifier: str) -> IdentifierDeclaration:
"""
Retrieves the declaration for the given identifier.
Raises :class:`~errors.NotFoundError` if no such identifier is declared.
"""
try:
return self._declarations[identifier]
except KeyError:
if self.parent is None:
raise errors.NotFoundError(
f"identifier {identifier!r} is unbound in scope {self!r}"
)
return self.parent.lookup(identifier)
def add_declaration(
self, declaration: IdentifierDeclaration, *, validate: bool = True
) -> None:
"""
Adds an identifier declaration to the scope.
The flag `validate` specifies whether the declaration should
be validated within the scope before adding it. In case
validation fails, a :class:`~errors.ModelingError` is raised.
Raises :class:`~errors.ModelingError` in case the identifier
has already been declared.
"""
if declaration.identifier in self._declarations:
raise errors.InvalidDeclarationError(
f"identifier {declaration.identifier!r} has already been declared"
)
if validate:
declaration.validate(self)
self._declarations[declaration.identifier] = declaration
def declare_variable(
self,
identifier: str,
typ: types.Type,
*,
is_transient: t.Optional[bool] = None,
initial_value: t.Optional[expressions.ValueOrExpression] = None,
comment: t.Optional[str] = None,
) -> None:
"""
Declares a variable within the scope.
The parameters are passed to :class:`VariableDeclaration` with
the exception of `initial_value`. When provided with a value
which is not an expressions, this function implicitly converts
the provided value into an expression using :func:`ensure_expr`.
Raises :class:`~errors.ModelingError` in case the identifier
has already been declared.
"""
value = None
if initial_value is not None:
value = expressions.ensure_expr(initial_value)
self.add_declaration(
VariableDeclaration(
identifier,
typ,
is_transient=is_transient,
initial_value=value,
comment=comment,
)
)
def declare_constant(
self,
identifier: str,
typ: types.Type,
*,
value: t.Optional[expressions.ValueOrExpression] = None,
comment: t.Optional[str] = None,
) -> None:
"""
Declares a constant within the scope.
The parameters are passed to :class:`ConstantDeclaration` with
the exception of `value`. When provided with a value which is
not an expressions, this function implicitly converts the
provided value into an expression using :func:`ensure_expr`.
Raises :class:`~errors.ModelingError` in case the identifier
has already been declared.
"""
if value is None:
self.add_declaration(
ConstantDeclaration(identifier, typ, comment=comment, value=None)
)
else:
self.add_declaration(
ConstantDeclaration(
identifier,
typ,
comment=comment,
value=expressions.ensure_expr(value),
)
)
def define_function(
self,
name: str,
parameters: t.Sequence[functions.FunctionParameter],
returns: types.Type,
body: expressions.Expression,
) -> functions.FunctionDefinition:
"""
Defines a function within the scope.
The parameters are passed to :class:`FunctionDefinition`.
Raises :class:`~errors.ModelingError` in case an identically
named function already exists.
"""
if name in self._functions:
raise errors.ModelingError(f"a function named {name!r} already exists")
definition = functions.FunctionDefinition(
name, tuple(parameters), returns, body
)
self._functions[name] = definition
return definition
class Context:
"""
Represents a *modeling context*.
Attributes:
model_type:
The :class:`ModelType` of the modeling context.
global_scope:
The global :class:`Scope` of the modeling context.
"""
model_type: ModelType
global_scope: Scope
_automata: t.Set[Automaton]
_networks: t.Set[Network]
_action_types: t.Dict[str, actions.ActionType]
_named_properties: t.Dict[str, PropertyDefinition]
_metadata: t.Dict[str, str]
@property
def automata(self) -> t.AbstractSet[Automaton]:
"""
The set of automata defined on the modeling context.
"""
return self._automata
@property
def networks(self) -> t.AbstractSet[Network]:
"""
The set of networks defined on the modeling context.
"""
return self._networks
@property
def metadata(self) -> t.Mapping[str, str]:
"""
Additional metadata associated with the modeling
context (e.g., author information).
"""
return self._metadata
@property
def action_types(self) -> t.Mapping[str, actions.ActionType]:
"""
The action types defined on the modeling context.
"""
return self._action_types
@property
def properties(self) -> t.Mapping[str, PropertyDefinition]:
"""
The properties defined on the modeling context.
"""
return self._named_properties
def update_metadata(self, metadata: t.Mapping[str, str]) -> None:
"""
Updates the metadata with the provided mapping.
"""
self._metadata.update(metadata)
def get_automaton_by_name(self, name: str) -> Automaton:
"""
Retrieves an automaton by its name.
Raises :class:`~errors.NotFoundError` if no such automaton exists.
"""
for automaton in self._automata:
if automaton.name == name:
return automaton
raise errors.NotFoundError(f"there exists no automaton named {name!r}")
def get_network_by_name(self, name: str) -> Network:
"""
Retrives a network by its name.
Raises :class:`~errors.NotFoundError` if no such network exists.
"""
for network in self._networks:
if network.name == name:
return network
raise errors.NotFoundError(f"there exists no network named {name!r}")
def get_property_definition_by_name(self, name: str) -> PropertyDefinition:
"""
Retrieves a property definition by its name.
Raises :class:`~errors.NotFoundError` if no
such property definition exists.
"""
try:
return self._named_properties[name]
except KeyError:
raise errors.NotFoundError(
f"there exists no property definition named {name!r}"
)
def get_action_type_by_name(self, name: str) -> actions.ActionType:
"""
Retrives an action type by its name.
Raises :class:`~errors.NotFoundError` if no such action type exists.
"""
try:
return self._action_types[name]
except KeyError:
raise errors.NotFoundError(f"there exists no action type named {name!r}")
def _add_action_type(self, action_type: actions.ActionType) -> None:
"""
Adds an action type to the modeling context.
Raises :class:`~errors.ModelingError` if an identically
named action type already exists.
"""
if action_type.label in self._action_types:
raise errors.ModelingError(
f"an action type with name {action_type.label!r} already exists"
)
self._action_types[action_type.label] = action_type
def create_action_type(
self, name: str, *, parameters: t.Sequence[actions.ActionParameter] = ()
) -> actions.ActionType:
"""
Creates a new action type with the given name and parameters.
Raises :class:`~errors.ModelingError` if an identically
named action type already exists.
"""
if name in self._action_types:
raise errors.ModelingError(f"action type with name {name!r} already exists")
action_type = actions.ActionType(name, tuple(parameters))
self._add_action_type(action_type)
return action_type
def create_automaton(self, *, name: t.Optional[str] = None) -> Automaton:
"""
Creates an automaton with the given optional name and returns it.
"""
automaton = Automaton(self, name=name)
self._automata.add(automaton)
return automaton
def create_network(self, *, name: t.Optional[str] = None) -> Network:
"""
Creates a network with the given optional name and returns it.
"""
network = Network(self, name=name)
self._networks.add(network)
return network
def define_property(
self, name: str, expression: expressions.Expression
) -> PropertyDefinition:
"""
Defines a property on the modeling context.
"""
definition = PropertyDefinition(name, expression)
self._named_properties[name] = definition
return definition
| [2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 357, 34, 8, 13130, 12, 1238, 2481, 11, 10318, 283, 1044, 2059, 198, 2, 15069, 357, 34, 8, 13130, 12, 1238, 2481, 11, 5436, 26641, 666, 509, 9101, 1851... | 2.452423 | 7,367 |
# Generated by Django 2.0.1 on 2018-01-25 10:55
from django.db import migrations
| [2, 2980, 515, 416, 37770, 362, 13, 15, 13, 16, 319, 2864, 12, 486, 12, 1495, 838, 25, 2816, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628] | 2.766667 | 30 |
import praw
import time
from praw.exceptions import APIException
import re
# The main functions of the bot
print("About to run bot")
if __name__ == "__main__":
cache = []
main()
| [11748, 279, 1831, 198, 11748, 640, 198, 6738, 279, 1831, 13, 1069, 11755, 1330, 7824, 16922, 198, 11748, 302, 628, 198, 2, 383, 1388, 5499, 286, 262, 10214, 628, 198, 198, 4798, 7203, 8585, 284, 1057, 10214, 4943, 198, 361, 11593, 367... | 3.047619 | 63 |
import pytest
import requests
from src.articles import config
from tests.e2e.fake_authentication import authenticate
@pytest.mark.usefixtures('restart_api')
@pytest.mark.usefixtures('restart_api')
| [11748, 12972, 9288, 198, 11748, 7007, 198, 198, 6738, 12351, 13, 26845, 1330, 4566, 198, 6738, 5254, 13, 68, 17, 68, 13, 30706, 62, 41299, 3299, 1330, 8323, 5344, 628, 198, 31, 9078, 9288, 13, 4102, 13, 1904, 69, 25506, 10786, 2118, ... | 3.15625 | 64 |
# Generated by Django 3.1.4 on 2021-02-23 12:23
from django.db import migrations, models
| [2, 2980, 515, 416, 37770, 513, 13, 16, 13, 19, 319, 33448, 12, 2999, 12, 1954, 1105, 25, 1954, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628] | 2.84375 | 32 |
# logicPlan.py
# ------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In logicPlan.py, you will implement logic planning methods which are called by
Pacman agents (in logicAgents.py).
"""
import util
import sys
import logic
import game
pacman_str = 'P'
ghost_pos_str = 'G'
ghost_east_str = 'GE'
pacman_alive_str = 'PA'
class PlanningProblem:
"""
This class outlines the structure of a planning problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the planning problem.
"""
util.raiseNotDefined()
def getGhostStartStates(self):
"""
Returns a list containing the start state for each ghost.
Only used in problems that use ghosts (FoodGhostPlanningProblem)
"""
util.raiseNotDefined()
def getGoalState(self):
"""
Returns goal state for problem. Note only defined for problems that have
a unique goal state such as PositionPlanningProblem
"""
util.raiseNotDefined()
def tinyMazePlan(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def sentence1():
"""Returns a logic.Expr instance that encodes that the following expressions are all true.
A or B
(not A) if and only if ((not B) or C)
(not A) or (not B) or C
"""
A = logic.Expr('A')
B = logic.Expr('B')
C = logic.Expr('C')
return logic.conjoin([(A | B),
(~A % (~B | C)),
logic.disjoin([~A, ~B, C])])
def sentence2():
"""Returns a logic.Expr instance that encodes that the following expressions are all true.
C if and only if (B or D)
A implies ((not B) and (not D))
(not (B and (not C))) implies A
(not D) implies C
"""
A = logic.Expr('A')
B = logic.Expr('B')
C = logic.Expr('C')
D = logic.Expr('D')
return logic.conjoin([(C % (B | D)),
A >> (~B & ~D),
~(B & ~C) >> A,
~D >> C])
def sentence3():
"""Using the symbols WumpusAlive[1], WumpusAlive[0], WumpusBorn[0], and WumpusKilled[0],
created using the logic.PropSymbolExpr constructor, return a logic.PropSymbolExpr
instance that encodes the following English sentences (in this order):
The Wumpus is alive at time 1 if and only if the Wumpus was alive at time 0 and it was
not killed at time 0 or it was not alive and time 0 and it was born at time 0.
The Wumpus cannot both be alive at time 0 and be born at time 0.
The Wumpus is born at time 0.
"""
WumpusAlive_1 = logic.PropSymbolExpr('WumpusAlive', 1)
WumpusAlive_0 = logic.PropSymbolExpr('WumpusAlive', 0)
WumpusBorn_0 = logic.PropSymbolExpr('WumpusBorn', 0)
WumpusKilled_0 = logic.PropSymbolExpr('WumpusKilled', 0)
return logic.conjoin([WumpusAlive_1 % ((WumpusAlive_0 & ~WumpusKilled_0) | (~WumpusAlive_0 & WumpusBorn_0)),
~(WumpusAlive_0 & WumpusBorn_0),
WumpusBorn_0])
def findModel(sentence):
"""Given a propositional logic sentence (i.e. a logic.Expr instance), returns a satisfying
model if one exists. Otherwise, returns False.
"""
cnf = logic.to_cnf(sentence)
sol = logic.pycoSAT(cnf)
return sol
def atLeastOne(literals):
"""
Given a list of logic.Expr literals (i.e. in the form A or ~A), return a single
logic.Expr instance in CNF (conjunctive normal form) that represents the logic
that at least one of the literals in the list is true.
>>> A = logic.PropSymbolExpr('A')
>>> B = logic.PropSymbolExpr('B')
>>> symbols = [A, B]
>>> atleast1 = atLeastOne(symbols)
>>> model1 = {A:False, B:False}
>>> print logic.pl_true(atleast1,model1)
False
>>> model2 = {A:False, B:True}
>>> print logic.pl_true(atleast1,model2)
True
>>> model3 = {A:True, B:True}
>>> print logic.pl_true(atleast1,model2)
True
"""
final = logic.disjoin(literals)
return final
def atMostOne(literals):
"""
Given a list of logic.Expr literals, return a single logic.Expr instance in
CNF (conjunctive normal form) that represents the logic that at most one of
the expressions in the list is true.
"""
conj = []
for i in range(len(literals)):
for j in range(len(literals)):
if j > i:
conj.append(logic.disjoin([~literals[i], ~literals[j]]))
# print(logic.conjoin(conj))
return logic.conjoin(conj)
def exactlyOne(literals):
"""
Given a list of logic.Expr literals, return a single logic.Expr instance in
CNF (conjunctive normal form)that represents the logic that exactly one of
the expressions in the list is true.
"""
return atLeastOne(literals) & atMostOne(literals)
def extractActionSequence(model, actions):
"""
Convert a model in to an ordered list of actions.
model: Propositional logic model stored as a dictionary with keys being
the symbol strings and values being Boolean: True or False
Example:
>>> model = {"North[3]":True, "P[3,4,1]":True, "P[3,3,1]":False, "West[1]":True, "GhostScary":True, "West[3]":False, "South[2]":True, "East[1]":False}
>>> actions = ['North', 'South', 'East', 'West']
>>> plan = extractActionSequence(model, actions)
>>> print plan
['West', 'South', 'North']
"""
plan = []
for k in model:
if (logic.PropSymbolExpr.parseExpr(k)[0] in actions) and model[k] == True:
plan.append((logic.PropSymbolExpr.parseExpr(k)[0], int(logic.PropSymbolExpr.parseExpr(k)[1])))
return [xxx[0] for xxx in sorted(plan, key=lambda x: x[1])]
def pacmanSuccessorStateAxioms(x, y, t, walls_grid):
"""
Successor state axiom for state (x,y,t) (from t-1), given the board (as a
grid representing the wall locations).
Current <==> (previous position at time t-1) & (took action to move to x, y)
"""
listlit = []
actions = ['South', 'North', 'West', 'East']
xs = [x, x, x + 1, x - 1]
ys = [y + 1, y - 1, y, y]
for i in range(len(actions)):
if not walls_grid[xs[i]][ys[i]]:
listlit.append(
logic.PropSymbolExpr(pacman_str, xs[i], ys[i], t - 1) & logic.PropSymbolExpr(actions[i], t - 1))
return logic.to_cnf(logic.disjoin(listlit) % logic.PropSymbolExpr(pacman_str, x, y, t))
def positionLogicPlan(problem):
"""
Given an instance of a PositionPlanningProblem, return a list of actions that lead to the goal.
Available actions are game.Directions.{NORTH,SOUTH,EAST,WEST}
Note that STOP is not an available action.
"""
walls = problem.walls
width, height = problem.getWidth(), problem.getHeight()
"********CODE*******"
x_0, y_0 = problem.getStartState()
x_N, y_N = problem.getGoalState()
actions = ['South', 'North', 'West', 'East']
MAX_TIME = 50
knowledgeBase = logic.PropSymbolExpr(pacman_str, x_0, y_0, 0)
for t in range(0, MAX_TIME + 1):
checkGoal = logic.PropSymbolExpr(pacman_str, x_N, y_N, t + 1)
knowledgeBase = logic.conjoin(
[knowledgeBase, exactlyOne([logic.PropSymbolExpr(action, t) for action in actions])])
for x in range(1, width + 1):
for y in range(1, height + 1):
if t == 0 and (x != x_0 or y != y_0):
knowledgeBase = logic.conjoin([knowledgeBase, ~(logic.PropSymbolExpr(pacman_str, x, y, t))])
if not walls[x][y]:
knowledgeBase = logic.conjoin([knowledgeBase, pacmanSuccessorStateAxioms(x, y, t + 1, walls)])
isGoal = logic.conjoin(knowledgeBase, checkGoal)
model = findModel(isGoal)
if model:
return extractActionSequence(model, actions)
def foodLogicPlan(problem):
"""
Given an instance of a FoodPlanningProblem, return a list of actions that help Pacman
eat all of the food.
Available actions are game.Directions.{NORTH,SOUTH,EAST,WEST}
Note that STOP is not an available action.
"""
walls = problem.walls
width, height = problem.getWidth(), problem.getHeight()
"*** YOUR CODE HERE ***"
x_0, y_0 = problem.getStartState()[0]
foods = problem.getStartState()[1]
actions = ['South', 'North', 'West', 'East']
MAX_TIME = 50
knowledgeBase = logic.PropSymbolExpr(pacman_str, x_0, y_0, 0)
for t in range(0, MAX_TIME + 1):
knowledgeBase = logic.conjoin([knowledgeBase,
exactlyOne([logic.PropSymbolExpr(action, t) for action in actions])])
checkGoal = logic.PropSymbolExpr(pacman_str, x_0, y_0, 0)
for x in range(1, width + 1):
for y in range(1, height + 1):
if t == 0 and (x != x_0 or y != y_0):
knowledgeBase = logic.conjoin([knowledgeBase, ~(logic.PropSymbolExpr(pacman_str, x, y, t))])
if not walls[x][y]:
knowledgeBase = logic.conjoin([knowledgeBase, pacmanSuccessorStateAxioms(x, y, t + 1, walls)])
if foods[x][y]:
checkGoal = logic.conjoin([checkGoal,
logic.disjoin([logic.PropSymbolExpr(pacman_str, x, y, tt) for tt in range(0, t+2)])])
isGoal = logic.conjoin([checkGoal, knowledgeBase])
model = findModel(isGoal)
if model:
return extractActionSequence(model, actions)
# Abbreviations
plp = positionLogicPlan
flp = foodLogicPlan
# Some for the logic module uses pretty deep recursion on long expressions
sys.setrecursionlimit(100000)
| [2, 9156, 20854, 13, 9078, 198, 2, 220, 10541, 198, 2, 10483, 26426, 6188, 25, 220, 921, 389, 1479, 284, 779, 393, 9117, 777, 4493, 329, 198, 2, 9856, 4959, 2810, 326, 357, 16, 8, 345, 466, 407, 14983, 393, 7715, 198, 2, 8136, 11... | 2.430147 | 4,352 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 12:32:36 2016
@author: user
"""
import sys
sys.path.insert(0, '/Users/user/Desktop/repo_for_pyseries/pyseries/')
import pyseries.LoadingData as loading
import pyseries.Preprocessing as prep
import pyseries.Analysis as analysis
| [2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 7653, 220, 767, 1105, 25, 2624, 25, 2623, 1584, 198, 198, 31, 9800, 25, 2836, 198, 37811, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 2... | 2.8 | 100 |
"""
Definition to run a test profile
"""
from datetime import datetime, timedelta
from airflow import DAG
from InformaticaPlugin import ExecuteProfile
schedule_interval = None
default_args = {
'owner': 'Jac. Beekers',
'depends_on_past': False,
'email': ['noone@nowhere.no'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(seconds=30)
}
dag = DAG(
'run_testprofile',
start_date=datetime(2019, 11, 1),
schedule_interval=schedule_interval,
default_args=default_args)
profile = ExecuteProfile(
task_id = "task_testprofile",
profile_path = "/SchedulerTest/Profile_SchedulerTest",
dag=dag
)
| [37811, 198, 30396, 284, 1057, 257, 1332, 7034, 198, 37811, 198, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 45771, 1330, 360, 4760, 198, 6738, 45255, 1512, 64, 37233, 1330, 8393, 1133, 37046, 198, 198, 15952, 5... | 2.553506 | 271 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import time
import argparse
if __name__ == '__main__':
main()
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, ... | 3.1125 | 80 |
from argparse import ArgumentParser
from collections import OrderedDict
import pytest
import olutils as lib
import olutils.params as lib2
| [6738, 1822, 29572, 1330, 45751, 46677, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 11748, 12972, 9288, 198, 198, 11748, 25776, 26791, 355, 9195, 198, 11748, 25776, 26791, 13, 37266, 355, 9195, 17, 628, 628, 628] | 3.891892 | 37 |
# Generated by Django 3.2 on 2021-04-30 00:58
from django.db import migrations, models
import django.db.models.deletion
| [2, 2980, 515, 416, 37770, 513, 13, 17, 319, 33448, 12, 3023, 12, 1270, 3571, 25, 3365, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, 628] | 2.904762 | 42 |
import timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
| [11748, 640, 270, 198, 11748, 4738, 198, 198, 1640, 1312, 287, 2837, 357, 49388, 11, 1802, 830, 11, 939, 405, 2599, 198, 220, 256, 796, 640, 270, 13, 48801, 7203, 25120, 13, 25192, 9521, 7, 4, 67, 8, 287, 2124, 1, 4, 72, 11, 366... | 2.510067 | 149 |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union, List
import numpy as np
from rl_coach.filters.action.box_discretization import BoxDiscretization
from rl_coach.filters.action.partial_discrete_action_space_map import PartialDiscreteActionSpaceMap
from rl_coach.spaces import AttentionActionSpace, BoxActionSpace, DiscreteActionSpace
class AttentionDiscretization(PartialDiscreteActionSpaceMap):
"""
Given a box action space, this is used to discretize the space.
The discretization is achieved by creating a grid in the space with num_bins_per_dimension bins per dimension in the
space. Each discrete action is mapped to a single sub-box in the BoxActionSpace action space.
"""
| [2, 198, 2, 15069, 357, 66, 8, 2177, 8180, 10501, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, ... | 3.625714 | 350 |
from model.contact import Contact
testdata = [
Contact(firstname="", lastname="", address="", homephone="", mobilephone="", workphone="",
secondaryphone="", email1="", email2="", email3=""),
Contact(firstname="Ivan", lastname="Ivanov", address="SomeWhere St., 4", homephone="+111111111",
mobilephone="(222)222222", workphone="333-333-333", secondaryphone="444444444",
email1="ii@email1.email", email2="ii@email2.email", email3="ii@email3.email")
] | [6738, 2746, 13, 32057, 1330, 14039, 628, 198, 9288, 7890, 796, 685, 198, 220, 220, 220, 14039, 7, 11085, 3672, 2625, 1600, 938, 3672, 2625, 1600, 2209, 2625, 1600, 1363, 4862, 2625, 1600, 5175, 4862, 2625, 1600, 670, 4862, 2625, 1600, ... | 2.636364 | 187 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tools.roi_formatter.py
"""Formatting tools for roi data (dicompyler, Shapely, DVHA)"""
# Copyright (c) 2016-2019 Dan Cutright
# This file is part of DVH Analytics, released under a BSD license.
# See the file LICENSE included with this distribution, also
# available at https://github.com/cutright/DVH-Analytics
from shapely.geometry import Polygon, Point, MultiPolygon
from shapely import speedups
import numpy as np
# "sets of points" objects
# dictionaries using str(z) as keys
# where z is the slice or z in DICOM coordinates
# each item is a list of points representing a polygon,
# each point is a 3-item list [x, y, z]
#
# roi_coord_string from the SQL database
# Each contour is delimited with a ':'
# For example, ring ROIs will have an outer contour with a negative
# inner contour
# Each contour is a csv of of x,y,z values in the following format
# z,x1,y1,x2,y2...xn,yn
# Each contour has the same z coordinate for all points
MIN_SLICE_THICKNESS = 2 # Update method to pull from DICOM
# Enable shapely calculations using C, as opposed to the C++ default
if speedups.available:
speedups.enable()
def get_planes_from_string(roi_coord_string):
"""
Parameters
----------
roi_coord_string : string: str
roi string representation of an roi as formatted in the SQL database
Returns
-------
dict
a "sets of points" formatted dictionary
"""
planes = {}
contours = roi_coord_string.split(":")
for contour in contours:
contour = contour.split(",")
z = contour.pop(0)
z = round(float(z), 2)
z_str = str(z)
if z_str not in list(planes):
planes[z_str] = []
i, points = 0, []
while i < len(contour):
point = [float(contour[i]), float(contour[i + 1]), z]
points.append(point)
i += 2
planes[z_str].append(points)
return planes
def points_to_shapely_polygon(sets_of_points):
"""
Parameters
----------
sets_of_points : dict
a "sets of points" formatted dictionary
Returns
-------
type
a composite polygon as a shapely object (either polygon or multipolygon)
"""
composite_polygon = None
for set_of_points in sets_of_points:
if len(set_of_points) > 3:
points = [(point[0], point[1]) for point in set_of_points]
points.append(
points[0]
) # Explicitly connect the final point to the first
# if there are multiple sets of points in a slice, each set is a polygon,
# interior polygons are subtractions, exterior are addition
# Only need to check one point for interior vs exterior
current_polygon = Polygon(points).buffer(0) # clean stray points
if composite_polygon:
if Point((points[0][0], points[0][1])).disjoint(
composite_polygon
):
composite_polygon = composite_polygon.union(
current_polygon
)
else:
composite_polygon = composite_polygon.symmetric_difference(
current_polygon
)
else:
composite_polygon = current_polygon
return composite_polygon
def get_roi_coordinates_from_string(roi_coord_string):
"""
Parameters
----------
roi_coord_string : string: str
roi string representation of an roi as formatted in the SQL database
Returns
-------
list
a list of numpy arrays, each array is the x, y, z coordinates of the given point
"""
roi_coordinates = []
contours = roi_coord_string.split(":")
for contour in contours:
contour = contour.split(",")
z = contour.pop(0)
z = float(z)
i = 0
while i < len(contour):
roi_coordinates.append(
np.array((float(contour[i]), float(contour[i + 1]), z))
)
i += 2
return roi_coordinates
def get_roi_coordinates_from_planes(sets_of_points):
"""
Parameters
----------
sets_of_points : dict
a "sets of points" formatted dictionary
Returns
-------
list
a list of numpy arrays, each array is the x, y, z coordinates of the given point
"""
roi_coordinates = []
for z in list(sets_of_points):
for polygon in sets_of_points[z]:
for point in polygon:
roi_coordinates.append(
np.array((point[0], point[1], point[2]))
)
return roi_coordinates
def get_roi_coordinates_from_shapely(shapely_dict, sample_res=None):
"""
Parameters
----------
shapely_dict : dict
output from get_shapely_from_sets_of_points
sample_res : int, float
If set to a numeric value, sample each polygon with this resolution (mm)
Returns
-------
list
a list of numpy arrays, each array is the x, y, z coordinates of the given point
"""
roi_coordinates = []
for i, shape in enumerate(shapely_dict["polygon"]):
multi_polygon = shape if isinstance(shape, MultiPolygon) else [shape]
for polygon in multi_polygon:
if isinstance(sample_res, (int, float)):
x, y = get_contour_sample(polygon, sample_res)
else:
x, y = polygon.exterior.coords.xy
for j, point_x in enumerate(x):
roi_coordinates.append(
np.array((point_x, y[j], shapely_dict["z"][i]))
)
return roi_coordinates
def dicompyler_roi_coord_to_db_string(coord):
"""
Parameters
----------
coord :
dicompyler structure coordinates from GetStructureCoordinates()
Returns
-------
str
roi string representation of an roi as formatted in the SQL database (roi_coord_string)
"""
contours = []
for z in coord:
for plane in coord[z]:
points = [z]
for point in plane["data"]:
points.append(str(round(point[0], 3)))
points.append(str(round(point[1], 3)))
contours.append(",".join(points))
return ":".join(contours)
def get_shapely_from_sets_of_points(sets_of_points, tolerance=None, preserve_topology=True):
"""
Parameters
----------
sets_of_points : dict
a "sets of points" formatted dictionary
tolerance : bool, optional
If set to a number, will use Shapely's simplify on each contour with
the given tolerance
preserve_topology : bool, optional
Passed to Shapely's simplify if ``simplify_tolerance`` is set
Returns
-------
dict
roi_slices which is a dictionary of lists of z, thickness, and a Shapely Polygon class object
"""
roi_slices = {"z": [], "thickness": [], "polygon": []}
sets_of_points_keys = list(sets_of_points)
sets_of_points_keys.sort()
all_z_values = [round(float(z), 2) for z in sets_of_points_keys]
thicknesses = np.abs(np.diff(all_z_values))
if len(thicknesses):
thicknesses = np.append(thicknesses, np.min(thicknesses))
else:
thicknesses = np.array([MIN_SLICE_THICKNESS])
for z in sets_of_points:
thickness = thicknesses[all_z_values.index(round(float(z), 2))]
shapely_roi = points_to_shapely_polygon(sets_of_points[z])
if shapely_roi:
if tolerance is not None:
shapely_roi = shapely_roi.simplify(tolerance, preserve_topology)
roi_slices["z"].append(round(float(z), 2))
roi_slices["thickness"].append(thickness)
roi_slices["polygon"].append(shapely_roi)
return roi_slices
def dicompyler_roi_to_sets_of_points(coord):
"""
Parameters
----------
coord :
dicompyler structure coordinates from GetStructureCoordinates()
Returns
-------
dict
a "sets of points" formatted dictionary
"""
all_points = {}
for z in coord:
all_points[z] = []
for plane in coord[z]:
            plane_points = [
                [float(point[0]), float(point[1])] for point in plane["data"]
            ]
if len(plane_points) > 2:
all_points[z].append(plane_points)
return all_points
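# Illustrative "sets of points" structure (values made up): keys are the slice
# z-coordinates (as strings) and each value is a list of polygons, each polygon
# a list of [x, y] points.
# sets_of_points = {'10.0': [[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0]]],
#                   '12.5': [[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0]]]}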
def get_contour_sample(polygon, dth_res=0.5) -> tuple:
"""Get 3D points uniformly distributed in the perimeter space
Parameters
----------
polygon : Polygon
shapely object
dth_res : int, float
Sampling distance in perimeter space (mm)
Returns
-------
np.ndarray
x coordinates of sampled contour
np.ndarray
y coordinates of sampled contour
"""
# Get coordinates of polygon and index array
x, y = polygon.exterior.coords.xy
indices = np.arange(len(x))
# cumulative path length
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
num_samples = int(np.floor(s[-1] / dth_res))
# path length space
sample_s = np.arange(num_samples) * dth_res
# path length space to index space
sample_i = np.interp(sample_s, s, indices[1:])
# Get x and y coordinates of sampled index space
sample_x = np.interp(sample_i, indices, x)
sample_y = np.interp(sample_i, indices, y)
return sample_x, sample_y
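# Illustrative usage (assumes shapely is available, as the type hints above imply):
# >>> from shapely.geometry import Polygon
# >>> square = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
# >>> xs, ys = get_contour_sample(square, dth_res=2.0)
# >>> len(xs)   # 40 mm perimeter sampled every 2 mm
# 20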
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
4899,
13,
305,
72,
62,
687,
1436,
13,
9078,
198,
37811,
26227,
889,
4899,
329,
686,
72,
1366,
357,
67,
291,... | 2.287574 | 4,225 |
"""Update the pday column
Some care has to be taken here so that trace values do not accumulate when
there is actual measurable precipitation.  Eventually, the DSM or other summary
messages arrive and overwrite the trouble.  Run from RUN_10MIN.sh
"""
from __future__ import print_function
import datetime
from pyiem.util import get_dbconn
from pyiem.reference import TRACE_VALUE
def main():
"""Go!"""
pgconn = get_dbconn('iem')
icursor = pgconn.cursor()
icursor2 = pgconn.cursor()
now = datetime.datetime.now()
yyyy = now.year
# Run for the previous hour, so that we don't skip totaling up 11 PM
icursor.execute("""
WITH obs as (
SELECT s.iemid, date(valid at time zone s.tzname) as d,
max(phour) as rain,
extract(hour from (valid - '1 minute'::interval)) as hour
from current_log c, stations s
WHERE (s.network IN ('AWOS') or s.network ~* 'ASOS') and
c.iemid = s.iemid and
date(valid at time zone s.tzname) =
date((now() - '1 hour'::interval) at time zone s.tzname)
and phour > 0
GROUP by s.iemid, hour, d
), agg as (
select o.iemid, o.d, sum(rain) as precip
from obs o JOIN stations s on
(s.iemid = o.iemid) GROUP by o.iemid, o.d
), agg2 as (
SELECT iemid, d,
case when precip >= 0.01 then
round(precip::numeric, 2) else precip end as pday
from agg
)
UPDATE summary_"""+str(yyyy)+""" s
SET pday =
case when a.pday < 0.009 and a.pday > 0 then %s else a.pday end
FROM agg2 a
WHERE s.iemid = a.iemid and s.day = a.d and
(s.pday is null or s.pday != a.pday)
""", (TRACE_VALUE, ))
icursor2.close()
icursor.close()
pgconn.commit()
if __name__ == '__main__':
main()
| [
37811,
10260,
262,
279,
820,
5721,
198,
198,
4366,
1337,
468,
284,
307,
925,
994,
884,
326,
12854,
3815,
466,
407,
29915,
618,
198,
8117,
389,
4036,
40757,
18812,
13,
220,
16178,
11,
262,
37297,
393,
584,
10638,
198,
37348,
1095,
1282... | 2.20023 | 869 |
""" Module to send metrics to storage systems. """
# Raspi-sump, a sump pump monitoring system.
# Al Audet
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html
import configparser
from raspisump import log
from influxdb import InfluxDBClient
from datetime import datetime
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")
def something(metrics):
'''receives metrics from sensor collection function,
determines and calls storage mechanisms.'''
def csv_writer(value, csv_file):
'''Store data using original csv method.'''
log.log_reading(value, csv_file)
def zabbix_writer(metrics):
'''Store data using zabbix trapper method.'''
configs = {
"zabbix_host": config.get("storage", "zabbix_host")
}
def influxdb_writer(values):
'''Store data using influxdb method.'''
configs = {
"influxdb_host": config.get("storage", "influxdb_host"),
"influxdb_port": config.get("storage", "influxdb_port"),
"influxdb_username": config.get("storage", "influxdb_username"),
"influxdb_password": config.get("storage", "influxdb_password"),
"influxdb_database": config.get("storage", "influxdb_database"),
"influxdb_measurement": config.get("storage", "influxdb_measurement"),
"influxdb_location": config.get("storage", "influxdb_location")
}
dbClient = InfluxDBClient(host=configs["influxdb_host"], port=configs["influxdb_port"], username=configs["influxdb_username"], password=configs["influxdb_password"], database=configs["influxdb_database"])
jbody = []
jbody.append(
{
"measurement": configs["influxdb_measurement"],
"tags": {
"Location": configs["influxdb_location"]
},
"fields": {
"waterlevel": values["depth"],
"watertemp": values["temp"]
}
}
)
dbResult = dbClient.write_points(jbody)
#log errors to some log file
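# Illustrative [storage] section of raspisump.conf assumed by the config.get()
# calls above (keys are taken from the code; the values are placeholders):
#
# [storage]
# zabbix_host = 192.168.1.10
# influxdb_host = 192.168.1.20
# influxdb_port = 8086
# influxdb_username = raspisump
# influxdb_password = secret
# influxdb_database = sump
# influxdb_measurement = water
# influxdb_location = basement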
| [
37811,
19937,
284,
3758,
20731,
284,
6143,
3341,
13,
37227,
198,
198,
2,
371,
5126,
72,
12,
82,
931,
11,
257,
264,
931,
8901,
9904,
1080,
13,
198,
2,
978,
7591,
316,
198,
2,
2638,
1378,
2503,
13,
23289,
43588,
13,
2398,
14,
81,
... | 2.528588 | 857 |
# @ copyright SPARKZZZ
"""
idk
"""
from telethon.tl.functions.channels import LeaveChannelRequest
from userbot.utils import admin_cmd
import time
@sparkzzz.on(admin_cmd(pattern="idk", outgoing=True))
| [
2,
2488,
6634,
6226,
14175,
30148,
57,
198,
198,
37811,
198,
198,
312,
74,
198,
198,
37811,
628,
198,
198,
6738,
5735,
400,
261,
13,
28781,
13,
12543,
2733,
13,
354,
8961,
1330,
17446,
29239,
18453,
198,
6738,
2836,
13645,
13,
26791,
... | 2.646341 | 82 |
########################################################################################################################
# Module: inference/resampling.py
# Description: Resampling schemes for converting weighted particles (series of positions/edges/distances) to
# unweighted. Notably multinomial resampling and fixed-lag resampling (with stitching).
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from typing import Union, Tuple
import numpy as np
from networkx.classes import MultiDiGraph
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.model import MapMatchingModel
from bmm.src.tools.edges import get_geometry
def multinomial(particles: Union[list, np.ndarray, MMParticles],
weights: np.ndarray) -> Union[list, np.ndarray, MMParticles]:
"""
Full multinomial resampling scheme. Lengths of particles and weights must conform.
:param particles: list-like or MMParticles object to be resampled
:param weights: resampling probabilities
:return: unweighted collection of objects in the same form as input
"""
# Number of samples
n = len(weights)
# Check weights are normalised
weights_sum = np.sum(weights)
if weights_sum != 1:
weights /= weights_sum
# Sample indices according to weights (with replacement)
sampled_indices = np.random.choice(n, n, replace=True, p=weights)
# Update and output particles
if isinstance(particles, MMParticles):
if particles.n != n:
raise ValueError("Length of MMParticles to be resampled and weights do not conform")
out_particles = particles.copy()
out_particles.particles = [out_particles.particles[i] for i in sampled_indices]
if hasattr(out_particles, 'prior_norm'):
if out_particles.prior_norm.shape[1] == n:
out_particles.prior_norm = out_particles.prior_norm[:, sampled_indices]
else:
out_particles.prior_norm = out_particles.prior_norm[sampled_indices]
elif isinstance(particles, np.ndarray):
if len(particles) != n:
raise ValueError("Length of particles (numpy.ndarray) to be resampled and weights do not conform")
out_particles = particles[sampled_indices]
else:
if len(particles) != n:
raise ValueError("Length of particles to be resampled and weights do not conform")
out_particles = [particles[i] for i in sampled_indices]
return out_particles
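# Illustrative usage (assumed values): resample four weighted positions into an
# unweighted set; the output is one possible draw, not a deterministic result.
# >>> positions = np.array([0.0, 1.0, 2.0, 3.0])
# >>> multinomial(positions, np.array([0.1, 0.2, 0.3, 0.4]))
# array([3., 1., 3., 2.])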
def full_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
mm_model: MapMatchingModel,
return_ess_stitch: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
"""
Evaluate full interacting weights, normalise and sample (stitch) for a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting weights for new_particles
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param mm_model: MapMatchingModel
:param return_ess_stitch: whether to calculate and return the ESS of the full stitching weights
:return: stitched particle (and ess_stitch if return_ess_stitch)
"""
n = len(new_particles)
# Possible particles to be resampled placeholder
newer_particles_adjusted = [None] * n
# Stitching distances
new_stitching_distances = np.empty(n)
new_stitching_distances[:] = np.nan
new_cart_coords = np.empty((n, 2))
for k in range(n):
# if adjusted_weights[k] == 0:
# continue
if new_particles[k] is None:
continue
new_particle = new_particles[k].copy()
# Check both particles start from same edge
if np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
# Check that new edge overtakes fixed edge. i.e. distance isn't negative
if np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < (last_edge_fixed[4] - 1e-6):
continue
new_cart_coords[k] = new_particle[min_resample_time_indices[k], 5:7]
# Calculate distance modification
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[k] + 1), -1] += change_dist
new_stitching_distances[k] = new_particle[min_resample_time_indices[k], -1]
# Store adjusted particle
newer_particles_adjusted[k] = new_particle[1:]
# Calculate adjusted weight
res_weights = np.zeros(n)
possible_inds = ~np.isnan(new_stitching_distances)
new_stitching_distances_trimmed = new_stitching_distances[possible_inds]
new_cart_coords_trimmed = new_cart_coords[possible_inds]
adjusted_weights_trimmed = adjusted_weights[possible_inds]
if adjusted_weights_trimmed.sum() == 0:
adjusted_weights_trimmed[:] = 1
stitched_distance_prior_evals_trimmed = mm_model.distance_prior_evaluate(new_stitching_distances_trimmed,
stitch_time_interval)
stitched_deviation_prior_trimmed = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_cart_coords_trimmed,
new_stitching_distances_trimmed)
res_weights[possible_inds] = adjusted_weights_trimmed \
* stitched_distance_prior_evals_trimmed \
* stitched_deviation_prior_trimmed
# Normalise adjusted resample weights
with np.errstate(invalid='ignore'):
res_weights /= res_weights.sum()
# If only particle on fixed edge resample full trajectory
if max(res_weights) == 0 or np.all(np.isnan(res_weights)):
out_particle = None
ess_stitch = 1 / np.sum(adjusted_weights ** 2)
# Otherwise fixed-lag resample and stitch
else:
# Resample index
res_index = np.random.choice(n, 1, p=res_weights)[0]
# Update output
out_particle = np.append(fixed_particle, newer_particles_adjusted[res_index], axis=0)
# Track ESS
ess_stitch = 1 / np.sum(res_weights ** 2)
if return_ess_stitch:
return out_particle, ess_stitch
else:
return out_particle
def rejection_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
dist_prior_bound: float,
mm_model: MapMatchingModel,
max_rejections: int,
break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
"""
Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting stitching weights
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param dist_prior_bound: bound on distance transition density (given positive if break_on_zero)
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt, if none succeed return None
:param break_on_zero: whether to return 0 if new_stitching_distance=0
:return: stitched particle
"""
n = len(new_particles)
for reject_ind in range(max_rejections):
new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
new_particle = new_particles[new_index].copy()
# Reject if new_particle starts from different edge
if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
continue
# Reject if new_particle doesn't overtake fixed_particles
elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < last_edge_fixed[4]:
continue
# Calculate stitching distance
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[new_index] + 1), -1] += change_dist
new_stitching_distance = new_particle[min_resample_time_indices[new_index], -1]
if break_on_zero and new_stitching_distance < 1e-5:
return 0
# Evaluate distance prior
new_stitching_distance_prior = mm_model.distance_prior_evaluate(new_stitching_distance, stitch_time_interval)
new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_particle[None,
min_resample_time_indices[new_index], 5:7],
new_stitching_distance)
accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
return out_particle
return None
def fixed_lag_stitch_post_split(graph: MultiDiGraph,
fixed_particles: MMParticles,
new_particles: MMParticles,
new_weights: np.ndarray,
mm_model: MapMatchingModel,
max_rejections: int) -> MMParticles:
"""
Stitch together fixed_particles with samples from new_particles according to joint fixed-lag posterior
:param graph: encodes road network, simplified and projected to UTM
:param fixed_particles: trajectories before stitching time (won't be changed)
:param new_particles: trajectories after stitching time (to be resampled)
        (overlapping fixed_particles by one observation time)
:param new_weights: weights applied to new_particles
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:return: MMParticles object
"""
n = len(fixed_particles)
full_fixed_lag_resample = max_rejections == 0
min_resample_time = new_particles.observation_times[1]
min_resample_time_indices = [np.where(particle[:, 0] == min_resample_time)[0][0] if particle is not None else 0
for particle in new_particles]
    original_stitching_distances = np.array([new_particles[j][min_resample_time_indices[j], -1]
if new_particles[j] is not None else 0 for j in range(n)])
max_fixed_time = fixed_particles._first_non_none_particle[-1, 0]
stitch_time_interval = min_resample_time - max_fixed_time
    distance_prior_evals = mm_model.distance_prior_evaluate(original_stitching_distances, stitch_time_interval)
fixed_last_coords = np.array([part[0, 5:7] if part is not None else [0, 0] for part in new_particles])
new_coords = np.array([new_particles[j][min_resample_time_indices[j], 5:7]
if new_particles[j] is not None else [0, 0] for j in range(n)])
deviation_prior_evals = mm_model.deviation_prior_evaluate(fixed_last_coords,
new_coords,
                                                              original_stitching_distances)
original_prior_evals = np.zeros(n)
pos_inds = new_particles.prior_norm > 1e-5
original_prior_evals[pos_inds] = distance_prior_evals[pos_inds] \
* deviation_prior_evals[pos_inds] \
* new_particles.prior_norm[pos_inds]
out_particles = fixed_particles
# Initiate some required quantities depending on whether to do rejection sampling or not
if full_fixed_lag_resample:
ess_stitch_track = np.zeros(n)
# distance_prior_bound = None
# adjusted_weights = None
else:
ess_stitch_track = None
pos_prior_bound = mm_model.pos_distance_prior_bound(stitch_time_interval)
prior_bound = mm_model.distance_prior_bound(stitch_time_interval)
store_out_parts = fixed_particles.copy()
adjusted_weights = new_weights.copy()
adjusted_weights[original_prior_evals > 1e-5] /= original_prior_evals[original_prior_evals > 1e-5]
adjusted_weights[original_prior_evals < 1e-5] = 0
adjusted_weights /= np.sum(adjusted_weights)
resort_to_full = False
# Iterate through particles
for j in range(n):
fixed_particle = fixed_particles[j]
# Check if particle is None
# i.e. fixed lag approx has failed
if fixed_particle is None:
out_particles[j] = None
if full_fixed_lag_resample:
ess_stitch_track[j] = 0
continue
last_edge_fixed = fixed_particle[-1]
last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
last_edge_fixed_length = last_edge_fixed_geom.length
if full_fixed_lag_resample:
# Full resampling
out_particles[j], ess_stitch_track[j] = full_fixed_lag_stitch(fixed_particle,
last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
True)
else:
# Rejection sampling
out_particles[j] = rejection_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles, adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
pos_prior_bound,
mm_model,
max_rejections,
break_on_zero=True)
if out_particles[j] is None:
# Rejection sampling reached max_rejections -> try full resampling
out_particles[j] = full_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
False)
if isinstance(out_particles[j], int) and out_particles[j] == 0:
resort_to_full = True
break
if resort_to_full:
for j in range(n):
fixed_particle = store_out_parts[j]
# Check if particle is None
# i.e. fixed lag approx has failed
if fixed_particle is None:
out_particles[j] = None
if full_fixed_lag_resample:
ess_stitch_track[j] = 0
continue
last_edge_fixed = fixed_particle[-1]
last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
last_edge_fixed_length = last_edge_fixed_geom.length
# Rejection sampling with full bound
out_particles[j] = rejection_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles, adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
prior_bound,
mm_model,
max_rejections)
if out_particles[j] is None:
# Rejection sampling reached max_rejections -> try full resampling
out_particles[j] = full_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
False)
if full_fixed_lag_resample:
out_particles.ess_stitch = np.append(out_particles.ess_stitch, np.atleast_2d(ess_stitch_track), axis=0)
# Do full resampling where fixed lag approx broke
none_inds = np.array([p is None for p in out_particles])
good_inds = ~none_inds
n_good = good_inds.sum()
if n_good == 0:
raise ValueError("Map-matching failed: all stitching probabilities zero,"
"try increasing the lag or number of particles")
if n_good < n:
none_inds_res_indices = np.random.choice(n, n - n_good, p=good_inds / n_good)
for i, j in enumerate(np.where(none_inds)[0]):
out_particles[j] = out_particles[none_inds_res_indices[i]]
if full_fixed_lag_resample:
out_particles.ess_stitch[-1, none_inds] = 1 / (new_weights ** 2).sum()
return out_particles
def fixed_lag_stitching(graph: MultiDiGraph,
mm_model: MapMatchingModel,
particles: MMParticles,
weights: np.ndarray,
lag: int,
max_rejections: int) -> MMParticles:
"""
Split particles and resample (with stitching) coordinates after a certain time - defined by the lag parameter.
:param graph: encodes road network, simplified and projected to UTM
:param mm_model: MapMatchingModel
:param particles: MMParticles object
:param weights: shape = (n,) weights at latest observation time
:param lag: fixed lag for resampling/stitching
None indicates full multinomial resampling
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:return: MMParticles object
"""
# Bool whether to store ESS stitch quantities
full_fixed_lag_resample = max_rejections == 0
# Check weights are normalised
weights_sum = np.sum(weights)
if weights_sum != 1:
weights /= weights_sum
# Extract basic quantities
observation_times = particles.observation_times
m = len(observation_times) - 1
n = particles.n
ess_pf = 1 / np.sum(weights ** 2)
# Initiate output
out_particles = particles.copy()
# If not reached lag yet do standard resampling
if lag is None or m <= lag:
if full_fixed_lag_resample:
out_particles.ess_stitch = np.append(particles.ess_stitch, np.ones((1, n)) * ess_pf,
axis=0)
return multinomial(out_particles, weights)
# Largest time not to be resampled
max_fixed_time = observation_times[m - lag - 1]
# Pre-process a bit
fixed_particles = out_particles.copy()
new_particles = out_particles.copy()
max_fixed_time_indices = [0] * n
for j in range(n):
if out_particles[j] is None:
continue
max_fixed_time_indices[j] = np.where(out_particles[j][:, 0] == max_fixed_time)[0][0]
fixed_particles[j] = out_particles[j][:(max_fixed_time_indices[j] + 1)]
new_particles[j] = out_particles[j][max_fixed_time_indices[j]:]
new_particles.prior_norm = out_particles.prior_norm[m - lag - 1]
# Stitch
out_particles = fixed_lag_stitch_post_split(graph,
fixed_particles,
new_particles,
weights,
mm_model,
max_rejections)
return out_particles
| [
29113,
29113,
29113,
14468,
7804,
198,
2,
19937,
25,
32278,
14,
411,
321,
11347,
13,
9078,
198,
2,
12489,
25,
1874,
321,
11347,
16546,
329,
23202,
26356,
13166,
357,
25076,
286,
6116,
14,
276,
3212,
14,
17080,
1817,
8,
284,
198,
2,
... | 2.023757 | 11,281 |
"""
robots.py
Robots used to manoeuvre around in the Game-environment.
"""
from numpy import pi, sqrt
from configs.bot_config import BotConfig
from environment.entities.sensors import AngularSensor, DeltaDistanceSensor, DistanceSensor, ProximitySensor
from utils.vec2d import angle_to_vec, Vec2d
class MarXBot:
"""The FootBot is the main bot used in this project. It is a simple circular robot with two wheels on its sides."""
__slots__ = (
"game",
"pos", "prev_pos", "init_pos", "init_angle", "angle", "prev_angle", "radius",
"n_proximity", "n_angular", "n_delta_distance", "n_distance",
"sensors", "active_sensors",
)
def __init__(self,
game, # Type not specified due to circular imports
r: float = 0
):
"""
        Create a new MarXBot object.
:param game: Reference to the game in which the robot is created [Game]
:param r: Radius of the circular robot
"""
# Game specific parameter
self.game = game # Game in which robot runs
# Robot specific parameters (Placeholders)
self.pos = Vec2d(0, 0) # Current position
self.init_pos = Vec2d(0, 0) # Initial position
self.prev_pos = Vec2d(0, 0) # Previous current position
self.angle: float = 0 # Current angle
self.init_angle: float = 0 # Initial angle
self.prev_angle: float = 0 # Previous angle
self.radius: float = r if r else game.bot_config.radius # Radius of the bot
# Container of all the sensors
self.sensors: dict = dict()
# Counters for number of sensors used
self.n_proximity: int = 0
self.n_angular: int = 0
self.n_delta_distance: int = 0
self.n_distance: int = 0
# Create the sensors (fixed order!)
self.create_proximity_sensors(cfg=game.bot_config)
self.create_angular_sensors(cfg=game.bot_config)
self.create_delta_distance_sensor(cfg=game.bot_config)
self.add_distance_sensor()
# Number of distance-sensors must always be equal to 1
assert self.n_distance == 1
# Set all the sensors as active initially
self.active_sensors = set(self.sensors.keys())
# ------------------------------------------------> MAIN METHODS <------------------------------------------------ #
def drive(self, dt: float, lw: float, rw: float):
"""
Update the robot's position and orientation based on the action of the wheels.
:param dt: Delta time (must always be positioned first)
:param lw: Speed of the left wheel, float [-1,1]
:param rw: Speed of the right wheel, float [-1,1]
"""
# Constraint the inputs
lw = max(min(lw, 1), -1)
rw = max(min(rw, 1), -1)
# Update previous state
self.prev_pos.x, self.prev_pos.y = self.pos.x, self.pos.y
self.prev_angle = self.angle
# Update angle is determined by the speed of both wheels
self.angle += (rw - lw) * self.game.bot_config.turning_speed * dt
self.angle %= 2 * pi
# Update position is the average of the two wheels times the maximum driving speed
self.pos += angle_to_vec(self.angle) * float((((lw + rw) / 2) * self.game.bot_config.driving_speed * dt))
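        # Illustrative behaviour: lw=rw=1 drives straight ahead at driving_speed,
        # lw=-1, rw=1 turns in place (the average wheel speed is zero, so the
        # position is unchanged), and lw=1, rw=0 follows a curve at half the top speed.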
def get_sensor_readings(self, close_walls: set = None):
"""List of the current sensory-readings."""
for i in self.active_sensors: self.sensors[i].measure(close_walls)
return [self.sensors[i].value for i in sorted(self.active_sensors)]
def get_sensor_readings_distance(self):
"""Value of current distance-reading."""
sensor: DistanceSensor = self.sensors[len(self.sensors) - 1] # Distance is always the last sensor
assert type(sensor) == DistanceSensor
return sensor.value * sensor.normalizer
def reset(self):
"""Put the robot back to its initial parameters."""
self.pos.x = self.init_pos.x
self.pos.y = self.init_pos.y
self.prev_pos.x = self.init_pos.x
self.prev_pos.y = self.init_pos.y
self.angle = self.init_angle
self.prev_angle = self.init_angle
# -----------------------------------------------> SENSOR METHODS <----------------------------------------------- #
def add_angular_sensors(self, clockwise: bool = True):
"""
        Add an angular sensor to the agent and give it an id one greater than the last sensor added, or 0 if it is the
first sensor that is added.
"""
self.sensors[len(self.sensors)] = AngularSensor(
sensor_id=len(self.sensors),
game=self.game,
clockwise=clockwise)
self.n_angular += 1
def add_delta_distance_sensor(self):
"""Single distance sensor which determines distance between agent's center and target's center."""
self.sensors[len(self.sensors)] = DeltaDistanceSensor(
sensor_id=len(self.sensors),
game=self.game)
self.n_delta_distance += 1
def add_distance_sensor(self):
"""Single distance sensor which determines distance between agent's center and target's center."""
self.sensors[len(self.sensors)] = DistanceSensor(
sensor_id=len(self.sensors),
normalizer=sqrt((self.game.game_config.x_axis - 1) ** 2 + (self.game.game_config.y_axis - 1) ** 2),
game=self.game)
self.n_distance += 1
def add_proximity_sensor(self, angle: float):
"""
        Add a proximity sensor to the agent and give it an id one greater than the last sensor added, or 0 if it is
the first sensor that is added.
:param angle: Relative angle to the robot's facing-direction
* pi / 2 = 90° to the left of the robot
* 0 = the same direction as the robot is facing
* -pi / 2 = 90° to the right of the robot
"""
self.sensors[len(self.sensors)] = ProximitySensor(
sensor_id=len(self.sensors),
game=self.game,
angle=angle,
pos_offset=self.game.bot_config.radius,
max_dist=self.game.bot_config.ray_distance)
self.n_proximity += 1
def create_angular_sensors(self, cfg: BotConfig):
"""
        Two angular sensors that measure the angle between the agent's heading and the direction from the agent to the
        target 'as the crow flies'. One measures this angle clockwise, the other counterclockwise.
"""
for clockwise in cfg.angular_dir: self.add_angular_sensors(clockwise=clockwise)
def create_delta_distance_sensor(self, cfg: BotConfig):
"""Add a delta-distance sensor which measures the difference in distance to the target each time-point."""
if cfg.delta_dist_enabled: self.add_delta_distance_sensor()
def create_proximity_sensors(self, cfg: BotConfig):
"""
13 proximity sensors, which measure the distance between the agent and an object, if this object is within 0.5
        meters of distance. The proximity sensors are not evenly spaced, since the fact that the robot has a front is
        exploited. Sensors are added from the left side of the robot to the right.
"""
for angle in cfg.prox_angles: self.add_proximity_sensor(angle=angle)
def set_active_sensors(self, connections: set):
"""
Update all the sensor keys used by the robot.
:param connections: Set of all connections in tuple format (sending node, receiving node)
"""
self.active_sensors = get_active_sensors(connections=connections, total_input_size=len(self.sensors))
def get_active_sensors(connections: set, total_input_size: int):
"""Get a set of all the used input-sensors based on the connections. The distance sensor is always used."""
# Exploit the fact that sensor inputs have negative connection keys
used = {a + total_input_size for (a, _) in connections if a < 0}
used.add(total_input_size - 1) # Always use the distance sensor
return used
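# Illustrative mapping (assumed NEAT-style negative input keys): with 16 sensors,
# a connection (-16, 0) reads sensor index 0 and (-3, 2) reads index 13; the
# distance sensor (index 15) is always included.
# >>> get_active_sensors({(-16, 0), (-3, 2)}, total_input_size=16)
# {0, 13, 15}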
def get_number_of_sensors(cfg: BotConfig):
"""Get the number of sensors mounted on the robot."""
return len(cfg.prox_angles) + len(cfg.angular_dir) + int(cfg.delta_dist_enabled) + 1
def get_snapshot(cfg: BotConfig):
"""
Get the snapshot of the current robot-configuration. This method mimics the 'Create the sensors' section in the
MarXBot creation process.
"""
sorted_names = []
# Proximity sensors
for a in cfg.prox_angles: sorted_names.append(str(ProximitySensor(game=None, max_dist=1, angle=a)))
# Angular sensors
for cw in cfg.angular_dir: sorted_names.append(str(AngularSensor(game=None, clockwise=cw)))
# Delta distance sensor
if cfg.delta_dist_enabled: sorted_names.append(str(DeltaDistanceSensor(game=None)))
# Distance sensor
sorted_names.append(str(DistanceSensor(game=None, normalizer=1)))
# Negate all the keys to create the snapshot
snapshot = dict()
for i, name in enumerate(sorted_names): snapshot[-len(sorted_names) + i] = name
return snapshot
| [
37811,
198,
22609,
1747,
13,
9078,
198,
198,
14350,
1747,
973,
284,
42618,
260,
1088,
287,
262,
3776,
12,
38986,
13,
198,
37811,
198,
6738,
299,
32152,
1330,
31028,
11,
19862,
17034,
198,
198,
6738,
4566,
82,
13,
13645,
62,
11250,
133... | 2.440599 | 3,872 |
from django.core.cache import cache
from django.core.validators import RegexValidator
from django.db import models
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.dispatch import receiver
from ozpcenter import constants
from .external_model import ExternalModel
from .image import Image
class Intent(ExternalModel):
"""
An Intent is an abstract description of an operation to be performed
TODO: Auditing for create, update, delete
"""
# TODO unique on type
action = models.CharField(
max_length=64,
validators=[
RegexValidator(
regex=constants.INTENT_ACTION_REGEX,
message='action must be a valid action',
code='invalid action')]
)
media_type = models.CharField(
max_length=129,
validators=[
RegexValidator(
regex=constants.MEDIA_TYPE_REGEX,
message='type must be a valid media type',
code='invalid type')]
)
label = models.CharField(max_length=255)
icon = models.ForeignKey(Image, related_name='intent')
@receiver(post_save, sender=Intent)
@receiver(post_delete, sender=Intent)
def invalidate_intent_cache(sender, instance, **kwargs):
    """Assumed handler (the original snippet ends at the bare decorators): the
    otherwise-unused ``cache`` import above suggests cached intent data is
    cleared whenever an Intent is saved or deleted."""
    cache.clear()
| [
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12... | 2.526423 | 492 |
from collections import Counter
from concurrent.futures import ProcessPoolExecutor
import concurrent
from typing import List, Any, Generator, Tuple, KeysView, ValuesView, Dict
import scipy as sp
from scipy import sparse
import numpy as np
from tokenizers.spacy_tokenizer import SpacyTokenizer
from logger import logger
from utils import hash
TOKENIZER = None
class HashingTfIdfVectorizer:
"""
Create a tfidf matrix from collection of documents.
"""
def __init__(self, data_iterator, hash_size=2 ** 24, tokenizer=SpacyTokenizer(ngram_range=(1,2))):
"""
:param data_iterator: an instance of an iterator class, producing data batches;
the iterator class should implement read_batch() method
:param hash_size: a size of hash, power of 2
:param tokenizer: an instance of a tokenizer class; should implement "lemmatize()"
and/or "tokenize() methods"
"""
self.doc_index = data_iterator.doc2index
# self.doc2index = None
self.hash_size = hash_size
self.tokenizer = tokenizer
global TOKENIZER
TOKENIZER = self.tokenizer
if hasattr(self.tokenizer, 'lemmatize'):
processing_fn = self.tokenizer.lemmatize
elif hasattr(self.tokenizer, 'tokenize'):
processing_fn = self.tokenizer.tokenize
else:
raise AttributeError("{} should implement either 'tokenize()' or lemmatize()".
format(self.tokenizer.__class__.__name__))
self.processing_fn = processing_fn
self.data_iterator = data_iterator
self.tfidf_matrix = None
self.term_freqs = None
@staticmethod
def get_counts_parallel(kwargs) -> Tuple[List[int], List[int], List[int]]:
"""
Get batch counts. The same as get_counts(), but rewritten as staticmethod to be suitable
for parallelization.
"""
docs = kwargs['docs']
doc_ids = kwargs['doc_ids']
index = kwargs['doc_index']
hash_size = kwargs['hash_size']
logger.info("Tokenizing batch...")
if hasattr(TOKENIZER, 'lemmatize'):
processing_fn = TOKENIZER.lemmatize
elif hasattr(TOKENIZER, 'tokenize'):
processing_fn = TOKENIZER.tokenize
else:
raise AttributeError("{} should implement either 'tokenize()' or lemmatize()".
format(TOKENIZER.__class__.__name__))
batch_ngrams = list(processing_fn(docs))
doc_id = iter(doc_ids)
batch_hashes = []
batch_values = []
batch_col_ids = []
logger.info("Counting hash...")
for ngrams in batch_ngrams:
counts = Counter([hash(gram, hash_size) for gram in ngrams])
hashes = counts.keys()
values = counts.values()
col_id = [index[next(doc_id)]] * len(values)
batch_hashes.extend(hashes)
batch_values.extend(values)
batch_col_ids.extend(col_id)
return batch_hashes, batch_values, batch_col_ids
@staticmethod
def get_tfidf_matrix(count_matrix: sp.sparse.csr_matrix) -> Tuple[sp.sparse.csr_matrix, np.array]:
"""Convert a word count matrix into a tfidf matrix."""
binary = (count_matrix > 0).astype(int)
term_freqs = np.array(binary.sum(1)).squeeze()
idfs = np.log((count_matrix.shape[1] - term_freqs + 0.5) / (term_freqs + 0.5))
idfs[idfs < 0] = 0
idfs = sp.sparse.diags(idfs, 0)
tfs = count_matrix.log1p()
tfidfs = idfs.dot(tfs)
return tfidfs, term_freqs
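# Illustrative use of get_tfidf_matrix (assumed orientation: rows are hashed
# terms, columns are documents), mirroring the commented-out usage below:
# count_matrix = sp.sparse.csr_matrix(np.array([[1, 0],
#                                               [2, 3],
#                                               [0, 1]]))
# tfidfs, term_freqs = HashingTfIdfVectorizer.get_tfidf_matrix(count_matrix)
# term_freqs -> array([1, 2, 1]), i.e. the number of documents containing each term.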
# PATH='/media/olga/Data/projects/iPavlov/DeepPavlov/download/odqa/ruwiki_tfidf_matrix.npz'
# vectorizer = HashingTfIdfVectorizer(None)
# vectorizer.load(PATH)
# vectorizer.save('/media/olga/Data/projects/iPavlov/DeepPavlov/download/odqa/ruwiki_tfidf_matrix_new.npz')
| [
6738,
17268,
1330,
15034,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10854,
27201,
23002,
38409,
198,
11748,
24580,
198,
6738,
19720,
1330,
7343,
11,
4377,
11,
35986,
11,
309,
29291,
11,
26363,
7680,
11,
27068,
7680,
11,
360,
713,
198,
... | 2.257803 | 1,730 |
# data must be stored in JSON format
# data:
{
"name": "date to timestampe",
"author": "nicky",
"icon": "numbers.png",
"tags": "UNIX date timestamp",
"description": "converts a date to a UNIX timestamp",
"icon_link": "https://cdn-icons-png.flaticon.com/512/3402/3402135.png"
}
# data:
# any import statement directed to the main.py must be left outside the script tag
from main import Text
# script:
# script:
| [
2,
1366,
1276,
307,
8574,
287,
19449,
5794,
198,
198,
2,
1366,
25,
220,
198,
90,
198,
220,
366,
3672,
1298,
366,
4475,
284,
4628,
395,
321,
431,
1600,
198,
220,
366,
9800,
1298,
366,
17172,
88,
1600,
198,
220,
366,
4749,
1298,
366... | 2.764331 | 157 |
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from .models import ProblemReport, USER_PROBLEM_CHOICES
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
764,
27530,
1330,
20647,
19100,
11,
12... | 3.392157 | 51 |
#! python3
# pdfParanoia.py - Add a password (given on the command line)
# to every PDF in the folder and its subfolders.
import PyPDF2, os, sys
password = sys.argv[1]
for foldername, subfolders, filenames in os.walk(os.getcwd()):
# Find each PDF after walking through given directory.
for filename in filenames:
if (filename.endswith('.pdf')):
# Rewrite PDF to become encrypted.
pdfPath = os.path.join(foldername, filename)
pdfFile = open(pdfPath, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFile)
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdfReader.numPages):
pdfWriter.addPage(pdfReader.getPage(pageNum))
resultFilename = filename[:-4] + '_encrypted.pdf'
resultPath = os.path.join(foldername, resultFilename)
resultPdf = open(resultPath, 'wb')
pdfWriter.encrypt(password)
pdfWriter.write(resultPdf)
# Close original and result PDFs.
pdfFile.close()
resultPdf.close()
# Verify encryption.
verifyReader = PyPDF2.PdfFileReader(open(resultPath, 'rb'))
verifyReader.decrypt(password)
if verifyReader.getPage(0):
print('%s encrypted as %s. Deleting %s.' %
(filename, resultFilename, filename))
# Delete original.
os.unlink(pdfPath)
else:
print('Encryption failed.')
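# Example invocation (the password "swordfish" is just an illustration):
# python pdfParanoia.py swordfish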
| [
2,
0,
21015,
18,
198,
2,
37124,
10044,
30661,
13,
9078,
532,
3060,
9206,
287,
3141,
1627,
198,
2,
284,
790,
12960,
287,
9483,
290,
850,
11379,
364,
13,
198,
198,
11748,
9485,
20456,
17,
11,
28686,
11,
25064,
198,
198,
28712,
796,
... | 2.619543 | 481 |
from contextlib import contextmanager
import numpy as np
_my_colors = np.array([
[0, 127, 70],
[255, 0, 0],
[255, 217, 38],
[0, 135, 255],
[165, 0, 165],
[255, 167, 255],
[97, 142, 151],
[0, 255, 255],
[255, 96, 38],
[142, 76, 0],
[33, 0, 127],
[0, 0, 0],
[183, 183, 183],
[76, 255, 0],
], dtype=float) / 255
class skip(object):
    """A decorator to skip function execution.
    Parameters
    ----------
    f : function
        Any function whose execution needs to be skipped.
    Attributes
    ----------
    f
    """

    def __init__(self, f):
        # Reconstructed (assumed from the docstring): store the wrapped function.
        self.f = f

    def __call__(self, *args, **kwargs):
        # Calling the wrapper skips the original function entirely.
        print('skipping : ' + self.f.__name__)


class SkipWith(Exception):
    # Assumed helper: skip_run() below catches SkipWith, which the original snippet never defines.
    pass


@contextmanager
def skip_run(flag, f):
    """To skip a block of code.
    Parameters
    ----------
    flag : str
        skip or run.
    Returns
    -------
    None
    """
    @contextmanager
    def check_active():
        # Reconstructed inner helper (assumed): raise SkipWith to skip the guarded block.
        if flag == 'skip':
            print('Skipping the block: ' + f)
            raise SkipWith()
        print('Running the block: ' + f)
        yield

    try:
        yield check_active
    except SkipWith:
        pass
| [
6738,
4732,
8019,
1330,
4732,
37153,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
62,
1820,
62,
4033,
669,
796,
45941,
13,
18747,
26933,
201,
198,
220,
220,
220,
685,
15,
11,
18112,
11,
4317,
4357,
201,
198,
220,
220... | 2.03532 | 453 |
from rest_framework.generics import ListAPIView
from rest_framework.generics import ListCreateAPIView
from apps.meiduo_admin.serializer.users import UserModelSerializer
from apps.users.models import User
from apps.meiduo_admin.utils import PageNumber
| [
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
7343,
2969,
3824,
769,
198,
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
7343,
16447,
2969,
3824,
769,
198,
6738,
6725,
13,
1326,
312,
20895,
62,
28482,
13,
46911,
7509,
13,
18417,
1330,
1178... | 3.614286 | 70 |
# Even-number function ("par" = "even" in Portuguese)
def par(n):
    # Assumed minimal definition; the original snippet calls par() without defining it.
    return n % 2 == 0

print(par(4))
print(par(23))
| [
2,
24629,
16175,
28749,
1582,
198,
4798,
7,
1845,
7,
19,
4008,
198,
4798,
7,
1845,
7,
1954,
4008,
198
] | 2.05 | 20 |
from ciscoaxl import axl
from decouple import config
cucm = config("CUCM_PUBLISHER")
username = config("CUCM_AXL_USERNAME")
password = config("CUCM_AXL_PASSWORD")
version = config("CUCM_VERSION")
ucm = axl(username=username, password=password, cucm=cucm, cucm_version=version)
# TODO: add below methods for testing
# # return a list of phones with the following fields populated
# phone_list = ucm.get_phones(
# tagfilter={
# "name": "",
# "description": "",
# "product": "",
# "model": "",
# "class": "",
# "protocol": "",
# "protocolSide": "",
# "callingSearchSpaceName": "",
# "devicePoolName": "",
# "commonDeviceConfigName": "",
# "commonPhoneConfigName": "",
# "networkLocation": "",
# "locationName": "",
# "mediaResourceListName": "",
# "networkHoldMohAudioSourceId": "",
# "userHoldMohAudioSourceId": "",
# "loadInformation": "",
# "securityProfileName": "",
# "sipProfileName": "",
# "cgpnTransformationCssName": "",
# "useDevicePoolCgpnTransformCss": "",
# "numberOfButtons": "",
# "phoneTemplateName": "",
# "primaryPhoneName": "",
# "loginUserId": "",
# "defaultProfileName": "",
# "enableExtensionMobility": "",
# "currentProfileName": "",
# "loginTime": "",
# "loginDuration": "",
# # "currentConfig": "",
# "ownerUserName": "",
# "subscribeCallingSearchSpaceName": "",
# "rerouteCallingSearchSpaceName": "",
# "allowCtiControlFlag": "",
# "alwaysUsePrimeLine": "",
# "alwaysUsePrimeLineForVoiceMessage": "",
# }
# )
# get_phone = ucm.get_phone(name="SEP0023AF482340")
# print(get_phone)
# user_list = ucm.get_users(
# tagfilter={
# "userid": "",
# "firstName": "",
# "lastName": "",
# "directoryUri": "",
# "telephoneNumber": "",
# "enableCti": "",
# "mailid": "",
# "primaryExtension": {"pattern": "", "routePartitionName": ""},
# "enableMobility": "",
# "homeCluster": "",
# "associatedPc": "",
# "enableEmcc": "",
# "imAndPresenceEnable": "",
# "serviceProfile": {"_value_1": ""},
# "status": "",
# "userLocale": "",
# "title": "",
# "subscribeCallingSearchSpaceName": "",
# }
# )
# for user in user_list:
# print(user.firstName, user.lastName, user.mailid, user.primaryExtension.pattern)
# hunt_pilots = ucm.get_hunt_pilots(
# tagfilter={
# "pattern": "",
# "patternUrgency": "",
# "routePartitionName": "",
# "alertingName": "",
# "blockEnable": "",
# "description": "",
# "forwardHuntBusy": "",
# "forwardHuntNoAnswer": "",
# "queueCalls": "",
# "callingPartyTransformationMask": "",
# "callingPartyPrefixDigits": "",
# "callingLinePresentationBit": "",
# "callingNamePresentationBit": "",
# "calledPartyTransformationMask": "",
# "callPickupGroupName": "",
# "huntListName": "",
# "useCallingPartyPhoneMask": "",
# "maxHuntduration": "",
# "displayConnectedNumber": "",
# "aarNeighborhoodName": "",
# }
# )
# print(hunt_pilots)
# ########################### Hunt List, Line Group, Hunt Pilot ############
# huntlists = ucm.get_hunt_lists()
# print(huntlists)
# new_huntlist = ucm.add_hunt_list(
# name="My_Test_HL",
# callManagerGroupName="Default",
# description="new hunt list",
# routeListEnabled=True,
# )
# print(new_huntlist)
# update_huntlist = ucm.update_hunt_list(name="My_Test_HL", description="modified HL")
# print(update_huntlist)
# hunt_pilots = ucm.get_hunt_pilots()
# print(hunt_pilots)
# new_pilot = ucm.add_hunt_pilot(
# pattern="2500",
# routePartitionName="Internal_PT",
# description="new pilot",
# blockEnable="false",
# useCallingPartyPhoneMask="Off",
# huntListName="My_Test_HL",
# )
# print(new_pilot)
# hunt_pilots = ucm.get_hunt_pilots()
# print(hunt_pilots)
# update_pilot = ucm.update_hunt_pilot(
# pattern=2500, description="modified pilot", routePartitionName="Internal_PT"
# )
# print(update_pilot)
# get_hunt_pilot = ucm.get_hunt_pilot(pattern="2500", routePartitionName="Internal_PT")
# print(get_hunt_pilot)
# line_groups = ucm.get_line_groups()
# print(line_groups)
# get_line_group = ucm.get_line_group(name="Test")
# print(get_line_group)
# new_line_group = ucm.add_line_group(
# name="My_Test_LG",
# distributionAlgorithm="Longest Idle Time",
# rnaReversionTimeOut="10",
# huntAlgorithmNoAnswer="Try next member; then, try next group in Hunt List",
# huntAlgorithmBusy="Try next member; then, try next group in Hunt List",
# huntAlgorithmNotAvailable="Try next member; then, try next group in Hunt List",
# members={
# "member": [
# {
# "lineSelectionOrder": 0,
# "directoryNumber": {
# "pattern": "1102",
# "routePartitionName": "Internal_PT",
# },
# }
# ]
# },
# )
# print(new_line_group)
# update_line_group = ucm.update_line_group(
# name="My_Test_LG", distributionAlgorithm="Broadcast"
# )
# get_line_group = ucm.get_line_group(name="My_Test_LG")
# print(get_line_group)
# call_pickup_groups = ucm.get_call_pickup_groups()
# print(call_pickup_groups)
# new_cpug = ucm.add_call_pickup_group(name="My_Test_CPUG", pattern="2502")
# print(new_cpug)
# update_call_pickup_group = ucm.update_call_pickup_group(
# name="My_Test_CPUG", description="changed description"
# )
# print(update_call_pickup_group)
# get_cpug = ucm.get_call_pickup_group(name="My_Test_CPUG")
# print(get_cpug)
# ##### CLEANUP #####
# del_hp = ucm.delete_hunt_pilot(pattern="2500", routePartitionName="Internal_PT")
# print(del_hp)
# del_hp = ucm.delete_hunt_list(name="My_Test_HL")
# print(del_hp)
# del_lg = ucm.delete_line_group(name="My_Test_LG")
# print(del_hp)
# del_cpug = ucm.delete_call_pickup_group(name="My_Test_CPUG")
# print(del_cpug)
# del_phone = ucm.delete_phone(name="SEP0023AF482340")
# print(del_phone)
# del_dirnum = ucm.delete_directory_number(
# pattern="1102", routePartitionName="Internal_PT"
# )
# print(del_dirnum)
| [
6738,
269,
4861,
897,
75,
1330,
7877,
75,
198,
6738,
875,
43846,
1330,
4566,
198,
198,
66,
1229,
76,
796,
4566,
7203,
34,
9598,
44,
62,
5105,
9148,
1797,
16879,
4943,
198,
29460,
796,
4566,
7203,
34,
9598,
44,
62,
25922,
43,
62,
2... | 2.180743 | 2,960 |
import psycopg2
import logging
import pymysql
# mysql
# connect
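    # Minimal connection sketch (host, credentials and database are placeholders):
    # conn = pymysql.connect(host="localhost", user="user",
    #                        password="secret", database="testdb")
    # with conn.cursor() as cur:
    #     cur.execute("SELECT VERSION()")
    #     logging.info(cur.fetchone())
    # conn.close()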
| [
11748,
17331,
22163,
70,
17,
198,
11748,
18931,
198,
11748,
279,
4948,
893,
13976,
628,
198,
220,
220,
220,
1303,
48761,
628,
220,
220,
220,
1303,
2018,
198
] | 2.678571 | 28 |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "0.1.0",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: launchdarkly_environment
short_description: Create Launchdarkly Project specific Environment
description:
- Manage LaunchDarkly Project specific Environments.
version_added: "0.1.0"
options:
state:
description:
- Indicate desired state of the resource
choices: [ absent, present ]
default: present
type: str
project_key:
description:
- Project key will group flags together
default: 'default'
environment_key:
description:
- A unique key that will be used to reference the flag in your code.
required: yes
type: str
name:
description:
- Display name for the environment.
type: str
color:
description:
- Color used in dashboard for the environment.
required: no
type: str
default_ttl:
description:
- TTL is only used in our PHP SDK.
type: int
secure_mode:
description:
      - Determines if this environment is in secure mode.
default_track_events:
description:
- Set to `true` to send detailed event information for new flags.
type: bool
tags:
description:
- An array of tags for this environment.
type: str
require_comments:
description:
- Determines if this environment requires comments for flag and segment changes.
type: bool
confirm_changes:
description:
- Determines if this environment requires confirmation for flag and segment changes.
type: bool
extends_documentation_fragment: launchdarkly_labs.collection.launchdarkly
"""
EXAMPLES = r"""
---
# Create a new LaunchDarkly Environment
- launchdarkly_environment:
state: present
project_key: test-project-1
environment_key: test_environment-1
color: C9C9C9
# Create a new LaunchDarkly Environment and tag it
- launchdarkly_environment:
state: present
project_key: test-project-1
environment_key: test_environment-1
color: C9C9C9
tags:
- blue
- green
"""
RETURN = r"""
environment:
description: Returns dictionary containing an L(Environment, https://github.com/launchdarkly/api-client-python/blob/2.0.30/docs/Environment.md)
type: dict
returned: on success
"""
import inspect
import traceback
LD_IMP_ERR = None
try:
import launchdarkly_api
from launchdarkly_api.rest import ApiException
HAS_LD = True
except ImportError:
LD_IMP_ERR = traceback.format_exc()
HAS_LD = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.common._json_compat import json
from ansible.module_utils.six import PY2, iteritems, string_types
from ansible_collections.launchdarkly_labs.collection.plugins.module_utils.base import (
configure_instance,
parse_env_param,
fail_exit,
ld_common_argument_spec,
)
from ansible_collections.launchdarkly_labs.collection.plugins.module_utils.environment import (
ld_env_arg_spec,
env_ld_builder,
)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
834,
4164,
330,
31172,
834,
796,
2099,
198,
198,
15037,
34563,
62,
47123,
2885,
13563,
796,
1391,
... | 2.621418 | 1,326 |
"""
Superclass of experiments and related classes.
"""
import labrad
import traceback
from treedict import TreeDict
class experiment_info(object):
'''
    Holds information about the experiment
Attributes
----------
name: str
parameters: TreeDict
required_parameters: list
'''
required_parameters = []
name = ''
class experiment(experiment_info):
"""
Main experiment class
"""
def execute(self, ident):
'''
executes the experiment
'''
self.ident = ident
try:
self._connect()
self._initialize(self.cxn, self.context, ident)
self._run(self.cxn, self.context)
self._finalize(self.cxn, self.context)
except Exception as e:
reason = traceback.format_exc()
print(reason)
if hasattr(self, 'sc'):
self.sc.error_finish_confirmed(self.ident, reason)
finally:
if hasattr(self, 'cxn'):
if self.cxn is not None:
self.cxn.disconnect()
self.cxn = None
def _load_parameters_dict(self, params):
        '''loads the required parameters into a TreeDict'''
d = TreeDict()
for collection, parameter_name in params:
try:
value = self.pv.get_parameter(collection, parameter_name)
except Exception as e:
print(e)
message = "In {}: Parameter {} not found among Parameter Vault parameters"
raise Exception (message.format(self.name, (collection, parameter_name)))
else:
d['{0}.{1}'.format(collection, parameter_name)] = value
return d
def set_parameters(self, parameter_dict):
'''
can reload all parameter values from parameter_vault or replace
parameters with values from the provided parameter_dict
'''
if isinstance(parameter_dict, dict):
            update_dict = TreeDict()
            for (collection,parameter_name), value in parameter_dict.items():
                update_dict['{0}.{1}'.format(collection, parameter_name)] = value
        elif isinstance(parameter_dict, TreeDict):
            update_dict = parameter_dict
        else:
            message = "Incorrect input type for the replacement dictionary"
            raise Exception(message)
        self.parameters.update(update_dict)
# useful functions to be used in subclasses
    def pause_or_stop(self):
        '''
        allows pausing and stopping the experiment
'''
self.should_stop = self.sc.pause_or_stop(self.ident)
if self.should_stop:
self.sc.stop_confirmed(self.ident)
return self.should_stop
# functions to reimplement in the subclass
def initialize(self, cxn, context, ident):
'''
implemented by the subclass
'''
def run(self, cxn, context, replacement_parameters={}):
'''
implemented by the subclass
'''
def finalize(self, cxn, context):
'''
implemented by the subclass
'''
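
# Illustrative subclass (assumed parameters and servers; not part of the original file):
class example_experiment(experiment):

    name = 'Example'
    required_parameters = [('Example', 'iterations')]

    def initialize(self, cxn, context, ident):
        # self.parameters is assumed to be populated from Parameter Vault
        # before run() is called.
        self.iterations = int(self.parameters['Example.iterations'])

    def run(self, cxn, context, replacement_parameters={}):
        for i in range(self.iterations):
            if self.pause_or_stop():
                return

    def finalize(self, cxn, context):
        pass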
| [
37811,
198,
12442,
4871,
286,
10256,
290,
3519,
6097,
13,
198,
37811,
198,
11748,
2248,
6335,
198,
11748,
12854,
1891,
198,
6738,
2054,
276,
713,
1330,
12200,
35,
713,
198,
198,
4871,
6306,
62,
10951,
7,
15252,
2599,
198,
220,
220,
22... | 2.235169 | 1,416 |
""" Game fix for Resident Evil 6 / Biohazard 6
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" Uses wmp11
"""
util.protontricks('wmp11')
| [
37811,
3776,
4259,
329,
22373,
10461,
718,
1220,
16024,
37598,
718,
198,
37811,
198,
2,
79,
2645,
600,
25,
15560,
28,
34,
486,
3070,
198,
198,
6738,
386,
1122,
42624,
1330,
7736,
198,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,... | 2.666667 | 66 |
import os
import argparse
import random
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
import tensorflow as tf
from tensorflow.python import keras as K
import gym
from fn_framework import FNAgent, Trainer, Observer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PG Agent Pendulum-v0")
parser.add_argument("--play", action="store_true",
help="play with trained model")
args = parser.parse_args()
main(args.play)
| [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
198,
6738,
1341,
35720,
13,
1069,
759,
874,
1330,
1693,
8019,
198,
11748,
11192... | 2.967213 | 183 |
############################
# 226. Invert Binary Tree
############################
'''
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
t = TreeNode(1)
t.left = TreeNode(2)
t.right = TreeNode(3)
t.left.left = TreeNode(4)
t.left.right = TreeNode(5)
t.right.left = TreeNode(6)
t.right.right = TreeNode(7)
print (t.left.left.val)
print (t.left.val)
solu = Solution()
newt = solu.invertTree(t)
print (newt.right.right.val)
print (newt.right.val)
'''
'''
######## my version ########
############################
# 234. Palindrome Linked List
############################
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
# while head:
# if head.val != new.val:
# return False
# head = head.next
# new = new.next
# return True
headnew = head
print (headnew.next)
if head:
prev = None
while head:
cur = head
head = head.next
cur.next = prev
prev = cur
print (headnew.next)
# test 1
# a = ListNode(1)
# a.next = ListNode(1)
# a.next.next = ListNode(2)
# a.next.next.next = ListNode(1)
# test 2
a = ListNode(0)
a.next = ListNode(1)
# print (a.next.val)
solu = Solution()
print (solu.isPalindrome(a))
# b = solu.inv(a)
# print (b.val)
# print (a.val)
# prev = None
# head = a
# while head:
# cur = head
# head = head.next
# cur.next = prev
# prev = cur
# print(cur.next.next.val)
'''
##### solution version #####
############################
# 234. Palindrome Linked List
############################
# Definition for singly-linked list.
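# ListNode and the reference Solution are reconstructed here (assumed, based on
# the definitions inside the commented-out blocks above) so the test below runs;
# this is the standard O(n) time / O(1) space approach: reverse the first half
# while walking to the middle, then compare the two halves.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

class Solution:
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        rev = None
        slow = fast = head
        while fast and fast.next:
            fast = fast.next.next
            head = head.next
            slow.next = rev
            rev = slow
            slow = head
        if fast:            # odd number of nodes: skip the middle one
            head = head.next
        while rev and rev.val == head.val:
            rev = rev.next
            head = head.next
        return not rev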
# test 1
a = ListNode(1)
a.next = ListNode(1)
a.next.next = ListNode(2)
a.next.next.next = ListNode(1)
solu = Solution()
print (solu.isPalindrome(a))
| [
14468,
7804,
4242,
198,
2,
31510,
13,
554,
1851,
45755,
12200,
198,
14468,
7804,
4242,
198,
7061,
6,
198,
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
4871,
12200,
19667,
25,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
1... | 2.208687 | 1,059 |
import mnist_loader
import network
def run_train():
""" The network is trained by executing Network module """
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = network.Network([784, 30, 10])
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
network.save_object(net)
| [
11748,
285,
77,
396,
62,
29356,
198,
11748,
3127,
198,
198,
4299,
1057,
62,
27432,
33529,
198,
197,
37811,
383,
3127,
318,
8776,
416,
23710,
7311,
8265,
37227,
198,
197,
198,
197,
34409,
62,
7890,
11,
21201,
62,
7890,
11,
1332,
62,
... | 2.881818 | 110 |
#!/usr/bin/env python3
# Copyright (c) 2017 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from builtin_interfaces.msg import *
from datetime import datetime
from std_msgs.msg import *
import threading
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
2177,
8180,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,... | 3.6 | 220 |
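
The ROS 2 snippet above imports rclpy and std_msgs but calls a main() whose body is not included. A minimal sketch of what such an entry point could look like is shown below; the node and topic names are illustrative assumptions, not recovered code:

import rclpy
from std_msgs.msg import String
from datetime import datetime

def main(args=None):
    rclpy.init(args=args)
    node = rclpy.create_node('example_publisher')             # hypothetical node name
    publisher = node.create_publisher(String, 'chatter', 10)  # hypothetical topic
    msg = String()
    msg.data = 'hello at %s' % datetime.now().isoformat()
    publisher.publish(msg)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == '__main__':
    main()
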
import cv2
import math
import argparse
def highlightFace(net, frame, conf_threshold=0.7):
'''
This function detects faces on the image using the 'net' passed (if any) and returns the detection output
    as well as the coordinates of the faces detected
'''
frameOpencvDnn=frame.copy()
#--------saving the image dimensions as height and width-------#
frameHeight = frameOpencvDnn.shape[0]
frameWidth = frameOpencvDnn.shape[1]
#-----------blob-> Preprocessing the image to required input of the model---------#
blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
net.setInput(blob) #setting the image blob as input
detections = net.forward()
'''3rd dimension helps you iterate over predictions and
in the 4th dimension, there are actual results
    class_label = int(inference_results[0, 0, i,1]) --> gives one hot encoded class label for ith box
conf = inference_results[0, 0, i, 2] --> gives confidence of ith box prediction
and 2nd dimension is used when the predictions are made in more than one stages, for example in
YOLO the predictions are done at 3 different layers. you can iterate over these predictions using
2nd dimension like [:,i,:,:]
'''
faceBoxes=[]
for i in range(detections.shape[2]):
confidence=detections[0,0,i,2]
if confidence>conf_threshold:
# TopLeftX,TopLeftY, BottomRightX, BottomRightY = inference_results[0, 0, i, 3:7] --> gives co-ordinates bounding boxes for resized small image
x1=int(detections[0,0,i,3]*frameWidth)
y1=int(detections[0,0,i,4]*frameHeight)
x2=int(detections[0,0,i,5]*frameWidth)
y2=int(detections[0,0,i,6]*frameHeight)
# box = detections[0, 0, i, 3:7] * np.array([frameWidth, frameHeight, frameWidth, frameHeight])
# faceBoxes.append(box.astype("int"))
faceBoxes.append([x1,y1,x2,y2])
cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8)
return frameOpencvDnn,faceBoxes
#-------Create and parse the arguments passed on the terminal-------------#
parser=argparse.ArgumentParser()
parser.add_argument('--image')
args=parser.parse_args()
#-----------Model File Paths----------------#
faceProto="Models/opencv_face_detector.pbtxt"
faceModel="Models/opencv_face_detector_uint8.pb"
ageProto="Models/age_deploy.prototxt"
ageModel="Models/age_net.caffemodel"
genderProto="Models/gender_deploy.prototxt"
genderModel="Models/gender_net.caffemodel"
#-----------Model Variables---------------#
MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746)
ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList=['Male','Female']
#-------------Creating the DNN------------#
faceNet= cv2.dnn.readNet(faceModel,faceProto)
ageNet= cv2.dnn.readNet(ageModel,ageProto)
genderNet= cv2.dnn.readNet(genderModel,genderProto)
#---------Instantiate the Video Capture Object-----------#
video=cv2.VideoCapture(args.image if args.image else 0) #check whether image was passed or not otherwise use the webcam
padding=20
while cv2.waitKey(1)<0:
hasFrame,frame=video.read()
if not hasFrame:
cv2.waitKey()
break
#----------------Face Detection-----------------#
resultImg,faceBoxes=highlightFace(faceNet,frame)
if not faceBoxes:
print('No face detected')
break
for faceBox in faceBoxes:
#-------Crop out the face from the image---------#
face=frame[faceBox[1]:faceBox[3],faceBox[0]:faceBox[2]] #img[y1:y2 , x1:x2]
#------Gender prediction---------#
blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)
genderNet.setInput(blob)
genderPreds=genderNet.forward()
gender=genderList[genderPreds[0].argmax()]
print(f'Gender: {gender}')
#-------Age Prediction---------#
ageNet.setInput(blob)
agePreds=ageNet.forward()
age=ageList[agePreds[0].argmax()]
print(f'Age: {age[1:-1]} years')
cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA)
cv2.imshow("Detecting age and gender", resultImg)
video.release()
cv2.destroyAllWindows() | [
11748,
269,
85,
17,
198,
11748,
10688,
198,
11748,
1822,
29572,
628,
198,
4299,
7238,
32388,
7,
3262,
11,
5739,
11,
1013,
62,
400,
10126,
28,
15,
13,
22,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
770,
2163,
39382,
6... | 2.461538 | 1,794 |
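
The docstring in highlightFace describes the layout of the detections tensor: each row of detections[0, 0] holds [_, class_label, confidence, x1, y1, x2, y2], with box coordinates normalized to 0-1. A small standalone illustration of turning one such row into pixel coordinates (the numbers below are made up):

import numpy as np

frame_w, frame_h = 640, 480
detection = np.array([0.0, 1.0, 0.92, 0.25, 0.20, 0.55, 0.70])  # one row of detections[0, 0]
confidence = detection[2]
if confidence > 0.7:
    scale = np.array([frame_w, frame_h, frame_w, frame_h])
    x1, y1, x2, y2 = (detection[3:7] * scale).astype(int)
    print(confidence, (x1, y1, x2, y2))  # 0.92 (160, 96, 352, 336)
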
import sys
import importlib
importlib.reload(sys)
from os import chdir, getcwd, listdir, path, walk
from time import strftime
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBox, LTTextLine, LTTextBoxHorizontal
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
"""
Accept a folder/file path typed by the user; if the path does not exist, print an error message and keep asking until an existing path is entered.
:param: prompt -> abs_path: check that the path is appropriate and store it in abs_path
:return: string: abs_path
"""
"""
Parse the pdf text and save it in a txt file
:param: name, name of every pdf file in the provided folder
:return: none
"""
print ("\n")
folder = check_path("Provide absolute path for the folder: ")
list=[]
for root,dirs,files in walk(folder):
for filename in files:
if filename.endswith('.pdf'):
t=path.join(folder,filename)
list.append(t)
for item in list:
    head, tail = path.split(item)
    var = "\\"
    tail = tail.replace(".pdf", ".txt")
    name = head + var + tail
content = ""
parse(name)
| [
11748,
25064,
198,
11748,
1330,
8019,
198,
11748,
8019,
13,
260,
2220,
7,
17597,
8,
198,
6738,
28686,
1330,
442,
15908,
11,
651,
66,
16993,
11,
1351,
15908,
11,
3108,
198,
6738,
640,
1330,
965,
31387,
198,
6738,
37124,
1084,
263,
13,
... | 2.23 | 500 |
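
check_path() and parse() are referenced in the snippet above, but their bodies are not included. Based on the first docstring (keep asking until an existing path is supplied), a plausible sketch of check_path is given below; it is an assumption about the missing helper, not recovered code:

from os import path

def check_path(prompt):
    # Keep prompting until the user supplies a path that actually exists.
    abs_path = input(prompt)
    while not path.exists(abs_path):
        print("The path '%s' does not exist, please try again." % abs_path)
        abs_path = input(prompt)
    return abs_path
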
from uuid import UUID
import json
import logging
import traceback
import requests
from requests.exceptions import ConnectionError as RequestsConnectionError, MissingSchema
from sqlalchemy.orm.exc import NoResultFound
from .. import app
from ..db import Session
from ..mappings import *
from ..helpers import type_check
@app.celery.task(autoretry_for=(NoResultFound,), max_retries=5)
@type_check
def invoke_webhooks_fileusage(usage_id: int):
''' Call webhooks for a FileUsage entry.
This will extract the relevant data from
the database entry and then POST that data
to all webhooks for the business of the
document owner.
Arguments:
usage_id (int): FileUsage id that should be invoked.
'''
print('invoke_webhooks_fileusage called with {}'.format(usage_id))
session = Session()
(doc_id, type_name, timestamp, data) = (
session
.query(FileUsage)
.join(FileUsageType)
.filter(FileUsage.id == usage_id)
.with_entities(
FileUsage.document_id,
FileUsageType.name,
FileUsage.timestamp,
FileUsage.data
)
.one()
)
post_data = json.dumps({
'doc_id': UUID(bytes=doc_id).hex,
'type': 'document',
'usage_type': type_name,
'timestamp': timestamp.isoformat(),
'data': json.loads(data)
})
__invoke_webhooks(session, doc_id, post_data)
@app.celery.task(autoretry_for=(Exception,), max_retries=5)
@type_check
def invoke_webhooks_fieldusage(usage_id: int):
''' Call webhooks for a FieldUsage entry.
This will extract the relevant data from
the database entry and then POST that data
to all webhooks for the business of the
document owner.
Arguments:
usage_id (int): FieldUsage id that should be invoked.
'''
session = Session()
print('invoke_webhooks_fieldusage called with {}'.format(usage_id))
(doc_id, type_name, timestamp, data, field_id, user_id) = (
session
.query(FieldUsage)
.join(FieldUsageType)
.join(Field)
.filter(FieldUsage.id == usage_id)
.with_entities(
Field.document_id,
FieldUsageType.name,
FieldUsage.timestamp,
FieldUsage.data,
Field.id,
Field.user_id
)
.one()
)
post_data = json.dumps({
'doc_id': UUID(bytes=doc_id).hex,
'field_id': UUID(bytes=field_id).hex,
# user_id is NULL for dependent fields
'user_id': UUID(bytes=user_id).hex if user_id is not None else None,
'type': 'field',
'usage_type': type_name,
'timestamp': timestamp.isoformat(),
'data': json.loads(data)
})
__invoke_webhooks(session, doc_id, post_data)
| [
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
12854,
1891,
198,
11748,
7007,
198,
198,
6738,
7007,
13,
1069,
11755,
1330,
26923,
12331,
355,
9394,
3558,
32048,
12331,
11,
25639,
27054,
261... | 2.213964 | 1,332 |
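
Both tasks above end by handing the serialized payload to __invoke_webhooks, which is not shown in the snippet. A heavily hedged sketch of what such a helper might do follows; the Webhook and Document mappings and their columns are hypothetical placeholders, and only requests/logging/SQLAlchemy calls that exist are used:

def __invoke_webhooks(session, doc_id, post_data):
    ''' POST post_data to every webhook registered for the document owner's business (sketch). '''
    headers = {'Content-Type': 'application/json'}
    urls = (
        session
        .query(Webhook.url)                                           # hypothetical mapping
        .join(Document, Document.business_id == Webhook.business_id)  # hypothetical columns
        .filter(Document.id == doc_id)
        .all()
    )
    for (url,) in urls:
        try:
            requests.post(url, data=post_data, headers=headers, timeout=10)
        except RequestsConnectionError:
            logging.warning('webhook %s unreachable', url)
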
import os
from pathlib import Path
import argparse
import datetime
import numpy as np
import time
import torch
import json
import random
# import functools
import utils
from create_model import create_model
from create_datasets.prepare_datasets import build_test_dataset
from engine import *
from losses import Uptask_Loss, Downtask_Loss
# Fix random seeds for reproducibility
random_seed = 42
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser('SMART-Net Framework training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
os.environ["CUDA_DEVICE_ORDER"] = args.cuda_device_order
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_visible_devices
main(args)
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
33918,
198,
11748,
4738,
198,
2,
1330,
1257,
310,
10141... | 2.687345 | 403 |
import numpy as np
import torch
import pathlib
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
3108,
8019,
628,
198
] | 3.5 | 14 |
#############################################################################
# Copyright (c) Wolf Vollprecht, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
from jupyros import _version
import rospkg
import os
__version__ = _version.__version__
if os.getenv('JUPYROS_DEFAULT_WS'):
envs = os.getenv('JUPYROS_DEFAULT_WS').split(';')
else:
envs = None
r = rospkg.RosPack(envs)
def load_jupyter_server_extension(nb_server_app):
"""
Called when the extension is loaded.
Args:
nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
"""
web_app = nb_server_app.web_app
host_pattern = '.*$'
route_pattern = url_path_join(web_app.settings['base_url'], '/rospkg/(.*)')
web_app.add_handlers(host_pattern, [(route_pattern, ROSStaticHandler)])
| [
29113,
29113,
7804,
4242,
2,
198,
2,
15069,
357,
66,
8,
8662,
569,
692,
3866,
21474,
11,
16972,
25896,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.317204 | 558 |
import functools
import json
import uuid
from .common import InfoExtractor
from ..utils import (
determine_ext,
dict_get,
ExtractorError,
float_or_none,
OnDemandPagedList,
traverse_obj,
)
| [
11748,
1257,
310,
10141,
198,
11748,
33918,
198,
11748,
334,
27112,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
26791,
1330,
357,
198,
220,
220,
220,
5004,
62,
2302,
11,
198,
220,
220,
220,
8633,
62,
1136,
... | 2.626506 | 83 |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
from itertools import combinations
import jax.numpy as jnp
import numpy as np
import pytest
from tinygp.solvers.quasisep.core import (
DiagQSM,
LowerTriQSM,
SquareQSM,
StrictLowerTriQSM,
StrictUpperTriQSM,
SymmQSM,
)
@pytest.fixture(params=["random", "celerite"])
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize("symm", [True, False])
@pytest.mark.parametrize("name", ["celerite"])
@pytest.mark.parametrize("name", ["celerite"])
@pytest.mark.parametrize("symm", [True, False])
@pytest.mark.parametrize("name", ["celerite"])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
616,
9078,
25,
8856,
12,
48277,
198,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
11748,
299,
32152,
355... | 2.350746 | 268 |
html1 = """<li class="clearfix">
<span class="interface nearby_stations_schools_national_rail_station" title="London Bridge"></span>
<span class="nearby_stations_schools_name" title="London Bridge">London Bridge</span>
(0.4 miles)
</li>"""
html2 = """<li class="clearfix">
<span class="interface nearby_stations_schools_london_underground_station" title="Borough"></span>
<span class="nearby_stations_schools_name" title="Borough">Borough</span>
(1.2 miles)
</li>"""
| [
6494,
16,
796,
37227,
27,
4528,
1398,
2625,
20063,
13049,
5320,
198,
27,
12626,
1398,
2625,
39994,
6716,
62,
301,
602,
62,
14347,
82,
62,
14648,
62,
30224,
62,
17529,
1,
3670,
2625,
23421,
10290,
23984,
12626,
29,
198,
27,
12626,
1398... | 2.641026 | 195 |
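
The two fragments share the same structure, so the station name and distance can be pulled out with a few lines of parsing. BeautifulSoup is an assumption here; the original snippet does not show how the fragments are consumed:

import re
from bs4 import BeautifulSoup

for html in (html1, html2):
    soup = BeautifulSoup(html, 'html.parser')
    name = soup.find('span', class_='nearby_stations_schools_name').get_text(strip=True)
    distance = re.search(r'\(([\d.]+) miles\)', soup.get_text())
    print(name, distance.group(1) if distance else None)
# -> London Bridge 0.4
# -> Borough 1.2
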
# input = "5 1 2 3 7 8 6 4"
input = "1 2 5 3 7 8 6 4"
# input = "2 1 5 3 4"
# input = "2 5 1 3 4"
a = list(map(int, input.rstrip().split()))
minimumBribes(a) | [
628,
220,
220,
220,
220,
198,
2,
5128,
796,
366,
20,
352,
362,
513,
767,
807,
718,
604,
1,
198,
15414,
796,
366,
16,
362,
642,
513,
767,
807,
718,
604,
1,
198,
198,
2,
5128,
796,
366,
17,
352,
642,
513,
604,
1,
198,
2,
5128,... | 2.061728 | 81 |
#!/usr/bin/env python
"""
model.py
Zhiang Chen
Nov 24, 2019
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import os
if __name__ == "__main__":
inputs = 200
layers = [200, 20, 2]
nn = FCL(inputs, layers)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
19849,
13,
9078,
198,
57,
5303,
648,
12555,
198,
20795,
1987,
11,
13130,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,... | 2.688 | 125 |
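
FCL is instantiated above, but its definition is not part of the snippet. Reading FCL(inputs, layers) as a plain fully connected network, a Keras-based sketch is shown below; this is an illustrative assumption, not the author's class:

import tensorflow as tf

def build_fcl(inputs, layers):
    # Dense layers sized by `layers`, e.g. [200, 20, 2]; ReLU on all but the last.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(inputs,)))
    for units in layers[:-1]:
        model.add(tf.keras.layers.Dense(units, activation='relu'))
    model.add(tf.keras.layers.Dense(layers[-1]))
    model.compile(optimizer='adam', loss='mse')
    return model

model = build_fcl(200, [200, 20, 2])
model.summary()
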
# coding=utf-8
# Copyright 2021 TUNiB Inc.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import List, Optional, Union
import numpy as np
from datasets import Dataset, load_dataset
from torch.utils.data import Dataset as TorchDataset
from tqdm import tqdm
from .cpp_binder import get_datasets_utils
class DatasetBlender(TorchDataset):
"""
Dataset blender for multiple datasets
this was copied from Megatron-LM and modified.
Args:
datasets (List[Dataset]): list of datasets
weights (Optional[List[float]]): list of dataset weights.
if None, we make weights list proportional to the length of each dataset automatically.
Usage:
>>> from torch.utils.data import DataLoader
>>> # Define list of the Datasets or PyTorch datasets and weights
>>> datasets = [dataset_1, dataset_2, dataset_3]
>>> weights = [0.8, 0.1, 0.1]
>>> # Dataset blending with weights
>>> blender = DatasetBlender(datasets, weights)
>>> dataloader = DataLoader(blender, ...)
>>> for sample in dataloader: ...
>>> # Dataset blending without weights
>>> # We make weights list proportional to the length of each dataset automatically.
>>> # For example, d1=2000, d2=3000, d3=5000 -> [0.2, 0.3, 0.5]
>>> blender_wo_weights = DatasetBlender(datasets)
>>> dataloader_wo_weights = DataLoader(blender_wo_weights, ...)
>>> for sample in dataloader_wo_weights: ...
"""
@staticmethod
def _build_blending_indices(
dataset_index,
dataset_sample_index,
weights,
num_datasets,
size,
):
"""Python implementation of ``build_blending_indices``"""
current_samples = [0] * num_datasets
for sample_idx in tqdm(range(size)):
sample_idx_double = max(sample_idx, 1.0)
max_error_index = 0
max_error = weights[0] * sample_idx_double - current_samples[0]
for dataset_idx in range(num_datasets):
error = weights[dataset_idx] * sample_idx_double - current_samples[dataset_idx]
if error > max_error:
max_error = error
max_error_index = dataset_idx
dataset_index[sample_idx] = max_error_index
dataset_sample_index[sample_idx] = current_samples[max_error_index]
current_samples[max_error_index] += 1
@staticmethod
def _normalize_weight(weights: List[float]) -> np.ndarray:
"""
Normalize dataset weights into 0 to 1.
Args:
weights (List[float]): list of dataset weights
Returns:
np.ndarray: list of normalized dataset weights
"""
weights = np.array(weights, dtype=np.float64)
sum_weights = np.sum(weights)
        assert sum_weights > 0.0, "sum of all the weights is zero. did you pass a zero-length list of datasets?"
return weights / sum_weights
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
309,
4944,
72,
33,
3457,
13,
198,
2,
15069,
357,
66,
8,
12131,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
... | 2.535411 | 1,412 |
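
A standalone illustration of the weight handling documented above: dataset lengths 2000/3000/5000 normalize to 0.2/0.3/0.5, and the greedy error-minimizing loop from _build_blending_indices (reimplemented here so the demo runs on its own) then interleaves samples in roughly those proportions:

import numpy as np

lengths = [2000, 3000, 5000]
weights = np.array(lengths, dtype=np.float64)
weights = weights / np.sum(weights)
print(weights)  # [0.2 0.3 0.5]

size = 10
dataset_index = np.zeros(size, dtype=np.int64)
current = [0, 0, 0]
for i in range(size):
    sample_idx_double = max(i, 1.0)
    errors = [w * sample_idx_double - c for w, c in zip(weights, current)]
    pick = int(np.argmax(errors))   # same first-wins tie-breaking as the original loop
    dataset_index[i] = pick
    current[pick] += 1
print(dataset_index)  # [2 1 0 2 1 2 0 2 1 2]
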
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
#MBA and Asteroid Analysis
# The analysis is restricted to visual magnitudes less than 18 (LCAM Project standards); lines for fainter magnitudes are commented out
import numpy as np
import sys
file = open("Analysis.txt", "w")
Asteroids = np.genfromtxt("Objects_test_asteroids.txt" , comments='#', delimiter = " | ",dtype=None,invalid_raise=False,usecols=(1), encoding=None)
Class = np.genfromtxt("Objects_test_asteroids.txt" , comments='#', delimiter = " | ",dtype=None,invalid_raise=False,usecols=(4), encoding=None)
Magnitudes = np.genfromtxt("Objects_test_asteroids.txt" , comments='#', delimiter = " | ",dtype=None,invalid_raise=False,usecols=(5), encoding=None)
Unique_Asteroids , ind = np.unique(Asteroids, return_index = True)
file.write("Asteroid Analysis resulting from skybot script: " + "\n" + "\n")
file.write("Total Asteroids found: " + str(Unique_Asteroids.shape[0]) + "\n")
#This is repeated elements, ignore
All_magnitudes = Magnitudes
Magnitudes = Magnitudes[ind]
Magnitudes_indices = np.where(All_magnitudes < 18)
#Limiting our results for only magnitudes less than 18
#DELETE THIS LINE IF U WANT FULL RESULTS DISREGARDING MAGNITUDES FOR OBJECTS
Class = Class[Magnitudes_indices]
Less_than_6 = Magnitudes[Magnitudes < 6]
Less_than_7 = Magnitudes[Magnitudes < 7]
Less_than_8 = Magnitudes[Magnitudes < 8]
Less_than_9 = Magnitudes[Magnitudes < 9]
Less_than_10 = Magnitudes[Magnitudes < 10]
Less_than_11 = Magnitudes[Magnitudes < 11]
Less_than_12 = Magnitudes[Magnitudes < 12]
Less_than_13 = Magnitudes[Magnitudes < 13]
Less_than_14 = Magnitudes[Magnitudes < 14]
Less_than_15 = Magnitudes[Magnitudes < 15]
Less_than_16 = Magnitudes[Magnitudes < 16]
Less_than_17 = Magnitudes[Magnitudes < 17]
Less_than_18 = Magnitudes[Magnitudes < 18]
# Less_than_19 = Magnitudes[Magnitudes < 19]
# Less_than_20 = Magnitudes[Magnitudes < 20]
# Less_than_21 = Magnitudes[Magnitudes < 21]
# Less_than_22 = Magnitudes[Magnitudes < 22]
# Less_than_23 = Magnitudes[Magnitudes < 23]
# Less_than_24 = Magnitudes[Magnitudes < 24]
# Less_than_25 = Magnitudes[Magnitudes < 25]
# Less_than_26 = Magnitudes[Magnitudes < 26]
# Less_than_27 = Magnitudes[Magnitudes < 27]
# Less_than_28 = Magnitudes[Magnitudes < 28]
# Less_than_29 = Magnitudes[Magnitudes < 29]
# Less_than_30 = Magnitudes[Magnitudes < 30]
file.write("Asteroids with visual magnitude less than 6: " + str(Less_than_6.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 7: " + str(Less_than_7.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 8: " + str(Less_than_8.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 9: " + str(Less_than_9.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 10: " + str(Less_than_10.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 11: " + str(Less_than_11.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 12: " + str(Less_than_12.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 13: " + str(Less_than_13.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 14: " + str(Less_than_14.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 15: " + str(Less_than_15.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 16: " + str(Less_than_16.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 17: " + str(Less_than_17.shape[0]) + "\n")
file.write("Asteroids with visual magnitude less than 18: " + str(Less_than_18.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 19: " + str(Less_than_19.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 20: " + str(Less_than_20.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 21: " + str(Less_than_21.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 22: " + str(Less_than_22.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 23: " + str(Less_than_23.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 24: " + str(Less_than_24.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 25: " + str(Less_than_25.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 26: " + str(Less_than_26.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 27: " + str(Less_than_27.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 28: " + str(Less_than_28.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 29: " + str(Less_than_29.shape[0]) + "\n")
# file.write("Asteroids with visual magnitude less than 30: " + str(Less_than_30.shape[0]) + "\n")
file.write("\n" + "The average magnitude for all asteroids is: " + str(np.average(Magnitudes)) + "\n")
Main_Belt_indexer = np.char.count(Class, "MB>") != 0
Main_Belt_ocurrences = Asteroids[Main_Belt_indexer]
Main_Belt_Asteroids = np.unique(Main_Belt_ocurrences)
file.write("\n" + "There are " + str(Main_Belt_Asteroids.shape[0]) + " main belt asteroids" + "\n")
Comets = np.genfromtxt("Objects_commets.txt" , comments="#", delimiter = " | ",dtype=None,invalid_raise=False,usecols=(1), encoding=None)
Comets = np.unique(Comets)
file.write("\n" + "Over " + str(Comets.shape[0]) + " comets can be seen, these are: " + "\n")
np.savetxt(file, Comets, fmt='%s')
Near_earth_indexer = np.char.count(Class, "NEA>") != 0
Near_earth_ocurrences = Asteroids[Near_earth_indexer]
Near_earth_Asteroids = np.unique(Near_earth_ocurrences)
file.write("\n" + "There are " + str(Near_earth_Asteroids.shape[0]) + " Near earth asteroids")
file.write("\n" + "The average magnitude of these NEA's is: " + str(np.average(All_magnitudes[Near_earth_indexer])) + "\n")
Hungaria_indexer = np.char.count(Class, "Hungaria") != 0
Hungaria_ocurrences = Asteroids[Hungaria_indexer]
Hungaria_asteroids = np.unique(Hungaria_ocurrences)
file.write("\n" + "There are " + str(Hungaria_asteroids.shape[0]) + " Hungaria type asteroids")
file.write("\n" + "The average magnitude of Hungaria type asteroids is: " + str(np.average(All_magnitudes[Hungaria_indexer])) + "\n")
np.savetxt("Hungaria_asteroids.txt" , Hungaria_asteroids, fmt='%s')
Trojan_indexer = np.char.count(Class, "Trojan") != 0
Trojan_ocurrences = Asteroids[Trojan_indexer]
Trojan_asteroids = np.unique(Trojan_ocurrences)
file.write("\n" + str(Trojan_asteroids.shape[0]) + " Trojan Asteroids can be seen" + "\n")
file.write("The average magnitude of the Trojan Asteroids that can be seen is: " + str(np.average(All_magnitudes[Trojan_indexer])) + "\n")
np.savetxt("Trojan_asteroids.txt", Trojan_asteroids, fmt="%s")
Mars_Crosser_indexer = np.char.count(Class, "Mars-Crosser") != 0
Mars_Crosser_ocurrences = Asteroids[Mars_Crosser_indexer]
Mars_Crosser_asteroids = np.unique(Mars_Crosser_ocurrences)
file.write("\n" + str(Mars_Crosser_asteroids.shape[0]) + " Mars-crosser asteroids can be seen" + "\n")
file.write("The average magnitude of Mars-crosser asteroids that can be seen is: " + str(np.average(All_magnitudes[Mars_Crosser_indexer])) + "\n")
np.savetxt("Mars_crossers.txt", Mars_Crosser_asteroids, fmt="%s")
file.close()
| [
2,
44,
4339,
290,
38484,
1868,
14691,
198,
2,
23903,
389,
1760,
523,
329,
7842,
10455,
1342,
621,
1248,
357,
5639,
2390,
4935,
5423,
8,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
25064,
628,
198,
198,
7753,
796,
1280,
... | 2.812083 | 2,549 |
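
The repeated Less_than_N blocks above can be expressed as a single loop. This is only a sketch of an equivalent, shorter formulation using the same Magnitudes array and file handle, not a change to the recorded script:

for limit in range(6, 19):
    count = int((Magnitudes < limit).sum())
    file.write("Asteroids with visual magnitude less than %d: %d\n" % (limit, count))
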
# -*- coding: utf-8 -*-
"""
@package tcmi
@copyright Copyright (c) 2018+ Fritz Haber Institute of the Max Planck Society,
Benjamin Regler <regler@fhi-berlin.mpg.de>
@license See LICENSE file for details.
Licensed under the Apache License, Version 2.0 (the 'License').
You may not use this file except in compliance with the License.
"""
# Metadata
__name__ = "tcmi"
__version__ = "1.1.1"
__description__ = "A Python package for estimating mutual dependencies of multivariate continuous distributions"
__author__ = "Benjamin Regler <regler@fhi-berlin.mpg.de>"
__url__ = "https://github.com/benjaminregler/tcmi"
__email__ = "regler@fhi-berlin.mpg.de"
__copyright__ = "Copyright 2018+, Fritz Haber Institute of the Max Planck Society"
__license__ = "Apache License, Version 2.0"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
26495,
220,
220,
220,
256,
11215,
72,
198,
198,
31,
22163,
4766,
220,
15069,
357,
66,
8,
2864,
10,
45954,
19654,
263,
5136,
286,
262,
5436,
5224,
694,
... | 2.923077 | 273 |
from ibis.pyspark.client import PySparkClient
from ibis.pyspark.compiler import dialect # noqa: F401
def connect(session):
"""
    Create a `PySparkClient` for use with Ibis from an existing Spark session,
    which wraps the underlying SparkContext. See documentation for SparkContext:
    https://spark.apache.org/docs/latest/api/python/_modules/pyspark/context.html#SparkContext
"""
client = PySparkClient(session)
# Spark internally stores timestamps as UTC values, and timestamp data that
# is brought in without a specified time zone is converted as local time to
# UTC with microsecond resolution.
# https://spark.apache.org/docs/latest/sql-pyspark-pandas-with-arrow.html#timestamp-with-time-zone-semantics
client._session.conf.set('spark.sql.session.timeZone', 'UTC')
return client
| [
6738,
24283,
271,
13,
79,
893,
20928,
13,
16366,
1330,
9485,
4561,
668,
11792,
198,
6738,
24283,
271,
13,
79,
893,
20928,
13,
5589,
5329,
1330,
23637,
220,
1303,
645,
20402,
25,
376,
21844,
628,
198,
4299,
2018,
7,
29891,
2599,
198,
... | 3.093633 | 267 |
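
A brief usage sketch for connect(): build (or reuse) a SparkSession and hand it to Ibis. The app name below is arbitrary:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("ibis-pyspark-example").getOrCreate()
client = connect(spark)  # PySparkClient; the session time zone is forced to UTC as above
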
#!/usr/bin/python
""" OneLoginClient class
Copyright (c) 2017, OneLogin, Inc.
All rights reserved.
OneLoginClient class of the OneLogin's Python SDK.
"""
import datetime
from dateutil import tz
import requests
from defusedxml.lxml import fromstring
from onelogin.api.util.urlbuilder import UrlBuilder
from onelogin.api.util.constants import Constants
from onelogin.api.models.app import App
from onelogin.api.models.auth_factor import AuthFactor
from onelogin.api.models.event import Event
from onelogin.api.models.embed_app import EmbedApp
from onelogin.api.models.event_type import EventType
from onelogin.api.models.factor_enrollment_response import FactorEnrollmentResponse
from onelogin.api.models.group import Group
from onelogin.api.models.mfa import MFA
from onelogin.api.models.onelogin_token import OneLoginToken
from onelogin.api.models.otp_device import OTP_Device
from onelogin.api.models.rate_limit import RateLimit
from onelogin.api.models.role import Role
from onelogin.api.models.saml_endpoint_response import SAMLEndpointResponse
from onelogin.api.models.session_token_info import SessionTokenInfo
from onelogin.api.models.session_token_mfa_info import SessionTokenMFAInfo
from onelogin.api.models.user import User
from onelogin.api.version import __version__
class OneLoginClient(object):
'''
The OneLoginClient makes the API calls to the Onelogin's platform described
at https://developers.onelogin.com/api-docs/1/getting-started/dev-overview.
'''
client_id = None
client_secret = None
CUSTOM_USER_AGENT = "onelogin-python-sdk %s" % __version__
def __init__(self, client_id, client_secret, region='us', max_results=1000):
"""
Create a new instance of OneLoginClient.
:param path: Path where the sdk config file is located.
:type path: string
"""
self.client_id = client_id
self.client_secret = client_secret
self.max_results = max_results
self.url_builder = UrlBuilder(region)
self.user_agent = self.CUSTOM_USER_AGENT
self.access_token = self.refresh_token = self.expiration = None
self.error = None
self.error_description = None
def clean_error(self):
"""
Clean any previous error registered at the client.
"""
self.error = None
self.error_description = None
# OAuth 2.0 Tokens Methods
def get_access_token(self):
"""
Generates an access token and refresh token that you may use to
call Onelogin's API methods.
Returns the generated OAuth Token info
:return: OAuth Token info
:rtype: OneLoginToken
See https://developers.onelogin.com/api-docs/1/oauth20-tokens/generate-tokens Generate Tokens documentation.
"""
self.clean_error()
try:
url = self.get_url(Constants.TOKEN_REQUEST_URL)
data = {
'grant_type': 'client_credentials'
}
headers = self.get_authorized_headers(bearer=False)
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
token = OneLoginToken(response.json())
self.access_token = token.access_token
self.refresh_token = token.refresh_token
self.expiration = token.created_at + datetime.timedelta(seconds=token.expires_in)
return token
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def regenerate_token(self):
"""
Refreshing tokens provides a new set of access and refresh tokens.
Returns the refreshed OAuth Token info
:return: OAuth Token info
:rtype: OneLoginToken
See https://developers.onelogin.com/api-docs/1/oauth20-tokens/refresh-tokens Refresh Tokens documentation
"""
self.clean_error()
try:
url = self.get_url(Constants.TOKEN_REQUEST_URL)
headers = self.get_headers()
data = {
'grant_type': 'refresh_token',
'access_token': self.access_token,
'refresh_token': self.refresh_token
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
token = OneLoginToken(response.json())
self.access_token = token.access_token
self.refresh_token = token.refresh_token
self.expiration = token.created_at + datetime.timedelta(seconds=token.expires_in)
return token
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def revoke_token(self):
"""
Revokes an access token and refresh token pair.
See https://developers.onelogin.com/api-docs/1/oauth20-tokens/revoke-tokens Revoke Tokens documentation
"""
self.clean_error()
try:
url = self.get_url(Constants.TOKEN_REVOKE_URL)
headers = self.get_authorized_headers(bearer=False)
data = {
'access_token': self.access_token,
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
self.access_token = None
self.refresh_token = None
self.expiration = None
return True
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
return False
except Exception as e:
self.error = 500
self.error_description = e.args[0]
return False
def get_rate_limits(self):
"""
Gets current rate limit details about an access token.
Returns the rate limit info
:return: rate limit info
:rtype: RateLimit
See https://developers.onelogin.com/api-docs/1/oauth20-tokens/get-rate-limit Get Rate Limit documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_RATE_URL)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
rate_limit = RateLimit(json_data['data'])
return rate_limit
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# User Methods
def get_users(self, query_parameters=None, max_results=None):
"""
Gets a list of User resources.
:param query_parameters: Parameters to filter the result of the list
:type query_parameters: dict
:param max_results: Limit the number of users returned (optional)
:type max_results: int
Returns the list of users
:return: users list
:rtype: list[User]
See https://developers.onelogin.com/api-docs/1/users/get-users Get Users documentation
"""
self.clean_error()
self.prepare_token()
if max_results is None:
max_results = self.max_results
try:
url = self.get_url(Constants.GET_USERS_URL)
headers = self.get_authorized_headers()
users = []
response = None
after_cursor = None
while (not response) or (len(users) > max_results or after_cursor):
response = requests.get(url, headers=headers, params=query_parameters)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for user_data in json_data['data']:
if user_data and len(users) < max_results:
users.append(User(user_data))
else:
return users
after_cursor = self.get_after_cursor(response)
if after_cursor:
if not query_parameters:
query_parameters = {}
query_parameters['after_cursor'] = after_cursor
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
break
return users
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_user(self, user_id):
"""
Gets User by ID.
:param user_id: Id of the user
:type user_id: int
Returns the user identified by the id
:return: user
:rtype: User
See https://developers.onelogin.com/api-docs/1/users/get-user-by-id Get User by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_USER_URL, user_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return User(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_user_apps(self, user_id):
"""
Gets a list of apps accessible by a user, not including personal apps.
:param user_id: Id of the user
:type user_id: int
Returns the apps user identified by the id
:return: App list of the user
:rtype: list[App]
See https://developers.onelogin.com/api-docs/1/users/get-apps-for-user Get Apps for a User documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_APPS_FOR_USER_URL, user_id)
headers = self.get_authorized_headers()
apps = []
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for app_data in json_data['data']:
if app_data:
apps.append(App(app_data))
return apps
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_user_roles(self, user_id):
"""
Gets a list of role IDs that have been assigned to a user.
:param user_id: Id of the user
:type user_id: int
Returns the role ids of the user identified by the id
:return: role ids
:rtype: list[int]
See https://developers.onelogin.com/api-docs/1/users/get-roles-for-user Get Roles for a User documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_ROLES_FOR_USER_URL, user_id)
headers = self.get_authorized_headers()
role_ids = []
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
role_ids = json_data['data'][0]
return role_ids
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_custom_attributes(self):
"""
Gets a list of all custom attribute fields (also known as custom user fields) that have been defined for OL account.
Returns the custom attributes of the account
:return: custom attribute list
:rtype: list[str]
See https://developers.onelogin.com/api-docs/1/users/get-custom-attributes Get Custom Attributes documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_CUSTOM_ATTRIBUTES_URL)
headers = self.get_authorized_headers()
custom_attributes = []
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
custom_attributes = json_data['data'][0]
return custom_attributes
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def create_user(self, user_params):
"""
        Creates a user
:param user_params: User data (firstname, lastname, email, username, company,
department, directory_id, distinguished_name,
external_id, group_id, invalid_login_attempts,
locale_code, manager_ad_id, member_of,
openid_name, phone, samaccountname, title,
userprincipalname)
:type user_params: dict
Returns the created user
:return: user
:rtype: User
See https://developers.onelogin.com/api-docs/1/users/create-user Create User documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.CREATE_USER_URL)
headers = self.get_authorized_headers()
response = requests.post(url, headers=headers, json=user_params)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return User(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def update_user(self, user_id, user_params):
"""
        Updates a user
:param user_id: Id of the user
:type user_id: int
:param user_params: User data (firstname, lastname, email, username, company,
department, directory_id, distinguished_name,
external_id, group_id, invalid_login_attempts,
locale_code, manager_ad_id, member_of,
openid_name, phone, samaccountname, title,
userprincipalname)
:type user_params: dict
Returns the modified user
:return: user
:rtype: User
See https://developers.onelogin.com/api-docs/1/users/update-user Update User by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.UPDATE_USER_URL, user_id)
headers = self.get_authorized_headers()
response = requests.put(url, headers=headers, json=user_params)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return User(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def assign_role_to_user(self, user_id, role_ids):
"""
Assigns Roles to User
:param user_id: Id of the user
:type user_id: int
:param role_ids: List of role ids to be added
        :type role_ids: integer array
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/assign-role-to-user Assign Role to User documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.ADD_ROLE_TO_USER_URL, user_id)
headers = self.get_authorized_headers()
data = {
'role_id_array': role_ids
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def remove_role_from_user(self, user_id, role_ids):
"""
Remove Role from User
:param user_id: Id of the user
:type user_id: int
:param role_ids: List of role ids to be removed
:type role_ids: integer array
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/remove-role-from-user Remove Role from User documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.DELETE_ROLE_TO_USER_URL, user_id)
headers = self.get_authorized_headers()
data = {
'role_id_array': role_ids
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def set_password_using_clear_text(self, user_id, password, password_confirmation, validate_policy=False):
"""
Sets Password by ID Using Cleartext
:param user_id: Id of the user
:type user_id: int
:param password: Set to the password value using cleartext.
:type password: string
:param password_confirmation: Ensure that this value matches the password value exactly.
:type password_confirmation: string
        :param validate_policy: Defaults to false. This will validate the password against the user's OneLogin password policy.
:type validate_policy: boolean
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/set-password-in-cleartext Set Password by ID Using Cleartext documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SET_PW_CLEARTEXT, user_id)
headers = self.get_authorized_headers()
data = {
'password': password,
'password_confirmation': password_confirmation,
'validate_policy': validate_policy
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def set_password_using_hash_salt(self, user_id, password, password_confirmation, password_algorithm, password_salt=None):
"""
Set Password by ID Using Salt and SHA-256
:param user_id: Id of the user
:type user_id: int
:param password: Set to the password value using a SHA-256-encoded value.
:type password: string
:param password_confirmation: Ensure that this value matches the password value exactly.
:type password_confirmation: string
:param password_algorithm: Set to salt+sha256.
:type password_algorithm: string
:param password_salt: (Optional) To provide your own salt value.
:type password_salt: string
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/set-password-using-sha-256 Set Password by ID Using Salt and SHA-256 documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SET_PW_SALT, user_id)
headers = self.get_authorized_headers()
data = {
'password': password,
'password_confirmation': password_confirmation,
'password_algorithm': password_algorithm
}
if password_salt:
data["password_salt"] = password_salt
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def set_state_to_user(self, user_id, state):
"""
Set the State for a user.
:param user_id: Id of the user
:type user_id: int
:param state: Set to the state value. Valid values: 0-3
:type state: int
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/set-state Set User State documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SET_STATE_TO_USER_URL, user_id)
headers = self.get_authorized_headers()
data = {
'state': state
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def set_custom_attribute_to_user(self, user_id, custom_attributes):
"""
Set Custom Attribute Value
:param user_id: Id of the user
:type user_id: int
:param custom_attributes: Provide one or more key value pairs composed of the custom attribute field shortname and the value that you want to set the field to.
:type custom_attributes: dict
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/set-custom-attribute Set Custom Attribute Value documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SET_CUSTOM_ATTRIBUTE_TO_USER_URL, user_id)
headers = self.get_authorized_headers()
data = {
'custom_attributes': custom_attributes
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def log_user_out(self, user_id):
"""
Log a user out of any and all sessions.
:param user_id: Id of the user to be logged out
:type user_id: int
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/log-user-out Log User Out documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.LOG_USER_OUT_URL, user_id)
headers = self.get_authorized_headers()
response = requests.put(url, headers=headers)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def lock_user(self, user_id, minutes):
"""
Use this call to lock a user's account based on the policy assigned to
the user, for a specific time you define in the request, or until you
unlock it.
:param user_id: Id of the user to be locked.
:type user_id: int
:param minutes: Set to the number of minutes for which you want to lock the user account. (0 to delegate on policy)
:type minutes: int
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/lock-user-account Lock User Account documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.LOCK_USER_URL, user_id)
headers = self.get_authorized_headers()
data = {
'locked_until': minutes
}
response = requests.put(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def delete_user(self, user_id):
"""
        Deletes a user
        :param user_id: Id of the user to be deleted
:type user_id: int
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/users/delete-user Delete User by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.DELETE_USER_URL, user_id)
headers = self.get_authorized_headers()
response = requests.delete(url, headers=headers)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Custom Login Pages
def create_session_login_token(self, query_params, allowed_origin=''):
"""
Generates a session login token in scenarios in which MFA may or may not be required.
A session login token expires two minutes after creation.
:param query_params: Query Parameters (username_or_email, password, subdomain, return_to_url,
ip_address, browser_id)
:type query_params: dict
:param allowed_origin: Custom-Allowed-Origin-Header. Required for CORS requests only.
Set to the Origin URI from which you are allowed to send a request
using CORS.
:type allowed_origin: string
Returns a session token
:return: return the object if success
:rtype: SessionTokenInfo/SessionTokenMFAInfo
See https://developers.onelogin.com/api-docs/1/users/create-session-login-token Create Session Login Token documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SESSION_LOGIN_TOKEN_URL)
headers = self.get_authorized_headers()
if allowed_origin:
headers.update({'Custom-Allowed-Origin-Header-1': allowed_origin})
response = requests.post(url, headers=headers, json=query_params)
if response.status_code == 200:
return self.handle_session_token_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_session_token_verified(self, device_id, state_token, otp_token=None, allowed_origin=''):
"""
Verify a one-time password (OTP) value provided for multi-factor authentication (MFA).
:param device_id: Provide the MFA device_id you are submitting for verification.
:type device_id: string
:param state_token: Provide the state_token associated with the MFA device_id you are submitting for verification.
:type state_token: string
:param otp_token: Provide the OTP value for the MFA factor you are submitting for verification.
:type otp_token: string
:param allowed_origin: Required for CORS requests only. Set to the Origin URI from which you are allowed to send a request using CORS.
:type allowed_origin: string
Returns a session token
:return: return the object if success
:rtype: SessionTokenInfo
See https://developers.onelogin.com/api-docs/1/users/verify-factor Verify Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_TOKEN_VERIFY_FACTOR)
headers = self.get_authorized_headers()
if allowed_origin:
headers['Custom-Allowed-Origin-Header-1'] = allowed_origin
data = {
'device_id': str(device_id),
'state_token': state_token
}
if otp_token:
data['otp_token'] = otp_token
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_session_token_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def create_session_via_token(self, session_token):
"""
Post a session token to this API endpoint to start a session and set a cookie to log a user into an app.
:param session_token: The session token
:type session_token: string
Returns Header 'Set-Cookie' value
:return: return the 'Set-Cookie' value of the HTTP Header if any
:rtype: str
See https://developers.onelogin.com/api-docs/1/login-page/create-session-via-token Create Session Via API Token documentation
"""
self.clean_error()
url = self.get_url(Constants.SESSION_API_TOKEN_URL)
headers = {
'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent
}
data = {}
data['session_token'] = session_token
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
if 'Set-Cookie' in response.headers.keys():
return response.headers['Set-Cookie']
else:
self.error = str(response.status_code)
# Role Methods
def get_roles(self, query_parameters=None, max_results=None):
"""
Gets a list of Role resources.
:param query_parameters: Parameters to filter the result of the list
:type query_parameters: dict
:param max_results: Limit the number of roles returned (optional)
:type max_results: int
Returns the list of roles
:return: role list
:rtype: list[Role]
See https://developers.onelogin.com/api-docs/1/roles/get-roles Get Roles documentation
"""
self.clean_error()
self.prepare_token()
if max_results is None:
max_results = self.max_results
try:
url = self.get_url(Constants.GET_ROLES_URL)
headers = self.get_authorized_headers()
roles = []
response = None
after_cursor = None
while (not response) or (len(roles) > max_results or after_cursor):
response = requests.get(url, headers=headers, params=query_parameters)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for role_data in json_data['data']:
if role_data and len(roles) < max_results:
roles.append(Role(role_data))
else:
return roles
after_cursor = self.get_after_cursor(response)
if after_cursor:
if not query_parameters:
query_parameters = {}
query_parameters['after_cursor'] = after_cursor
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
break
return roles
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_role(self, role_id):
"""
Gets Role by ID.
:param role_id: Id of the Role
:type role_id: int
Returns the role identified by the id
:return: role
:rtype: Role
See https://developers.onelogin.com/api-docs/1/roles/get-role-by-id Get Role by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_ROLE_URL, role_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return Role(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Event Methods
def get_event_types(self):
"""
List of all OneLogin event types available to the Events API.
Returns the list of event type
:return: event type list
:rtype: list[EventType]
See https://developers.onelogin.com/api-docs/1/events/event-types Get Event Types documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_EVENT_TYPES_URL)
headers = self.get_authorized_headers()
event_types = []
response = None
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for event_type_data in json_data['data']:
if event_type_data:
event_types.append(EventType(event_type_data))
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
return event_types
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_events(self, query_parameters=None, max_results=None):
"""
Gets a list of Event resources.
:param query_parameters: Parameters to filter the result of the list
:type query_parameters: dict
:param max_results: Limit the number of events returned (optional)
:type max_results: int
Returns the list of events
:return: event list
:rtype: list[Event]
See https://developers.onelogin.com/api-docs/1/events/get-events Get Events documentation
"""
self.clean_error()
self.prepare_token()
if max_results is None:
max_results = self.max_results
try:
url = self.get_url(Constants.GET_EVENTS_URL)
headers = self.get_authorized_headers()
events = []
response = None
after_cursor = None
while (not response) or (len(events) > max_results or after_cursor):
response = requests.get(url, headers=headers, params=query_parameters)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for event_data in json_data['data']:
if event_data and len(events) < max_results:
events.append(Event(event_data))
else:
return events
after_cursor = self.get_after_cursor(response)
if after_cursor:
if not query_parameters:
query_parameters = {}
query_parameters['after_cursor'] = after_cursor
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
break
return events
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_event(self, event_id):
"""
Gets Event by ID.
        :param event_id: Id of the Event
        :type event_id: int
        Returns the event identified by the id
:return: event
:rtype: Event
See https://developers.onelogin.com/api-docs/1/events/get-event-by-id Get Event by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_EVENT_URL, event_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return Event(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def create_event(self, event_params):
"""
Create an event in the OneLogin event log.
:param event_params: Event data (event_type_id, account_id, actor_system,
actor_user_id, actor_user_name, app_id,
assuming_acting_user_id, custom_message,
directory_sync_run_id, group_id, group_name,
ipaddr, otp_device_id, otp_device_name,
policy_id, policy_name, role_id, role_name,
user_id, user_name)
:type event_params: dict
Returns if the action succeed
:return: true if success
:rtype: bool
See https://developers.onelogin.com/api-docs/1/events/create-event Create Event documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.CREATE_EVENT_URL)
headers = self.get_authorized_headers()
response = requests.post(url, headers=headers, json=event_params)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Group Methods
def get_groups(self, max_results=None):
"""
        Gets a list of Group resources (the number of groups returned is limited by the max_results parameter or, by default, by the client's max_results attribute).
:param max_results: Limit the number of groups returned (optional)
:type max_results: int
Returns the list of groups
:return: group list
:rtype: list[Group]
See https://developers.onelogin.com/api-docs/1/groups/get-groups Get Groups documentation
"""
self.clean_error()
self.prepare_token()
if max_results is None:
max_results = self.max_results
try:
url = self.get_url(Constants.GET_GROUPS_URL)
headers = self.get_authorized_headers()
query_parameters = {}
groups = []
response = None
after_cursor = None
while (not response) or (len(groups) > max_results or after_cursor):
response = requests.get(url, headers=headers, params=query_parameters)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for group_data in json_data['data']:
if group_data and len(groups) < max_results:
groups.append(Group(group_data))
else:
return groups
after_cursor = self.get_after_cursor(response)
if after_cursor:
query_parameters['after_cursor'] = after_cursor
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
break
return groups
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_group(self, group_id):
"""
Gets Group by ID.
        :param group_id: Id of the group
        :type group_id: int
Returns the group identified by the id
:return: group
:rtype: Group
See https://developers.onelogin.com/api-docs/1/groups/get-group-by-id Get Group by ID documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_GROUP_URL, group_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return Group(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# SAML Assertion Methods
def get_saml_assertion(self, username_or_email, password, app_id, subdomain, ip_address=None):
"""
Generates a SAML Assertion.
:param username_or_email: username or email of the OneLogin user accessing the app
:type username_or_email: string
:param password: Password of the OneLogin user accessing the app
:type password: string
:param app_id: App ID of the app for which you want to generate a SAML token
:type app_id: integer
:param subdomain: subdomain of the OneLogin account related to the user/app
:type subdomain: string
:param ip_address: whitelisted IP address that needs to be bypassed (some MFA scenarios).
:type ip_address: string
Returns a SAMLEndpointResponse object with an encoded SAMLResponse
:return: true if success
:rtype: SAMLEndpointResponse
See https://developers.onelogin.com/api-docs/1/saml-assertions/generate-saml-assertion Generate SAML Assertion documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_SAML_ASSERTION_URL)
headers = self.get_authorized_headers()
data = {
'username_or_email': username_or_email,
'password': password,
'app_id': app_id,
'subdomain': subdomain,
}
if ip_address:
data['ip_address'] = ip_address
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_saml_endpoint_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_saml_assertion_verifying(self, app_id, device_id, state_token, otp_token=None, url_endpoint=None):
"""
Verify a one-time password (OTP) value provided for a second factor when multi-factor authentication (MFA) is required for SAML authentication.
:param app_id: App ID of the app for which you want to generate a SAML token
:type app_id: integer
        :param device_id: Provide the MFA device_id you are submitting for verification.
        :type device_id: integer
:param state_token: Provide the state_token associated with the MFA device_id you are submitting for verification.
:type state_token: string
:param otp_token: Provide the OTP value for the MFA factor you are submitting for verification.
:type otp_token: string
:param url_endpoint: Specify an url where return the response.
:type url_endpoint: string
Returns a SAMLEndpointResponse object with an encoded SAMLResponse
:return: true if success
:rtype: SAMLEndpointResponse
See https://developers.onelogin.com/api-docs/1/saml-assertions/verify-factor Verify Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
if url_endpoint:
url = url_endpoint
else:
url = self.get_url(Constants.GET_SAML_VERIFY_FACTOR)
headers = self.get_authorized_headers()
data = {
'app_id': app_id,
'device_id': str(device_id),
'state_token': state_token
}
if otp_token:
data['otp_token'] = otp_token
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_saml_endpoint_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Multi-factor Auth Methods
def get_factors(self, user_id):
"""
Returns a list of authentication factors that are available for user enrollment via API.
:param user_id: Set to the id of the user.
:type user_id: integer
:return: AuthFactor list
:rtype: list[AuthFactor]
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/available-factors Get Available Authentication Factors documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_FACTORS_URL, user_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
auth_factors = []
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
for auth_factor_data in json_data['data']['auth_factors']:
if auth_factor_data:
auth_factors.append(AuthFactor(auth_factor_data))
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
return auth_factors
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def enroll_factor(self, user_id, factor_id, display_name, number):
"""
Enroll a user with a given authentication factor.
:param user_id: Set to the id of the user.
:type user_id: integer
:param factor_id: The identifier of the factor to enroll the user with.
:type factor_id: integer
:param display_name: A name for the users device.
:type display_name: string
        :param number: The phone number of the user in E.164 format.
:type number: string
:return: MFA device
:rtype: OTP_Device
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/enroll-factor Enroll an Authentication Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.ENROLL_FACTOR_URL, user_id)
headers = self.get_authorized_headers()
data = {
'factor_id': int(factor_id),
'display_name': display_name,
'number': number
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return OTP_Device(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def get_enrolled_factors(self, user_id):
"""
Return a list of authentication factors registered to a particular user for multifactor authentication (MFA)
:param user_id: Set to the id of the user.
:type user_id: integer
:return: OTP_Device list
:rtype: list[OTP_Device]
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/enrolled-factors Get Enrolled Authentication Factors documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GET_ENROLLED_FACTORS_URL, user_id)
headers = self.get_authorized_headers()
response = requests.get(url, headers=headers)
otp_devices = []
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
otp_devices_data = json_data['data'].get('otp_devices', None)
if otp_devices_data:
for otp_device_data in otp_devices_data:
otp_devices.append(OTP_Device(otp_device_data))
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
return otp_devices
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def activate_factor(self, user_id, device_id):
"""
Triggers an SMS or Push notification containing a One-Time Password (OTP)
that can be used to authenticate a user with the Verify Factor call.
:param user_id: Set to the id of the user.
:type user_id: integer
:param device_id: Set to the device_id of the MFA device.
:type device_id: integer
:return: Info with User Id, Device Id, and otp_device
:rtype: FactorEnrollmentResponse
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/activate-factor Activate an Authentication Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.ACTIVATE_FACTOR_URL, user_id, device_id)
headers = self.get_authorized_headers()
response = requests.post(url, headers=headers)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return FactorEnrollmentResponse(json_data['data'][0])
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def verify_factor(self, user_id, device_id, otp_token=None, state_token=None):
"""
Authenticates a one-time password (OTP) code provided by a multifactor authentication (MFA) device.
:param user_id: Set to the id of the user.
:type user_id: integer
:param device_id: Set to the device_id of the MFA device.
:type device_id: integer
:param otp_token: OTP code provided by the device or SMS message sent to user.
When a device like OneLogin Protect that supports Push has
been used you do not need to provide the otp_token.
:type otp_token: string
:param state_token: The state_token is returned after a successful request
to Enroll a Factor or Activate a Factor.
MUST be provided if the needs_trigger attribute from
the proceeding calls is set to true.
:type state_token: string
:return: true if action succeed
:rtype: bool
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/verify-factor Verify an Authentication Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.VERIFY_FACTOR_URL, user_id, device_id)
headers = self.get_authorized_headers()
data = {}
if otp_token:
data['otp_token'] = otp_token
if state_token:
data['state_token'] = state_token
if data:
response = requests.post(url, headers=headers, json=data)
else:
response = requests.post(url, headers=headers)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def remove_factor(self, user_id, device_id):
"""
Remove an enrolled factor from a user.
:param user_id: Set to the id of the user.
:type user_id: integer
:param device_id: The device_id of the MFA device.
:type device_id: integer
:return: true if action succeed
:rtype: bool
See https://developers.onelogin.com/api-docs/1/multi-factor-authentication/remove-factor Remove a Factor documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.DELETE_FACTOR_URL, user_id, device_id)
headers = self.get_authorized_headers()
response = requests.delete(url, headers=headers)
if response.status_code == 200:
return True
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
return False
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Invite Links Methods
def generate_invite_link(self, email):
"""
Generates an invite link for a user that you have already created in your OneLogin account.
:param email: Set to the email address of the user that you want to generate an invite link for.
:type email: string
Returns the invitation link
:return: link
:rtype: str
See https://developers.onelogin.com/api-docs/1/invite-links/generate-invite-link Generate Invite Link documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.GENERATE_INVITE_LINK_URL)
headers = self.get_authorized_headers()
data = {
'email': email
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
json_data = response.json()
if json_data and json_data.get('data', None):
return json_data['data'][0]
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
def send_invite_link(self, email, personal_email=None):
"""
Sends an invite link to a user that you have already created in your OneLogin account.
:param email: Set to the email address of the user that you want to send an invite link for.
:type email: string
:param personal_email: If you want to send the invite email to an email other than the
one provided in email, provide it here. The invite link will be
sent to this address instead.
:type personal_email: string
Returns the result of the operation
:return: True if the mail with the link was sent
:rtype: bool
See https://developers.onelogin.com/api-docs/1/invite-links/send-invite-link Send Invite Link documentation
"""
self.clean_error()
self.prepare_token()
try:
url = self.get_url(Constants.SEND_INVITE_LINK_URL)
headers = self.get_authorized_headers()
data = {
'email': email
}
if personal_email:
data['personal_email'] = personal_email
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
return self.handle_operation_response(response)
else:
self.error = str(response.status_code)
self.error_description = self.extract_error_message_from_response(response)
except Exception as e:
self.error = 500
self.error_description = e.args[0]
# Embed Apps Method
def get_embed_apps(self, token, email):
"""
Lists apps accessible by a OneLogin user.
:param token: Provide your embedding token.
:type token: string
:param email: Provide the email of the user for which you want to return a list of embeddable apps.
:type email: string
Returns the embed apps
:return: A list of Apps
:rtype: list[App]
See https://developers.onelogin.com/api-docs/1/embed-apps/get-apps-to-embed-for-a-user Get Apps to Embed for a User documentation
"""
self.clean_error()
try:
url = Constants.EMBED_APP_URL
data = {
'token': token,
'email': email
}
headers = {
'User-Agent': self.user_agent
}
response = requests.get(url, headers=headers, params=data)
if response.status_code == 200 and response.content:
return self.retrieve_apps_from_xml(response.content)
else:
self.error = str(response.status_code)
if response.content:
self.error_description = response.content
except Exception as e:
self.error = 500
self.error_description = e.args[0]
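

# --- Hedged usage sketch (not part of the SDK) ---
# Assumes `client` is an instance of the class above, constructed elsewhere with
# valid OneLogin API credentials; the ids, phone number and OTP below are
# placeholders, and the `.id` attribute on the returned MFA device object is an
# assumption not shown in this excerpt.
def example_mfa_flow(client, user_id, factor_id):
    device = client.enroll_factor(user_id, factor_id, "My Phone", "+15551234567")
    if device is None:
        return False
    device_id = device.id  # assumed attribute of the returned OTP_Device
    client.activate_factor(user_id, device_id)  # triggers the SMS/push OTP
    if client.verify_factor(user_id, device_id, otp_token="123456"):
        return True
    print(client.error, client.error_description)
    return False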
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
1881,
47790,
11792,
1398,
198,
198,
15269,
357,
66,
8,
2177,
11,
1881,
47790,
11,
3457,
13,
198,
3237,
2489,
10395,
13,
198,
198,
3198,
47790,
11792,
1398,
286,
262,
1881,
47790,
... | 2.162483 | 29,720 |
# BOJ 1074
import sys
si = sys.stdin.readline
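
# Hedged sketch of the missing divide() helper (not included in this snippet):
# the standard Z-order recursion for BOJ 1074, i.e. the number of cells visited
# before reaching (r, c) in a 2^n x 2^n grid traversed in Z order.
def divide(r, c, n):
    if n == 0:
        return 0
    half = 1 << (n - 1)
    quadrant = (r >= half) * 2 + (c >= half)
    return quadrant * half * half + divide(r % half, c % half, n - 1)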
n, r, c = map(int, si().split())
print(divide(r, c, n))
| [
2,
16494,
41,
838,
4524,
198,
11748,
25064,
198,
198,
13396,
796,
25064,
13,
19282,
259,
13,
961,
1370,
628,
198,
198,
77,
11,
374,
11,
269,
796,
3975,
7,
600,
11,
33721,
22446,
35312,
28955,
198,
4798,
7,
7146,
485,
7,
81,
11,
... | 2.163265 | 49 |
import unittest
import matplotlib.lines
from pyrolite.util.plot.style import (
marker_cycle,
linekwargs,
scatterkwargs,
patchkwargs,
_mpl_sp_kw_split,
)
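

# The original test classes are not included in this excerpt; a minimal
# placeholder suite that only checks the imported style helpers are callable.
class TestStyleHelpersImportable(unittest.TestCase):
    def test_helpers_are_callable(self):
        for helper in (marker_cycle, linekwargs, scatterkwargs,
                       patchkwargs, _mpl_sp_kw_split):
            self.assertTrue(callable(helper))
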
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
201,
198,
11748,
2603,
29487,
8019,
13,
6615,
201,
198,
201,
198,
6738,
12972,
3225,
578,
13,
22602,
13,
29487,
13,
7635,
1330,
357,
201,
198,
220,
220,
220,
18364,
62,
13696,
11,
201,
198,
220,
220,
220,
1627,... | 2.115044 | 113 |
"""
Data output to a binary file.
"""
# Local imports
import brokkr.pipeline.baseoutput
import brokkr.pipeline.utils
| [
37811,
198,
6601,
5072,
284,
257,
13934,
2393,
13,
198,
37811,
198,
198,
2,
10714,
17944,
198,
11748,
1379,
28747,
81,
13,
79,
541,
4470,
13,
8692,
22915,
198,
11748,
1379,
28747,
81,
13,
79,
541,
4470,
13,
26791,
628
] | 2.975 | 40 |
#Tool by Ghast with some changes
with open("path/file.csv", 'r') as file:
txt = "".join(file.readlines())
print("([ " + "\n ".join(["[{}],".format(t) for t in txt.split("\n") if t])[:-1] + " ])")
| [
2,
25391,
416,
11972,
459,
351,
617,
2458,
198,
198,
4480,
1280,
7203,
6978,
14,
7753,
13,
40664,
1600,
705,
81,
11537,
355,
2393,
25,
198,
220,
220,
220,
256,
742,
796,
366,
1911,
22179,
7,
7753,
13,
961,
6615,
28955,
198,
220,
2... | 2.3 | 90 |
from rest_framework.viewsets import ViewSetMixin
from pandas_drf_tools import generics, mixins
class GenericDataFrameViewSet(ViewSetMixin, generics.GenericDataFrameAPIView):
"""
The GenericDataFrameViewSet class does not provide any actions by default,
but does include the base set of generic view behavior, such as
the `get_object` and `get_dataframe` methods.
"""
pass
class ReadOnlyDataFrameViewSet(mixins.RetrieveDataFrameMixin,
mixins.ListDataFrameMixin,
GenericDataFrameViewSet):
"""
A viewset that provides default `list()` and `retrieve()` actions.
"""
pass
class DataFrameViewSet(mixins.CreateDataFrameMixin,
mixins.RetrieveDataFrameMixin,
mixins.UpdateDataFrameMixin,
mixins.DestroyDataFrameMixin,
mixins.ListDataFrameMixin,
GenericDataFrameViewSet):
"""
A viewset that provides default `create()`, `retrieve()`, `update()`,
`partial_update()`, `destroy()` and `list()` actions.
"""
pass
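

# --- Hedged usage sketch (not part of the library) ---
# A hypothetical concrete viewset backed by a module-level DataFrame; it relies
# on the `get_dataframe` hook mentioned in the GenericDataFrameViewSet docstring.
# A real project would also configure a serializer and URL routing.
import pandas as pd

SAMPLE_DF = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})


class SampleDataFrameViewSet(ReadOnlyDataFrameViewSet):
    """Read-only list/retrieve endpoints served from SAMPLE_DF."""

    def get_dataframe(self):
        return SAMPLE_DF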
| [
6738,
1334,
62,
30604,
13,
1177,
28709,
1330,
3582,
7248,
35608,
259,
198,
198,
6738,
19798,
292,
62,
7109,
69,
62,
31391,
1330,
1152,
873,
11,
5022,
1040,
628,
198,
4871,
42044,
6601,
19778,
7680,
7248,
7,
7680,
7248,
35608,
259,
11,... | 2.327902 | 491 |
import cgi
import time
import threading
lock = threading.Lock()
connections = set()
next_test_id = 0
def new_test(request):
"""Allocate a unique test id."""
global lock, next_test_id
with lock:
request.response = str(next_test_id)
next_test_id += 1
def do_test(request, params):
"""Check that no other connection is happening at the same time."""
global lock, connections
id = params["id"][0]
with lock:
if id in connections:
request.response = "FAIL"
return
connections.add(id)
time.sleep(0.05)
with lock:
connections.remove(id)
request.response = "PASS"
| [
11748,
269,
12397,
198,
11748,
640,
198,
11748,
4704,
278,
628,
198,
5354,
796,
4704,
278,
13,
25392,
3419,
198,
8443,
507,
796,
900,
3419,
198,
19545,
62,
9288,
62,
312,
796,
657,
628,
198,
198,
4299,
649,
62,
9288,
7,
25927,
2599,... | 2.455882 | 272 |
#IJPOpen IssueTitles by issue number (used to sort IJPOpen by issuetitle)
IJPOPENISSUES = {
"Announcement": 1,
"Annuals Report": 2,
"Book Review Essay": 3,
"Book Reviews": 4,
"Book and Journal Reviews": 5,
"Child and Adolescent Psychoanalysis": 6,
"Clinical Communications": 7,
"Correction Note":9,
"Editorial":10,
"Education Section":11,
"Educational and Professional Issues": 12,
"Film Essay": 14,
"History of Psychoanalysis": 15,
"IPA Congress": 17,
"IPA Major Panel Paper": 18,
"IPA Special Issue": 19,
"Interdisciplinary Studies": 20,
"Key Papers": 22,
"Letters From...": 23,
"Letters to the Editor": 24,
"Obituaries": 25,
"Original Articles": 26,
"Psychoanalytic Controversies": 27,
"Psychoanalytic Theory & Technique": 28,
"Psychoanalytic Theory and Technique": 28,
"Reports of Panels": 30,
"Research": 31,
"The Analyst at Work": 33,
}
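
# Hedged usage sketch: sort a hypothetical list of IJPOpen issue titles by the
# numbers above, pushing titles missing from the mapping to the end.
sample_titles = ["Research", "Editorial", "Obituaries", "Unknown Section"]
sample_titles.sort(key=lambda title: IJPOPENISSUES.get(title, 999))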
| [
198,
2,
40,
12889,
11505,
18232,
51,
30540,
416,
2071,
1271,
357,
1484,
284,
3297,
314,
12889,
11505,
416,
25731,
316,
2578,
8,
198,
40,
12889,
3185,
1677,
1797,
12564,
1546,
796,
1391,
198,
220,
220,
220,
366,
18858,
8652,
434,
1298,... | 2.58871 | 372 |
import pytest
from abra import Experiment, HypothesisTest
from abra.inference.bayesian import get_stan_model_code
from abra.inference.bayesian.models.binary import beta_binomial, binomial, bernoulli
from abra.inference.bayesian.models.continuous import gaussian, exp_student_t
from abra.inference.bayesian.models.counts import gamma_poisson
def test_binary_model_specs():
"Can we better test compilation here?"
assert isinstance(binomial(), str)
assert isinstance(bernoulli(), str)
assert binomial() == get_stan_model_code('binomial')
assert beta_binomial() == binomial()
def test_continuous_model_specs():
"Can we better test compilation here?"
assert isinstance(gaussian(), str)
assert gaussian() == get_stan_model_code('gaussian')
assert isinstance(exp_student_t(), str)
assert exp_student_t() == get_stan_model_code('exp_student_t')
def test_counts_model_specs():
"Can we better test compilation here?"
assert isinstance(gamma_poisson(), str)
assert gamma_poisson() == get_stan_model_code('gamma_poisson')
@pytest.mark.stan_test
@pytest.mark.stan_test
@pytest.mark.stan_test
@pytest.mark.stan_test
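# The Stan-marked test functions that these stacked decorators belonged to are
# not included in this excerpt; a minimal hypothetical placeholder keeps the
# markers attached to a def so the module still parses, and re-checks that the
# model codes above are retrievable as strings.
def test_stan_model_codes_available():
    for spec in (binomial(), bernoulli(), gaussian(), exp_student_t(), gamma_poisson()):
        assert isinstance(spec, str)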
| [
11748,
12972,
9288,
198,
6738,
450,
430,
1330,
29544,
11,
21209,
313,
8497,
14402,
198,
6738,
450,
430,
13,
259,
4288,
13,
24406,
35610,
1330,
651,
62,
14192,
62,
19849,
62,
8189,
198,
6738,
450,
430,
13,
259,
4288,
13,
24406,
35610,
... | 2.927318 | 399 |
from django.urls import path
from itembase.core.views.staffing_views import ProjectManagerListView, TeamLeadListView, TeamListView, \
TeamMemberCreateView, TeamMemberClientCreateView, TeamMemberDetailView, TeamMemberUpdateView
app_name = "staff"
urlpatterns = [
# Client URL Patterns
path('', TeamListView.as_view(), name='team-list'),
path('pm/', ProjectManagerListView.as_view(), name='pm-list'),
path('tl/', TeamLeadListView.as_view(), name='teamlead-list'),
path('newtm/', TeamMemberCreateView.as_view(), name='team-new'),
path('newtm/<slug:slug>/', TeamMemberClientCreateView.as_view(), name='client-team-new'),
path('<int:pk>/', TeamMemberDetailView.as_view(), name='team-view'),
path('edittm/', TeamMemberUpdateView.as_view(), name='team-edit'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
2378,
8692,
13,
7295,
13,
33571,
13,
28120,
278,
62,
33571,
1330,
4935,
13511,
8053,
7680,
11,
4816,
20451,
8053,
7680,
11,
4816,
8053,
7680,
11,
3467,
198,
220,
220,
220,
... | 2.870036 | 277 |
#!/usr/bin/python
import sys
import os
import re
import subprocess
if len(sys.argv) < 3:
print("provide problem name and number of nodes")
exit(1)
problemName = sys.argv[1]
nodeCount = sys.argv[2]
for file in os.listdir('.'):
    if not re.match(problemName + r'.+\.h$', file):
# print(file, "X")
continue
name = file.replace('.h', '')
print(name)
subprocess.run(["cp", file, problemName + ".h"], stdout=subprocess.PIPE)
command = ["C:\\Program Files\\Git\\bin\\bash.exe", '-c',
' '.join(['/c/Users/mgoncharov/etc/contests/scripts/dcj/dcj.sh',
'test',
'--source', problemName + '.cpp',
'--nodes', nodeCount,
'--output all'])]
# print(command)
out = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if (out.returncode != 0):
print("FAILED")
print(out.stdout.decode('utf-8'))
print(out.stderr.decode('utf-8'))
continue
with open(name + ".output", 'wb') as actual:
actual.write(out.stdout)
diff = subprocess.run(["diff", name + ".out", name + '.output'], stdout=subprocess.PIPE)
if (diff.returncode == 0):
print("PASSED\n{}".format(out.stderr.decode('utf-8')))
continue
print(diff.stdout.decode('utf-8'))
print(out.stderr.decode('utf-8'))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
513,
25,
198,
220,
220,
220,
3601,
7203,
15234,
485,
1917,
1438,
2... | 1.987722 | 733 |
import os
import logging
from flask import Flask,request,render_template
from flask_cache import Cache
from flask_compress import Compress
from lib import gscrape
from time import time
chap = "1blZPkNxrw3ovfLvqD76eUzTuME34N2bl1fSTdaMjX2c"
marq = "1wFK2aPJmXjAQAx3rT2vF7H-8NvveWQ7tuXdEoemhZ1M"
board = "1JbaMwsPHbN3og_igSOJfJEzf2IJimAIqzrbf_ra6Pp0"
osuexec = "1RVdYx88BnZ7IEWP1Kidk00-K80V96cCWOMCIrqfpw9U"
uclaexec = "11GzqdZtLeu2tlfwQ4Mk0sy387Hh9H2l_fDg6-at_EG0"
logging.basicConfig(filename='log/app.log',format="%(asctime)s, log_level=%(levelname)s, %(message)s",level=logging.DEBUG)
logger_app = logging.getLogger("app")
logger_perf = logging.getLogger("app.perf")
logger_perf.setLevel(logging.ERROR)
app = Flask(__name__)
app.config['CACHE_TYPE'] = 'simple'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 300
app.cache=Cache(app)
Compress(app)
@app.cache.cached(key_prefix="marquee")
@app.cache.cached(key_prefix="appfeed")
@app.cache.cached(key_prefix="board")
@app.cache.cached(key_prefix="memberfeed")
@app.route('/')
@app.route('/about')
@app.route('/projects')
@app.route('/chapters')
@app.route('/donate')
@app.route('/locations')
@app.route('/privacy-policy')
@app.errorhandler(404)
@app.errorhandler(500)
if __name__ == "__main__":
app.run(debug=False)
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
42903,
1330,
46947,
11,
25927,
11,
13287,
62,
28243,
198,
6738,
42903,
62,
23870,
1330,
34088,
198,
6738,
42903,
62,
5589,
601,
1330,
3082,
601,
198,
6738,
9195,
1330,
308,
1416,
13484,
198,
6... | 2.152941 | 595 |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from rapids_triton import Client
from rapids_triton.testing import array_close
@pytest.fixture
# TODO(template): Add data generation/retrieval for model tests
@pytest.fixture
# TODO(template): Compute size (in bytes) of outputs and return as
# dictionary mapping output names to sizes
#TODO(template): Return ground truth expected for given inputs
#TODO(template): Provide names of each model to test
@pytest.mark.parametrize(
"model_name", ['REPLACE_ME']
)
| [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.41358 | 324 |
import unittest
from decode import hex_to_dec, get_bool, hex_to_binary_int_string, hex_to_binary_string_list, invert_string, get_data_status
from decode import format_hex, bin_to_dec, binary_to_on_off, get_digital_io_status, get_alarm, get_reverse
from decode import get_battery, hex_to_float, make_decimal
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
198,
6738,
36899,
1330,
17910,
62,
1462,
62,
12501,
11,
651,
62,
30388,
11,
17910,
62,
1462,
62,
39491,
62,
600,
62,
8841,
11,
17910,
62,
1462,
62,
39491,
62,
8841,
62,
4868,
11,
287,
1851,
62,
8841,
11,... | 2.240437 | 183 |
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import os
email = 'xxxx@xxxx.com'
password = 'xxxx'
send_to_emails = ['xxxx@xxxx.com', 'xxxx@xxxx.com'] # List of email addresses
subject = 'xxxx'
message = 'xxxx'
file_location = 'xxxx'
while True:
filename = None
for files in os.listdir(file_location):
if os.path.isfile(os.path.join(file_location, files)):
filename = files
    if filename is not None:
attachment = open(file_location+filename, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(email, password)
for send_to_email in send_to_emails:
msg = MIMEMultipart()
msg['From'] = email
msg['To'] = send_to_email
msg['Subject'] = subject
msg.attach(MIMEText(message, 'plain'))
msg.attach(part)
server.sendmail(email, send_to_email, msg.as_string())
os.remove(file_location+filename)
server.quit()
| [
11748,
895,
83,
489,
571,
198,
6738,
3053,
13,
76,
524,
13,
5239,
1330,
337,
3955,
2767,
2302,
198,
6738,
3053,
13,
76,
524,
13,
16680,
541,
433,
1330,
337,
3955,
3620,
586,
541,
433,
198,
6738,
3053,
13,
76,
524,
13,
8692,
1330,
... | 2.292517 | 588 |
#! /usr/bin/env python
from django.core.management.base import BaseCommand
from face_manager import models as face_models
from facenet_pytorch import MTCNN, InceptionResnetV1
from filepopulator import models as file_models
from io import BytesIO
from PIL import Image, ExifTags
import cv2
import matplotlib.pyplot as plt
import numpy as np
import PIL
import time
import torch
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
1986,
62,
37153,
1330,
4981,
355,
1986,
62,
27530,
198,
6738,
1777,
268,
316,
62,
9078,
13165,
... | 3.267241 | 116 |
# encoding=utf-8
from __future__ import unicode_literals, print_function
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
| [
2,
21004,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
198,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
... | 3.557143 | 70 |
import tensorflow as tf
import tensorflow.keras.layers as KL
from utils import conv_block
def FCOS(cfg, backbone=None, top_layer=None):
"""return FCOS as keras model
Arguments:
        cfg: config object exposing DATA.NUM_CLASSES
        backbone: keras model whose outputs are the FPN feature maps
        top_layer: optional layer applied to the regression features (currently unused, see TODO below)
Returns:
keras model of FCOS
"""
# init backbone
# TODO
inputs = backbone.outputs
    top_layer = None  # TODO: build the shared top layer; the argument passed in is currently ignored
outputs = fcos_head_graph(inputs, cfg, top_layer)
model = tf.keras.Model(inputs, outputs, name='fcos')
return model
def fcos_head_graph(inputs, cfg, top_layer=None):
"""return FCOS graph
Arguments:
        inputs: list of FPN feature-map tensors
        cfg: config object exposing DATA.NUM_CLASSES
        top_layer: optional layer applied to the regression features
Returns:
output tensors of FCOS
"""
# FCOS Head
cls_logits = []
reg_logits = []
ctr_logits = []
top_feats = []
input_head1 = KL.Input(shape=[None, None, 256], name='cls_head')
x = input_head1
for _ in range(4):
x = conv_block(x, filters=256)
_cls_logits = KL.Conv2D(cfg.DATA.NUM_CLASSES, kernel_size=3, strides=1, padding="same", name="cls_logits")(x)
_ctr_logits = KL.Conv2D(1, kernel_size=3, strides=1, padding="same", name="ctr_logits")(x)
_cls_logits = KL.Reshape(target_shape=[-1, cfg.DATA.NUM_CLASSES], name="cls_logits_reshape")(_cls_logits)
_ctr_logits = KL.Reshape(target_shape=[-1, 1], name="ctr_logits_reshape")(_ctr_logits)
cls_head = tf.keras.Model(inputs=[input_head1], outputs=[_cls_logits, _ctr_logits], name='cls_head')
input_head2 = KL.Input(shape=[None, None, 256], name='reg_head')
x = input_head2
for _ in range(4):
x = conv_block(x, filters=256)
_reg_logits = KL.Conv2D(4, kernel_size=3, strides=1, padding="same", name="reg_logits")(x)
reg_head = tf.keras.Model(inputs=[input_head2], outputs=[_reg_logits], name='reg_head')
for feature in inputs:
cls_feature = cls_head(feature)
reg_feature = reg_head(feature)
cls_logits.append(cls_feature[0])
ctr_logits.append(cls_feature[1])
reg_logits.append(KL.Reshape(target_shape=[-1, 4])(reg_feature))
if top_layer is not None:
top_feat = top_layer(reg_feature)
top_feat = KL.Reshape([-1, 784])(top_feat)
top_feats.append(top_feat)
# cls_logits = KL.Concatenate(axis=1)(cls_logits)
# reg_logits = KL.Concatenate(axis=1)(reg_logits)
# ctr_logits = KL.Concatenate(axis=1)(ctr_logits)
# top_feats = KL.Concatenate(axis=1)(top_feats)
return cls_logits, reg_logits, ctr_logits, top_feats
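

# --- Hedged usage sketch ---
# Assumes `utils.conv_block` is importable and that cfg exposes DATA.NUM_CLASSES;
# the Input tensors below stand in for FPN feature maps (e.g. P3-P7).
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(DATA=SimpleNamespace(NUM_CLASSES=80))
    pyramid = [KL.Input(shape=[None, None, 256]) for _ in range(5)]
    cls_out, reg_out, ctr_out, top_out = fcos_head_graph(pyramid, cfg)
    print(len(cls_out), len(reg_out), len(ctr_out))  # one output per pyramid level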
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
355,
48253,
198,
198,
6738,
3384,
4487,
1330,
3063,
62,
9967,
628,
198,
4299,
10029,
2640,
7,
37581,
11,
32774,
28,
14202,
11,
1353,
62,... | 2.158824 | 1,190 |
standard_tickets = 0
student_tickets = 0
kid_tickets = 0
finished = False
while not finished:
movie = input()
seats = int(input())
capacity = seats
while True:
ticket = input()
if ticket == 'Finish' or ticket == 'End':
if ticket == 'Finish':
finished = True
cap_percentage = 100 * seats / capacity
print(f'{movie} - {100 - cap_percentage:.2f}% full.')
break
if ticket == 'standard':
standard_tickets += 1
elif ticket == 'student':
student_tickets += 1
else:
kid_tickets += 1
seats -= 1
if seats == 0:
print(f'{movie} - 100% full.')
finished = True
break
total_tickets = student_tickets + standard_tickets + kid_tickets
print(f'Total tickets: {total_tickets}')
print(f'{(student_tickets / total_tickets * 100):.2f}% student tickets.')
print(f'{(standard_tickets / total_tickets * 100):.2f}% standard tickets.')
print(f'{(kid_tickets / total_tickets * 100):.2f}% kids tickets.')
| [
20307,
62,
83,
15970,
796,
657,
201,
198,
50139,
62,
83,
15970,
796,
657,
201,
198,
38439,
62,
83,
15970,
796,
657,
201,
198,
201,
198,
43952,
796,
10352,
201,
198,
201,
198,
4514,
407,
5201,
25,
201,
198,
201,
198,
220,
220,
220,... | 2.097967 | 541 |
import abc
import copy
import csv
import json
import multiprocessing
import os
import time
from octue.cloud import storage
from octue.cloud.storage.client import GoogleCloudStorageClient
from octue.utils.persistence import calculate_disk_usage, get_oldest_file_in_directory
logger = multiprocessing.get_logger()
DEFAULT_OUTPUT_DIRECTORY = "data_gateway"
class NoOperationContextManager:
"""A no-operation context manager that can be used to fill in for cases where the context-managed object is not
needed but the context-managed block is.
:return None:
"""
def force_persist(self):
"""Do nothing.
:return None:
"""
pass
class TimeBatcher:
"""A batcher that groups the given data into time windows.
:param iter(str) sensor_names: names of sensors to group data for
:param float window_size: length of time window in seconds
:param str output_directory: directory to write windows to
:return None:
"""
_file_prefix = "window"
def add_to_current_window(self, sensor_name, data):
"""Add data to the current window for the given sensor name.
:param str sensor_name: name of sensor
:param iter data: data to add to window
:return None:
"""
# Finalise the window and persist it if enough time has elapsed.
if time.perf_counter() - self._start_time >= self.window_size:
self.finalise_current_window()
self._persist_window()
self._prepare_for_next_window()
# Then add data to the current/new window.
self.current_window["sensor_data"][sensor_name].append(data)
def finalise_current_window(self):
"""Finalise the current window for the given sensor name. This puts the current window into the queue of ready
windows, resets the clock for the next window, and increases the window number for it.
:return None:
"""
for sensor_name, data in self.current_window["sensor_data"].items():
if data:
self.ready_window["sensor_data"][sensor_name] = copy.deepcopy(data)
data.clear()
def force_persist(self):
"""Persist all current windows, regardless of whether a complete time window has passed.
:return None:
"""
self.finalise_current_window()
self._persist_window()
self._prepare_for_next_window()
@abc.abstractmethod
def _persist_window(self):
"""Persist the window to whatever medium is required (e.g. to disk, to a database, or to the cloud).
:return None:
"""
pass
def _prepare_for_next_window(self):
"""Prepare the batcher for the next window.
:return None:
"""
self._window_number += 1
self.ready_window["sensor_data"] = {}
self._start_time = time.perf_counter()
@abc.abstractmethod
def _generate_window_path(self):
"""Generate the path that the window should be persisted to. This should start by joining the output directory
and the session subdirectory.
:return str:
"""
pass
class BatchingFileWriter(TimeBatcher):
"""A file writer that groups the given into time windows, saving each window to disk.
:param iter(str) sensor_names: names of sensors to make windows for
:param float window_size: length of time window in seconds
:param str output_directory: directory to write windows to
:param int storage_limit: storage limit in bytes (default is 1 GB)
:return None:
"""
def _persist_window(self, window=None):
"""Write a window of serialised data to disk, deleting the oldest window first if the storage limit has been
reached.
:param dict|None window: window to persist
:return None:
"""
self._manage_storage()
window = window or self.ready_window
window_path = self._generate_window_path()
with open(window_path, "w") as f:
json.dump(window, f)
logger.info("%s %d written to disk.", self._file_prefix.capitalize(), self._window_number)
if self._save_csv_files:
for sensor in window["sensor_data"]:
csv_path = os.path.join(os.path.dirname(window_path), f"{sensor}.csv")
logger.info("Saving %s data to csv file.", sensor)
with open(csv_path, "w", newline="") as f:
writer = csv.writer(f, delimiter=",")
for row in self.ready_window["sensor_data"][sensor]:
writer.writerow(row)
def _manage_storage(self):
"""Check if the output directory has reached its storage limit and, if it has, delete the oldest window.
:return None:
"""
filter = lambda path: os.path.split(path)[-1].startswith("window") # noqa
storage_limit_in_mb = self.storage_limit / 1024 ** 2
if calculate_disk_usage(self.output_directory, filter) >= self.storage_limit:
oldest_window = get_oldest_file_in_directory(self.output_directory, filter)
logger.warning(
"Storage limit reached (%s MB) - deleting oldest window (%r).",
storage_limit_in_mb,
oldest_window,
)
os.remove(oldest_window)
elif calculate_disk_usage(self.output_directory, filter) >= 0.9 * self.storage_limit:
logger.warning("90% of storage limit reached - %s MB remaining.", 0.1 * storage_limit_in_mb)
def _generate_window_path(self):
"""Generate the path that the window should be persisted to.
:return str:
"""
filename = f"{self._file_prefix}-{self._window_number}.json"
return os.path.join(self.output_directory, filename)
class BatchingUploader(TimeBatcher):
"""A Google Cloud Storage uploader that will group the given data into time windows and upload it to a Google Cloud
Storage. If upload fails for a window, it will be written to the backup directory. If the `upload_backup_files`
flag is `True`, its upload will then be reattempted after the upload of each subsequent window.
:param iter(str) sensor_names: names of sensors to group data for
:param str bucket_name: name of Google Cloud bucket to upload to
:param float window_size: length of time window in seconds
:param str output_directory: directory to write windows to
:param float upload_timeout: time after which to give up trying to upload to the cloud
:param bool upload_backup_files: attempt to upload backed-up windows on next window upload
:return None:
"""
def _persist_window(self):
"""Upload a window to Google Cloud storage. If the window fails to upload, it is instead written to disk.
:return None:
"""
try:
self.client.upload_from_string(
string=json.dumps(self.ready_window),
cloud_path=storage.path.generate_gs_path(self.bucket_name, self._generate_window_path()),
metadata=self.metadata,
timeout=self.upload_timeout,
)
except Exception as e:
logger.exception(e)
logger.warning(
"Upload of window may have failed - writing to disk at %r.",
self._backup_writer._generate_window_path(),
)
self._backup_writer._persist_window(window=self.ready_window)
return
logger.info("%s %d uploaded to cloud.", self._file_prefix.capitalize(), self._window_number)
if self.upload_backup_files:
self._attempt_to_upload_backup_files()
def _generate_window_path(self):
"""Generate the path that the window should be persisted to.
:return str:
"""
filename = f"{self._file_prefix}-{self._window_number}.json"
return storage.path.join(self.output_directory, filename)
def _attempt_to_upload_backup_files(self):
"""Check for backup files and attempt to upload them to cloud storage again.
:return None:
"""
for filename in os.listdir(self._backup_directory):
if not filename.startswith(self._file_prefix):
continue
local_path = os.path.join(self._backup_directory, filename)
path_in_bucket = storage.path.join(self.output_directory, filename)
try:
self.client.upload_file(
local_path=local_path,
cloud_path=storage.path.generate_gs_path(self.bucket_name, path_in_bucket),
timeout=self.upload_timeout,
metadata=self.metadata,
)
except Exception:
return
os.remove(local_path)
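

# --- Hedged usage sketch (not part of the module) ---
# Assumes the constructors, which are not shown in this excerpt, accept the
# parameters documented in the class docstrings; the sensor names are placeholders.
if __name__ == "__main__":
    writer = BatchingFileWriter(
        sensor_names=["Mics", "Baros_P"],
        window_size=60,
        output_directory=DEFAULT_OUTPUT_DIRECTORY,
    )
    writer.add_to_current_window("Mics", [0.0, 0.1, 0.2])
    writer.force_persist()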
| [
11748,
450,
66,
198,
11748,
4866,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
11748,
640,
198,
198,
6738,
19318,
518,
13,
17721,
1330,
6143,
198,
6738,
19318,
518,
13,
17721,
13,
... | 2.496194 | 3,547 |
import unittest
import sys
import numpy as np
import src.utils.cifar_10_data as cifar_10
| [
11748,
555,
715,
395,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
12351,
13,
26791,
13,
66,
361,
283,
62,
940,
62,
7890,
355,
269,
361,
283,
62,
940,
628
] | 2.676471 | 34 |
import qiskit as qk
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit import execute, Aer
from qiskit import IBMQ
from qiskit.visualization import plot_histogram
import matplotlib
secret_unitary = "hz"
# simple function that applies a series of unitary gates from a given string
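# Hedged sketch of the helper described above (its definition is not included in
# this snippet). Assumes the secret unitary is a string of single-qubit gate
# names; dagger=1 applies the adjoint, i.e. inverse gates in reverse order.
def apply_secret_unitary(secret_unitary, qubit, circuit, dagger):
    gate_map = {'h': circuit.h, 'x': circuit.x, 'y': circuit.y, 'z': circuit.z,
                't': circuit.tdg if dagger else circuit.t}
    order = reversed(secret_unitary) if dagger else secret_unitary
    for gate in order:
        gate_map[gate](qubit)
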
# Create the quantum circuit with 3 qubits and 3 classical bits
qc = QuantumCircuit(3, 3)
print("Original Circuit:")
print(qc)
''' Qubit ordering as follows (classical registers will just contain measured values of the corresponding qubits):
q[0]: qubit to be teleported (Alice's first qubit. It was given to her after the application of a secret unitary
which she doesn't know)
q[1]: Alice's second qubit
q[2]: Bob's qubit, which will be the destination for the teleportation
'''
# Apply the secret unitary that we are using to generate the state to teleport. You can change it to any unitary
apply_secret_unitary(secret_unitary, qc.qubits[0], qc, dagger = 0)
qc.barrier()
# Next, generate the entangled pair between Alice and Bob (Remember: Hadamard followed by CX generates a Bell pair)
qc.h(1)
qc.cx(1, 2)
qc.barrier()
print("Bell circuit:")
print(qc)
# Next, apply the teleportation protocol.
qc.cx(0, 1)
qc.h(0)
qc.measure(0, 0)
qc.measure(1, 1)
qc.cx(1, 2)
qc.cz(0, 2)
qc.barrier()
print("Teleportation protocol:")
print(qc)
'''
In principle, if the teleportation protocol worked, we have q[2] = secret_unitary|0>
As a result, we should be able to recover q[2] = |0> by applying the reverse of secret_unitary
since for a unitary u, u^dagger u = I.
'''
apply_secret_unitary(secret_unitary, qc.qubits[2], qc, dagger=1)
qc.measure(2, 2)
print("Final Circuit:")
print(qc)
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(qc, backend, shots=1024)
sim_result = job_sim.result()
measurement_result = sim_result.get_counts(qc)
print(measurement_result)
plot_histogram(measurement_result)
| [
11748,
10662,
1984,
270,
355,
10662,
74,
198,
6738,
10662,
1984,
270,
1330,
43680,
38804,
11,
29082,
38804,
11,
29082,
31560,
5013,
198,
6738,
10662,
1984,
270,
1330,
12260,
11,
15781,
198,
6738,
10662,
1984,
270,
1330,
19764,
48,
198,
... | 2.922727 | 660 |
import os
import numpy as np
import tensorflow as tf
import uncertainty_wizard as uwiz
from emp_uncertainty.case_studies.case_study import BASE_MODEL_SAVE_FOLDER, BASE_OUTPUTS_SAVE_FOLDER
from emp_uncertainty.case_studies.plain_classifiers import cifar10
from emp_uncertainty.dropout_rate.utils import rate_from_model_id, pred_identity, save_quantifications, \
CpuOnlyContext
MODEL_FOLDER = f"{BASE_MODEL_SAVE_FOLDER}/dropout-experiments/cifar"
OUTPUTS_FOLDER = f"{BASE_OUTPUTS_SAVE_FOLDER}/dropout-experiments/cifar"
OOD_SEVERITY = 3
if __name__ == '__main__':
ensemble = uwiz.models.LazyEnsemble(num_models=90,
model_save_path=MODEL_FOLDER,
delete_existing=False,
expect_model=False,
default_num_processes=3)
ensemble.create(create_stochastic_model)
ensemble.consume(run_benchmark)
ensemble.run_model_free(quantify, context=CpuOnlyContext, num_processes=15)
| [
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
13479,
62,
86,
8669,
355,
334,
86,
528,
198,
198,
6738,
795,
79,
62,
19524,
1425,
774,
13,
7442,
62,
19149,
444,
13,
7442,
... | 2.052838 | 511 |
"""
Copyright (c) 2021 Aiven Ltd
See LICENSE for details
"""
from astacus.client import create_client_parsers
from astacus.common.ipc import Plugin
from astacus.common.rohmustorage import (
RohmuCompression, RohmuCompressionType, RohmuConfig, RohmuEncryptionKey, RohmuLocalStorageConfig, RohmuStorageType
)
from astacus.config import GlobalConfig, UvicornConfig
from astacus.coordinator.config import CoordinatorConfig, CoordinatorNode
from astacus.coordinator.plugins import ClickHousePlugin
from astacus.coordinator.plugins.clickhouse.client import HttpClickHouseClient
from astacus.coordinator.plugins.clickhouse.config import (
ClickHouseConfiguration, ClickHouseNode, ZooKeeperConfiguration, ZooKeeperNode
)
from astacus.node.config import NodeConfig
from pathlib import Path
from tests.system.conftest import background_process, wait_url_up
from tests.utils import CONSTANT_TEST_RSA_PRIVATE_KEY, CONSTANT_TEST_RSA_PUBLIC_KEY
from typing import AsyncIterator, Awaitable, Dict, Iterator, List, Optional, Union
import argparse
import asyncio
import contextlib
import dataclasses
import logging
import pytest
import sys
import tempfile
logger = logging.getLogger(__name__)
pytestmark = [pytest.mark.clickhouse]
USER_CONFIG = """
<yandex>
<users>
<default>
<password>secret</password>
<access_management>true</access_management>
</default>
</users>
<profiles>
<default>
<allow_experimental_database_replicated>true</allow_experimental_database_replicated>
</default>
</profiles>
<quotas><default></default></quotas>
</yandex>
"""
@pytest.fixture(scope="module", name="event_loop")
@dataclasses.dataclass
@dataclasses.dataclass
@pytest.fixture(scope="session", name="ports")
@pytest.fixture(scope="module", name="clickhouse")
@contextlib.asynccontextmanager
@pytest.fixture(scope="module", name="zookeeper")
@contextlib.asynccontextmanager
@contextlib.asynccontextmanager
@contextlib.asynccontextmanager
@contextlib.asynccontextmanager
@contextlib.asynccontextmanager
| [
37811,
198,
15269,
357,
66,
8,
33448,
317,
1469,
12052,
198,
6214,
38559,
24290,
329,
3307,
198,
37811,
198,
6738,
6468,
48628,
13,
16366,
1330,
2251,
62,
16366,
62,
79,
945,
364,
198,
6738,
6468,
48628,
13,
11321,
13,
541,
66,
1330,
... | 2.818301 | 765 |
# -*- coding: utf-8; mode: Python; -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from unittest import TestCase
from unittest.mock import mock_open, patch
import requests
from ytreporty.api import jobs_list
from ytreporty.context import Environment
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
26,
4235,
25,
11361,
26,
532,
9,
12,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
198,
2,
743,
407,
779,
428,
2393,
2845,... | 3.570755 | 212 |
base = int(input("Digite o numero base da potencia"))
expoente = int(input("Digite o numero expoente da potencia"))
if expoente < 0:
    print("Erro: O expoente não pode ser negativo")
else:
    contador = 0
    resultado = 1
    while contador < expoente:
        resultado = resultado * base
        contador = contador + 1
    print("O valor da potencia é", resultado)
| [
8692,
28,
493,
7,
15414,
7203,
19511,
578,
267,
997,
3529,
2779,
12379,
1787,
29634,
48774,
198,
1069,
7501,
21872,
28,
493,
7,
15414,
7203,
19511,
578,
267,
997,
3529,
1033,
78,
21872,
12379,
1787,
29634,
48774,
198,
198,
361,
1033,
... | 2.136126 | 191 |
import requests
import html.parser
import re
H = html.parser.HTMLParser()
CONTENT = "https://www.churchofjesuschrist.org/content/api/v2"
def clean_html(text):
"""Takes in html code and cleans it. Note that footnotes
are replaced with # for word counting later.
Parameters
-----------
text : string
html to clean
Returns
--------
text : string
cleaned text"""
# convert all html characters
text = html.unescape(text)
# footnotes followed by punctuation make the punctuation be counted as a
# word... sigh.
punc_footnotes = re.compile(
r'<sup class=\"marker\">\w</sup>(\w*)</a>([!?.,])')
text = re.sub(punc_footnotes, r'#\1#\2', text)
# remove footnotes (also counts as words)
no_footnotes = re.compile(r'<sup class=\"marker\">\w</sup>')
text = re.sub(no_footnotes, '#', text)
# remove rest of html tags
clean = re.compile('<.*?>')
text = re.sub(clean, '', text)
    # remove pesky leftover non-breaking spaces
return text.replace(u'\xa0', u' ')
class Content:
"""Class that pulls/represents content from anywhere
on churchofjesuschrist.org/study (theoretically)
Parameters
----------
json : dict
Dictionary made from json pull from lds.org's API.
Attributes
-----------
content : string
Book, talk, or section of content.
headline : string
The content (see above) with verse number in case of scriptures.
publication : string
Overarching publication. Think BoM, DoC, General Conference 2020, etc.
url : string
URL of where the content is located (including the paragraph/verse).
uri : string
URI that it was pulled with.
p_start : string
First verse/paragraph pulled.
p_end : string
Last verse/paragraph pulled.
"""
__repr__ = __print__
@staticmethod
def fetch(uris, json=False):
"""Method to actually make content. This is where the magic happens.
Requires a proper URI to fetch content.
Parameters
----------
uris : list
List of URIs to pull from lds.org. See below for example.
json : bool
Whether to return as list of Content objects or the raw dictionaries. Most useful in debugging. Defaults to False.
Returns
--------
Either a list of Content objects, or a list of strings.
Examples
---------
>>> Content.fetch(["/eng/scriptures/bofm/hel/3.p29"])
[29 Yea, we see that whosoever will may lay hold upon the word of God, which is quick and powerful, which shall divide asunder all the cunning and the snares and the wiles of the devil, and lead the man of Christ in a strait and narrow course across that everlasting gulf of misery which is prepared to engulf the wicked—]
>>> Content.fetch(["/eng/scriptures/bofm/hel/3.p29"], json=True)
[{'content': [{'displayId': '29',
'id': 'p29',
'markup': '<p class="verse" data-aid="128356897" id="p29"><span '
'class="verse-number">29 </span>Yea, we see that '
'whosoever will may lay hold upon the <a '
'class="study-note-ref" href="#note29a"><sup '
'class="marker">a</sup>word</a> of God, which is <a '
'class="study-note-ref" href="#note29b"><sup '
'class="marker">b</sup>quick</a> and powerful, which '
'shall <a class="study-note-ref" href="#note29c"><sup '
'class="marker">c</sup>divide</a> asunder all the '
'cunning and the snares and the wiles of the devil, '
'and lead the man of Christ in a strait and <a '
'class="study-note-ref" href="#note29d"><sup '
'class="marker">d</sup>narrow</a> course across that '
'everlasting <a class="study-note-ref" '
'href="#note29e"><sup class="marker">e</sup>gulf</a> '
'of misery which is prepared to engulf the '
'wicked—</p>'}],
'headline': 'Helaman 3',
'image': {},
'publication': 'Book of Mormon',
'referenceURI': '/eng/scriptures/bofm/hel/3.p29?lang=eng#p29',
'referenceURIDisplayText': 'Helaman 3:29',
'type': 'chapter',
'uri': '/eng/scriptures/bofm/hel/3.p29'}]
""" # noqa: E501
resp = requests.post(url=CONTENT,
data={"uris": uris}).json()
if json:
return [resp[u] for u in uris]
else:
return [Content(resp[u]) for u in uris]
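

# --- Hedged illustration of clean_html ---
# A tiny hypothetical snippet of verse markup; real API responses are much larger.
if __name__ == "__main__":
    sample = ('<p class="verse"><span class="verse-number">1 </span>And it '
              '<a class="study-note-ref" href="#note1a">'
              '<sup class="marker">a</sup>came to pass</a>.</p>')
    print(clean_html(sample))  # -> 1 And it #came to pass.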
| [
11748,
7007,
198,
11748,
27711,
13,
48610,
198,
11748,
302,
198,
198,
39,
796,
27711,
13,
48610,
13,
28656,
46677,
3419,
198,
37815,
3525,
796,
366,
5450,
1378,
2503,
13,
36964,
1659,
73,
274,
385,
43533,
13,
2398,
14,
11299,
14,
1504... | 2.173263 | 2,274 |
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2022.04.11
from selenium import webdriver
from selenium.webdriver.common.by import By
#from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
import time
url = 'https://www.tripadvisor.com/Profile/yes2luvtravel?tab=reviews'
#driver = webdriver.Chrome(executable_path=ChromeDriverManager().install())
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
driver.get(url)
time.sleep(3)
# accept cookies
buttons = driver.find_elements(By.XPATH, '//button[@id="onetrust-accept-btn-handler"]')
if buttons:
print('click Accept')
buttons[0].click()
# click `Show More`
while True:
time.sleep(3)
buttons = driver.find_elements(By.XPATH, '//div[@id="content"]//button')
if not buttons:
break
print('click button')
buttons[0].click()
all_items = driver.find_elements(By.XPATH, '//div[@id="content"]//div[contains(@class, "section")]')
print('len(all_items):', len(all_items))
| [
2,
1772,
25,
13167,
75,
296,
494,
73,
366,
69,
17786,
1,
347,
495,
74,
357,
5450,
1378,
14036,
13,
69,
17786,
13,
489,
8,
198,
2,
3128,
25,
33160,
13,
3023,
13,
1157,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
673... | 2.749354 | 387 |
#!/usr/bin/env python3
# Required imports
# -----------------------------------------------------------------------------
import json
import os
import functools as ft
import multiprocessing as mp
import xmltodict as xd
# Function definitions
# -----------------------------------------------------------------------------
# Helper to read and parse an XML (as dict)
# Helper to attempt to acquire a specific nested dict element
# Returns `None` if no exact match
# Helper function to get data from a current object according to node spec
# Shorthand function to read an XML (and pick metabolite section only),
# then run `get_data`
# Helper to print a formatted message in the console
# Operations
# -----------------------------------------------------------------------------
# Set file paths
path = {
"in": "../../hmdb/data/hmdb_metabolites",
"out": "../_data/outCollect08Hmdb.json"
}
# Specify node structure to check against
nodes = {
"SMILES": "smiles",
"Name": "name",
"Alias": "synonyms|synonym",
"Molecular Formula": "chemical_formula",
"KEGG ID": "kegg_id",
"HMDB ID": "accession",
"CAS Registry Number": "cas_registry_number",
"InChI": "inchi",
"InChI Key": "inchikey",
"ChEBI ID": "chebi_id",
"ChemSpider ID": "chemspider_id",
"PubChem ID": "pubchem_compound_id"
}
# Read the files in the input directory
files = os.listdir(path["in"])
files.sort()
# Extract the HMDB IDs -- all are formatted as `ID.xml`, so split by dot
ids = [f.split(".")[0] for f in files]
# Get the "full" file path for each file
files = [f'{path["in"]}/{f}' for f in files]
# Open the worker pool and collect the data
msg("info", "Collecting data")
with mp.Pool(14) as pool:
collect = ft.partial(read_get, node_set=nodes)
result = pool.map(collect, files)
# Write the output file
msg("info", "Writing output file")
with open(path["out"], "w") as file:
file.write(f'{json.dumps(result, indent=2)}\n')
# All done
msg("ok", "End of operations")
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 628, 198, 2, 20906, 17944, 198, 2, 16529, 32501, 628, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 1257, 310, 10141, 355, 10117, 198, 11748, 18540, 305, 919, 278, 355, 29034, 198, ... | 3.072948 | 658
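The comments in the script above name several helpers (read an XML as a dict, fetch a nested key, extract the fields in `nodes`, the `read_get` shorthand, and the `msg` logger) whose bodies are missing from this copy. A minimal sketch of what they might look like follows; the names mirror the call sites, but the bodies are assumptions rather than the original implementation, and the `|`-separated key paths are inferred from entries such as "synonyms|synonym".

# Hypothetical helper implementations, reconstructed from the comments and
# call sites above. Treat every detail here as an assumption.
import xmltodict as xd


def read_xml(file_path):
    # Parse one HMDB XML file into a plain nested dict.
    with open(file_path, "r", encoding="utf-8") as handle:
        return xd.parse(handle.read())


def get_item(obj, key_path):
    # Walk a nested dict following keys separated by "|"; None if any key is absent.
    current = obj
    for key in key_path.split("|"):
        if not isinstance(current, dict) or key not in current:
            return None
        current = current[key]
    return current


def get_data(record, node_set):
    # Pull every field named in `node_set` from one metabolite record.
    return {label: get_item(record, path) for label, path in node_set.items()}


def read_get(file_path, node_set):
    # Shorthand: read one file, keep only the metabolite section, extract the fields.
    record = read_xml(file_path).get("metabolite", {})
    return get_data(record, node_set)


def msg(level, text):
    # Tiny console logger used for the progress messages in the main block.
    print(f"[{level.upper()}] {text}")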
#!/usr/bin/env python
import _init_paths
from evb.test import test_net
from evb.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='detrac', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--test', dest='test_order',
help='test file',
default=01, type=int)
parser.add_argument('--data', dest='data_path',
help='set training and testing data path', default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
cfg.DATASET_DIR = args.data_path
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
test_file = s = "%02d" % (args.test_order)
print 'test_file:', test_file
print 'max per image:',args.max_per_image
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis,test=test_file)
| [2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 4808, 15003, 62, 6978, 82, 198, 6738, 819, 65, 13, 9288, 1330, 1332, 62, 3262, 198, 6738, 819, 65, 13, 11250, 1330, 30218, 70, 11, 30218, 70, 62, 6738, 62, 7753, 11, 30218, ... | 2.108197 | 1,525
import base64
import os
import string
from pathlib import Path
import pytest
import xdelta3
value_one = b'this is a short string to test with. It is suitable for delta encoding.'
value_two = b'this is a different short string to test with. It is suitable for delta encoding.'
expected_delta = b'\xd6\xc3\xc4\x00\x00\x01G\x00\x14Q\x00\t\x04\x02different\x1a\n\x13>\x00\t'
| [11748, 2779, 2414, 198, 11748, 28686, 198, 11748, 4731, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 12972, 9288, 198, 198, 11748, 2124, 67, 12514, 18, 198, 198, 8367, 62, 505, 796, 275, 470, 14363, 318, 257, 1790, 4731, 284, ... | 2.690141 | 142
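The constants above set up a delta-encoding round trip. A short sketch of how the xdelta3 Python package is typically exercised with them follows; the encode/decode round trip is the package's documented usage, but the specific assertions are illustrative and not taken from the original test file.

import xdelta3

# Same constants as in the snippet above, repeated so this sketch is self-contained.
value_one = b'this is a short string to test with. It is suitable for delta encoding.'
value_two = b'this is a different short string to test with. It is suitable for delta encoding.'

# Build a delta that turns value_one into value_two, then apply it back.
delta = xdelta3.encode(value_one, value_two)
restored = xdelta3.decode(value_one, delta)

assert restored == value_two          # the round trip must be lossless
assert len(delta) < len(value_two)    # the delta should be shorter than the target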
# Echo client program
# This is a simple test client
import socket
import sys
PORT = 80 # The same port as used by the server
| [2, 21455, 5456, 1430, 198, 2, 770, 318, 257, 2829, 1332, 5456, 198, 11748, 17802, 198, 11748, 25064, 198, 15490, 796, 4019, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 1303, 383, 976, 2493, 355, 973, 416, 262, 4382, ... | 3.136364 | 44
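The snippet above stops after defining PORT. A minimal completion of the classic echo-style client follows; the host handling and the message are assumptions added for illustration (and note that port 80 is an HTTP port, so a real echo test would normally point at an actual echo server).

import socket
import sys

PORT = 80  # same value as in the snippet above

# Hypothetical completion: connect, send one message, read one reply.
HOST = sys.argv[1] if len(sys.argv) > 1 else 'localhost'

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'Hello, world')
    data = s.recv(1024)

print('Received', repr(data))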
import numpy as np
## GEAR FORCES AND POWER LOSS #################################################
## LINES OF CONTACT ###########################################################
## HERTZIAN CONTACT ########################################################### | [
11748, 299, 32152, 355, 45941, 198, 2235, 43790, 7473, 34, 1546, 5357, 40295, 406, 18420, 1303, 29113, 14468, 198, 2235, 43277, 1546, 3963, 22904, 10659, 1303, 29113, 14468, 7804, 2235, 198, 2235, 367, 17395, 57, 16868, 22904, 10659, 1303, ... | 6 | 43
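The module above only shows its section headers. As an indication of the kind of computation a "HERTZIAN CONTACT" section for gears usually performs, here is a sketch of the textbook line-contact (cylinder-on-cylinder) relations for equivalent radius, equivalent modulus, contact half-width and peak pressure. The function name, arguments and example numbers are all illustrative and not taken from the original module.

import numpy as np


def hertz_line_contact(w, r1, r2, e1, e2, nu1=0.3, nu2=0.3):
    # w: load per unit face width [N/m]; r1, r2: curvature radii [m];
    # e1, e2: Young's moduli [Pa]; nu1, nu2: Poisson ratios.
    r_eq = 1.0 / (1.0 / r1 + 1.0 / r2)                            # equivalent radius
    e_eq = 1.0 / ((1.0 - nu1 ** 2) / e1 + (1.0 - nu2 ** 2) / e2)  # equivalent modulus
    b = np.sqrt(4.0 * w * r_eq / (np.pi * e_eq))                  # contact half-width
    p_max = 2.0 * w / (np.pi * b)                                 # peak contact pressure
    return p_max, b


# Illustrative numbers only: 200 kN/m line load, 20/30 mm radii, steel on steel.
p_max, b = hertz_line_contact(2.0e5, 0.020, 0.030, 210e9, 210e9)
print(f"p_max = {p_max / 1e9:.2f} GPa, half-width = {b * 1e6:.0f} um")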
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_csv(path)
#Code starts here
data['Rating'].plot.hist()
plt.show()
data=data[data['Rating']<=5]
data['Rating'].plot.hist()
plt.show()
#Code ends here
# --------------
# code starts here
total_null=data.isnull().sum()
percent_null=total_null/data.isnull().count()
missing_data=pd.concat([total_null,percent_null],axis=1,keys=['Total','Percent'])
print(missing_data)
data=data.dropna()
total_null_1=data.isnull().sum()
percent_null_1=total_null_1/data.isnull().count()
missing_data_1=pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percent'])
print(missing_data_1)
# code ends here
# --------------
'''Category vs Rating
Let's first check if category and ratings have any sort of relation'''
#Code starts here
sns.catplot(x="Category",y="Rating",data=data, kind="box",height=10)
plt.xticks(rotation=90)
plt.title('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data['Installs'].value_counts())
data['Installs']=data['Installs'].str.replace('+','').str.replace(',','')
data['Installs']=data['Installs'].astype(int)
le=LabelEncoder()
data['Installs']=le.fit_transform(data['Installs'])
sns.regplot(x="Installs", y="Rating",data=data)
plt.title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Price vs Ratings
#Code starts here
#Print value counts of Price column
print(data['Price'].value_counts())
#Remove dollar sign from Price column
data['Price']=data['Price'].str.replace('$','')
#Convert the Price column to datatype float
data['Price']=data['Price'].astype(float)
#plotting the regression line.
sns.regplot(x="Price", y="Rating",data=data)
plt.title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Genre vs Rating
#Code starts here
#unique values of the column Genres
print(data['Genres'].unique())
# Split the values of column Genres by ;
data['Genres']=data['Genres'].str.split(';').str[0]
#Group Genres and Rating
gr_mean=data[['Genres', 'Rating']].groupby(['Genres'], as_index=False).mean()
#Sort the values
gr_mean=gr_mean.sort_values(by=['Rating'])
#Print the first and last value of gr_mean
print('First value=',gr_mean.head(1))
print('Last value=',gr_mean.tail(1))
#Code ends here
# --------------
#Last Updated vs Rating
#Code starts here
#Print and visualise the values of Last Updated column of 'data'
print(data['Last Updated'])
#Convert Last Updated to datetime format
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
#Find out the max value in Last Updated column
max_date=data['Last Updated'].max()
#Create new column Last Updated Days which is the difference between max_date and values of column Last Updated in days using "dt.days" function
data['Last Updated Days']= max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
#plot the regression line for Rating vs Last Updated [RegPlot]
plt.figure(figsize = (10,10))
sns.regplot(x="Last Updated Days", y="Rating", data=data)
plt.title("Rating vs Last Updated [RegPlot]")
plt.show()
#Code ends here
| [2, 220, 26171, 198, 2, 20939, 278, 13639, 3696, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 384, 397, 1211, 355, 3013, 82, 198, 198, 7890, 28, 30094, 13, 961, 62, ... | 2.967153 | 1,096
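One note on the cleaning steps above: `data['Installs'].str.replace('+','')` and `data['Price'].str.replace('$','')` rely on older pandas treating single-character patterns as literal strings. On current pandas releases the safer spelling passes `regex=False` explicitly. A sketch of the equivalent cleaning, assuming the same `data` frame and raw column contents as in the script above:

# Literal (non-regex) replacement, so '+' and '$' are not parsed as patterns.
data['Installs'] = (data['Installs']
                    .str.replace('+', '', regex=False)
                    .str.replace(',', '', regex=False)
                    .astype(int))

data['Price'] = data['Price'].str.replace('$', '', regex=False).astype(float)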
from .imaging_serial import *
| [198, 6738, 764, 320, 3039, 62, 46911, 1330, 1635, 198] | 3.1 | 10
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
"""
This database upgrade creates a new kube_rootca_update
table and a new kube_rootca_host_update table
"""
meta = MetaData()
meta.bind = migrate_engine
Table('i_host', meta, autoload=True)
# Define and create the kube_rootca_update table.
kube_rootca_update = Table(
'kube_rootca_update',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True,
unique=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('from_rootca_cert', String(255)),
Column('to_rootca_cert', String(255)),
Column('state', String(255)),
Column('capabilities', Text),
Column('reserved_1', String(255)),
Column('reserved_2', String(255)),
Column('reserved_3', String(255)),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
kube_rootca_update.create()
kube_rootca_host_update = Table(
'kube_rootca_host_update',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True,
unique=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('target_rootca_cert', String(255)),
Column('effective_rootca_cert', String(255)),
Column('state', String(255)),
Column('host_id', Integer,
ForeignKey('i_host.id', ondelete='CASCADE')),
Column('capabilities', Text),
Column('reserved_1', String(255)),
Column('reserved_2', String(255)),
Column('reserved_3', String(255)),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
kube_rootca_host_update.create()
| [2, 198, 2, 15069, 357, 66, 8, 12131, 8180, 10501, 11, 3457, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 2, 198, 198, 6738, 44161, 282, 26599, 1330, 29201, 198, 6738, 44161, 282, 265... | 2.370332 | 964
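The migration above defines only upgrade(); sqlalchemy-migrate scripts normally also carry a downgrade(). A sketch of what an inverse could look like follows. It is an assumption, and many platform migrations of this style deliberately refuse to downgrade instead of dropping tables.

from sqlalchemy import MetaData, Table


def downgrade(migrate_engine):
    # Hypothetical inverse of upgrade(): drop the dependent table first,
    # then the parent table. Not part of the original migration script.
    meta = MetaData()
    meta.bind = migrate_engine
    Table('kube_rootca_host_update', meta, autoload=True).drop()
    Table('kube_rootca_update', meta, autoload=True).drop()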
"""
Created on Tue Nov 24 21:41:40 2020
@author: mauro
"""
import os
from typing import Dict
import graphviz
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from explanation.ExplanationBase import ExplanationBase
from explanation.Global.SurrogatePlot import SurrogatePlot
from explanation.Global.SurrogateText import SurrogateText
class SurrogateModelExplanation(ExplanationBase):
"""
Contrastive, global Explanation (global surrogate model)
"""
def calculate_explanation(self, max_leaf_nodes=100):
"""
Train a surrogate model (Decision Tree) on the predicted values
from the original model
"""
y_hat = self.model.predict(self.X.values)
self.surrogate_model = DecisionTreeRegressor(
max_depth=self.num_features, max_leaf_nodes=max_leaf_nodes
)
self.surrogate_model.fit(self.X, y_hat)
self.logger.info(
"Surrogate Model R2 score: {:.2f}".format(
self.surrogate_model.score(self.X, y_hat)
)
)
def plot(self):
"""
use graphviz to plot the decision tree
"""
surrogatePlot = SurrogatePlot()
dot_file = surrogatePlot(
model=self.surrogate_model,
feature_names=self.feature_names,
precision=self.precision,
)
name, extension = os.path.splitext(self.plot_name)
graphviz.Source(
dot_file,
filename=os.path.join(self.path_plot, name),
format=extension.replace('.', ''),
).view()
if self.save:
with open(
os.path.join(self.path_plot, "{}.dot".format(self.plot_name)), "w"
) as file:
file.write(dot_file)
def get_method_text(self):
"""
Define the method introduction text of the explanation type.
Returns:
None.
"""
return self.method_text_empty.format(
self.num_to_str[self.number_of_groups]
)
def get_natural_language_text(self):
"""
Define the natural language output using the feature names and its
values for this explanation type
Returns:
None.
"""
surrogateText = SurrogateText(
text=self.sentence,
model=self.surrogate_model,
X=self.X,
feature_names=self.feature_names,
)
sentences = surrogateText.get_text()
return self.natural_language_text_empty.format(
sentences
)
def setup(self):
"""
Calculate the feature importance and create the text once
Returns:
None.
"""
self.calculate_explanation()
self.natural_language_text = self.get_natural_language_text()
self.method_text = self.get_method_text()
self.plot()
def main(self, sample_index, sample):
"""
main function to create the explanation of the given sample. The
method_text, natural_language_text and the plots are create per sample.
Args:
sample (int): number of the sample to create the explanation for
Returns:
None.
"""
self.get_prediction(sample_index)
self.score_text = self.get_score_text(self.number_of_groups)
self.save_csv(sample)
return self.score_text, self.method_text, self.natural_language_text
if __name__ == "__main__":
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
diabetes = load_diabetes()
X_train, X_val, y_train, y_val = train_test_split(
diabetes.data, diabetes.target, random_state=0
)
model = RandomForestRegressor().fit(X_train, y_train)
# model = sklearn.linear_model.LinearRegression().fit(X_train, y_train)
print(model.score(X_val, y_val))
# DF, based on which importance is checked
X_val = pd.DataFrame(X_val, columns=diabetes.feature_names)
sparse = True
text = "{}"
X = X_val
y = y_val
sample = 10
surrogateExplanation = SurrogateModelExplanation(X, y, model, sparse)
surrogateExplanation.main(sample)
| [37811, 198, 41972, 319, 30030, 5267, 1987, 2310, 25, 3901, 25, 1821, 12131, 198, 198, 31, 9800, 25, 285, 559, 305, 198, 37811, 198, 11748, 28686, 198, 6738, 19720, 1330, 360, 713, 198, 198, 11748, 4823, 85, 528, 198, 11748, 19798, 292... | 2.263874 | 1,910