content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 13:09:07 2021
@author: Easin
"""
print(matchingStrings([1,2,3,4],[1,2,2,3]))
'''
There is a collection of input strings and a collection of query strings. For each query string, determine how many times it occurs in the list of input strings. Return an array of the results.
Example
There are instances of ', of '' and of ''. For each query, add an element to the return array, .
Function Description
Complete the function matchingStrings in the editor below. The function must return an array of integers representing the frequency of occurrence of each query string in strings.
matchingStrings has the following parameters:
string strings[n] - an array of strings to search
string queries[q] - an array of query strings
Returns
int[q]: an array of results for each query
Input Format
The first line contains and integer , the size of .
Each of the next lines contains a string .
The next line contains , the size of .
Each of the next lines contains a string .
''' | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
26223,
4280,
1542,
1511,
25,
2931,
25,
2998,
33448,
201,
198,
201,
198,
31,
9800,
25,
38647,
259,
201,
198,
37811,
201,
198,
201,
198... | 3.424437 | 311 |
cloud_config = {
"<cloud_config>"
}
email_config = {
"SEND_GRID_KEY":'<SEND_GRID_KEY>'
}
invite_url = "<invite_url>" | [
198,
17721,
62,
11250,
796,
1391,
628,
33490,
17721,
62,
11250,
24618,
628,
198,
92,
198,
198,
12888,
62,
11250,
796,
1391,
198,
220,
220,
220,
366,
50,
10619,
62,
10761,
2389,
62,
20373,
1298,
6,
27,
50,
10619,
62,
10761,
2389,
62,... | 2.081967 | 61 |
from labler.api import IImager
from PIL import Image
from PIL import ImageFile
import base64
import os
ImageFile.LOAD_TRUNCATED_IMAGES = True
| [
6738,
2248,
1754,
13,
15042,
1330,
2873,
76,
3536,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
8979,
198,
11748,
2779,
2414,
198,
11748,
28686,
198,
198,
5159,
8979,
13,
35613,
62,
5446,
4944,
34,
11617,
62,
39... | 3.130435 | 46 |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'meshmergerwidget.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from opencmiss.zincwidgets.sceneviewerwidget import SceneviewerWidget
# setupUi
# retranslateUi
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
14468,
198,
2235,
5178,
7560,
422,
3555,
12454,
2393,
705,
76,
5069,
647,
1362,
42655,
13,
9019,
6,
198,
2235,
198,
2235,
15622,
416,
25,
33734,
11787... | 3.8 | 155 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chat.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='chat.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\nchat.proto*\x1c\n\tChatMsgId\x12\x0f\n\x0b\x43hat_MSG_ID\x10\x00\x62\x06proto3'
)
_CHATMSGID = _descriptor.EnumDescriptor(
name='ChatMsgId',
full_name='ChatMsgId',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='Chat_MSG_ID', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=14,
serialized_end=42,
)
_sym_db.RegisterEnumDescriptor(_CHATMSGID)
ChatMsgId = enum_type_wrapper.EnumTypeWrapper(_CHATMSGID)
Chat_MSG_ID = 0
DESCRIPTOR.enum_types_by_name['ChatMsgId'] = _CHATMSGID
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
8537,
13,
1676,
1462,
198,
37811,
8645,
515,
8435,
11876,
2438,
526,
15931... | 2.57315 | 581 |
import numpy as np
import subprocess
from polharmonic import util, gaunt
import polharmonic.shcoeffs as sh
import matplotlib.pyplot as plt
from PIL import Image
import os
P = np.load(os.path.join(os.path.dirname(__file__), 'chcoeff_n2.npy'))
G = np.load(os.path.join(os.path.dirname(__file__), 'gaunt_l4.npy'))
class TFCoeffs:
"""A TFCoeffs object stores the transfer function coefficients for a
microscope's point response function.
The coefficients are stored in 2D array of spherical and circular harmonic
coefficients.
The first index of self.coeffs corresponds to the cirular harmonics and the
second index corresponds to the spherical harmonics. We use the following
"lexicographic ordering" of the harmonics:
z_0 y_0^0, z_0 y_2^-2, z_0 y_2^0, ...
z_-2 y_0^0, z_-2 y_2^-2, z_-2 y_2^0,
z_2 y_0^0, z_2 y_2^-2, z_2 y_2^0,
.
.
.
"""
# def plot(self, folder=''):
# if not os.path.exists(folder):
# os.makedirs(folder)
# self.plot_dist(filename=folder+'/dist.png')
# self.plot_spectrum(filename=folder+'/spectrum.pdf')
# def plot_spectrum(self, filename='spectrum.pdf'):
# print('Plotting: ' + filename)
# f, ax = plt.subplots(1, 1, figsize=(4, 4))
# # Create image of spherical harmonic coefficients
# image = np.zeros((self.rmax, self.mmax))
# for j, c in enumerate(self.coeffs):
# l, m = util.j2lm(j)
# image[int(l/2), self.lmax + m] = c
# # Label rows and columns
# for l in range(self.lmax + 1):
# if l == 0:
# prepend = 'l='
# else:
# prepend = ''
# if l%2 == 0:
# ax.annotate(r'$'+prepend+str(l)+'$', xy=(1, 1), xytext=(-0.75, int(l/2)),
# textcoords='data', ha='right', va='center')
# ax.annotate(r'$m=$', xy=(1, 1), xytext=(-0.75, -0.75),
# textcoords='data', ha='right', va='center')
# for m in range(2*self.lmax + 1):
# ax.annotate('$'+str(m - self.lmax)+'$', xy=(1, 1),
# xytext=(int(m), -0.75),
# textcoords='data', ha='center', va='center')
# # Label each pixel
# for (y,x), value in np.ndenumerate(image):
# if value != 0:
# ax.annotate("{0:.2f}".format(value), xy=(1, 1), xytext=(x, y),
# textcoords='data', ha='center', va='center')
# ax.imshow(image, cmap='bwr', interpolation='nearest',
# vmin=-np.max(self.coeffs), vmax=np.max(self.coeffs))
# ax.axis('off')
# f.savefig(filename, bbox_inches='tight')
# def plot_dist(self, filename='dist.png', n_pts=2500, r=1, mag=1, show=False):
# from mayavi import mlab
# print('Plotting: ' + filename)
# # Calculate radii
# tp = util.fibonacci_sphere(n_pts)
# xyz = util.fibonacci_sphere(n_pts, xyz=True)
# radii = np.zeros(tp.shape[0])
# for i, c in enumerate(self.coeffs):
# l, m = util.j2lm(i)
# radii += c*util.spZnm(l, m, tp[:,0], tp[:,1])
# radii = radii/np.max(radii)
# # Split into positive and negatives
# n = radii.clip(max=0)
# p = radii.clip(min=0)*(-1)
# # Triangulation
# from scipy.spatial import ConvexHull
# ch = ConvexHull(xyz)
# triangles = ch.simplices
# # Create figure
# mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 400))
# mlab.clf()
# # Plot
# mlab.triangular_mesh(p*xyz[:,0], p*xyz[:,1], p*xyz[:,2], triangles, color=(1, 0, 0))
# s = mlab.triangular_mesh(n*xyz[:,0], n*xyz[:,1], n*xyz[:,2], triangles, color=(0, 0, 1))
# s.scene.light_manager.light_mode = "vtk"
# # View and save
# mlab.view(azimuth=45, elevation=45, distance=5, focalpoint=None,
# roll=None, reset_roll=True, figure=None)
# mlab.savefig(filename, magnification=mag)
# subprocess.call(['convert', filename, '-transparent', 'white', filename])
# if show:
# mlab.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
850,
14681,
198,
6738,
755,
29155,
9229,
1330,
7736,
11,
308,
12968,
198,
11748,
755,
29155,
9229,
13,
1477,
1073,
14822,
82,
355,
427,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,... | 1.938005 | 2,226 |
#!/usr/bin/env python
import sys
import os
import web
import json
import urllib
import logging
import socket
import uuid
from time import time, sleep
from threading import Thread
"""
Main application that will monitor TenantUpdate threads.
"""
"""
TenantUpdate is used for updating the tenant quota list every set interval.
"""
"""
Webpy webserver used to serve up tenant quotas and API calls. Can run as either the development server or under mod_wsgi.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
3992,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
18931,
198,
11748,
17802,
198,
11748,
334,
27112,
198,
198,
6738,
640,
1330,
... | 3.648855 | 131 |
#!/usr/bin/env python
"""
_Impl_
Scenario Implementations
"""
__all__ = []
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
62,
29710,
62,
198,
198,
3351,
39055,
48282,
602,
198,
198,
37811,
198,
834,
439,
834,
796,
17635,
198
] | 2.566667 | 30 |
# Copyright (c) 2013--2014 King's College London
# Created by the Software Development Team <http://soft-dev.org/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from grammar_parser.gparser import IndentationTerminal
from incparser.astree import TextNode, BOS
| [
2,
15069,
357,
66,
8,
2211,
438,
4967,
2677,
338,
5535,
3576,
198,
2,
15622,
416,
262,
10442,
7712,
4816,
1279,
4023,
1378,
4215,
12,
7959,
13,
2398,
15913,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
28... | 3.851515 | 330 |
# -*- coding: utf-8 -*-
"""
gui for viscoindent, v. May-2021
with images for viscomodels
gui_Viscoindent
"""
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QSizePolicy,\
QVBoxLayout, QHBoxLayout, QLabel,\
QPushButton, QComboBox
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
# import matplotlib.pyplot as plt
import numpy as np
# import csv
from tingFC_constructor import tingFC_constructor
from relaxation_functions import relaxation_function, modellist
if __name__ == '__main__':
try:
del app
except:
print('noapp')
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
48317,
329,
1490,
1073,
521,
298,
11,
410,
13,
1737,
12,
1238,
2481,
201,
198,
4480,
4263,
329,
1490,
785,
375,
1424,
201,
198,
48317,
62,
53,
... | 2.250608 | 411 |
#!/usr/bin/env python
# Copyright (C) 2015 Indeed Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import json
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
34,
8,
1853,
9676,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,... | 3.848485 | 165 |
# External imports
import pandas as pd
import numpy as np
from collections import defaultdict
import networkx as nx
import logging
from operator import itemgetter
import logging
import random
import pandas as pd
from stop_words import get_stop_words
from unidecode import unidecode
from fuzzywuzzy.utils import asciidammit
from pprint import pformat
from pprint import pprint
import pdb
import itertools
# Local imports
from preproc import find_heads
from preproc import chunk_by_func
from preproc import is_verb_tag
from preproc import is_adverb_tag
from preproc import enum
from preproc import group_consecutive
from spacy_wrapper import spacy_with_whitespace_tokenizer as spacy_ws
from preproc import is_determiner_tag
from preproc import is_prepositional_tag
from preproc import is_modal_tag
from preproc import is_wh_question
from preproc import is_noun_tag
from qa_template_to_oie import NonterminalGenerator
from qa_template_to_oie import OIE
from fuzzywuzzy.utils import asciidammit
class Sentence:
"""
Container for all words and QA pairs pretaining to a single sentence.
"""
def __init__(self,
sentence,
pos_tags,
template_extractor,
sentence_id):
"""
sentence - tokenized sentence
pos_tags - corresponding pos tags
template_extractor - a Templateextractor instance to accumulate
templates across sentences
"""
# if len(sentence) != len(pos_tags):
# pdb.set_trace()
# assert len(sentence) == len(pos_tags), "sent: {}({})\npos tags:{}({})".format(sentence, len(sentence), pos_tags, len(pos_tags))
self.sentence = sentence
self.pos_tags = pos_tags
self.sentence_id = sentence_id
self.sentence_str = " ".join(self.sentence)
self.qa_pairs = []
self.workers = set()
self.template_extractor = template_extractor
# Enum of different possible feature combinations
features = enum(PREDICATE = 0,
POS = 1,
DEP = 2,
CONST = 3,
PASS = 4)
def to_see_format(self, feats, see_format):
"""
Returns abisee's story format for this sentence.
feats is a list of Sentence.features indicating what features
to include in the output.
"""
ret = []
for qa in self.qa_pairs:
if qa.oie:
pred_indices = qa.oie.get_sentence_indices()[0]
if pred_indices:
cur_sent = " ".join([self.apply_feats(word,
word_ind,
feats,
pred_indices = pred_indices,
pos_tags = self.pos_tags)
for word_ind, word
in enumerate(self.sentence)])
ret.append("{}\n\n@highlight\n\n{}".format(cur_sent,
qa.oie.to_see_format(see_format)))
return ret
def get_all_implicit_relations(self):
"""
Get all of the relations between two elements in questions.
"""
ret = set()
for qa in self.qa_pairs:
for c in qa.get_implicit_relations():
ret.add(c)
return ret
def get_all_explicit_relations(self):
"""
Get all of the relations between elements in answers and respective
questions.
"""
ret = set()
for qa in self.qa_pairs:
for c in qa.get_explicit_relations():
ret.add(c)
return ret
def get_missing_questions(self):
"""
Get implicit relations for which there are no explcit
questions.
"""
implicit = self.get_all_implicit_relations()
explicit = self.get_all_explicit_relations()
ret = []
for (s1, s2) in map(tuple, implicit):
s1 = set(s1)
s2 = set(s2)
covered = False
for (s3, s4) in map(tuple, explicit):
s3 = set(s3)
s4 = set(s4)
if (s1.issubset(s3)) and (s2.issubset(s4)) or \
(s1.issubset(s4)) and (s2.issubset(s3)):
covered = True
break
if not covered:
ret.append((list(s1), list(s2)))
return ret
def apply_feats(self, word, word_ind, feats, **args):
"""
Apply all requested features on this word.
"""
ret = word
# Collect features
for func in [Sentence.add_pos_feat,
Sentence.add_predicate_feat,
Sentence.add_pass_feat]:
ret = func(ret,
word_ind,
feats,
**args)
return ret
@staticmethod
def add_pass_feat(word, word_ind, feats, **args):
"""
Don't add any feature.
"""
return word
@staticmethod
def add_predicate_feat(word, word_ind, feats, **args):
"""
Return this word with predicate features, if indicated
"""
return "(PRED {} )PRED".format(word) \
if (word_ind in args['pred_indices']) and \
(Sentence.features.PREDICATE in feats) \
else word
@staticmethod
def add_pos_feat(word, word_ind, feats, **args):
"""
Return this word with POS features, if indicated
"""
return "({pos} {word} ){pos}".format(word = word,
pos = args["pos_tags"][word_ind]) \
if Sentence.features.POS in feats \
else word
def add_qa_pair(self,
worker_id,
special_word,
raw_question,
raw_answer,
aligned_question,
aligned_answer):
"""
Add a QA pair to this sentence's annotation
"""
qa = QA_pair(worker_id,
special_word,
raw_question,
raw_answer,
aligned_question,
aligned_answer,
self.pos_tags,
self)
# qa.template_str = self.template_extractor.get_qa_template(qa)
self.qa_pairs.append(qa)
self.workers.add(worker_id)
def consolidate_qas(self):
"""
Consolidate QAs corresponding to the same question
Currently chooses the longest answer.
Done this way to perhaps incoporate more complicated measures at this point.
"""
consolidated_questions = [] # Keep track of questions already dealt with
qa_pairs = []
for qa in self.qa_pairs:
if qa.raw_question_str in consolidated_questions:
# already dealt with this question
continue
corresponding_qas = self.get_qas_by_question(qa.raw_question_str)
qa_pairs.append(max(corresponding_qas,
key = lambda qa: len(qa.raw_answer_str)))
consolidated_questions.append(qa.raw_question_str)
self.qa_pairs = qa_pairs
def get_qa_pairs(self, num_of_workers):
"""
Get the qa pairs
num of workers indicates how many workers to allow (sequntally ordered)
use -1 if you want all of them
"""
if num_of_workers == -1:
return self.qa_pairs
allowed_workers = list(self.workers)[: num_of_workers]
return [qa for qa in self.qa_pairs if qa.worker_id in allowed_workers]
def get_qas_by_question(self, raw_question_str):
"""
Returns a list of QAs by the raw question they contain.
"""
return [qa
for qa
in self.qa_pairs
if qa.raw_question_str == raw_question_str]
def get_num_of_workers(self):
"""
Get the numer of all of the workers on this sentence
"""
return len(self.workers)
def __len__(self):
"""
Returns the number of QA pairs
"""
return len(self.qa_pairs)
def __getitem__(self, i):
"""
Return the ith question.
"""
return self.qa_pairs[i]
class Edge:
"""
Container for an edge in the sentence structure
"""
def __init__(self, sent, src, dst, label, wid):
"""
Edge from <src> to <dst> with the relation <label>
Sent (the original sentence) is used for printing purposes
"""
self.sent = sent
self.src = src
self.dst = dst
self.label = label
self.wid = wid
def __str__(self):
"""
Textual representation of an edge
"""
return "{} -> {} ({})".format(' '.join(self.sent.sentence[self.src[0] : self.src[1]]),
' '.join(self.sent.sentence[self.dst[0] : self.dst[1]]),
' '.join(self.label))
class EdgePossibilities(Edge):
"""
Container class for uncertain edges - where a question contains
multiple sentence elements
"""
def __init__(self, sent, src, dst, label, wid):
"""
Basically just calls the super class' constructor
"""
Edge.__init__(self, sent, src, dst, label, wid)
class QA_pair:
"""
Container for raw and aligned QA pairs
"""
# Static spaCy parser instance
parser = spacy_ws.parser
def __init__(self, worker_id, special_word, raw_question, raw_answer,
aligned_question, aligned_answer, pos_tags, sentence):
"""
Initialize from the input file format
pos_tags - pos tags from the original sentence
"""
self.np_chunk_counter = 0
self.vp_chunk_counter = 0
self.advp_chunk_counter = 0
self.worker_id = worker_id
self.special_word = special_word
self.sentence = sentence
# Normalize questions by removing all question marks
# (In the dataset some of the questions end with it while other don't)
self.raw_question = [s
for s in raw_question
if s != "?"]
self.raw_question_str = " ".join(raw_question)
self.aligned_question = [Word_alignment(word, word_index, self.raw_question) for
(word_index, word)
in enumerate([w
for w in aligned_question
if "?" not in w])]
self.raw_answer = raw_answer
self.raw_answer_str = " ".join(raw_answer)
self.aligned_answer = [Word_alignment(word, word_index, self.raw_answer) for
(word_index, word) in enumerate(aligned_answer)]
self.pos_tags = pos_tags
self.chunks = {}
## Calculated fields
try:
self.question_spacy_parse = QA_pair.parser(unicode(self.raw_question_str))
except Exception as e:
# Some questions fail when decoding to unicode
# Store None as parse in those cases
self.question_spacy_parse = None
# self.template_question = self.extract_template()
# self.template_question_str = " ".join([word.source_word
# for word in self.template_question])
# Enum of different chunk types in questions
chunk_types = enum(UNMAPPED = 0,
MAPPED_VERB = 1,
UNMAPPED_VERB = 2,
MAPPED_NOUN = 3,
UNMAPPED_NOUN = 4,
MAPPED_ADV = 5,
UNMAPPED_ADV = 6)
# Words which play special role in questions
# and we'd like to exempt them from certain handling
qa_special_words = ["be",
"have",
"do",
"kind",
"type"]
def get_implicit_relations(self):
"""
Returns a list of list of indexes which appear in
the question.
"""
elements = []
for chunk in self.chunks.values():
elements.extend(map(tuple,
group_consecutive(chunk.get_sentence_indices())))
return map(frozenset,
itertools.combinations(elements,
2))
def get_explicit_relations(self):
"""
Get a list of relations between question and answer.
"""
elements = []
for chunk in self.chunks.values():
elements.extend(map(tuple,
group_consecutive(chunk.get_sentence_indices())))
return map(frozenset,
[(elem, tuple(map(lambda word: word.target_word_ind,
self.aligned_answer)))
for elem in elements])
@staticmethod
def is_mapped_type(chunk_type):
"""
Return True iff the chunk represents a mapped type
"""
return chunk_type in [QA_pair.chunk_types.MAPPED_VERB,
QA_pair.chunk_types.MAPPED_NOUN,
QA_pair.chunk_types.MAPPED_ADV]
@staticmethod
def is_verb_type(chunk_type):
"""
Return True iff the chunk represents a verb type
"""
return chunk_type in [QA_pair.chunk_types.MAPPED_VERB,
QA_pair.chunk_types.UNMAPPED_VERB]
@staticmethod
def is_adverb_type(chunk_type):
"""
Return True iff the chunk represents a verb type
"""
return chunk_type in [QA_pair.chunk_types.MAPPED_ADV,
QA_pair.chunk_types.UNMAPPED_ADV]
def get_spacy_tok(self, aligned_word):
"""
Return the associated spacy token for a given index
in the question
Assumes it's *not* mapped.
"""
# assert(not aligned_word.is_mapped)
return self.question_spacy_parse[aligned_word.source_word_ind]
def get_np_symbol(self):
"""
Return the appropriate NP symbol
"""
ret = "NP{}".format(self.np_chunk_counter)
self.np_chunk_counter += 1
return ret
def get_vp_symbol(self):
"""
Return the appropriate VP symbol
"""
ret = "VP{}".format(self.vp_chunk_counter)
self.vp_chunk_counter += 1
return ret
def get_advp_symbol(self):
"""
Return the appropriate VP symbol
"""
ret = "ADVP{}".format(self.advp_chunk_counter)
self.advp_chunk_counter += 1
return ret
def get_chunk(self, chunk_type, target_words):
"""
Return an appropriate Chunk
"""
if QA_pair.is_verb_type(chunk_type):
source_word = self.get_vp_symbol()
elif QA_pair.is_adverb_type(chunk_type):
source_word = self.get_advp_symbol()
else:
source_word = self.get_np_symbol()
ret = Chunk(source_word = source_word,
target_words = list(reversed(target_words)))
self.chunks[ret.source_word] = ret
return ret
def chunk(self):
"""
Chunk by:
- If word are aligned:
(1) Whether the words form a consecutive chunk in the original sentence
(2) Whether the POS (in the question) matches
- Otherwise
- Just (1) above
"""
ret = []
target_words = []
prev_chunk_type = None
cur_chunk_type = None
if self.question_spacy_parse is None:
# We'll use the spacy POS, so stop now if not available
# Doesn't happen often
return []
for word_ind, word in reversed(list(enumerate(self.aligned_question))):
# Chunking in reverse to ease the addition of pre-modifiers to the chunk
pdb.set_trace()
cur_pos = self.question_spacy_parse[word_ind].tag_
cur_lemma = self.question_spacy_parse[word_ind].lemma_
if is_determiner_tag(cur_pos) and \
target_words:
# Add a determiner to any existing chunk
target_words.append(word)
continue
if is_prepositional_tag(cur_pos) or \
is_modal_tag(cur_pos) or \
(word_ind == 0) or \
(cur_lemma in QA_pair.qa_special_words):
if target_words:
# Special words are transffered to the output as is
ret.append(self.get_chunk(prev_chunk_type,
target_words))
target_words = []
ret.append(word)
continue
# Determine new chunk type
if word.is_mapped:
if is_verb_tag(cur_pos):
cur_chunk_type = QA_pair.chunk_types.MAPPED_VERB
elif is_adverb_tag(cur_pos):
cur_chunk_type = QA_pair.chunk_types.MAPPED_ADV
else:
cur_chunk_type = QA_pair.chunk_types.MAPPED_NOUN
else:
if is_verb_tag(cur_pos):
cur_chunk_type = QA_pair.chunk_types.UNMAPPED_VERB
elif is_adverb_tag(cur_pos):
cur_chunk_type = QA_pair.chunk_types.UNMAPPED_ADV
else:
cur_chunk_type = QA_pair.chunk_types.UNMAPPED_NOUN
if (prev_chunk_type is None) or (not target_words) or \
((cur_chunk_type == prev_chunk_type) and \
((not QA_pair.is_mapped_type(cur_chunk_type)) or \
(target_words[-1].source_word_ind - word.source_word_ind) == 1)):
# This continues previous chunk
target_words.append(word)
else:
# This is a new chunk
ret.append(self.get_chunk(prev_chunk_type,
target_words))
target_words = [word]
prev_chunk_type = cur_chunk_type
if target_words:
ret.append(self.get_chunk(prev_chunk_type,
target_words))
self.reverse_template_numbers()
return list(reversed(ret))
def reverse_template_numbers(self):
"""
Reverse template elements numbers such that they appear in the
lexical order.
"""
nps = [np_chunk
for np_chunk in self.chunks.iterkeys()
if np_chunk.startswith("NP")]
vps = [np_chunk
for np_chunk in self.chunks.iterkeys()
if np_chunk.startswith("VP")]
advps = [chunk
for chunk in self.chunks.iterkeys()
if chunk.startswith("ADVP")]
new_chunks = {}
for ls, prefix in [(nps, "NP"),
(vps, "VP"),
(advps, "ADVP")]:
for elem_name in ls:
new_elem_name = "{}{}".format(prefix,
len(ls) - int(elem_name.replace(prefix, "")) - 1)
self.chunks[elem_name].source_word = new_elem_name
new_chunks[new_elem_name] = self.chunks[elem_name]
self.chunks = new_chunks
def extract_template(self):
"""
Extract a template from this question
"""
return self.chunk()
class Word_alignment:
"""
Container for word alignment between word in element (question or answer) to
the original sentence.
"""
def __init__(self, map_word, source_word_ind, raw_source):
"""
Map from source word and index to a target word and index.
Parses the input mapping format: {{target-index target-word}} | {source-word}
"""
self.source_word_ind = source_word_ind
self.source_word = raw_source[self.source_word_ind]
if map_word.startswith("{{") and \
map_word.endswith("}}"):
# Parse mapping format
map_word = map_word.replace("{{", '').replace("}}", '')
self.target_word_ind, self.target_word = map_word.split("|", 1)
self.target_word_ind = int(self.target_word_ind)
else:
# This word isn't mapped
self.target_word_ind = -1
self.target_word = None
# Indicate whether this word is mapped
self.is_mapped = (self.target_word is not None)
class Chunk:
"""
Chunk container class
"""
def __init__(self,
source_word,
target_words):
"""
Simple container
source_word - chunk name
target_words - list of Word_alignment instances
"""
self.source_word = source_word
self.target_words = target_words
def get_source_indices(self):
"""
Return the source indices of all words in this chunk.
"""
return [word.source_word_ind
for word
in self.target_words]
def get_sentence_indices(self):
"""
Get a list of sentence word indices participating
in the element.
"""
possible_mappings = [word
for word
in self.target_words
if not type(word) is str]
return [w.target_word_ind
for w in possible_mappings
if w.is_mapped]
def is_mapped(self):
"""
Return True iff at least one of the words
in this chunk is mapped.
"""
return any([word.is_mapped
for word in self.target_words])
def __str__(self):
"""
Textual representation of this chunk
"""
return " ".join([str(w) for w in self.target_words])
class Graph_to_amr:
"""
NetworkX to AMR
"""
def __init__(self, sent, amr_head):
"""
sent - a list of words in the sentence
"""
self.sent = sent
self.digraph = nx.DiGraph()
self.var_names = defaultdict(lambda : [])
self.span_to_var = {}
self.head = amr_head
def add_edge(self, u, v, label):
"""
Add and AMR edge from u to v with the the given label
will override any previous label between these two nodes
Both u and v are spans of indexes in sent
"""
self.digraph.add_edge(u,v)
self.digraph[u][v]["label"] = label
def get_var_name(self, span):
"""
Given a span, return its variable name.
Will create it if necessary.
Also returns whether this is a reentry.
"""
reent = True
if span not in self.span_to_var:
self.span_to_var[span] = self.gen_var(span)
reent = False
return self.span_to_var[span], reent
def get_pred_name(self, span):
"""
Similar to get_var_name,
but never reentrant
"""
return self.gen_var(span)
def get_pseudo_pb_sense(self, span):
"""
Fake a PropBank span
"""
return self.get_amr_concept(span)
def get_amr(self):
"""
Output AMR format
"""
# heads = find_heads(self.digraph, self.digraph.nodes())
# assert len(heads) == 1, "More/less than one head: {}".format(heads)
# self.head = heads[0]
return AMR(self.rooted_graph_to_amr(self.head))
def get_amr_concept(self, span):
"""
Get concept from sentence span
"""
# I assume that spaces are invalid
return "-".join(self.sent[span[0]: span[1]])
def rooted_graph_to_amr(self, head):
"""
Recursively create the string of an amr
TODO: what if there are cycles? should this create the arg-i-of?
"""
args = [":{} {}".format(self.simplify_label(feats["label"]),
self.rooted_graph_to_amr(neighbour))
for neighbour, feats in self.digraph[head].iteritems()]
var, reent = (self.get_pred_name(head), False) if args \
else self.get_var_name(head)
concept = self.get_pseudo_pb_sense(head) if args \
else self.get_amr_concept(head)
top = var if reent \
else "{} / {}".format(var, concept)
return top if (not args and reent) \
else "({} {})".format(top,
" ".join(args))
def gen_var(self, span):
"""
generate a new unique variable name for word
"""
# Use first letter convention
let = self.sent[span[0]][0].lower()
if not let.isalpha():
let = "na"
ret = "{}{}".format(let,
len(self.var_names[let]))
self.var_names[let].append(ret)
return ret
def simplify_label(self, label):
"""
Return a single word (hopefully wh-question) representing this label (a list of words)
to comply with AMR convention.
"""
opts = [qw for qw in question_words
if qw.lower() in map(lambda w: w.lower(), label)]
return opts[0]\
if opts else label[0]
class TemplateExtractor:
    """
    Container class for template extraction functions.

    Collects question/answer pairs into templates: each question is
    abstracted (chunks kept, determiners dropped, other words templatized)
    and recorded in ``template_dictionary`` keyed by the template string.
    """
    def __init__(self):
        """
        Initialize empty template state and load English stop words.
        """
        # Stop words plus '?' -- used by the (currently disabled) filter.
        self.stopwords = get_stop_words("en") + ['?']
        # All non-terminal / lemma tokens ever emitted into a template.
        self.vocab = set()
        self.invalid_questions = [] # Questions not passing the filter
        self.template_dictionary = defaultdict(list) # From template to questions
    def is_valid_question(self,
                          aligned_question,
                          parsed_question):
        """
        Returns True iff the given template is a valid relation:
        (1) Includes at least 1 mapped chunk
        (2) all other words are stopwords
        (3) includes at least 1 verb

        NOTE(review): the first statement below unconditionally returns
        True, so every question is accepted and the validation logic after
        it is unreachable dead code. Remove that line to re-enable the
        filter -- confirm this short-circuit was intentional.
        """
        return True
        if parsed_question is None:
            # This question failed to parse - return invalid question
            return False
        return \
            (not aligned_question[0].is_mapped) and\
            is_wh_question(parsed_question[0].tag_) and \
            any([word.is_mapped for word in aligned_question]) and \
            all([word.source_word.lower() in self.stopwords
                 for word in aligned_question
                 if (not word.is_mapped)]) and \
            any([is_verb_tag(word.tag_)
                 for word in parsed_question])
    def templatize_spacy_tok(self,
                             tok,
                             non_terminal_generator):
        """
        Return a template version of the given spaCy token.
        Returns None if the token should be omitted in the template
        (currently only determiners are omitted).
        """
        tag = tok.tag_
        if is_determiner_tag(tag):
            return None
        if is_prepositional_tag(tag) or \
           is_modal_tag(tag):
            # Template prepositions and modals by their POS tag.
            ret = non_terminal_generator.generate(tag)
        else:
            # Otherwise template by the lemma of the word.
            ret = non_terminal_generator.generate(tok.lemma_)
        # Record every emitted template token in the global vocabulary.
        self.vocab.add(ret)
        return ret
    def get_qa_template(self, qa):
        """
        Get a template version (a single string) of the given question.
        Modifies qa's chunks accordingly and records the QA pair under the
        resulting template in self.template_dictionary.
        Returns None when the question fails is_valid_question.
        """
        nt_generator = NonterminalGenerator()
        if not self.is_valid_question(qa.aligned_question,
                                      qa.question_spacy_parse):
            return None
        if qa.question_spacy_parse:
            # Take the lemma from the raw question for all non-mapped elements (= non chunks)
            # Ommit determiners from the question template
            template_ls = []
            for w in qa.template_question:
                if any([w.source_word.startswith(pref)
                        for pref in ["NP", "VP", "ADVP"]]):
                    # this is a chunk - leave as is
                    template_ls.append(w.source_word)
                else:
                    template_word = self.templatize_spacy_tok(qa.get_spacy_tok(w),
                                                              nt_generator)
                    if template_word is not None:
                        template_ls.append(template_word)
                        # Add the interpretation of this template word to the
                        # chunks of this QA
                        qa.chunks[template_word] = Chunk(template_word,
                                                         [w])
            template_str = ' '.join(template_ls)
        else:
            # Else - return the raw template_question
            template_str = qa.template_question_str
        # Normalize by adding a question mark
        # (We make sure that the template wouldn't have a question mark before that)
        template_str += " ?"
        # Record and return
        self.template_dictionary[template_str].append(qa)
        return template_str
    # NOTE(review): the method below is kept as commented-out legacy code;
    # it predates get_qa_template and uses Python-2-only constructs
    # (dict.iteritems, tuple-unpacking lambda parameters).
    # def extract_templates(self,
    #                       sents,
    #                       output_fn,
    #                       apply_filter,
    #                       lemmatize):
    #     """
    #     1. Filter templates
    #     2. Extract templates with stats to file
    #     @param: apply filter -
    #             bool, controls whether to filter the templates(using self.is_valid_question
    #     @param: lemmatize -
    #             bool, controls whether to lemmatize the template
    #     """
    #     d = defaultdict(list)
    #     for sent in sents:
    #         for qa in sent.qa_pairs:
    #             nt_generator = NonterminalGenerator()
    #             if apply_filter and self.is_valid_question(qa.aligned_question,
    #                                                        qa.question_spacy_parse):
    #                 if lemmatize and qa.question_spacy_parse:
    #                     # Take the lemma from the raw question for all non-mapped elements (= non chunks)
    #                     # Ommit determiners from the question template
    #                     template_ls = []
    #                     for w in qa.template_question:
    #                         if w.is_mapped:
    #                             # this is a chunk - leave as is
    #                             template_ls.append(w.source_word)
    #                         else:
    #                             template_word = self.templatize_spacy_tok(qa.get_spacy_tok(w),
    #                                                                       nt_generator)
    #                             if template_word is not None:
    #                                 template_ls.append(template_word)
    #                                 # Add the interpretation of this template word to the
    #                                 # chunks of this QA
    #                                 qa.chunks[template_word] = Chunk(template_word,
    #                                                                  [w.source_word])
    #                     template_str = ' '.join(template_ls)
    #                 else:
    #                     # Else - return the raw template_question
    #                     template_str = qa.template_question_str
    #                 # Normalize by adding a question mark
    #                 # (We make sure that the template wouldn't have a question mark before that)
    #                 template_str += " ?"
    #                 d[template_str].append(qa)
    #                 qa.template_str = template_str
    #             else:
    #                 # Not a valid question
    #                 self.invalid_questions.append(qa)
    #     ordered_templates = sorted(d.iteritems(),
    #                                key = lambda(s,
    #                                             ls): len(ls),
    #                                reverse = True)
    #     total = sum(map(len,
    #                     map(itemgetter(1),
    #                         ordered_templates))) * 1.0
    #     logging.info("total # of valid questions: {}".format(total))
    #     data = []
    #     cdf = 0
    #     for i, (s, ls) in enumerate(ordered_templates):
    #         cur = len(ls)
    #         cdf += cur
    #         if ((cdf / total) > .8) and \
    #            ((cdf - cur) / total <= .8):
    #             logging.info("CDF .8 reached in {} templates".format(i))
    #         data.append(pd.Series([s,
    #                                round(cur / total, 4),
    #                                round(cdf / total, 4),
    #                                "\n".join(["{} {}\n{}".format(qa.raw_question_str,
    #                                                              qa.raw_answer_str,
    #                                                              pformat([(chunk_name, str(chunk))
    #                                                                       for chunk_name, chunk
    #                                                                       in qa.chunks.iteritems()]))
    #                                           for qa in random.sample(ls,
    #                                                                   min(len(ls), 3))])]))
    #     df = pd.DataFrame(data)
    #     df.columns = ["Template", "Prob", "CDF", "Examples"]
    #     df.to_csv(out_fn,
    #               header = True,
    #               index = False)
    #     return ordered_templates
def load_sentences(fn):
    """
    Returns a list of sentences as annotated in the input CSV file *fn*.

    The file interleaves sentence-introducing rows (raw_question/raw_answer
    are NaN) with QA rows attached to the most recent sentence.

    NOTE(review): this is Python 2 code (``basestring``/``unicode``); it
    will not run under Python 3 without porting. It also assumes at least
    one sentence row exists -- an empty file would leave cur_sent as None
    and crash on the final consolidate_qas().
    """
    df = pd.read_csv(fn, names = ["WID", "special",
                                  "raw_question", "raw_answer",
                                  "aligned_question", "aligned_answer"])
    ret = []
    cur_sent = None
    template_extractor = TemplateExtractor()
    for row_index, row in df.iterrows():
        # Sentence rows have no question/answer text (pandas reads them as
        # NaN floats), which is what this type check detects.
        if not(isinstance(row["raw_question"], basestring) and \
               isinstance(row["raw_answer"], basestring)):
            # This is a row introducing a new sentence
            if cur_sent:
                # Finalize and store the previous sentence first.
                cur_sent.consolidate_qas()
                ret.append(cur_sent)
            pos_tags = [word.tag_
                        for word
                        in spacy_ws.parser(unicode(row["WID"],
                                                   encoding = 'utf8'))]
            cur_sent = Sentence(row["WID"].split(" "),
                                pos_tags, # POS tags
                                template_extractor,
                                row["raw_question"]) # = Sentence id
        else:
            # This is a QA pair relating to a previously introduced sentence
            cur_sent.add_qa_pair(row["WID"], row["special"],
                                 row["raw_question"].split(" "), row["raw_answer"].split(" "),
                                 row["aligned_question"].split(" "), row["aligned_answer"].split(" "))
    cur_sent.consolidate_qas()
    ret.append(cur_sent) # Append the last sentence
    return ret
# Wh-question words, in priority order: simplify_label picks the first one
# of these that occurs (case-insensitively) in a label.
question_words = ["what",
                  "when",
                  "where",
                  "which",
                  "who",
                  "whom",
                  "whose",
                  "why",
                  "how"]
if __name__ == "__main__":
    # Smoke test: build a tiny two-edge graph and print its AMR rendering.
    amr = Graph_to_amr("orange is the new black".split())
    amr.add_edge((0,1),(1,2), "what is my name?".split())
    # Function-call form: prints the same single value under Python 2 and
    # is valid syntax under Python 3, unlike the original 'print expr'
    # statement, which is a SyntaxError on Python 3.
    print(amr.get_amr())
| [
2,
34579,
17944,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
18931,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
11748,
... | 1.89378 | 19,149 |
from docutils import nodes
from docutils import statemachine
from docutils.parsers.rst import Directive
# LaTeX preamble injected into the Sphinx PDF build: defines a ``jsonframe``
# environment -- an mdframed box with an optional TikZ title tab --
# presumably used to frame JSON/code examples; confirm against the Sphinx
# config that consumes this constant.
latex_preamble = r"""
\usepackage{mdframed}
\usepackage{tikz}
\newenvironment{jsonframe}[2]{%
\ifstrempty{#1}%
{}%
{\mdfsetup{%
skipabove=10pt,
frametitle={%
\tikz[baseline=(current bounding box.east),outer sep=0pt,text=white]
\node[anchor=east,rectangle,fill=#2]
{\strut \textsf{ #1 }};}}%
}%
\mdfsetup{innertopmargin=10pt,linecolor=#2,%
skipabove=10pt,
linewidth=1pt,topline=true,nobreak=true,
frametitleaboveskip=\dimexpr-\ht\strutbox\relax,}
\begin{mdframed}[]\relax%
}{\end{mdframed}}
"""
| [
6738,
2205,
26791,
1330,
13760,
198,
6738,
2205,
26791,
1330,
1185,
368,
20480,
198,
6738,
2205,
26791,
13,
79,
945,
364,
13,
81,
301,
1330,
34736,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
198,
17660,
87,
62,
79,
1476,... | 2.095238 | 336 |
#!/usr/bin/env python
import fileinput
import re
# Build an acronym from the capitalized words of the input (files named on
# the command line, or stdin).
acronym = ""
for line in fileinput.input():
    # Raw string: plain '\s' is an invalid escape sequence (a
    # DeprecationWarning today, slated to become a SyntaxError).
    words = re.split(r'\s+', line)
    for word in words:
        # Original test kept on purpose: any word whose first character is
        # unchanged by upper() qualifies -- this includes digits and
        # punctuation, not only capital letters.
        if (len(word) > 0 and word[0] == word[0].upper()):
            acronym += word[0]
print(acronym)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
2393,
15414,
198,
11748,
302,
198,
198,
330,
1313,
4948,
796,
13538,
198,
1640,
1627,
287,
2393,
15414,
13,
15414,
33529,
198,
220,
220,
220,
2456,
796,
302,
13,
35312,
107... | 2.278261 | 115 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def cutoff(log,phi=None,sw=None,vsh=None,elbow=0.9,swaq=0.7,ax=None, plot=True):
    """Estimate Phi/Sw/Vsh cutoff values from the hydrocarbon-column CDF.

    For each property the function accumulates the fraction of the total
    hydrocarbon column retained as the cutoff sweeps its range, then
    interpolates the cutoff value at which the *elbow* fraction of the
    column is preserved.

    Parameters
    ----------
    log : pandas.DataFrame
        Well-log table indexed by depth, with columns named by ``phi``,
        ``sw`` and ``vsh``.
    phi : str, optional
        Porosity column name, by default None
    sw : str, optional
        Water-saturation column name, by default None
    vsh : str, optional
        Shale-volume column name, by default None
    elbow : float, optional
        Fraction of the hydrocarbon column to preserve, by default 0.9
    swaq : float, optional
        Sw threshold separating pay from aquifer, by default 0.7
    ax : matplotlib.axes.Axes, optional
        Axes to draw on (current axes when None), by default None
    plot : bool, optional
        Draw the cutoff curves when True, by default True

    Returns
    -------
    list
        The cutoff dict keyed by column name, followed by the Axes when
        plotted. NOTE(review): the same ``co`` dict object is appended once
        per property inside the loop (three aliases of one dict) -- this
        looks like it was meant to be appended once after the loop; confirm
        before changing, since callers may rely on the current list length.
    """
    # Mean sample spacing of the depth index.
    step=np.mean(np.diff(log.index))
    # Per-sample hydrocarbon column for pay intervals (Sw below swaq).
    Hc=log.loc[log[sw]<swaq,phi]*(1-log.loc[log[sw]<swaq,sw])*step
    Hct=sum(Hc[~np.isnan(Hc)])
    # Candidate cutoff sweep ranges for each property.
    vshr=np.linspace(0,1,20)
    phir=np.linspace(0,0.4,20)
    swr=np.linspace(0,1,20)
    ranges=[phir,swr,vshr]
    symbols=[phi,sw,vsh]
    hcl=[]
    co={}
    # NOTE(review): local name 're' shadows the stdlib re module (local
    # scope only, but worth renaming in a follow-up).
    re=[]
    for (i,s) in enumerate(symbols):
        d=[]
        for j in ranges[i]:
            # Porosity keeps values >= cutoff; Sw and Vsh keep <= cutoff.
            if i!=0:
                x=np.sum(Hc[log[s]<=j])/Hct
            else:
                x=np.sum(Hc[log[s]>=j])/Hct
            d.append(x)
        hcl.append(d)
        # Invert the retained-fraction curve to read the cutoff at *elbow*.
        f = interp1d(hcl[i],ranges[i])
        co[s]=f(elbow)
        re.append(co)
    if plot:
        ax = ax or plt.gca()
        ax.plot(phir,hcl[0],color='red',label='Phie CutOff={}'.format(np.round(co[symbols[0]],decimals=2)))
        ax.hlines(elbow,0,1,linestyle='--')
        ax.vlines(co[symbols[0]],0,1,linestyle='--')
        ax.set_xlim([0,1])
        ax.set_ylim([0,1])
        ax.plot(swr,hcl[1],color='blue',label="Sw CutOff={}".format(np.round(co[symbols[1]],decimals=2)))
        ax.vlines(co[symbols[1]],0,1,linestyle='--')
        ax.plot(vshr,hcl[2],color='gold',label='Vsh CutOff={}'.format(np.round(co[symbols[2]],decimals=2)))
        ax.vlines(co[symbols[2]],0,1,linestyle='--')
        ax.set_xlabel('Phi-Sw-Vsh CutOff')
        ax.set_ylabel('Norm Hydrocarbon Column')
        ax.set_title('CutOff Estimation HC')
        ax.legend()
        re.append(ax)
    return re
def pickett(rw=0.15, a=1, m=2, n=2, swr=None):
    """Draw a Pickett plot (log-log porosity vs. resistivity) with iso-Sw lines.

    Parameters
    ----------
    rw : float, optional
        Formation water resistivity [Ohm m], by default 0.15
    a : int, optional
        Archie tortuosity factor, by default 1
    m : int, optional
        Cementation exponent, by default 2
    n : int, optional
        Saturation exponent, by default 2
    swr : array_like, optional
        Water-saturation values for the iso-lines; defaults to
        ``np.linspace(0.1, 1, 5)``. Evaluated inside the function: the
        original signature default was a shared, mutable numpy array
        created once at import time.

    Returns
    -------
    matplotlib.axes.Axes
        The (current) axes the plot was drawn on.
    """
    if swr is None:
        swr = np.linspace(0.1, 1, 5)
    else:
        # Accept plain lists/tuples as well as arrays.
        swr = np.asarray(swr)
    # Porosity at Rt = 1: log(phi) = -(1/m) log(Rt/Rw) => phi = rw**(1/m).
    Phi_rt1 = np.power(rw, 1 / m)
    rts = (a * rw) / (np.power(Phi_rt1, m) * np.power(swr, n))
    Rti = rts * np.power(Phi_rt1, m) / a
    phir = np.logspace(-2, 0, num=9)
    # One resistivity curve per saturation value (Archie: Rt = a*Rti*phi^-m).
    RT = []
    for i in Rti:
        rtr = i * a * np.power(phir, -m)
        RT.append(rtr)
    ax = plt.gca()
    for (k, i) in enumerate(RT):
        ax.loglog(i, phir, label='Sw={}'.format(swr[k]))
    ax.set_xticks(np.logspace(-1, 3, 5))
    ax.set_xticklabels(np.logspace(-1, 3, 5))
    ax.set_yticks(np.logspace(-2, 0, 3))
    ax.set_yticklabels(np.logspace(-2, 0, 3))
    ax.legend()
    ax.set_ylabel('Porosity[]')
    ax.set_xlabel('Resistivity [Ohm m]')
    ax.set_title('Pickett Plot- a={},m={},n={},rw={}'.format(a, m, n, rw))
    return ax
def windland(phirange=None, r35range=None):
    """Plot Winland-R35 iso-pore-throat lines: permeability vs. porosity.

    Parameters
    ----------
    phirange : array_like, optional
        Porosity values (fraction); defaults to ``np.linspace(0.05, 0.30, 5)``.
    r35range : array_like, optional
        R35 pore-throat radii; defaults to ``np.logspace(0, 1, 5)``.

    Returns
    -------
    matplotlib.axes.Axes
        Axes holding one permeability curve (log scale) per R35 value.
    """
    if phirange is None:
        phirange = np.linspace(0.05, 0.30, 5)
    else:
        phirange = np.asarray(phirange)
    if r35range is None:
        r35range = np.logspace(0, 1, 5)
    else:
        r35range = np.asarray(r35range)
    # Winland-style regression constants.
    # NOTE(review): np.exp(-A) uses the natural exponent while the Winland
    # correlation is usually quoted in log10 form -- confirm against the
    # intended reference before reusing these numbers elsewhere.
    A = 0.735
    B = 0.588
    C = 0.8641
    # One permeability column per R35 value:
    #   k = (R35 * (100*phi)^C * e^-A) ^ (1/B)
    columns = [
        np.power(v * np.power(phirange * 100, C) * np.exp(-A), 1 / B)
        for v in r35range
    ]
    K = np.column_stack(columns)
    ax = plt.gca()
    ax.plot(phirange, K)
    ax.set_yscale('log')
    ax.set_xlabel('Phi[]')
    ax.set_ylabel('Permeability [md]')
    ax.legend(list(r35range))
    return ax
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
987,
79,
16,
67,
198,
198,
4299,
45616... | 1.899957 | 2,339 |
from __future__ import absolute_import
from builtins import str
from builtins import range
from .base import BaseVnVmTest
import traffic_tests
from vn_test import *
from vm_test import *
from floating_ip import *
from policy_test import *
from compute_node_test import ComputeNodeFixture
from user_test import UserFixture
from multiple_vn_vm_test import *
from tcutils.wrappers import preposttest_wrapper
sys.path.append(os.path.realpath('tcutils/pkgs/Traffic'))
from traffic.core.stream import Stream
from traffic.core.profile import create, ContinuousProfile
from traffic.core.helpers import Host
from traffic.core.helpers import Sender, Receiver
from common import isolated_creds
import inspect
from tcutils.util import skip_because, is_almost_same
from tcutils.tcpdump_utils import start_tcpdump_for_intf,\
stop_tcpdump_for_intf, verify_tcpdump_count
import test
from tcutils.contrail_status_check import ContrailStatusChecker
from tcutils.traffic_utils.hping_traffic import Hping3
# end TestBasicVMVN0
# end TestMetadataSSL
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
3170,
1040,
1330,
965,
198,
6738,
3170,
1040,
1330,
2837,
198,
6738,
764,
8692,
1330,
7308,
53,
77,
53,
76,
14402,
198,
11748,
4979,
62,
41989,
198,
6738,
410,
77,
62,
9288,
... | 3.257862 | 318 |
# This is a client to readtags data from ID3 tags and POST the data to the mushme database
# @Author rootavish, copyright the DBMS team, 2014
import os
import sys
#To create a json to throw at the server
import json
#To encode the image into the json
import base64
#For the network capabilities
import urllib2
import urllib
#For parsing the ID3 tags
import mutagen
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3
from mutagen import File
#for reading passwords
import getpass
#For hashing said passwords
import hashlib
'''
MAIN starts here
'''
if __name__=="__main__":
    # NOTE(review): main() is not defined in the portion of this file shown
    # here -- confirm its definition exists before running as a script.
    main()
| [
2,
770,
318,
257,
5456,
284,
1100,
31499,
1366,
422,
4522,
18,
15940,
290,
24582,
262,
1366,
284,
262,
15026,
1326,
6831,
198,
2,
2488,
13838,
6808,
615,
680,
11,
6634,
262,
20137,
5653,
1074,
11,
1946,
198,
198,
11748,
28686,
198,
... | 3.318681 | 182 |
import asyncio
from copy import copy
from time import time_ns
from brick.exceptions import ValidationError
from brick.hardware import Hardware, register_hardware
from brick.hardware.i2c import i2c_manager
from brick.hardware.base import DigitalInput, DigitalOutput
from brick.hardware.mcp import mcp230xx
# Default I2C address of an MCP230xx expander (address pins tied low).
MCP230XX_DEFAULT_ADDRESS = 0x20
# MCP23017 register map keyed by port ('a'/'b'); offsets match the
# datasheet's IOCON.BANK=0 layout -- confirm the driver never sets BANK=1.
MCP23017_DIRECTION_REGISTER = dict(a=0x00, b=0x01)  # bit 1=in 0=out
MCP23017_PULL_UP_REGISTER = dict(a=0x0C, b=0x0D)  # bit 1 -> pup 100k
MCP23017_INPUT_REGISTER = dict(a=0x12, b=0x13)
MCP23017_OUTPUT_REGISTER = dict(a=0x14, b=0x15)
@register_hardware()
| [
11748,
30351,
952,
198,
6738,
4866,
1330,
4866,
198,
6738,
640,
1330,
640,
62,
5907,
198,
6738,
17214,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
17214,
13,
10424,
1574,
1330,
28715,
11,
7881,
62,
10424,
1574,
198,
6738,
1721... | 2.533898 | 236 |
import os
import h5py
import json
import sys
sys.path.append('.')
import argparse
if __name__ == '__main__':
    args = parse_arguments()
    for scene in args.scenes:
        try:
            with open('{}/{}/metadata.json'.format(args.data_dir, scene), 'r') as f:
                metadata_list = json.load(f)
            # Invert the metadata: object id -> list of frame keys in which
            # that object is visible.
            visible_map = {}
            for frame_key, metadata in metadata_list.items():
                for obj in metadata['objects']:
                    if obj['visible']:
                        visible_map.setdefault(obj['objectId'], []).append(frame_key)
            with open('{}/{}/visible_object_map.json'.format(args.data_dir, scene), 'w') as f:
                json.dump(visible_map, f)
            print("Finished visible_object_map.json for scene {}".format(scene))
        except Exception as e:
            # Best-effort per scene: report the failure and continue.
            print(scene, e)
11748,
28686,
198,
11748,
289,
20,
9078,
198,
11748,
33918,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
2637,
8,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
... | 1.935115 | 524 |
import pytest
import pennylane as qml
from pennylane import numpy as np
import qnetvo as qnet
| [
11748,
12972,
9288,
198,
11748,
22429,
2645,
1531,
355,
10662,
4029,
198,
6738,
22429,
2645,
1531,
1330,
299,
32152,
355,
45941,
198,
198,
11748,
10662,
3262,
13038,
355,
10662,
3262,
628
] | 3.096774 | 31 |
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView
from .models import Product
from django.http import Http404
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
11,
42585,
7680,
198,
6738,
764,
27530,
1330,
8721,
198,
6738,
42625,
14208,
13,
... | 3.611111 | 54 |
__auther__ = "Will"  # NOTE(review): presumably a typo for __author__
__project__ = "Prac 2"
import math
import matplotlib.pyplot as mpl
if __name__ == '__main__':
    # NOTE(review): main() is not defined in the portion of this file shown
    # here -- confirm its definition exists before running as a script.
    main()
834,
2306,
372,
834,
796,
366,
8743,
1,
198,
834,
16302,
834,
796,
366,
6836,
330,
362,
1,
198,
198,
11748,
10688,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
285,
489,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417... | 2.490196 | 51 |
from abc import ABC
from .module import Module
from .. import functional as F
from cranet import Tensor
from typing import (
Optional,
)
| [
6738,
450,
66,
1330,
9738,
198,
198,
6738,
764,
21412,
1330,
19937,
198,
6738,
11485,
1330,
10345,
355,
376,
198,
6738,
41286,
316,
1330,
309,
22854,
198,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
32233,
11,
198,
8,
628,
628,
... | 3.386364 | 44 |
import atexit
import errno
import inspect
import os
import stat
import subprocess
from threading import Thread
from uuid import uuid4
# fuse-python is an optional system-level dependency: fail fast with
# install instructions instead of a bare ImportError.
try:
    import fuse  # pytype: disable=import-error
except ImportError:  # pragma: no cover
    raise ImportError(
        inspect.cleandoc(
            '''
            Failed to import fuse, the following steps show you how to install it:
            sudo apt install -y fuse libfuse-dev
            pip3 install fuse-python --user
            '''))
# Very old fuse-py builds predate fuse.__version__; reject them.
if not hasattr(fuse, '__version__'):  # pragma: no cover
    raise RuntimeError(
        "your fuse-py doesn't know of fuse.__version__, probably it's too old.")
# FUSE Python API level this module is written against.
fuse.fuse_python_api = (0, 2)
# NOTE(review): FakeFS is not defined in the code shown here -- presumably
# the fuse.Fuse subclass implemented elsewhere in this file; confirm its
# definition precedes this module-level instantiation.
fakefs = FakeFS()
| [
11748,
379,
37023,
198,
11748,
11454,
3919,
198,
11748,
10104,
198,
11748,
28686,
198,
11748,
1185,
198,
11748,
850,
14681,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
28311,
25,
198,
220,
2... | 2.519435 | 283 |
"""
14499 : 주사위 굴리기
URL : https://www.acmicpc.net/problem/14499
Input #1 :
4 2 0 0 8
0 2
3 4
5 6
7 8
4 4 4 1 3 3 3 2
Output #1 :
0
0
3
0
0
8
6
3
Input #2 :
3 3 1 1 9
1 2 3
4 0 5
6 7 8
1 3 2 2 4 4 1 1 3
Output #2 :
0
0
0
3
0
1
0
6
0
Input #3 :
2 2 0 0 16
0 2
3 4
4 4 4 4 1 1 1 1 3 3 3 3 2 2 2 2
Output #3 :
0
0
0
0
Input #4 :
3 3 0 0 16
0 1 2
3 4 5
6 7 8
4 4 1 1 3 3 2 2 4 4 1 1 3 3 2 2
Output #4 :
0
0
0
6
0
8
0
2
0
8
0
2
0
8
0
2
"""
EAST = 1
WEST = 2
NORTH = 3
SOUTH = 4

# Die faces; every face starts blank (0) per the problem statement
# (Baekjoon 14499). Commands are guaranteed to be in 1..4.
top = 0
back = 0
right = 0
left = 0
front = 0
bottom = 0

matrix = []
n, m, y, x, k = map(int, input().split())
for _ in range(n):
    matrix.append(list(map(int, input().split())))
commands = list(map(int, input().split()))

for command in commands:
    if command == EAST:
        if x + 1 > m - 1:
            continue  # would leave the board: ignore this command
        x += 1
        # Roll east: top->right->bottom->left->top; back/front unchanged.
        top, right, bottom, left = left, top, right, bottom
    elif command == WEST:
        if x - 1 < 0:
            continue
        x -= 1
        # Roll west: inverse of the east rotation.
        top, right, bottom, left = right, bottom, left, top
    elif command == NORTH:
        if y - 1 < 0:
            continue
        y -= 1
        # Roll north: top->back->bottom->front->top; left/right unchanged.
        top, back, bottom, front = front, top, back, bottom
    elif command == SOUTH:
        if y + 1 > n - 1:
            continue
        y += 1
        # Roll south: inverse of the north rotation.
        top, back, bottom, front = back, bottom, front, top
    # Copy rule: a blank cell takes the die's bottom face; otherwise the
    # die's bottom takes the cell's number and the cell is blanked.
    if matrix[y][x] == 0:
        matrix[y][x] = bottom
    else:
        bottom = matrix[y][x]
        matrix[y][x] = 0
    print(top)
| [
37811,
198,
220,
220,
220,
20224,
2079,
1058,
23821,
96,
120,
168,
8955,
168,
250,
226,
220,
166,
113,
112,
167,
99,
105,
166,
116,
108,
198,
220,
220,
220,
10289,
1058,
3740,
1378,
2503,
13,
330,
9383,
14751,
13,
3262,
14,
45573,
... | 1.670439 | 1,502 |
#!/usr/bin/env python
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from ImpactDecoder import RadioTapDecoder
import dot11, ImpactPacket
from binascii import hexlify
import unittest
# Run the decoder tests at import time with verbose output.
# NOTE(review): TestRadioTapDecoder is not defined in the portion of this
# file shown here -- confirm the TestCase class exists above these lines.
suite = unittest.TestLoader().loadTestsFromTestCase(TestRadioTapDecoder)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
7926,
11,
428,
318,
845,
13400,
11,
475,
314,
1101,
287,
21015,
362,
13,
20,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
553,
40720,
492,
4943,
198,
198,
673... | 2.81746 | 126 |
import os
| [
11748,
28686,
201,
198,
201,
198
] | 2.166667 | 6 |
from django.shortcuts import render, reverse
from django.views import View
from subject.models import subjectModel
from django.shortcuts import get_object_or_404
from video.models import videoModel
from line.models import lineModel
# rest framework
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.generics import GenericAPIView, mixins
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from subject.serializers import SubjectSerializer, SubjectAppSerializer
from rest_framework.pagination import PageNumberPagination
# 过滤
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
# Create your views here.
class SubjectViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.CreateModelMixin, mixins.UpdateModelMixin,
                     viewsets.GenericViewSet):
    """
    List all subjects, or create a new subject.
    """
    queryset = subjectModel.objects.all()
    serializer_class = SubjectSerializer
    # NOTE(review): SubjectPagination is neither imported nor defined in the
    # code shown here -- confirm it exists in the full module.
    pagination_class = SubjectPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_fields = ('category', 'state', 'pub_date', 'area')
    search_fields = ('name', 'tags', 'director', 'actress')
    ordering_fields = ('pub_date', 'update_time')
    def get_serializer_class(self):
        '''
        Select the serializer class for this request.

        Returns the app serializer when the client identifies itself as
        "APP" via the CLIENTTYPE header or query parameter; otherwise the
        default serializer.
        '''
        # if self.action == "list":
        #     return APUserListSerizlizer
        # elif self.action == "create":
        #     return APUserBindSerizlizer
        # date_type = self.request.query_params.get('type', None)
        # NOTE(review): 'cient_type' is presumably a typo for 'client_type'
        # (local variable only, safe to rename in a follow-up). The 'and/or'
        # condition below relies on 'and' binding tighter than 'or'.
        cient_type = self.request.META.get('HTTP_CLIENTTYPE', None)
        if cient_type and cient_type.upper() == 'APP' or self.request.query_params.get('CLIENTTYPE',
                                                                                       '').upper() == 'APP':
            return SubjectAppSerializer
        return SubjectSerializer
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
9575,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
198,
6738,
2426,
13,
27530,
1330,
2426,
17633,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
264... | 2.515528 | 805 |
class MyMeta(type):
    """Metaclass meant to enforce the library's naming conventions.

    Convention: all method and attribute names must be lower case.
    (Translated from the original German docstring.)

    NOTE(review): as shown, the class body contains no enforcement logic
    (no __new__/__init__ override) -- presumably still to be implemented
    or defined elsewhere; confirm.
    """
# Class that violates the conventions. (Translated from German.)
# NOTE(review): MyClass is not defined in the portion of this file shown
# here -- confirm its definition exists above these lines.
mc = MyClass(1)
print(mc.__class__.__mro__)
| [
4871,
2011,
48526,
7,
4906,
2599,
198,
220,
220,
220,
37227,
360,
444,
68,
3395,
461,
75,
21612,
523,
297,
4656,
11680,
268,
4587,
7980,
560,
6184,
120,
527,
86,
330,
831,
13,
198,
220,
220,
220,
220,
198,
220,
220,
220,
11680,
26... | 2.445455 | 110 |
import unittest
from envirocar.client.api.track_api import _parse_track_df, _parse_tracks_list_df
if __name__ == '__main__':
    # Allow running this test module directly (python <file>).
    unittest.main()
11748,
555,
715,
395,
198,
198,
6738,
17365,
7058,
7718,
13,
16366,
13,
15042,
13,
11659,
62,
15042,
1330,
4808,
29572,
62,
11659,
62,
7568,
11,
4808,
29572,
62,
46074,
62,
4868,
62,
7568,
198,
198,
361,
11593,
3672,
834,
6624,
705,
... | 2.607143 | 56 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from airflow.providers.google.suite.transfers.gcs_to_sheets import GCSToGoogleSheetsOperator
# Shared fixtures for the GCSToGoogleSheetsOperator tests.
GCP_CONN_ID = "test"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
SPREADSHEET_ID = "1234567890"
VALUES = [[1, 2, 3]]
BUCKET = "destination_bucket"
PATH = "path/to/reports"
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.428125 | 320 |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for generating main.tf file to configure Terraform Provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util.declarative import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import files
from mako import runtime
from mako import template
_SCHEMA_VERSION = '0.2'
_MIN_PROVIDER_VERSION = 'v3.90.0'
_SUPPORTED_MSG = ('This command supports Google Terraform Provider version'
' {}+ and Terraform Provider Schema {}'.format(
_MIN_PROVIDER_VERSION, _SCHEMA_VERSION))
_DETAILED_HELP = {
'DESCRIPTION':
"""{description}
"""+ _SUPPORTED_MSG,
'EXAMPLES':
"""
To generate a `main.tf` file in the current directory using the gcloud default values for `zone`, `region` and `project` run:
$ {command}
To generate a `main.tf` file in the current directory using the user suppplied values for `zone`, `region` and `project` run:
$ {command} --project="my-project-id" --region="us-central1" --zone="us-central1-c
To generate a `main.tf` file in the current directory using the gcloud default `billing_project` run:
$ {command} --use-gcloud-billing-project
To generate a `main.tf` file in the current directory using user specified `billing_project` value run:
$ {command} --tf-user-project-override --tf-billing-project="my-other-project-id"
"""
}
_INIT_TEMPLATE_NAME = os.path.join(
os.path.dirname(__file__), 'templates', 'main_tf.tpl')
INIT_FILE_TEMPLATE = template.Template(filename=_INIT_TEMPLATE_NAME)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class InitProvider(base.DeclarativeCommand):
"""Generate main.tf file to configure Google Cloud Terraform Provider."""
detailed_help = _DETAILED_HELP
def _GetBillingParams(self, args_namspace):
"""Process billing project flags in args and return Terraform settings."""
use_gcloud_billing = args_namspace.use_gcloud_billing_project
user_project_override = billing_project = None
if use_gcloud_billing:
billing_project = properties.VALUES.billing.quota_project.Get()
user_project_override = 'true'
elif args_namspace.tf_user_project_override:
billing_project = args_namspace.tf_billing_project
user_project_override = 'true'
return user_project_override, billing_project
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
2,
15069,
33448,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 2.997258 | 1,094 |
"""guess_rgb, guess_multiscale, guess_labels.
"""
import numpy as np
def guess_rgb(shape):
    """Guess if the passed shape comes from rgb data.

    Any shape with more than two dimensions whose last axis is shorter
    than 5 is treated as rgb (including rgba). Note the heuristic also
    accepts trailing axes of length 1 or 2, not only 3 and 4.

    Parameters
    ----------
    shape : list of int
        Shape of the data that should be checked.

    Returns
    -------
    bool
        If data is rgb or not.
    """
    channels = shape[-1]  # raises IndexError for an empty shape, as before
    return len(shape) > 2 and channels < 5
def guess_multiscale(data):
    """Guess whether the passed data is multiscale, process it accordingly.

    Data is multiscale when it is a sequence of arrays with strictly
    decreasing sizes. Equal sizes everywhere, or any non-decreasing step,
    raises ValueError; a one-element list/tuple is unwrapped to its only
    array.

    Parameters
    ----------
    data : array or list of array
        Data that should be checked.

    Returns
    -------
    multiscale : bool
        True if the data is thought to be multiscale, False otherwise.
    data : list or array
        The input data, perhaps with the leading axis removed.
    """
    # A single array-like with ndim > 1 cannot be multiscale. Checking ndim
    # first also avoids iterating lazy arrays (e.g. zarr), which would
    # instantiate subsets of them.
    if hasattr(data, 'ndim') and data.ndim > 1:
        return False, data

    shapes = [level.shape for level in data]
    sizes = np.array([np.prod(s, dtype=np.uint64) for s in shapes])

    if len(sizes) == 1 and isinstance(data, (list, tuple)):
        # Pyramid with only one level: unwrap it.
        return False, data[0]
    if len(sizes) <= 1:
        return False, data

    if bool(np.all(sizes == sizes[0])):
        # Every level has the same size; a genuine single array should have
        # been caught by the ndim check above.
        raise ValueError(
            'Input data should be an array-like object, or a sequence of '
            'arrays of decreasing size. Got arrays of single shape: '
            f'{shapes[0]}'
        )
    if not bool(np.all(sizes[:-1] > sizes[1:])):
        raise ValueError(
            'Input data should be an array-like object, or a sequence of '
            'arrays of decreasing size. Got arrays in incorrect order, '
            f'shapes: {shapes}'
        )
    return True, data
def guess_labels(data):
    """Guess if array contains labels data.

    Returns 'labels' when *data* has a 32/64-bit (signed or unsigned)
    integer dtype, 'image' otherwise.
    """
    label_dtypes = (np.int32, np.uint32, np.int64, np.uint64)
    if getattr(data, 'dtype', None) in label_dtypes:
        return 'labels'
    return 'image'
| [
37811,
5162,
408,
62,
81,
22296,
11,
4724,
62,
16680,
2304,
1000,
11,
4724,
62,
23912,
1424,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
4724,
62,
81,
22296,
7,
43358,
2599,
198,
220,
220,
220,
37227,
8205,
4... | 2.491394 | 1,162 |
from compliance_checker.base import BaseCheck, BaseNCCheck, Result
import logging
logger = logging.getLogger(__name__)
from xsf_checker import __version__
| [
6738,
11846,
62,
9122,
263,
13,
8692,
1330,
7308,
9787,
11,
7308,
45,
4093,
258,
694,
11,
25414,
198,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
6738,
2124,
28202,
62,... | 3.18 | 50 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gcp module - cloudresourcemanager.py"""
import typing
import unittest
import mock
from tests.providers.gcp import gcp_mocks
class GoogleCloudResourceManagerTest(unittest.TestCase):
"""Test Google Cloud Resource Manager class."""
# pylint: disable=line-too-long
  @typing.no_type_check
  @mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
  @mock.patch('libcloudforensics.providers.gcp.internal.cloudresourcemanager.GoogleCloudResourceManager.GrmApi')
  def testGetResource(self, mock_grm_api, mock_execute_request):
    """Validates the GetResource function"""
    # Mocked ExecuteRequest returns a single project-resource page.
    mock_execute_request.return_value = [gcp_mocks.MOCK_CLOUD_RESOURCE_PROJECT]
    mock_resource_client = mock_grm_api.return_value.projects.return_value
    response = gcp_mocks.FAKE_CLOUD_RESOURCE_MANAGER.GetResource(
        'projects/000000000000')
    # GetResource must issue a 'get' request keyed by the resource name.
    mock_execute_request.assert_called_with(mock_resource_client,
        'get', {'name': 'projects/000000000000'})
    # And return the (single) resource dict unchanged.
    self.assertEqual(response,
        {
            "createTime": "2020-01-01T00:00:00.000Z",
            "displayName": "fake-project",
            "etag": "Tm90IGFuIGV0YWd4eHh4eA==",
            "name": "projects/000000000000",
            "parent": "folders/111111111111",
            "projectId": "fake-project",
            "state": "ACTIVE",
            "updateTime": "2020-01-01T00:00:00.000Z"
        })
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.cloudresourcemanager.GoogleCloudResourceManager.GrmApi')
def testGetResourceInvalidName(self, _, __):
"""Validates the GetResource function raises an exception for an invalid
resource name."""
with self.assertRaises(TypeError):
gcp_mocks.FAKE_CLOUD_RESOURCE_MANAGER.GetResource(
'badtype/000000000000')
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.cloudresourcemanager.GoogleCloudResourceManager.GrmApi')
def testGetProjectAncestry(self, _, mock_execute_request):
"""Validates the GetProjectAncestry function"""
mock_execute_request.side_effect = [
[gcp_mocks.MOCK_CLOUD_RESOURCE_PROJECT],
[gcp_mocks.MOCK_CLOUD_RESOURCE_FOLDER],
[gcp_mocks.MOCK_CLOUD_RESOURCE_ORGANIZATION]
]
response = gcp_mocks.FAKE_CLOUD_RESOURCE_MANAGER.ProjectAncestry()
self.assertListEqual(response,
[
{
'createTime': '2020-01-01T00:00:00.000Z',
'displayName': 'fake-project',
'etag': 'Tm90IGFuIGV0YWd4eHh4eA==',
'name': 'projects/000000000000',
'parent': 'folders/111111111111',
'projectId': 'fake-project',
'state': 'ACTIVE', 'updateTime': '2020-01-01T00:00:00.000Z'
},
{
'createTime': '2020-01-01T00:00:00.000Z',
'displayName': 'fake-folder',
'etag': 'Tm90IGFuIGV0YWd4eHh4eA==',
'name': 'folders/111111111111',
'parent': 'organizations/222222222222',
'state': 'ACTIVE',
'updateTime': '2020-01-01T00:00:00.000Z'
},
{
'createTime': '2020-01-01T00:00:00.000Z',
'directoryCustomerId': 'bm9jdXN0',
'displayName': 'fake-organization.com',
'etag': 'Tm90IGFuIGV0YWd4eHh4eA==',
'name': 'organizations/222222222222',
'state': 'ACTIVE',
'updateTime': '2020-01-01T00:00:00.000Z'
}
])
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.cloudresourcemanager.GoogleCloudResourceManager.GrmApi')
def testGetIamPolicy(self, mock_grm_api, mock_execute_request):
"""Validates the GetIamPolicy function"""
mock_execute_request.return_value = [gcp_mocks.MOCK_IAM_POLICY]
mock_resource_client = mock_grm_api.return_value.projects.return_value
response = gcp_mocks.FAKE_CLOUD_RESOURCE_MANAGER.GetIamPolicy(
'projects/000000000000')
mock_execute_request.assert_called_with(mock_resource_client,
'getIamPolicy', {'resource': 'projects/000000000000'})
self.assertEqual(response, {
"version": 1,
"etag": "bm90X2V0YWc=",
"bindings": [
{
"role": "roles/cloudbuild.builds.builder",
"members": [
"serviceAccount:012345678901@cloudbuild.gserviceaccount.com"
]
},
{
"role": "roles/owner",
"members": [
"serviceAccount:fake_sa@fake-project.iam.gserviceaccount.com",
"user:fakeaccount@fakedomain.com"
]
}
]
})
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
33448,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428... | 2.273636 | 2,401 |
#Podemos fazer uma análise agrupando a balneabilidade por campanha e depois salvamos o resultado também no formato csv:
import csv
# lê os dados
with open("balneabilidade.csv") as file:
beach_status_reader = csv.reader(file, delimiter=",", quotechar='"')
header, vinicius, *data = beach_status_reader
# agrupa campanhas e suas respectivas balneabilidades
bathing_by_campaign = {}
for row in data:
campaign = row[6]
bathing = row[2]
if campaign not in bathing_by_campaign:
bathing_by_campaign[campaign] = {
"Própria": 0,
"Imprópria": 0,
"Muito Boa": 0,
"Indisponível": 0,
"Satisfatória": 0,
}
bathing_by_campaign[campaign][bathing] += 1
# escreve o relatório em csv
# abre um arquivo para escrita
with open("report_por_campanha.csv", "w") as file:
writer = csv.writer(file)
# escreve o cabeçalho
headers = [
"Campanha",
"Própria",
"Imprópria",
"Muito Boa",
"Indisponível",
"Satisfatória",
]
writer.writerow(headers)
# escreve as linhas de dados
for campaign, bathing in bathing_by_campaign.items():
# desempacota os valores de balneabilidade para formar uma linha
# equivalente a
# row = [campaign]
# for value in bathing.values():
# row.append(value)
row = [campaign, *bathing.values()]
writer.writerow(row)
# =====================================================
# Existe ainda o leitor e escritor baseado em dicionários. Sua principal vantagem é que, com ele, não precisamos manipular os índices para acessar os dados das colunas. Mas, devido a estrutura de dados utilizada, ele tem como desvantagem o espaço ocupado em memória, sendo maior que o comum:
import csv
# lê os dados
with open("balneabilidade.csv") as file:
beach_status_reader = csv.DictReader(file, delimiter=",", quotechar='"')
# a linha de cabeçaçhos é utilizada como chave do dicionário
# agrupa campanhas e suas respectivas balneabilidades
bathing_by_campaign = {}
for row in beach_status_reader:
campaign = row["numero_boletim"] # as linhas são dicionários
bathing = row["categoria"]
if campaign not in bathing_by_campaign:
bathing_by_campaign[campaign] = {
"Própria": 0,
"Imprópria": 0,
"Muito Boa": 0,
"Indisponível": 0,
"Satisfatória": 0,
}
bathing_by_campaign[campaign][bathing] += 1
# abre um arquivo para escrita
with open("report_por_campanha_dicionarios.csv", "w") as file:
# os valores a serem escritos devem ser dicionários
header = [
"Campanha",
"Própria",
"Imprópria",
"Muito Boa",
"Indisponível",
"Satisfatória",
]
# É necessário passar o arquivo e o cabeçalho
writer = csv.DictWriter(file, fieldnames=header)
# escreve as linhas de dados
for campaign, bathing in bathing_by_campaign.items():
# desempacota os valores de balneabilidade para formar uma linha
# equivalente a
# row = {"campanha": campaign}
# for name, value in bathing.items():
# row[name] = value
row = {"Campanha": campaign, **bathing}
writer.writerow(row) | [
2,
41565,
368,
418,
277,
19178,
334,
2611,
281,
6557,
75,
786,
556,
12618,
25440,
257,
3652,
710,
14991,
312,
671,
16964,
1413,
272,
3099,
304,
1207,
10924,
24143,
321,
418,
267,
1255,
4533,
256,
4131,
2634,
76,
645,
1296,
5549,
269,
... | 2.203557 | 1,518 |
from dace import registry, InterstateEdge
from dace.properties import make_properties
from dace.transformation.interstate.loop_detection import DetectLoop
@registry.autoregister
@make_properties
class RemoveLoop(DetectLoop):
""" Unrolls a state machine for-loop into multiple states """
@staticmethod
| [
6738,
288,
558,
1330,
20478,
11,
30739,
37021,
198,
6738,
288,
558,
13,
48310,
1330,
787,
62,
48310,
198,
6738,
288,
558,
13,
7645,
1161,
13,
3849,
5219,
13,
26268,
62,
15255,
3213,
1330,
35874,
39516,
198,
198,
31,
2301,
4592,
13,
... | 3.658824 | 85 |
from markov_sentence_generator import MarkovModel
from markov_sentence_generator import SentenceGenerator
from error_correction import API_URL_BASE, correct_spelling
from sentences import Message
from itertools import chain
import random
import os
import socket
import sys
def parce_input():
"""
Чтение копуса сообщений
:return: Корпус сообщений
:rtype: list
"""
file = open('/home/website/virtual-person/messages/korpus.txt', "r", encoding="utf8")
data = file.read()
data = data.replace('\n', '')
korpus = data.split(' ')
file.close()
return korpus
def get_message(message):
"""
Получение сообщений
:param message: Сообщение
:type message: str
:return: Список характеристик и список слов
:rtype: tuple(list, list)
"""
analised_message = Message(message)
sentiment, characteristics = analised_message.get_result()
words = []
chars = []
for i in range(0, len(characteristics)):
if i == 0:
chars.append(characteristics[i])
else:
for j in characteristics[i]:
words.append(j)
return chars, words
def get_data_for_answer(words):
"""
Получение информации из сообщения для ответа
:param words:Список слов
:type words: list
:return: Начальное слово и список добавленных слов
:rtype: tuple(str, list)
"""
start_word = None
added_words = []
if len(words) > 1:
for word in words:
actual_word = word[0]['word'].lower()
if word[0]['role'] == 'nsubj':
if actual_word == 'я':
start_word = 'Ты'
elif actual_word == 'ты':
start_word = 'Я'
elif actual_word == 'мы':
start_word = 'Мы'
elif actual_word == 'вы':
start_word = 'Я'
else:
start_word = actual_word
for word in words:
actual_word = word[0]['word'].lower()
if word[0]['role'] == 'root':
if start_word == None:
start_word = actual_word
else:
added_words.append(actual_word)
for word in words:
actual_word = word[0]['word'].lower()
if word[0]['role'] != 'root' and word[0]['role'] != 'nsubj':
if start_word == None:
start_word = actual_word
else:
added_words.append(actual_word)
return start_word, added_words
def get_data_for_narrative(words):
"""
Получение информации из сообщения для повествования
:param words: Список слов
:type words: list
:return: Начальное слово и список добавленных слов
:rtype: tuple(str, list)
"""
start_word = None
added_words = []
for word in words:
actual_word = word[0]['word']
if word[0]['role'] == 'nsubj':
start_word = actual_word
for word in words:
actual_word = word[0]['word']
if word[0]['role'] == 'root':
if start_word == None:
start_word = actual_word
else:
added_words.append(actual_word)
for word in words:
actual_word = word[0]['word']
if word[0]['role'] != 'root' and word[0]['role'] != 'nsubj':
if start_word == None:
start_word = actual_word
else:
added_words.append(actual_word)
return start_word, added_words
def analize_message(chars, words):
"""
Анализ сообщения
:param chars: Список характеристик
:type chars: list
:param words: Список слов
:type words: list
:returns: Начальное слово и список добавленных слов
:rtype: tuple(str, list)
"""
start_word = ''
added_words = []
if chars['sentence_type'] == 'question':
start_word, added_words = get_data_for_answer(words)
elif chars['sentence_type'] == 'narration':
start_word, added_words = get_data_for_narrative(words)
elif chars['sentence_type'] == 'exclamation':
start_word, added_words = get_data_for_narrative(words)
return start_word, added_words
def main():
"""
Функция main
"""
data = parce_input() # Обучающий корпус
ORDER = 3
LENGTH = 20
START_WORD = 'Я'
ENDING = '.'
ADDED_WORDS = ['пришёл', 'домой', 'ура']
PREVIOUS_END = '.'
sent_gen = SentenceGenerator(data) # Создание модели.
# sent_gen.markov_model_order_1/2/3.model - извлечь модель
print(sys.getsizeof(sent_gen))
# запуск сокетов
stop = False
# ip, port
SERVER_ADDRESS = ('localhost', 6666)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(SERVER_ADDRESS)
server_socket.listen(10)
print('Сервер запущен, CTRL+С для остановки')
# запуск сокетов
ORDER = 2
# for i in range(20):
# sent = sent_gen.generate_random_sentence(ORDER, LENGTH, START_WORD, ENDING, ADDED_WORDS, PREVIOUS_END)
# sent_gen.print(ORDER, sent)
while True:
connection, address = server_socket.accept()
print("new connection from {address}".format(address=address))
# message = input()
message = str(connection.recv(4096).decode(encoding='utf-8'))
print("Query >", message)
if len(message) == 0:
print('ERROR: Kakojto debil vvel pustuyu stroku')
continue
inp_length = len(message)
chars, words = get_message(message)
chars = chars[0]
start_word, added_words = analize_message(chars, words)
ending = None
previous_end = None
if chars['sentence_type'] == 'question':
previous_end = '?'
ending = '.'
else:
previous_end = '.'
ending = '.'
LENGTH = inp_length + random.randint(0, 3)
sent = sent_gen.generate_random_sentence(ORDER, LENGTH, start_word, ending, added_words, previous_end)
answer = sent_gen.print(ORDER, sent)
answer = correct_spelling(' '.join(answer))
print("Answer >", answer)
connection.send(bytes(answer, encoding='UTF-8'))
connection.close()
main()
| [
6738,
1317,
709,
62,
34086,
594,
62,
8612,
1352,
1330,
2940,
709,
17633,
198,
6738,
1317,
709,
62,
34086,
594,
62,
8612,
1352,
1330,
11352,
594,
8645,
1352,
198,
6738,
4049,
62,
10215,
8243,
1330,
7824,
62,
21886,
62,
33,
11159,
11,
... | 1.892361 | 3,168 |
from pygears.typing_common import flatten
from pygears.typing import Tuple, Unit, Uint
| [
6738,
12972,
70,
4127,
13,
774,
13886,
62,
11321,
1330,
27172,
268,
198,
6738,
12972,
70,
4127,
13,
774,
13886,
1330,
309,
29291,
11,
11801,
11,
471,
600,
628,
628,
628,
198
] | 2.90625 | 32 |
import asyncio
from datetime import datetime
from random import randint, shuffle
import common
from cogs.duelcog import item_chance_roll
from discord.ext import commands
from twython import TwythonError
class Twitter(commands.Cog):
""" Twitter Fetchers"""
@commands.command(name="trump")
async def get_trump(self, ctx):
"""Get Trump's latest Yuge success!"""
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
twitter_id = 'realdonaldtrump'
tweet_text = \
':pen_ballpoint::monkey: Trump has been saying things, as ' \
'usual...'
rt_text = \
':pen_ballpoint::monkey: Trump has been repeating things, as ' \
'usual... (RT ALERT)'
try:
await get_last_tweet(twitter_id, tweet_text,
rt_text, ctx, max(1, common.missed_trumps))
except TwythonError:
await ctx.send("Twitter is acting up, try again later.")
else:
if common.trump_chance_roll_rdy:
await item_chance_roll(ctx.message.author.display_name,
ctx.message.channel,
92 - (common.missed_trumps * 2))
common.trump_chance_roll_rdy = False
common.missed_trumps = 0
@commands.command(name='news')
async def get_news(self, ctx):
"""Grab a news story"""
if common.twitter is None:
return
shuffle(common.news_handles)
found_art = False
while not found_art:
source = common.news_handles.pop(0)
common.news_handles.append(source)
tweet_text = "It looks like @" + source + " is reporting:"
rt_text = "It looks like @" + source + " is retweeting:"
try:
await get_last_tweet(source, tweet_text, rt_text, ctx)
except TwythonError:
print("Error in get_news, trying another source")
else:
found_art = True
return
@commands.command(name='toggle-news', hidden=True)
async def toggle_news(self, ctx):
"""Toggle the news feed on and off"""
if common.NEWS_FEED_ON:
common.NEWS_FEED_ON = False
await ctx.send("News Feed turned off.")
else:
if not common.NEWS_FEED_CREATED:
ctx.loop.create_task(handle_news(ctx, self.bot))
common.NEWS_FEED_CREATED = True
common.NEWS_FEED_ON = True
await ctx.send("News Feed turned on.")
async def get_last_tweet(_id, tweet_text, rt_text, ctx, c=1):
"""
Gets the last tweet for id.
:param _id: Twitter id
:param tweet_text: flavor text for tweets
:param rt_text: flavor text for retweets
:param ctx: Context
:param c: number of tweets to get
:return:
"""
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
try:
last_tweet = common.twitter.get_user_timeline(screen_name=_id, count=c,
include_retweets=True)
except TwythonError as e:
raise e
else:
for i in range(c):
# if it's a retweet, send the original tweet
if 'retweeted_status' in last_tweet[i]:
rt_id = last_tweet[i]['retweeted_status']['id']
rt_screen_name = last_tweet[i]['retweeted_status']['user'][
'screen_name']
await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
.format(rt_text, rt_screen_name, str(rt_id)))
# otherwise, send the tweet
else:
await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
.format(tweet_text, last_tweet[i]['user']
['screen_name'],
str(last_tweet[i]['id'])))
async def check_trumps_mouth(bot):
"""
Waits for an update from the prez
:return: None
"""
c_to_send = None
decay = 0
await bot.wait_until_ready()
for channel in bot.get_all_channels():
if channel.name == 'gen_testing' \
or channel.name == common.ARGS['channel']:
c_to_send = channel
break
if common.twitter is None:
return
common.last_id = common.twitter.get_user_timeline(
screen_name='realdonaldtrump', count=1, include_retweets=False)[0]['id']
delay = common.trump_del * 60
while not bot.is_closed():
await asyncio.sleep(delay)
print("Checked trump at {}".format(datetime.now()))
try:
trumps_lt_id = common.twitter.get_user_timeline(
screen_name='realdonaldtrump', count=1,
include_retweets=False)[0]['id']
except:
print("Error caught in check_trump, shortening delay")
delay = 60
else:
if decay > 0:
delay = (common.trump_del - decay) * 60
decay -= 1
else:
delay = common.trump_del * 60
if trumps_lt_id != common.last_id:
common.trump_tweets_seen += 1
await c_to_send.send("New Message from the prez! Try !trump")
decay = common.trump_del - 1
delay = (common.trump_del - decay) * 60
common.last_id = trumps_lt_id
common.trump_chance_roll_rdy = True
common.missed_trumps += 1
async def handle_news(ctx, bot):
"""
Handles the news feed
:return:
"""
shuffle(common.news_handles)
await bot.wait_until_ready()
for channel in bot.get_all_channels():
if channel.name == 'gen_testing' or channel.name == 'newsfeed':
c_to_send = channel
break
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
delay = (common.news_del * 60) + (randint(0, 10) * 60)
while not bot.is_closed():
next_source = common.news_handles.pop(0)
common.news_handles.append(next_source)
print("Next news source will be {}".format(next_source))
await asyncio.sleep(delay)
if common.NEWS_FEED_ON:
try:
news = common.twitter.get_user_timeline(
screen_name=next_source, count=1,
include_retweets=False)
except:
print("Error caught in news, shortening delay")
delay = 30
else:
delay = (common.news_del * 60) + (randint(0, 10) * 60)
await c_to_send.send("https://twitter.com/{0}/status/{1}"
.format(news[0]['user']['screen_name'],
str(news[0]['id'])))
else:
common.NEWS_FEED_CREATED = False
print("Destroying News Feed Task")
return
| [
11748,
30351,
952,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4738,
1330,
43720,
600,
11,
36273,
198,
198,
11748,
2219,
198,
6738,
269,
18463,
13,
646,
417,
66,
519,
1330,
2378,
62,
39486,
62,
2487,
198,
6738,
36446,
13,
2302... | 1.97448 | 3,605 |
# All content Copyright (C) 2018 Genomics plc
from wecall_test_drivers.base_test import BaseTest
from wecall_test_drivers.svc_driver import SVCDriver
| [
2,
1439,
2695,
15069,
357,
34,
8,
2864,
5215,
31994,
458,
66,
198,
6738,
356,
13345,
62,
9288,
62,
36702,
13,
8692,
62,
9288,
1330,
7308,
14402,
198,
6738,
356,
13345,
62,
9288,
62,
36702,
13,
21370,
66,
62,
26230,
1330,
20546,
8610... | 3.355556 | 45 |
import collections, sys, glob
p = p1()
| [
198,
11748,
17268,
11,
25064,
11,
15095,
198,
220,
220,
220,
220,
220,
220,
198,
220,
220,
198,
79,
796,
279,
16,
3419,
198
] | 2.083333 | 24 |
from sortedcontainers import SortedDict
from ..base.orderbook import OrderbookBase
from .websocket import BitbankWebsocket
from .api import BitbankApi
| [
6738,
23243,
3642,
50221,
1330,
311,
9741,
35,
713,
198,
198,
6738,
11485,
8692,
13,
2875,
2070,
1330,
8284,
2070,
14881,
198,
6738,
764,
732,
1443,
5459,
1330,
4722,
17796,
1135,
1443,
5459,
198,
6738,
764,
15042,
1330,
4722,
17796,
32... | 3.55814 | 43 |
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
API-specific fixtures
"""
import pytest
from .helpers import assemble_authorization_header
API_TOKEN = 'testing-TESTING'
@pytest.fixture(scope='package')
@pytest.fixture(scope='package')
def api_client(api_app):
"""Provide a test HTTP client against the API."""
return api_app.test_client()
@pytest.fixture(scope='package')
def api_client_authz_header():
"""Provide a test HTTP client against the API."""
return assemble_authorization_header(API_TOKEN)
| [
37811,
198,
25,
15269,
25,
4793,
12,
1238,
2481,
449,
420,
831,
509,
7211,
364,
354,
21184,
198,
25,
34156,
25,
31492,
347,
10305,
357,
3826,
4600,
43,
2149,
24290,
63,
2393,
329,
3307,
8,
198,
198,
17614,
12,
11423,
34609,
198,
378... | 2.939394 | 198 |
from .pos_embed import *
from .rel_multi_head import *
from .rel_bias import *
from .memory import *
from .scale import *
from .transformer_xl import *
from .loader import *
from .sequence import *
__version__ = '0.13.0'
| [
6738,
764,
1930,
62,
20521,
1330,
1635,
198,
6738,
764,
2411,
62,
41684,
62,
2256,
1330,
1635,
198,
6738,
764,
2411,
62,
65,
4448,
1330,
1635,
198,
6738,
764,
31673,
1330,
1635,
198,
6738,
764,
9888,
1330,
1635,
198,
6738,
764,
7645,
... | 3 | 74 |
import robin_stocks as robinhood
| [
11748,
3857,
259,
62,
29522,
355,
3857,
259,
2894,
198
] | 3.3 | 10 |
from articles.api.views import ArticleViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'', ArticleViewSet, base_name='articles')
urlpatterns = router.urls
| [
6738,
6685,
13,
15042,
13,
33571,
1330,
10172,
7680,
7248,
198,
6738,
1334,
62,
30604,
13,
472,
1010,
1330,
15161,
49,
39605,
198,
198,
472,
353,
796,
15161,
49,
39605,
3419,
198,
472,
353,
13,
30238,
7,
81,
6,
3256,
10172,
7680,
72... | 3.433333 | 60 |
FILE_PREFIX_NETWORK = 'network-'
| [
25664,
62,
47,
31688,
10426,
62,
12884,
33249,
796,
705,
27349,
19355,
198
] | 2.538462 | 13 |
# Python code to run State Evolution (SE) related functions for
# Sparse Regression Codes (SPARCs)
#
# 1. State evolution for general base matrix W.
# 2. Asymptotic state evolution for general base matrix W.
#
# Copyright (c) 2020 Kuan Hsieh
import numpy as np
from sparc import create_base_matrix, is_power_of_2, psk_constel
from copy import copy
### Check code/decode/channel params functions
def check_code_params(code_params):
'''
Check SPARC code parameters for State Evolution (SE) simulations
'''
code_params_copy = {} # Will overwrite original (prevents unwanted params)
# Check SPARC type e.g. power allocated, spatially coupled
sparc_type_list = ['complex',
'modulated',
'power_allocated',
'spatially_coupled']
for item in sparc_type_list:
if item not in code_params:
code_params[item] = False # default
else:
assert type(code_params[item]) == bool,\
"'{}' must be boolean".format(key)
code_params_copy[item] = copy(code_params[item])
# Required SPARC code parameters (all SPARC types)
code_param_list = ['P','R','M']
in_code_param_list(code_param_list)
P,R,M = map(code_params.get, code_param_list)
assert (type(P)==float or type(P)==np.float64) and P>0
assert (type(R)==float or type(R)==np.float64) and R>0
assert type(M)==int and M>0 and is_power_of_2(M)
# Required SPARC code parameters (modulated)
# ONLY SUPPORTS PSK MODULATION
if code_params['modulated']:
code_param_list = ['K']
in_code_param_list(code_param_list)
K = code_params['K']
assert type(K)==int and K>1 and is_power_of_2(K)
if not code_params['complex']:
assert K==2, 'Real-modulated SPARCs requires K=2'
# Required SPARC code parameters (power allocated)
# ONLY SUPPORTS ITERATIVE POWER ALLOCATION
if code_params['power_allocated']:
code_param_list = ['B', 'R_PA_ratio']
in_code_param_list(code_param_list)
B, R_PA_ratio = map(code_params.get, code_param_list)
assert type(B)==int and B>1
assert type(R_PA_ratio)==float or type(R_PA_ratio)==np.float64
assert R_PA_ratio>=0
# Required SPARC code parameters (spatially coupled)
# ONLY SUPPORTS OMEGA, LAMBDA BASE MATRICES
if code_params['spatially_coupled']:
code_param_list = ['omega', 'Lambda']
in_code_param_list(code_param_list)
omega, Lambda = map(code_params.get, code_param_list)
assert type(omega)==int and omega>1
assert type(Lambda)==int and Lambda>=(2*omega-1)
# Overwrite orignal
code_params.clear()
code_params.update(dict(code_params_copy))
def sparc_se(awgn_var, code_params, t_max, mc_samples):
"""
State evolution (SE) for Sparse Regression Codes.
I resuse the Monto Carlo samples instead of resampling them
at everytime to save computation.
awgn_var : AWGN channel noise variance
code_params: SPARC code parameters
t_max : max number of iterations
mc_samples : num of Monte Carlo samples
"""
check_code_params(code_params)
# Construct base matrix W
tmp = code_params.copy()
tmp.update({'awgn_var':awgn_var})
W = create_base_matrix(**tmp)
assert 0 <= W.ndim <= 2
# Get code parameters
P,R,M = map(code_params.get, ['P','R','M'])
K = code_params['K'] if code_params['modulated'] else 1
if code_params['complex']:
R /= 2 # Complex SPARCs only care about the rate per dimension
if W.ndim == 0:
psi = np.ones(t_max)
elif W.ndim == 1:
Lr, Lc = 1, W.size
psi = np.ones((t_max, Lc))
elif W.ndim == 2:
Lr, Lc = W.shape
psi = np.ones((t_max, Lc))
if K>2: # Must be complex and modulated with modulation factor K>2
u = np.random.randn(mc_samples, M) + 1j*np.random.randn(mc_samples, M)
else:
u = np.random.randn(mc_samples, M)
for t in range(t_max-1):
if t>0:
tau_prev = np.copy(tau)
if W.ndim == 0:
tau = (np.log(2)*R/np.log(K*M)) * (awgn_var/P + psi[t])
else:
phi = awgn_var + np.dot(W, psi[t])/Lc
tau = (np.log(2)*R*Lr/np.log(K*M)) / np.dot(W.T, 1/phi)
if (t>0) and np.allclose(tau, tau_prev, rtol=1e-6, atol=0):
if W.ndim == 0:
psi[t:] = psi[t]
else:
psi[t:,:] = psi[t,:]
break
if W.ndim == 0:
psi[t+1] = 1 - sparc_se_E(tau, K, u)
else:
for c in range(Lc):
psi[t+1, c] = 1 - sparc_se_E(tau[c], K, u)
# Final tau can be used to estimate SER
return psi, tau
| [
2,
11361,
2438,
284,
1057,
1812,
15815,
357,
5188,
8,
3519,
5499,
329,
198,
2,
1338,
17208,
3310,
2234,
44380,
357,
4303,
25793,
82,
8,
198,
2,
198,
2,
352,
13,
1812,
6954,
329,
2276,
2779,
17593,
370,
13,
198,
2,
362,
13,
1081,
... | 2.1578 | 2,218 |
import pandas as pd
import numpy as np
import sys
def clean(df):
"""
Takes a dataframe of salesforce data and maps values to a more usable format, returning a clean data set.
:param df: pandas DataFrame containing raw data
:return: df: cleaned data set - i.e. mapped to correct values
"""
# Replace empty and NaN's with None
empty_nan_map = {np.nan: None, '': None}
df.replace(empty_nan_map, inplace=True)
# Drop unwanted headers
df = pd.DataFrame(df.drop(['RegisteredCompany', 'OpportunityId', 'CreditStatus', 'CompanyTelephone', 'ShowSector',
'BillingPostalCode', 'BillingState', 'VATNumber', 'VATNumberValidationStatus', 'Website',
'CurrencyIsoCode', 'IsWon', 'InvoiceFrequency', 'LeadChannel', 'LeadSource',
'ProductDescription', 'ReasonLost', 'OtherReasonsLost', 'OtherCustomerObjectives',
'Probability', 'GrossArea', 'NetChargeableArea', 'ProductCategory'], axis=1))
# Exhibitions: map 'Spring Fair International 2017' -> Spring17
fairs = []
years = []
for i in range(len(df)):
fairs.append(df['Exhibition'][i].split(' ', 1)[0])
years.append(df['Exhibition'][i].split(' ')[3][2:])
df['Exhibition'] = fairs
df['Year'] = years
# Company Sectors: strip redundant values, repeat entries, mistake entries, combine 3 cols to 1 col
words, results = [], []
stopwords = ['and', '&', 'the']
for i in range(len(df)):
query1, query2, query3 = df['CompanySector'][i], df['CompanySector2'][i], df['CompanySector3'][i]
queries = list()
if query1 != None:
queries += list(query1.split())
if query2 != None:
queries += list(query2.split())
if query3!= None:
queries += list(query3.split())
else:
queries = None
if queries == None:
result = None
else:
result = list([word for word in queries if word.lower() not in stopwords])
mapping = [("Children\xe2\x80\x99s", 'Children\'s Gifts'), ('Gifts,', ''), ('Children?s', 'Children\'s Gifts'),
('Fashion,', 'Fashion'), ('Jewellery,', 'Jewellery'), ('Volume,', 'Volume'), ('Kitchen,', 'Kitchen'),
('Multiple, /', ''), ('Department', 'Department Store'), ('Stores', ''), ('retailer', 'Retail'),
('/', ''), ('Multiple', ''), (' ', '')]
for k, v in mapping:
result = [i.replace(k, v) for i in result]
if '' in result: result.remove('')
result = pd.unique(result)
results.append(result)
df = pd.DataFrame(df.drop(['CompanySector', 'CompanySector2', 'CompanySector3'], axis=1))
df['CompanySectors'] = results
# Replace unknown with None
exhibitorTypeMap = {'Unknown': None}
df['ExhibitorType'].replace(exhibitorTypeMap, inplace=True)
# Replace the multitude of Hall labels with the following map
hallMap = {'': None, '1': 1, '1.1': 1, '10,11,12': [10, 11, 12], '10-Dec': None, '11': 11, '19-20': [19, 20],
'2': 2, '20': 20, '3': 3, '4': 4, '5': 5, '6': 6, '9': 9, 'Autumn Fair Intl 2014 Hall 3': 3,
'Autumn Fair Intl 2015 Hall 1': 1, 'Autumn Fair Intl 2015 Hall 4': 4,
'Autumn Fair Intl 2015 Hall 5': 5, 'Ground Level': 'n/a', 'Hall 01': 1, 'Hall 02': 2,
'Hall 03': 3, 'Hall 04': 4, 'Hall 05': 5, 'Hall 1': 1, 'Hall 1 (H1)': 1,
'Hall 10,Hall 11,Hall 12': [10, 11, 12], 'Hall 10,Hall 11,Hall 12 (H10-12)': [10, 11, 12],
'Hall 10-12': [10, 11, 12], 'Hall 17,Hall 18 (H17-18)': [17, 18], 'Hall 17-19': [17, 18, 19],
'Hall 19,Hall 20': [19, 20], 'Hall 19,Hall 20 (H19-20)': [19, 20], 'Hall 19-20': [19, 20], 'Hall 2': 2,
'Hall 2 (H2)': 2, 'Hall 3': 3, 'Hall 3 (H3)': 3, 'Hall 3 3A': 3, 'Hall 3-3A': 3, 'Hall 4': 4,
'Hall 4 (H4)': 4, 'Hall 5': 5, 'Hall 5 (H5)': 5, 'Hall 6 & 7': [6, 7], 'Hall 6, Hall 7 (H6-7)': [6, 7],
'Hall 6,Hall7': [6, 7], 'Hall 6-7': [6, 7], 'Hall 8': 8, 'Hall 8 (H8)': 8, 'Hall 9': 9,
'Hall 9 (H9)': 9, 'Hall 9-10': [9, 10], 'Hall N1-19': range(1, 20), 'Halls 10-12': [10, 11, 12],
'Halls 6 & 7': [6, 7], 'Spring Fair International 2016 - Hall 1': 1,
'Spring Fair International 2016 - Hall 19&20': [19, 20], 'Spring Fair International 2016 - Hall 3': 3,
'Spring Fair International 2016 - Hall 4': 4, 'Spring Fair International 2016 - Hall 5': 5,
'Spring Fair International 2016 - Halls 19&20': [19, 20],
'Spring Fair International 2016 - Halls 6&7': [6, 7], 'Spring Fair International 2016 Halls 6 & 7': [6, 7]}
df['Hall'].replace(hallMap, inplace=True)
cityMap = {'.': '', 'X': '', 'Tbc': '', 'Oxon': 'Oxford', 'Girona 17469': 'Girona', 'Ny': 'New York'}
df['BillingCity'].replace(cityMap, inplace=True)
# Some stage names map to the same value
StageNameMap = {'': None, 'Adv. Commercial Negotiation': 'Commercial Negotiation', 'Close Lost': 'Closed Lost'}
df['StageName'].replace(StageNameMap, inplace=True)
# Some dates incorrectly labelled, must be greater than 0
df = df[df['CreateCloseDateDiff'] >= 0]
return df
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
628,
198,
4299,
3424,
7,
7568,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
33687,
257,
1366,
14535,
286,
4200,
3174,
1366,
290,
8739,
381... | 2.243154 | 2,410 |
from keras.regularizers import *
class L1L2Lp(Regularizer):
"""Regularizer for L1, L2, and Lp regularization.
# Arguments
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
lp: Float; Lp regularization factor.
p: Float; Lp regularization exponent.
"""
# Aliases.
| [
6738,
41927,
292,
13,
16338,
11341,
1330,
1635,
198,
198,
4871,
406,
16,
43,
17,
43,
79,
7,
40164,
7509,
2599,
198,
220,
220,
220,
37227,
40164,
7509,
329,
406,
16,
11,
406,
17,
11,
290,
406,
79,
3218,
1634,
13,
628,
220,
220,
2... | 2.481481 | 135 |
'''for c in range(1, 10):
print(c)
print('FIM')'''
# pode usar while ou for quando sabemos o limite da repetição
#crescente
c = 1
while c < 10:
print(c)
c += 1
print('FIM')
# while decrescente
n = int(input('Digite um número para\nCalcular seu fatorial: '))
c = n
while c > 0:
print(c)
c -= 1
# Somente o while quando não sabemos o limite da repetição
n = 1
while n != 0:
n = int(input('teste: '))
print('boa')
# exemplo 2
n = 1
r = 'S'
while r == 'S':
n = int(input('Informe um valor: '))
r = str(input('Quer continuar? [S/N]: ')).upper()
print('fim')
# exemplo 3
n = 1
par = impar = 0
while n != 0:
n = int(input('Digite um valor: '))
if n != 0:
if n % 2 == 0:
par += 1
else:
impar += 1
print('Você digitou {} números pares e {} números ímpares'.format(par,impar)) | [
7061,
6,
1640,
269,
287,
2837,
7,
16,
11,
838,
2599,
198,
220,
220,
220,
3601,
7,
66,
8,
198,
4798,
10786,
37,
3955,
11537,
7061,
6,
198,
2,
279,
1098,
514,
283,
981,
267,
84,
329,
627,
25440,
17463,
368,
418,
267,
1761,
578,
... | 2.08313 | 409 |
""" Train for generating LIIF, from image to implicit representation.
Config:
train_dataset:
dataset: $spec; wrapper: $spec; batch_size:
val_dataset:
dataset: $spec; wrapper: $spec; batch_size:
(data_norm):
inp: {sub: []; div: []}
gt: {sub: []; div: []}
(eval_type):
(eval_bsize):
model: $spec
optimizer: $spec
epoch_max:
(multi_step_lr):
milestones: []; gamma: 0.5
(resume): *.pth
(epoch_val): ; (epoch_save):
"""
import argparse
import os
import yaml
import random
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from scipy import interpolate
import matplotlib.pyplot as plt
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import datasets
import models
import utils
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--name', default=None)
parser.add_argument('--tag', default=None)
parser.add_argument('--save_path', default='./save')
parser.add_argument('--gpu', default='0')
parser.add_argument('--sr', default=None)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
print('config loaded.')
save_name = args.name
if save_name is None:
save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]
if args.tag is not None:
save_name += '_' + args.tag
save_path = os.path.join(args.save_path, save_name)
config['name'] = save_name
main(config, save_path, target_sr=args.sr)
| [
37811,
16835,
329,
15453,
406,
3978,
37,
11,
422,
2939,
284,
16992,
10552,
13,
628,
220,
220,
220,
17056,
25,
198,
220,
220,
220,
220,
220,
220,
220,
4512,
62,
19608,
292,
316,
25,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.432099 | 810 |
import cv2
import depthai
import numpy as np # numpy - manipulate the packet data returned by depthai
import math
from math import cos, sin, tan, pi
from paho.mqtt import client as mqtt_client
from time import sleep
import time
from scipy.optimize import minimize
import datetime
broker = '10.44.99.11'
port = 1883
pubTopic = "/sensors/camera"
subTopic = "/robot/camera"
client_id = "4499-NANO"
sub_client_id = "4499-NANO"
# def getEstimatedTapes(numTapes):
# xArray = []
# yArray = []
# match numTapes:
# case 1:
# xArray[0] =
# yArray[0] =
# xArray[1] =
# yArray[1] =
# case 2:
# averageX =
stepSize = 0.05
cameraResolutionWidth = 1280.0
cameraResolutionHeight = 720.0
HFOV = 69
# focalLength = cameraResolutionWidth/(2 * tan())
# Camera elevation from horizontal, remember to change
cameraElevation = 40 * pi/180.0
# real diff is 61
targetHeightDifference = 83
# Start defining a pipeline
pipeline = depthai.Pipeline()
cam_rgb = pipeline.create(depthai.node.ColorCamera)
cam_rgb.setPreviewSize(1280, 720)
cam_rgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
# cam_rgb.setIspScale(2, 3)
# cam_rgb.setPreviewSize(cameraResolutionWidth, cameraResolutionHeight)
cam_rgb.setPreviewKeepAspectRatio(True)
# manipConfig = depthai.ImageManipConfig()
# manipConfig.setCropRect(0.2, 0.2, 0, 0)
# configQueue.send(manipConfig)
# manip = pipeline.create(depthai.node.ImageManip)
# manip.setResizeThumbnail(200,200, 200, 200, 200)
xout_rgb = pipeline.create(depthai.node.XLinkOut)
configIn = pipeline.create(depthai.node.XLinkIn)
configIn.setStreamName('config')
xout_rgb.setStreamName("rgb")
cam_rgb.preview.link(xout_rgb.input)
controlIn = pipeline.create(depthai.node.XLinkIn)
controlIn.setStreamName('control')
controlIn.out.link(cam_rgb.inputControl)
lowerH = 0
upperH = 94
lowerS = 142
upperS = 255
lowerV = 54
upperV = 255
expTime = 1000
sensIso = 500
# Define a source - two mono (grayscale) cameras
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
spatialLocationCalculator = pipeline.createSpatialLocationCalculator()
xoutDepth = pipeline.createXLinkOut()
xoutSpatialData = pipeline.createXLinkOut()
xinSpatialCalcConfig = pipeline.createXLinkIn()
xoutDepth.setStreamName("depth")
xoutSpatialData.setStreamName("spatialData")
xinSpatialCalcConfig.setStreamName("spatialCalcConfig")
# MonoCamera
monoLeft.setResolution(depthai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(depthai.CameraBoardSocket.LEFT)
monoRight.setResolution(depthai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(depthai.CameraBoardSocket.RIGHT)
outputDepth = True
outputRectified = False
lrcheck = True
subpixel = False
extended = True
# StereoDepth
stereo.setOutputDepth(outputDepth)
stereo.setOutputRectified(outputRectified)
stereo.setConfidenceThreshold(255)
stereo.setDepthAlign(depthai.CameraBoardSocket.RGB)
# setRectifyEdgeFillColor(-1)
stereo.setOutputSize(1280, 720)
stereo.setLeftRightCheck(lrcheck)
stereo.setSubpixel(subpixel)
# stereo.setExtendedDisparity(extended)
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)
stereo.depth.link(spatialLocationCalculator.inputDepth)
topLeft = depthai.Point2f(0.4, 0.4)
bottomRight = depthai.Point2f(0.6, 0.6)
spatialLocationCalculator.setWaitForConfigInput(False)
config = depthai.SpatialLocationCalculatorConfigData()
config.depthThresholds.lowerThreshold = 0
config.depthThresholds.upperThreshold = 10000
config.roi = depthai.Rect(topLeft, bottomRight)
spatialLocationCalculator.initialConfig.addROI(config)
spatialLocationCalculator.out.link(xoutSpatialData.input)
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)
# imu = pipeline.create(depthai.node.IMU)
# xlinkOut = pipeline.create(depthai.node.XLinkOut)
# xlinkOut.setStreamName("imu")
# # imu.enableIMUSensor(depthai.IMUSensor.ROTATION_VECTOR, 400)
# imu.enableIMUSensor(depthai.IMUSensor.GAME_ROTATION_VECTOR, 400)
# imu.setBatchReportThreshold(1)
# imu.setMaxBatchReports(10)
# imu.out.link(xlinkOut.input)
# Pipeline defined, now the device is assigned and pipeline is started
device = depthai.Device(pipeline)
device.startPipeline()
# configQueue = device.getInputQueue('config')
# imuQueue = device.getOutputQueue(name="imu", maxSize=50, blocking = False)
# Output queue will be used to get the depth frames from the outputs defined above
depthQueue = device.getOutputQueue(name="depth", maxSize=10, blocking=False)
spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=1, blocking=False)
spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig")
color = (255, 255, 255)
q_rgb = device.getOutputQueue("rgb")
frame = None
client = connect_mqtt()
# cv2.namedWindow('HSV Tuner', cv2.WINDOW_AUTOSIZE)
# cv2.createTrackbar('Lower H', "HSV Tuner", 0, 255, on_change)
# cv2.createTrackbar('Higher H', "HSV Tuner", 0, 255, on_change)
# cv2.createTrackbar('Lower S', "HSV Tuner", 0, 255, on_change)
# cv2.createTrackbar('Higher S', "HSV Tuner", 0, 255, on_change)
# cv2.createTrackbar('Lower V', "HSV Tuner", 0, 255, on_change)
# cv2.createTrackbar('Higher V', "HSV Tuner", 0, 255, on_change)
# cv2.setTrackbarPos('Lower H', "HSV Tuner", lowerH)
# cv2.setTrackbarPos('Higher H', "HSV Tuner", upperH)
# cv2.setTrackbarPos('Lower S', "HSV Tuner", lowerS)
# cv2.setTrackbarPos('Higher S', "HSV Tuner", upperS)
# cv2.setTrackbarPos('Lower V', "HSV Tuner", lowerV)
# cv2.setTrackbarPos('Higher V', "HSV Tuner", upperV)
controlQueue = device.getInputQueue('control')
ctrl = depthai.CameraControl()
ctrl.setManualExposure(expTime, sensIso)
ctrl.setAutoFocusMode(depthai.RawCameraControl.AutoFocusMode.OFF)
ctrl.setManualFocus(0)
controlQueue.send(ctrl)
# configQueue.send(manipConfig)
cfg = depthai.SpatialLocationCalculatorConfig()
avgDistance = 0
avgAngle = 0
while True:
# print(device.getInputQueueNames())
# lowerH = cv2.getTrackbarPos('Lower H', "HSV Tuner")
# upperH = cv2.getTrackbarPos('Higher H', "HSV Tuner")
# lowerS = cv2.getTrackbarPos('Lower S', "HSV Tuner")
# upperS = cv2.getTrackbarPos('Higher S', "HSV Tuner")
# lowerV = cv2.getTrackbarPos('Lower V', "HSV Tuner")
# upperV = cv2.getTrackbarPos('Higher V', "HSV Tuner")
# getImuAngle()
inDepth = depthQueue.get() # blocking call, will wait until a new data has arrived
depthFrame = inDepth.getFrame()
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_OCEAN)
in_rgb = q_rgb.tryGet()
if in_rgb is not None:
frame = in_rgb.getCvFrame()
startTime = time.time()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lowerThreshold = np.array([lowerH, lowerS, lowerV])
upperThreshold = np.array([upperH, upperS, upperV])
#check if color in range
mask = cv2.inRange(hsv, lowerThreshold, upperThreshold)
result = cv2.bitwise_and(frame, frame, mask = mask)
blur = cv2.GaussianBlur(mask, (5, 5), 0)
edges = cv2.Canny(mask, 75, 150)
contours, hierarchy = cv2.findContours(blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
roiList = []
for contour in contours:
peri = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
rotatedRect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rotatedRect)
boxArray = np.int0(box)
cntArea = cv2.contourArea(contour)
topLeftX = float(boxArray[0][0])
topLeftY = float(boxArray[0][1])
bottomRightX = float(boxArray[1][0])
bottomRightY = float(boxArray[1][1])
boxColor = (0,0,255)
if(peri > 35 and cntArea > 100):
# cv2.drawContours(frame,[boxArray],0,boxColor,2)
topLeft = depthai.Point2f(-0.01 + (topLeftX/cameraResolutionWidth), -0.01 + (topLeftY/cameraResolutionHeight))
bottomRight = depthai.Point2f(0.01 + (bottomRightX/cameraResolutionWidth), 0.01 + (bottomRightY/cameraResolutionHeight))
configData = depthai.SpatialLocationCalculatorConfigData()
configData.calculationAlgorithm = depthai.SpatialLocationCalculatorAlgorithm.MIN
# print(configData.calculationAlgorithm)
configData.roi = depthai.Rect(topLeft, bottomRight)
# print("TL: " + str(topLeft.x) + " BR: " + str(bottomRight.x))
# print("TOP LEFT: " + str(topLeftX) + " BOTTOM RIGHT: " + str(bottomRightX))
roiList.append(configData)
# if(len(roiList) == 0):
# centerConfigData = depthai.SpatialLocationCalculatorConfigData()
# centerConfigData.roi = depthai.Rect(depthai.Point2f(0.49, 0.49), depthai.Point2f(0.51, 0.51))
# roiList.append(centerConfigData)
if(len(roiList) > 0):
cfg.setROIs(roiList)
# print(cfg)
spatialCalcConfigInQueue.send(cfg)
else:
jsonString = '{"Distance":' + '-10' + ', "Angle":' + '-100000' + ', "Confidence":' + '0' + ', "Timestamp":' + str(time.time()) +'}'
print("jsonString")
publish(client, jsonString)
continue
inDepthAvg = spatialCalcQueue.get() # blocking call, will wait until a new data has arrived
spatialData = inDepthAvg.getSpatialLocations()
# print("SPDATA: " + str(len(spatialData)))
zList = []
xList = []
yList = []
pixelYAverage = 0
pixelXAverage = 0
pixelAdditionCounter = 0
# spatialCalcQueue.get
# print("====================================")
for depthData in spatialData:
roi = depthData.config.roi
roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0])
xmin = int(roi.topLeft().x)
ymin = int(roi.topLeft().y)
xmax = int(roi.bottomRight().x)
ymax = int(roi.bottomRight().y)
# xCenterPixels = ((xMin + xMax) * cameraResolutionHeight)/2
# yCenterPixels = ((yMin + yMax) * cameraResolutionWidth)/2
# pixelAdditionCounter = pixelAdditionCounter + 1
zCoordinateCamera = (depthData.spatialCoordinates.z)/25.4
yCoordinateCamera = (depthData.spatialCoordinates.y)/25.4
xCoordinateCamera = (depthData.spatialCoordinates.x)/25.4
# print(depthData.depthAveragePixelCount)
convertedCoordinates = convertCoordinates(xCoordinateCamera, yCoordinateCamera, zCoordinateCamera)
# print("XRAW: " + str(xCoordinateCamera) + " Y: " + str(yCoordinateCamera) + " Z: " + str(zCoordinateCamera))
# print("X: " + str(convertedCoordinates[0]) + " Y: " + str(convertedCoordinates[1]) + " Z: " + str(convertedCoordinates[2]))
if(convertedCoordinates[0] != 0):
zList.append(convertedCoordinates[2])
xList.append(convertedCoordinates[0])
yList.append(convertedCoordinates[1])
# fontType = cv2.FONT_HERSHEY_TRIPLEX
# cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# cv2.putText(frame, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color)
# cv2.putText(frame, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color)
# cv2.putText(frame, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color)
# zMedian = np.median(zList)
# for i in range(0, len(zList) - 1):
# if(abs(zList[i] - zMedian) >= 20):
# zList.remove(zList[i])
# xList.remove(xList[i])
# yList.remove(yList[i])
# for i in range(0, len(zList)):
# factor = targetHeightDifference/zList[i]
# zList[i] = zList[i] * factor
# # xList[i] = xList[i] * factor
# # yList[i] = yList[i] * factor
if(len(xList) != 0):
xAverage = np.average(np.array(xList))
yAverage = np.average(np.array(yList))
bestGuess = np.array([xAverage + 26, yAverage])
targetResult = minimize(reduceBestFitError, bestGuess, args = (xList, yList), method = "Nelder-Mead", options = {"maxiter": 10})
targetCenterX = targetResult.x[0] + 8
targetCenterY = targetResult.x[1]
angle = math.atan(targetCenterY/targetCenterX)
distance = math.sqrt((targetCenterX ** 2) + (targetCenterY ** 2))
jsonString = '{"Distance":' + str(distance) + ', "Angle":' + str(angle) + ', "Confidence":' + str(len(xList)) + ', "Timestamp":' + str(time.time()) +'}'
publish(client, jsonString)
endTime = time.time()
# print("START: " + str(endTime - startTime))
print("TargetX: " + str(targetCenterX) + " TargetY: " + str(targetCenterY) + " Distance: " + str(distance) + " Angle: " + str(180 * (angle)/pi) + " Confidence: " + str(len(xList)))
# print(xList)
# else:
# publish(client, "Hello")
# if(abs(targetRadiusCheck - 576) > 100):
# print("Didn't get correct target")
# else:
# print("CenterX: " + str(targetCenterX) + "CenterY: " + str(targetCenterY))
# print(xList)
# cv2.imshow("depth", depthFrameColor)
# cv2.imshow("frame", frame)
# cv2.imshow("mask", result)
# cv2.imshow("blur", blur)
# newConfig = False
key = cv2.waitKey(1)
if key == ord('q'):
break | [
11748,
269,
85,
17,
198,
11748,
6795,
1872,
198,
11748,
299,
32152,
355,
45941,
220,
1303,
299,
32152,
532,
18510,
262,
19638,
1366,
4504,
416,
6795,
1872,
198,
11748,
10688,
198,
6738,
10688,
1330,
8615,
11,
7813,
11,
25706,
11,
31028,... | 2.302625 | 6,133 |
#!/usr/env/python
## Import General Tools
import sys
import os
from datetime import datetime as dt
import re
from glob import glob
import numpy as np
import json
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
24330,
14,
29412,
198,
198,
2235,
17267,
3611,
20003,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
11748,
302,
198,
6738,
15095,
1330,
15095,
198,
11748,
299,
321... | 4.245283 | 106 |
import pandas as pd
import prettytable
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.base import BaseEstimator
from .vectorizers import BM25Vectorizer
from transformers import DistilBertTokenizer
import re
import math
import nltk
from nltk.corpus import stopwords
from nltk import SnowballStemmer
from scipy.spatial.distance import cosine
import numpy as np
class BaseRetriever(BaseEstimator, ABC):
"""
Abstract base class for all Retriever classes.
All retrievers should inherit from this class.
Each retriever class should implement a _fit_vectorizer method and a
_compute_scores method
"""
def fit(self, df: pd.DataFrame, y=None):
"""
Fit the retriever to a list of documents or paragraphs
Parameters
----------
df: pandas.DataFrame object with all documents
"""
self.metadata = df
return self._fit_vectorizer(df)
@abstractmethod
@abstractmethod
def predict(self, query: str) -> OrderedDict:
"""
Compute the top_n closest documents given a query
Parameters
----------
query: str
Returns
-------
best_idx_scores: OrderedDict
Dictionnaire with top_n best scores and idices of the documents as keys
"""
t0 = time.time()
scores = self._compute_scores(query)
idx_scores = [(idx, score) for idx, score in enumerate(scores)]
best_idx_scores = OrderedDict(
sorted(idx_scores, key=(lambda tup: tup[1]), reverse=True)[: self.top_n]
)
# inspired from https://github.com/facebookresearch/DrQA/blob/50d0e49bb77fe0c6e881efb4b6fe2e61d3f92509/scripts/reader/interactive.py#L63
if self.verbose:
rank = 1
table = prettytable.PrettyTable(["rank", "index", "title"])
for i in range(len(closest_docs_indices)):
index = closest_docs_indices[i]
if self.paragraphs:
article_index = self.paragraphs[int(index)]["index"]
title = self.metadata.iloc[int(article_index)]["title"]
else:
title = self.metadata.iloc[int(index)]["title"]
table.add_row([rank, index, title])
rank += 1
print(table)
print("Time: {} seconds".format(round(time.time() - t0, 5)))
return best_idx_scores
class TfidfRetriever(BaseRetriever):
"""
A scikit-learn estimator for TfidfRetriever. Trains a tf-idf matrix from a corpus
of documents then finds the most N similar documents of a given input document by
taking the dot product of the vectorized input document and the trained tf-idf matrix.
Parameters
----------
lowercase : boolean
Convert all characters to lowercase before tokenizing. (default is True)
preprocessor : callable or None
Override the preprocessing (string transformation) stage while preserving
the tokenizing and n-grams generation steps. (default is None)
tokenizer : callable or None
Override the string tokenization step while preserving the preprocessing
and n-grams generation steps (default is None)
stop_words : string {‘english’}, list, or None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. ‘english’ is currently the only supported string value.
If a list, that list is assumed to contain stop words, all of which will
be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value in the
range [0.7, 1.0) to automatically detect and filter stop words based on
intra corpus document frequency of terms.
(default is None)
token_pattern : string
Regular expression denoting what constitutes a “token”. The default regexp
selects tokens of 2 or more alphanumeric characters (punctuation is completely
ignored and always treated as a token separator).
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different n-grams
to be extracted. All values of n such that min_n <= n <= max_n will be used.
(default is (1, 1))
max_df : float in range [0.0, 1.0] or int
When building the vocabulary ignore terms that have a document frequency strictly
higher than the given threshold (corpus-specific stop words). If float, the parameter
represents a proportion of documents, integer absolute counts. This parameter is
ignored if vocabulary is not None. (default is 1.0)
min_df : float in range [0.0, 1.0] or int
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold. This value is also called cut-off
in the literature. If float, the parameter represents a proportion of
documents, integer absolute counts. This parameter is ignored if vocabulary
is not None. (default is 1)
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are indices
in the feature matrix, or an iterable over terms. If not given, a vocabulary
is determined from the input documents. (default is None)
paragraphs : iterable
an iterable which yields either str, unicode or file objects
top_n : int (default 20)
maximum number of top articles (or paragraphs) to retrieve
verbose : bool, optional
If true, all of the warnings related to data processing will be printed.
Attributes
----------
vectorizer : TfidfVectorizer
See https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
tfidf_matrix : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
Examples
--------
>>> from cdqa.retriever import TfidfRetriever
>>> retriever = TfidfRetriever(ngram_range=(1, 2), max_df=0.85, stop_words='english')
>>> retriever.fit(X=df)
>>> best_idx_scores = retriever.predict(X='Since when does the the Excellence Program of BNP Paribas exist?')
"""
class BM25Retriever(BaseRetriever):
"""
A scikit-learn estimator for BM25Retriever. Trains a matrix based on BM25 statistics
from a corpus of documents then finds the most N similar documents of a given input
query by computing the BM25 score for each document based on the query.
Parameters
----------
lowercase : boolean
Convert all characters to lowercase before tokenizing. (default is True)
preprocessor : callable or None
Override the preprocessing (string transformation) stage while preserving
the tokenizing and n-grams generation steps. (default is None)
tokenizer : callable or None
Override the string tokenization step while preserving the preprocessing
and n-grams generation steps (default is None)
stop_words : string {‘english’}, list, or None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. ‘english’ is currently the only supported string value.
If a list, that list is assumed to contain stop words, all of which will
be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value in the
range [0.7, 1.0) to automatically detect and filter stop words based on
intra corpus document frequency of terms.
(default is None)
token_pattern : string
Regular expression denoting what constitutes a “token”. The default regexp
selects tokens of 2 or more alphanumeric characters (punctuation is completely
ignored and always treated as a token separator).
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different n-grams
to be extracted. All values of n such that min_n <= n <= max_n will be used.
(default is (1, 1))
max_df : float in range [0.0, 1.0] or int
When building the vocabulary ignore terms that have a document frequency strictly
higher than the given threshold (corpus-specific stop words). If float, the parameter
represents a proportion of documents, integer absolute counts. This parameter is
ignored if vocabulary is not None. (default is 1.0)
min_df : float in range [0.0, 1.0] or int
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold. This value is also called cut-off
in the literature. If float, the parameter represents a proportion of
documents, integer absolute counts. This parameter is ignored if vocabulary
is not None. (default is 1)
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are indices
in the feature matrix, or an iterable over terms. If not given, a vocabulary
is determined from the input documents. (default is None)
paragraphs : iterable
an iterable which yields either str, unicode or file objects
top_n : int (default 20)
maximum number of top articles (or paragraphs) to retrieve
verbose : bool, optional
If true, all of the warnings related to data processing will be printed.
k1 : float, optional (default=2.0)
term k1 in the BM25 formula
b : float, optional (default=0.75)
term b in the BM25 formula
floor : float or None, optional (default=None)
floor value for idf terms
Attributes
----------
vectorizer : BM25Vectorizer
Examples
--------
>>> from cdqa.retriever import BM25Retriever
>>> retriever = BM25Retriever(ngram_range=(1, 2), max_df=0.85, stop_words='english')
>>> retriever.fit(df=df)
>>> best_idx_scores = retriever.predict(query='Since when does the the Excellence Program of BNP Paribas exist?')
"""
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
2495,
11487,
198,
11748,
640,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
13... | 2.839227 | 3,620 |
"""
Multi-objective EA.
"""
import os
import array
import multiprocessing
import numpy
from deap import base
from deap import creator
from generator.helpers import *
# -------- Dataset Parameters --------
n = 100 # number of instances
m = 15 # number of attributes
# -------- GA Parameters --------
MIN_VALUE = 0 # individuals have int values [0.2), i.e. 0 or 1
MAX_VALUE = 2 # individuals have int values [0.2), i.e. 0 or 1
MIN_STRATEGY = 0.2 # min value for standard deviation of the mutation
MAX_STRATEGY = 1 # max value standard deviation of the mutation
population_size = 200 # number of individuals in each generation
# -------- Run Parameters --------
complexity_measures = [0.3]
# complexity_measures = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# complexity_measures = [0.2, 0.4, 0.6, 0.8]
amount_of_datasets_per_complexity_measure = 1
num_subs = 3
if m % num_subs != 0:
sys.exit('%i attributes can not be split into %i equal-sized groups' % (m, num_subs))
else:
m_subs = int(m / num_subs)
# set print options for large arrays
np.set_printoptions(threshold=np.inf, precision=2, linewidth=np.inf)
pd.set_option('expand_frame_repr', False)
# initialize EA
# -1 for "minimize" (the difference of desired and actual complexity)
# IMPORTANT: The tuple has the form (fitness_sub1, fitness_sub2, fitness_sub3, fitness_complete)
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin, strategy=None)
creator.create("Strategy", array.array, typecode='d')
toolbox = base.Toolbox()
# run the EA
if __name__ == '__main__':
start_total = time.time()
# initialize multiprocessing
pool = multiprocessing.Pool()
toolbox.register('map', pool.map)
# loop through amount of desired datasets for each complexity measure
for i in range(amount_of_datasets_per_complexity_measure):
start_iter = time.time()
print('\n-------------------- ITERATION %r --------------------\n' % (i + 1))
# loop through each complexity
for complexity in complexity_measures:
print('Complexity: %r\n' % complexity)
# create folder for each complexity measure if not existent
if not os.path.exists('../assets/complexity_%r' % complexity):
os.makedirs('../assets/complexity_%r' % complexity)
# create list of MSTs for each sub dataset
mst_edges = []
for i_sub in range(num_subs):
# create sub data set (function stores the file and returns the MST)
data_set_mst = create_dataset_and_or_mst(n=n, m=m_subs, covariance_between_attributes=False,
path='../assets/complexity_%r/data_%r_%r.csv' % (
complexity, (i + 1), (i_sub + 1)))
mst_edges.append(data_set_mst)
# combine subsets
data_set_combined = pd.DataFrame()
for i_sub in range(num_subs):
# concatenate columns
data_set_combined = pd.concat(
[data_set_combined, pd.read_csv(filepath_or_buffer='../assets/complexity_%r/data_%r_%r.csv' % (
complexity, (i + 1), (i_sub + 1)), usecols=[x for x in range(m_subs)])], axis=1,
ignore_index=True)
data_set_combined.to_csv(path_or_buf='../assets/complexity_%r/data_%r.csv' % (complexity, (i + 1)),
index=False)
# get mst of final dataset
mst_final = create_dataset_and_or_mst(data=data_set_combined, save_file=False)
mst_edges.append(mst_final)
# run EA
main(mst_edges=mst_edges, b=complexity, path='../assets/complexity_%r/data_%r.csv' % (complexity, (i + 1)))
print('Time for iteration', (i + 1), ':', time.time() - start_iter)
print('\n-------------------------------------------------')
print('Total time:', time.time() - start_total)
| [
37811,
198,
29800,
12,
15252,
425,
19814,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
7177,
198,
11748,
18540,
305,
919,
278,
198,
11748,
299,
32152,
198,
6738,
390,
499,
1330,
2779,
198,
6738,
390,
499,
1330,
13172,
628,
198,
6738,
... | 2.319932 | 1,766 |
'''
module with class definition and methods for reading and writing .grp files
Created on Jun 20, 2012
@author: David Wadden
'''
import os, re
class GRP:
'''
class to read .grp files and return a list
'''
def read(self, in_path):
'''
read a .grp file
'''
with open(in_path, 'r') as f:
lines = f.readlines()
# need the second conditional to ignore comment lines
self.grp = [line.strip() for line in lines if line and not re.match('^#', line)]
def write(self, out):
'''
write a .grp file
'''
with open(out, 'w') as f:
for x in self.grp:
f.write(str(x) + '\n')
def write_grp(in_list,out):
'''
standalone mehthods to write .grp files
'''
with open(out, 'w') as f:
for x in in_list:
#print str(x)
f.write(str(x) + '\n')
def read_grp(in_path):
'''
standalone method to read .grp files
'''
with open(in_path, 'r') as f:
lines = f.readlines()
# again, second conditional ignores comment lines
return [line.strip() for line in lines if line and not re.match('^#', line)] | [
7061,
6,
198,
21412,
351,
1398,
6770,
290,
5050,
329,
3555,
290,
3597,
764,
2164,
79,
3696,
198,
41972,
319,
7653,
1160,
11,
2321,
198,
31,
9800,
25,
3271,
370,
38014,
198,
7061,
6,
198,
11748,
28686,
11,
302,
198,
4871,
10863,
47,
... | 2.122378 | 572 |
import pytest
from unittest import mock
from awx.main.models import AdHocCommand, InventoryUpdate, Job, JobTemplate, ProjectUpdate
from awx.main.models.ha import Instance, InstanceGroup
from awx.main.tasks import apply_cluster_membership_policies
from awx.api.versioning import reverse
from django.utils.timezone import now
@pytest.mark.django_db
@pytest.mark.django_db
class TestPolicyTaskScheduling:
"""Tests make assertions about when the policy task gets scheduled"""
@pytest.mark.parametrize('field, value, expect', [
('name', 'foo-bar-foo-bar', False),
('policy_instance_percentage', 35, True),
('policy_instance_minimum', 3, True),
('policy_instance_list', ['bar?'], True),
('modified', now(), False)
])
@pytest.mark.parametrize('field, value, expect', [
('hostname', 'foo-bar-foo-bar', True),
('managed_by_policy', False, True),
('enabled', False, False),
('capacity_adjustment', 0.42, True),
('capacity', 42, False)
])
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
| [
11748,
12972,
9288,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
3253,
87,
13,
12417,
13,
27530,
1330,
1215,
39,
420,
21575,
11,
35772,
10260,
11,
15768,
11,
15768,
30800,
11,
4935,
10260,
198,
6738,
3253,
87,
13,
12417,
13,... | 2.492674 | 546 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import preprocessing as prep
from scipy.stats import norm
# Animated
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
11748,
662,
36948,
355,
3143,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593,
... | 3.4 | 50 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tries to classify which sentence is the correct fifth sentence."""
import os
import time
from absl import app
from absl import flags
from absl import logging
import gin
import gin.tf
import models
import rocstories_sentence_embeddings
import tensorflow.compat.v2 as tf
import tensorflow.compat.v2.keras.backend as K
import tensorflow_datasets.public_api as tfds
import utils
gfile = tf.io.gfile
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string('save_dir', None, 'Where to save model.')
flags.DEFINE_integer('index', None, 'Optional index of this experiment.')
flags.DEFINE_string('data_dir', None, 'TFDS dataset directory.')
flags.DEFINE_multi_string('gin_config', None, 'Gin config file.')
flags.DEFINE_multi_string('gin_bindings', [],
'Newline separated list of Gin parameter bindings.')
def cur_learning_rate(optimizer):
  """Return the optimizer's current decayed learning rate.

  Mirrors the decay schedule from keras/optimizers.py:
  lr * 1 / (1 + decay * iterations).
  """
  iterations = K.cast(optimizer.iterations, K.dtype(optimizer.decay))
  lr = optimizer.lr * (1. / (1. + optimizer.decay * iterations))
  return lr
@gin.configurable('dataset')
def prepare_datasets(dataset_name=gin.REQUIRED,
                     shuffle_input_sentences=False,
                     num_eval_examples=2000,
                     batch_size=32):
  """Create batched, properly-formatted datasets from the TFDS datasets.

  Args:
    dataset_name: Name of TFDS dataset.
    shuffle_input_sentences: If True, the order of the input sentences is
      randomized.
    num_eval_examples: Number of examples to use during evaluation. For the
      nolabel evaluation, this is also the number of distractors we choose
      between.
    batch_size: Batch size.

  Returns:
    A pair ``(datasets, emb_matrices)``: a dict mapping each split name to a
    Dataset object, and a dict mapping split names to the embedding matrix
    built for that split.
  """
  # Carve the official train split into train / small unlabeled-eval slices;
  # the two official validation years are loaded as published.
  splits_to_load = {
      'valid_nolabel': 'train[:2%]',
      'train': 'train[2%:]',
      'train_nolabel': 'train[2%:4%]',
      'valid2018': rocstories_sentence_embeddings.VALIDATION_2018,
      'valid2016': rocstories_sentence_embeddings.VALIDATION_2016}
  datasets = tfds.load(
      dataset_name,
      data_dir=FLAGS.data_dir,
      split=splits_to_load,
      download=False)
  emb_matrices = {}
  # Convert datasets to the expected training data format, and build the
  # embedding matrices.
  train_ds = utils.build_train_style_dataset(
      datasets['train'], batch_size, shuffle_input_sentences)
  datasets['train'], emb_matrices['train'] = train_ds
  # The unlabeled eval splits reuse the training format but never shuffle,
  # so evaluation is deterministic.
  valid_nolabel_ds = utils.build_train_style_dataset(
      datasets['valid_nolabel'], batch_size, False,
      num_examples=num_eval_examples)
  datasets['valid_nolabel'], emb_matrices['valid_nolabel'] = valid_nolabel_ds
  train_nolabel_ds = utils.build_train_style_dataset(
      datasets['train_nolabel'], batch_size, False,
      num_examples=num_eval_examples)
  datasets['train_nolabel'], emb_matrices['train_nolabel'] = train_nolabel_ds
  # Convert official evaluation datasets to validation data format. There are no
  # embedding matrices involved here since the task has only two possible next
  # sentences to pick between for each example.
  datasets['valid2018'] = utils.build_validation_dataset(
      datasets['valid2018']).take(num_eval_examples)
  datasets['valid2016'] = utils.build_validation_dataset(
      datasets['valid2016']).take(num_eval_examples)
  logging.info('EMBEDDING MATRICES CREATED:')
  for key in emb_matrices:
    logging.info('%s: %s', key, emb_matrices[key].shape)
  return datasets, emb_matrices
@gin.configurable(blacklist=['save_dir'])
def train(save_dir, num_epochs=300,
          learning_rate=0.0001, save_every_n_epochs=25):
  """Train pipeline for next sentence embedding prediction on ROCStories.

  Args:
    save_dir: Directory where checkpoints, TensorBoard summaries, and the
      final evaluation results are written.
    num_epochs: Total number of passes over the training data.
    learning_rate: Adam learning rate.
    save_every_n_epochs: Checkpoint frequency, in epochs.
  """
  #### LOAD DATA ####
  datasets, embedding_matrices = prepare_datasets()

  #### CREATE MODEL AND OPTIMIZER ####
  # Second dimension of the training inputs is the number of input sentences
  # per example.
  num_input_sentences = tf.compat.v1.data.get_output_shapes(
      datasets['train'])[0][1]
  model = models.build_model(
      num_input_sentences=num_input_sentences,
      embedding_matrix=embedding_matrices['train'])
  metrics = model.create_metrics()

  optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  num_train_steps = 0

  #### DO TRAINING ####
  summary_writer = tf.summary.create_file_writer(
      os.path.join(save_dir, 'summaries'))
  with summary_writer.as_default():
    logging.info('Starting training.')
    for epoch in range(1, num_epochs+1):
      for x, labels in datasets['train']:
        utils.train_step(model, optimizer, x, labels, metrics)
        num_train_steps += 1

      start_time = time.time()
      utils.do_evaluation(model, metrics, datasets, embedding_matrices)
      logging.info('Eval took %f seconds.', (time.time() - start_time))

      to_log = ['%s=%f, ' % (m.name, m.result()) for m in metrics.values()]
      logging.info('Epoch %d, %s ', epoch, ''.join(to_log))

      # Add each metric to the TensorBoard and then reset it for the next epoch.
      for metric in metrics.values():
        tf.summary.scalar(
            metric.name, metric.result(), step=optimizer.iterations)
        metric.reset_states()

      if epoch % save_every_n_epochs == 0:
        prefix = os.path.join(
            save_dir, 'ep%04d_step%05d.ckpt' % (epoch, num_train_steps))
        logging.info('Saving checkpoint: %s', prefix)
        checkpoint.save(file_prefix=prefix)

  #### SAVE HYPERPARAMETERS AND FINAL EVAL RESULTS TO FILE ####
  # Reset running metrics first so the saved numbers reflect a clean final
  # evaluation rather than a partial-epoch accumulation.
  to_save = {}
  for metric in metrics.values():
    metric.reset_states()
  utils.do_evaluation(model, metrics, datasets, embedding_matrices)
  for metric in metrics.values():
    to_save['metric_' + metric.name] = metric.result().numpy()

  results_file_path = os.path.join(save_dir, 'final_eval.tsv')
  with gfile.GFile(results_file_path, 'w') as f:
    # BUG FIX: dict.iteritems() does not exist in Python 3 and raised
    # AttributeError here; use items().
    for name, value in to_save.items():
      f.write('%s\t%s\n' % (name, str(value)))
def main(argv):
  """Parse the Gin configuration, then launch training.

  BUG FIX: ``main`` was never defined in this file, so ``app.run(main)``
  raised NameError at startup. This reconstructs the standard absl+gin
  entry point from the flags declared above — confirm against the project's
  sibling binaries.
  """
  del argv  # Unused.
  gin.parse_config_files_and_bindings(FLAGS.gin_config,
                                      FLAGS.gin_bindings)
  train(FLAGS.save_dir)


if __name__ == '__main__':
  # These flags have no defaults; fail fast when they are missing.
  flags.mark_flag_as_required('save_dir')
  flags.mark_flag_as_required('gin_config')
  app.run(main)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.6873 | 2,504 |
"""
Copyright 2016 Disney Connected and Advanced Technologies
Licensed under the Apache License, Version 2.0 (the "Apache License")
with the following modification; you may not use this file except in
compliance with the Apache License and the following modification to it:
Section 6. Trademarks. is deleted and replaced with:
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor
and its affiliates, except as required to comply with Section 4(c) of
the License and to reproduce the content of the NOTICE file.
You may obtain a copy of the Apache License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the Apache License with the above modification is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the Apache License for the specific
language governing permissions and limitations under the Apache License.
"""
__author__ = "Joe Roets, Brandon Kite, Dylan Yelton, Michael Bachtel"
__copyright__ = "Copyright 2016, Disney Connected and Advanced Technologies"
__license__ = "Apache"
__version__ = "2.0"
__maintainer__ = "Joe Roets"
__email__ = "joe@dragonchain.org"
""" only used for manually inserting data into nodes table """
from blockchain.db.postgres import network_db as net_dao
from blockchain.network import Node
import os
import uuid
import argparse
def load_required_nodes(owner, host, port, phases, node_id=None):
    """
    manually insert network node into database
    Args:
        owner: node owner
        host: node host
        port: node port
        phases: node phases provided
        node_id: node uuid pk; a fresh uuid4 is generated per call when omitted
    """
    # BUG FIX: the original signature used node_id=str(uuid.uuid4()), which is
    # evaluated once at import time — every call omitting node_id reused the
    # exact same id. Generate it inside the function instead.
    if node_id is None:
        node_id = str(uuid.uuid4())
    node = Node(node_id, owner, host, port, phases)
    net_dao.insert_node(node)
    # Default the env var to '' so a missing BLOCKCHAIN_DB_NAME cannot raise
    # TypeError from concatenating None.
    print('inserted node into database ' + os.environ.get('BLOCKCHAIN_DB_NAME', '') + " " + node.node_id)
if __name__ == '__main__':
    # Command-line entry point: collect node attributes (with test-friendly
    # defaults) and insert the node into the network table.
    parser = argparse.ArgumentParser(description='Process node data.')
    parser.add_argument('--owner', default="TEST_OWNER")
    parser.add_argument('--host', default="localhost")
    parser.add_argument('-p', '--port')
    parser.add_argument('--phases', default="00001")
    args = vars(parser.parse_args())
    owner = args['owner']
    host = args['host']
    port = args['port']
    phases = args['phases']
    load_required_nodes(owner, host, port, phases)
| [
37811,
198,
15269,
1584,
8519,
8113,
276,
290,
13435,
21852,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
25189,
4891,
13789,
4943,
198,
4480,
262,
1708,
17613,
26,
345,
743,
407,
779,
428,
2... | 3.195929 | 786 |
from flask import Blueprint, render_template, abort
upload = Blueprint('upload', __name__,
template_folder='templates')
@upload.route('/upload') | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
15614,
198,
198,
25850,
796,
39932,
10786,
25850,
3256,
11593,
3672,
834,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 2.698413 | 63 |
# coding: utf-8
#
# Copyright 2019, Kurusugawa Computer Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description comes straight from the README.
with open('README.md', 'r', encoding='utf-8') as f:
    readme = f.read()
about = {}
# Execute the package's version module to pick up __version__ without
# importing the (possibly not-yet-installed) package itself.
with open(os.path.join(here, 'm3u8tool', '__version__.py'), 'r', encoding='utf-8') as f:
    exec(f.read(), about)
def _read_requirements(path):
    """Return the non-empty, non-comment lines of a pip requirements file."""
    with open(path, encoding='utf-8') as f:
        return [line.strip() for line in f
                if line.strip() and not line.lstrip().startswith('#')]


setup(
    name='m3u8tool',
    version=about['__version__'],
    description='A HTTP Live Streaming (HLS) manipulation tool',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='squld',
    author_email='sato@kurusugawa.jp',
    url='https://github.com/kurusugawa-computer/m3u8tool',
    # BUG FIX: ``reqs`` was undefined (NameError at install time) and its
    # result was wrapped in an extra list; parse requirements.txt instead.
    install_requires=_read_requirements('requirements.txt'),
    packages=find_packages(exclude=["tests"]),
    entry_points={
        "console_scripts": [
            "m3u8tool = m3u8tool.app:main"
        ]
    },
    license='Apache License 2.0',
    python_requires='>=3.5',
    keywords='m3u8',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Utilities",
        "Operating System :: OS Independent",
    ],
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
15069,
13130,
11,
18132,
385,
1018,
6909,
13851,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
40... | 2.730821 | 743 |
# Seed data: hard-coded member records used to initialise the member store.
# NOTE(review): passwords are stored in plain text — acceptable only as a
# test fixture; confirm these never ship as real credentials.
members=[
    {
        "id":"10001",
        "firstName":"Marry",
        "lastName":"Jane",
        "role":"Admin",
        "email":"j.marry@mail.com",
        "password":"password1",
    },
    {
        "id":"10002",
        "firstName":"Tommy",
        "lastName":"Miller",
        "role":"Developer",
        "email":"t.miller@mail.com",
        "password":"password2",
    }
]
| [
2,
220,
3268,
2043,
42865,
198,
198,
30814,
41888,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
312,
2404,
3064,
486,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
366,
11085,
5376,
2404,
44,
6532,
1600,
... | 1.877358 | 212 |
from os import path
from setuptools import setup, find_packages
from biokit.version import __version__
here = path.abspath(path.dirname(__file__))
# The PyPI long description comes straight from the README.
with open(path.join(here, "README.md")) as f:
    long_description = f.read()
CLASSIFIERS = [
    "Operating System :: OS Independent",
    "Intended Audience :: Science/Research",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Topic :: Scientific/Engineering",
]
# Runtime dependencies (pinned for reproducible installs).
REQUIRES = ["biopython==1.76", "numpy==1.18.2", "cython"]
# Package metadata plus the (very large) console-script alias table. Every
# BioKIT subcommand is exposed under its full "bk_*" name and one or more
# short aliases, all dispatching into functions in biokit/biokit.py.
setup(
    name="jlsteenwyk-biokit",
    description="",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Jacob L. Steenwyk",
    author_email="jlsteenwyk@gmail.com",
    url="https://github.com/jlsteenwyk/biokit",
    packages=find_packages(),
    classifiers=CLASSIFIERS,
    entry_points={
        "console_scripts": [
            "biokit = biokit.biokit:main",
            "bk_alignment_length = biokit.biokit:alignment_length",  # Alignment-based functions
            "bk_aln_len = biokit.biokit:alignment_length",
            "bk_alignment_recoding = biokit.biokit:alignment_recoding",
            "bk_aln_recoding = biokit.biokit:alignment_recoding",
            "bk_recode = biokit.biokit:alignment_recoding",
            "bk_alignment_summary = biokit.biokit:alignment_summary",
            "bk_aln_summary = biokit.biokit:alignment_summary",
            "bk_consensus_sequence = biokit.biokit:consensus_sequence",
            "bk_con_seq = biokit.biokit:consensus_sequence",
            "bk_constant_sites = biokit.biokit:constant_sites",
            "bk_con_sites = biokit.biokit:constant_sites",
            "bk_parsimony_informative_sites = biokit.biokit:parsimony_informative_sites",
            "bk_pi_sites = biokit.biokit:parsimony_informative_sites",
            "bk_pis = biokit.biokit:parsimony_informative_sites",
            "bk_position_specific_score_matrix = biokit.biokit:position_specific_score_matrix",
            "bk_pssm = biokit.biokit:position_specific_score_matrix",
            "bk_variable_sites = biokit.biokit:variable_sites",
            "bk_var_sites = biokit.biokit:variable_sites",
            "bk_vs = biokit.biokit:variable_sites",
            "bk_gc_content_first_position = biokit.biokit:gc_content_first_position",  # Coding sequences-based functions
            "bk_gc1 = biokit.biokit:gc_content_first_position",
            "bk_gc_content_second_position = biokit.biokit:gc_content_second_position",
            "bk_gc2 = biokit.biokit:gc_content_second_position",
            "bk_gc_content_third_position = biokit.biokit:gc_content_third_position",
            "bk_gc3 = biokit.biokit:gc_content_third_position",
            "bk_gene_wise_relative_synonymous_codon_usage = biokit.biokit:gene_wise_relative_synonymous_codon_usage",
            "bk_gene_wise_rscu = biokit.biokit:gene_wise_relative_synonymous_codon_usage",
            "bk_gw_rscu = biokit.biokit:gene_wise_relative_synonymous_codon_usage",
            "bk_grscu = biokit.biokit:gene_wise_relative_synonymous_codon_usage",
            "bk_relative_synonymous_codon_usage = biokit.biokit:relative_synonymous_codon_usage",
            "bk_rscu = biokit.biokit:relative_synonymous_codon_usage",
            "bk_translate_sequence = biokit.biokit:translate_sequence",
            "bk_translate_seq = biokit.biokit:translate_sequence",
            "bk_trans_seq = biokit.biokit:translate_sequence",
            "bk_fastq_read_lengths = biokit.biokit:fastq_read_lengths",  # FASTQ-based functions
            "bk_fastq_read_lens = biokit.biokit:fastq_read_lengths",
            "bk_subset_pe_fastq_reads = biokit.biokit:subset_pe_fastq_reads",
            "bk_subset_pe_fastq = biokit.biokit:subset_pe_fastq_reads",
            "bk_subset_se_fastq_reads = biokit.biokit:subset_se_fastq_reads",
            "bk_subset_se_fastq = biokit.biokit:subset_se_fastq_reads",
            "bk_trim_pe_adapters_fastq = biokit.biokit:trim_pe_adapters_fastq",
            "bk_trim_pe_adapters_fastq_reads = biokit.biokit:trim_pe_adapters_fastq",
            "bk_trim_pe_fastq = biokit.biokit:trim_pe_fastq",
            "bk_trim_pe_fastq_reads = biokit.biokit:trim_pe_fastq",
            "bk_trim_se_adapters_fastq = biokit.biokit:trim_se_adapters_fastq",
            "bk_trim_se_adapters_fastq_reads = biokit.biokit:trim_se_adapters_fastq",
            "bk_trim_se_fastq = biokit.biokit:trim_se_fastq",
            "bk_trim_se_fastq_reads = biokit.biokit:trim_se_fastq",
            "bk_gc_content = biokit.biokit:gc_content",  # genomes-based functions
            "bk_gc = biokit.biokit:gc_content",
            "bk_genome_assembly_metrics = biokit.biokit:genome_assembly_metrics",
            "bk_assembly_metrics = biokit.biokit:genome_assembly_metrics",
            "bk_l50 = biokit.biokit:l50",
            "bk_l90 = biokit.biokit:l90",
            "bk_longest_scaffold = biokit.biokit:longest_scaffold",
            "bk_longest_scaff = biokit.biokit:longest_scaffold",
            "bk_longest_contig = biokit.biokit:longest_scaffold",
            "bk_longest_cont = biokit.biokit:longest_scaffold",
            "bk_n50 = biokit.biokit:n50",
            "bk_n90 = biokit.biokit:n90",
            "bk_number_of_large_scaffolds = biokit.biokit:number_of_large_scaffolds",
            "bk_num_of_lrg_scaffolds = biokit.biokit:number_of_large_scaffolds",
            "bk_number_of_large_contigs = biokit.biokit:number_of_large_scaffolds",
            "bk_num_of_lrg_cont = biokit.biokit:number_of_large_scaffolds",
            "bk_number_of_scaffolds = biokit.biokit:number_of_scaffolds",
            "bk_num_of_scaffolds = biokit.biokit:number_of_scaffolds",
            "bk_number_of_contigs = biokit.biokit:number_of_scaffolds",
            "bk_num_of_cont = biokit.biokit:number_of_scaffolds",
            "bk_sum_of_scaffold_lengths = biokit.biokit:sum_of_scaffold_lengths",
            "bk_sum_of_contig_lengths = biokit.biokit:sum_of_scaffold_lengths",
            "bk_character_frequency = biokit.biokit:character_frequency",  # text-based functions
            "bk_char_freq = biokit.biokit:character_frequency",
            "bk_faidx = biokit.biokit:faidx",
            "bk_get_entry = biokit.biokit:faidx",
            "bk_ge = biokit.biokit:faidx",
            "bk_file_format_converter = biokit.biokit:file_format_converter",
            "bk_format_converter = biokit.biokit:file_format_converter",
            "bk_ffc = biokit.biokit:file_format_converter",
            "bk_multiple_line_to_single_line_fasta = biokit.biokit:multiple_line_to_single_line_fasta",
            "bk_ml2sl = biokit.biokit:multiple_line_to_single_line_fasta",
            "bk_remove_short_sequences = biokit.biokit:remove_short_sequences",
            "bk_remove_short_seqs = biokit.biokit:remove_short_sequences",
            "bk_remove_fasta_entry = biokit.biokit:remove_fasta_entry",
            "bk_rename_fasta_entries = biokit.biokit:rename_fasta_entries",
            "bk_rename_fasta = biokit.biokit:rename_fasta_entries",
            "bk_reorder_by_sequence_length = biokit.biokit:reorder_by_sequence_length",
            "bk_reorder_by_seq_len = biokit.biokit:reorder_by_sequence_length",
            "bk_sequence_complement = biokit.biokit:sequence_complement",
            "bk_seq_comp = biokit.biokit:sequence_complement",
            "bk_sequence_length = biokit.biokit:sequence_length",
            "bk_seq_len = biokit.biokit:sequence_length",
            "bk_single_line_to_multiple_line_fasta = biokit.biokit:single_line_to_multiple_line_fasta",
            "bk_sl2ml = biokit.biokit:single_line_to_multiple_line_fasta",
        ]
    },
    version=__version__,
    include_package_data=True,
    install_requires=REQUIRES,
)
# push new version to pypi
# rm -rf dist
# python setup.py sdist bdist_wheel --universal
# twine upload dist/* -r pypi
# then push to anaconda
| [
6738,
28686,
1330,
3108,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
3182,
482,
270,
13,
9641,
1330,
11593,
9641,
834,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
83... | 1.96859 | 4,107 |
from typing import Dict
from typing import List
from typing import Any
from typing import Callable
from typing import Optional
from types import ModuleType
import os
import json
import yaml
from syncgit import _gitcmd as gitcmd
from syncgit._gitcmd import RepoInfo
from syncgit._threadsafe_dict import ThreadSafeDict
from syncgit._threadloop import ThreadLoop
from syncgit._config import SYNCGIT_DEFAULT_POLL_INTERVAL
| [
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
4889,
540,
198,
6738,
19720,
1330,
32233,
198,
6738,
3858,
1330,
19937,
6030,
198,
198,
11748,
28686,
198,
11748,
33918,
198,... | 3.776786 | 112 |
"""
Various Python utilities.
"""
from inspect import getmembers, getmodule, isfunction, ismethod
def full_name(item):
    """Return the full dotted name of *item* so it can be retrieved later.

    Accepts a string (returned unchanged), a bound method, a function, or
    any object exposing ``__name__`` (e.g. a class).
    """
    # BUG FIX: this function was Python 2 only — ``basestring``, ``im_self``
    # and ``func_name`` all raise NameError/AttributeError on Python 3.
    if isinstance(item, str):
        return item
    if ismethod(item):
        # Bound method: qualify through its owner. ``__self__`` is the
        # Python 3 spelling of the Python 2 ``im_self`` attribute.
        owner = getattr(item, '__self__', None)
        if owner is None:
            owner = dict(getmembers(item))['im_self']
        module_name = full_name(owner)
    else:
        module_name = getmodule(item).__name__
    # ``__name__`` is valid for both functions and classes (``func_name``
    # was removed in Python 3).
    name = item.__name__
    return '.'.join([module_name, name])
def object_at_end_of_path(path):
    """Return the Python object named by the dotted *path*.

    Tries progressively shorter module prefixes of the path, imports the
    first one that succeeds, then walks the remaining attributes. Falls
    back to the builtins namespace for bare names such as ``'len'``.
    """
    access_path = path.split('.')
    module = None
    # BUG FIX: ``xrange`` does not exist in Python 3; use range().
    for index in range(1, len(access_path)):
        try:
            # import top level module (longest candidate prefix first)
            module_name = '.'.join(access_path[:-index])
            module = __import__(module_name)
        except ImportError:
            continue
        else:
            for step in access_path[1:-1]:  # walk down it
                module = getattr(module, step)
            break
    if module:
        return getattr(module, access_path[-1])
    # BUG FIX: ``globals()['__builtins__']`` may be a module rather than a
    # dict depending on import context; go through the builtins module.
    import builtins
    return getattr(builtins, path)
def non_unicode_kwarg_keys(kwargs):
    """Return *kwargs* with every key coerced to ``str``.

    Python 2 rejects unicode keyword-argument names, so mappings are passed
    through here before ``**``-expansion. A falsy input (``None`` or an
    empty mapping) yields an empty dict.
    """
    if not kwargs:
        return {}
    return {str(key): value for key, value in kwargs.items()}
| [
37811,
198,
220,
220,
220,
26386,
11361,
20081,
13,
198,
37811,
198,
6738,
10104,
1330,
651,
30814,
11,
651,
21412,
11,
318,
8818,
11,
318,
24396,
628,
198,
4299,
1336,
62,
3672,
7,
9186,
2599,
198,
220,
220,
220,
37227,
13615,
262,
... | 2.408497 | 612 |
import numpy as np
import seekpath
if __name__ == '__main__':
    import os
    import sys
    import yaml
    from phonopy import Phonopy
    from phonopy.interface.phonopy_yaml import get_unitcell_from_phonopy_yaml
    from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
    import matplotlib

    # Unit cell: from the command line when given, otherwise the default file.
    # BUG FIX: the original unconditionally re-read "POSCAR-unitcell.yaml"
    # right after this branch, silently discarding the command-line argument.
    if len(sys.argv) > 1:
        cell = get_unitcell_from_phonopy_yaml(sys.argv[1])
    else:
        cell = get_unitcell_from_phonopy_yaml("POSCAR-unitcell.yaml")

    # Close the file handle instead of leaking it via yaml.load(open(...)).
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; left as-is for old-PyYAML compatibility.
    with open("phonon.yaml") as f:
        phonon_info = yaml.load(f)

    phonon = None
    if os.path.isfile("phonopy.conf"):
        with open("phonopy.conf") as f:
            for line in f:
                if 'PRIMITIVE_AXIS' in line:
                    # frac2val is defined elsewhere in the full file —
                    # presumably converts entries like "1/2" to floats.
                    prim_vals = [frac2val(x) for x in line.split()[2:]]
                    if len(prim_vals) == 9:
                        primitive_matrix = np.reshape(prim_vals, (3, 3))
                        phonon = Phonopy(cell,
                                         phonon_info['supercell_matrix'],
                                         primitive_matrix=primitive_matrix)
                    else:
                        print("PRIMITIVE_AXIS is something wrong.")
                        sys.exit(1)
                    break
    if phonon is None:
        phonon = Phonopy(cell, phonon_info['supercell_matrix'])

    force_sets = parse_FORCE_SETS()
    phonon.set_displacement_dataset(force_sets)
    phonon.produce_force_constants()

    # Non-analytical term correction, applied when Born charges are present.
    if os.path.isfile("BORN"):
        primitive = phonon.get_primitive()
        nac_params = parse_BORN(primitive, filename="BORN")
        nac_params['factor'] = 14.399652
        phonon.set_nac_params(nac_params)

    # Band is defined elsewhere in the full file (not in this chunk).
    band = Band(phonon, num_qpoints=101)
    if band.run():
        band.write_band_yaml()
        _, distances, frequencies, _ = band.get_band()
        d_end = distances[-1][-1]
        f_max = np.max(frequencies)
        primitive = phonon.get_primitive()
        num_atom = primitive.get_number_of_atoms()
        # Figure width scales with both band-path length and cell size.
        length = num_atom ** (1.0 / 3) * 4.5
        figsize_x = d_end * length
        margin = 0.7
        scale = 0.15
        delta_d = d_end / (figsize_x - margin) * scale
        matplotlib.use('Agg')
        matplotlib.rcParams.update({'figure.figsize': (figsize_x, 3.1),
                                    'font.family': 'serif'})
        import matplotlib.pyplot as plt
        band.plot_band(plt, delta_d=(delta_d))
        band.save_band(plt)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
5380,
6978,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
28686,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
1330,
331,
43695,
198,
22... | 1.904798 | 1,334 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch.utils.data
import numpy
import os
MAX_VALUE = 413.5
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
28034,
13,
26791,
13,
7890,
201,
198,
11748,
299,
32152,
201,
198,
11748,
28686,
201,
198,
201,
198,
... | 2.118644 | 59 |
#encoding: utf8
from gevent import monkey; monkey.patch_socket()
import gevent
import gzip
import time
codes = [
('2900',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d1\u05d9\u05e0\u05d5\u05d9 \u05d5\u05d4\u05e9\u05d9\u05db\u05d5\u05df'),
('3600', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05de\u05ea'),
('0400',
u'\u05de\u05e9\u05e8\u05d3 \u05e8\u05d0\u05e9 \u05d4\u05de\u05de\u05e9\u05dc\u05d4'),
('5900',
u'\u05e9\u05e8\u05d5\u05ea \u05d4\u05ea\u05e2\u05e1\u05d5\u05e7\u05d4'),
('3000',
u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05e7\u05dc\u05d9\u05d8\u05ea \u05e2\u05dc\u05d9\u05d4'),
('1100',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05d9\u05d9\u05e8\u05d5\u05ea'),
('2400',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d1\u05e8\u05d9\u05d0\u05d5\u05ea'),
('3300',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d7\u05e7\u05dc\u05d0\u05d5\u05ea \u05d5\u05e4\u05d9\u05ea\u05d5\u05d7 \u05d4\u05db\u05e4\u05e8'),
('0540',
u'\u05de.\u05d4\u05d0\u05d5\u05e6\u05e8 - \u05de\u05d8\u05d4 \u05d4\u05d7\u05e9\u05d1 \u05d4\u05db\u05dc\u05dc\u05d9'),
('0490', u'\u05e8\u05d5\u05d4\u05de'),
('0000',
u'\u05d0\u05e0\u05d0 \u05d1\u05d7\u05e8 \u05de\u05e9\u05e8\u05d3/\u05d9\u05d7\u05d9\u05d3\u05d4'),
('4050',
u'\u05e8\u05e9\u05d5\u05ea \u05dc\u05d0\u05d5\u05de\u05d9\u05ea \u05dc\u05d1\u05d8\u05d9\u05d7\u05d5\u05ea \u05d3\u05e8\u05db\u05d9\u05dd'),
('5800',
u'\u05d4\u05d0\u05d5\u05e6\u05e8-\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e0\u05d9\u05e6\u05d5\u05dc\u05d9 \u05d4\u05e9\u05d5\u05d0\u05d4'),
('5300', u'\u05dc\u05e9\u05db\u05ea \u05d4\u05e0\u05e9\u05d9\u05d0'),
('2200',
u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05e9\u05d9\u05e8\u05d5\u05ea\u05d9 \u05d3\u05ea'),
('2000', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d7\u05d9\u05e0\u05d5\u05da'),
('5200',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05de\u05d3\u05e2 \u05d5\u05d4\u05d8\u05db\u05e0\u05d5\u05dc\u05d5\u05d2\u05d9\u05d4'),
('2300', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05e8\u05d5\u05d5\u05d7\u05d4'),
('3600', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05db\u05dc\u05db\u05dc\u05d4'),
('1600',
u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05d4\u05d2\u05e0\u05ea \u05d4\u05e1\u05d1\u05d9\u05d1\u05d4'),
('0600', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05e4\u05e0\u05d9\u05dd'),
('0420',
u'\u05e4\u05d9\u05ea\u05d5\u05d7 \u05e0\u05d2\u05d1, \u05d2\u05dc\u05d9\u05dc \u05d5\u05e9.\u05e4\u05e2\u05d5\u05dc\u05d4'),
('2950', u'\u05ea\u05e0\u05d5\u05e4\u05d4'),
('5210',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05e8\u05d1\u05d5\u05ea \u05d5\u05d4\u05e1\u05e4\u05d5\u05e8\u05d8'),
('5400',
u'\u05d4\u05d7\u05d8\u05d9\u05d1\u05d4 \u05dc\u05d4\u05ea\u05d9\u05e9\u05d1\u05d5\u05ea'),
('1700',
u'\u05ea\u05d9\u05d0\u05d5\u05dd \u05d4\u05e4\u05e2\u05d5\u05dc\u05d5\u05ea \u05d1\u05e9\u05d8\u05d7\u05d9\u05dd'),
('5700',
u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05d1\u05d8\u05d7\u05d5\u05df \u05e4\u05e0\u05d9\u05dd'),
('0500', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d0\u05d5\u05e6\u05e8'),
('4000',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05d7\u05d1\u05d5\u05e8\u05d4'),
('1200',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d0\u05e0\u05e8\u05d2\u05d9\u05d4 \u05d5\u05d4\u05de\u05d9\u05dd'),
('3700',
u'\u05de.\u05d4\u05d0\u05d5\u05e6\u05e8 - \u05de\u05e0\u05d4\u05dc\u05ea \u05d4\u05d2\u05de\u05dc\u05d0\u05d5\u05ea'),
('5500', u'\u05e8\u05e9\u05d5\u05ea \u05d4\u05de\u05d9\u05dd'),
('3200',
u'\u05de. \u05d4\u05d7\u05d9\u05e0\u05d5\u05da-\u05d7\u05d9\u05e0\u05d5\u05da \u05d4\u05ea\u05d9\u05d9\u05e9\u05d1\u05d5\u05ea\u05d9'),
('2080',
u'\u05d7\u05d9\u05e0\u05d5\u05da-\u05d0\u05d2\u05e3 \u05dc\u05de\u05d5\u05e1\u05d3\u05d5\u05ea \u05ea\u05d5\u05e8\u05e0\u05d9\u05d9\u05dd'),
('5000',
u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05e7\u05e9\u05d5\u05e8\u05ea')]
#codes = [('5800', u'\u05d4\u05d0\u05d5\u05e6\u05e8-\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e0\u05d9\u05e6\u05d5\u05dc\u05d9 \u05d4\u05e9\u05d5\u05d0\u05d4'), ('5700', u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05d1\u05d8\u05d7\u05d5\u05df \u05e4\u05e0\u05d9\u05dd'), ('1600', u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05d4\u05d2\u05e0\u05ea \u05d4\u05e1\u05d1\u05d9\u05d1\u05d4'), ('3000', u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05e7\u05dc\u05d9\u05d8\u05ea \u05e2\u05dc\u05d9\u05d4'), ('2200', u'\u05d4\u05de\u05e9\u05e8\u05d3 \u05dc\u05e9\u05d9\u05e8\u05d5\u05ea\u05d9 \u05d3\u05ea'), ('2080', u'\u05d7\u05d9\u05e0\u05d5\u05da-\u05d0\u05d2\u05e3 \u05dc\u05de\u05d5\u05e1\u05d3\u05d5\u05ea \u05ea\u05d5\u05e8\u05e0\u05d9\u05d9\u05dd'), ('5300', u'\u05dc\u05e9\u05db\u05ea \u05d4\u05e0\u05e9\u05d9\u05d0'), ('3200', u'\u05de. \u05d4\u05d7\u05d9\u05e0\u05d5\u05da-\u05d7\u05d9\u05e0\u05d5\u05da \u05d4\u05ea\u05d9\u05d9\u05e9\u05d1\u05d5\u05ea\u05d9'), ('0540', u'\u05de.\u05d4\u05d0\u05d5\u05e6\u05e8 - \u05de\u05d8\u05d4 \u05d4\u05d7\u05e9\u05d1 \u05d4\u05db\u05dc\u05dc\u05d9'), ('3700', u'\u05de.\u05d4\u05d0\u05d5\u05e6\u05e8 - \u05de\u05e0\u05d4\u05dc\u05ea \u05d4\u05d2\u05de\u05dc\u05d0\u05d5\u05ea'), ('0500', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d0\u05d5\u05e6\u05e8'), ('1200', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d0\u05e0\u05e8\u05d2\u05d9\u05d4 \u05d5\u05d4\u05de\u05d9\u05dd'), ('2900', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d1\u05d9\u05e0\u05d5\u05d9 \u05d5\u05d4\u05e9\u05d9\u05db\u05d5\u05df'), ('2400', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d1\u05e8\u05d9\u05d0\u05d5\u05ea'), ('2000', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d7\u05d9\u05e0\u05d5\u05da'), ('3300', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05d7\u05e7\u05dc\u05d0\u05d5\u05ea \u05d5\u05e4\u05d9\u05ea\u05d5\u05d7 \u05d4\u05db\u05e4\u05e8'), ('5200', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05de\u05d3\u05e2 \u05d5\u05d4\u05d8\u05db\u05e0\u05d5\u05dc\u05d5\u05d2\u05d9\u05d4'), ('0600', u'\u05de\u05e9\u05e8\u05d3 
\u05d4\u05e4\u05e0\u05d9\u05dd'), ('2300', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05e8\u05d5\u05d5\u05d7\u05d4'), ('4000', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05d7\u05d1\u05d5\u05e8\u05d4'), ('1100', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05d9\u05d9\u05e8\u05d5\u05ea'), ('3600', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05de\u05ea'), ('5000', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05e7\u05e9\u05d5\u05e8\u05ea'), ('5210', u'\u05de\u05e9\u05e8\u05d3 \u05d4\u05ea\u05e8\u05d1\u05d5\u05ea \u05d5\u05d4\u05e1\u05e4\u05d5\u05e8\u05d8'), ('0400', u'\u05de\u05e9\u05e8\u05d3 \u05e8\u05d0\u05e9 \u05d4\u05de\u05de\u05e9\u05dc\u05d4'), ('0420', u'\u05e4\u05d9\u05ea\u05d5\u05d7 \u05e0\u05d2\u05d1, \u05d2\u05dc\u05d9\u05dc \u05d5\u05e9.\u05e4\u05e2\u05d5\u05dc\u05d4'), ('0490', u'\u05e8\u05d5\u05d4\u05de'), ('5500', u'\u05e8\u05e9\u05d5\u05ea \u05d4\u05de\u05d9\u05dd'), ('4050', u'\u05e8\u05e9\u05d5\u05ea \u05dc\u05d0\u05d5\u05de\u05d9\u05ea \u05dc\u05d1\u05d8\u05d9\u05d7\u05d5\u05ea \u05d3\u05e8\u05db\u05d9\u05dd'), ('5900', u'\u05e9\u05e8\u05d5\u05ea \u05d4\u05ea\u05e2\u05e1\u05d5\u05e7\u05d4'), ('1700', u'\u05ea\u05d9\u05d0\u05d5\u05dd \u05d4\u05e4\u05e2\u05d5\u05dc\u05d5\u05ea \u05d1\u05e9\u05d8\u05d7\u05d9\u05dd')]
for k,v in codes:
print k,"-",v.encode('utf8')
import csv
import re
import urllib2
from pyquery import PyQuery as pq
DataURL = "http://tmichot.gov.il/ibi_apps/WFServlet?IBIF_webapp=/ibi_apps&IBIC_server=EDASERVE&IBIF_ex=suppe_notif_item_all6&CLICKED_ON=&LSMYEAR=%(year)s&COMP_CODE=%(hcode)s&COMP_CODE_DISPLAY=%%E4%%EE%%F9%%F8%%E3%%20%%EC%%E4%%E2%%F0%%FA%%20%%E4%%F1%%E1%%E9%%E1%%E4&NOTTYPE_LIST=FOC_NONE&LSCOMMIT_LIST=%(code)s&MDRILL=1&YEARTXT=%(year)s1231"
CodesURL = "http://tmichot.gov.il/ibi_apps/WFServlet?IBIF_ex=runtrig&TRIGGER0=1&TRIGGER1=suppe_trg_lst_cc_commit&SW_CURROBJ=COMP_CODE&APPID=supp_ext_app&LSTFRMID=suppe_notif_item_all6&FRAMEPCT=0&DO_fex=suppe_notif_item_all6&SW_CLICKED=&LSMYEAR=%(year)s&COMP_CODE=%(hcode)s&NOTTYPE_LIST=FOC_NONE&NOTTYPE=FOC_NONE&LSMCOMMITXT=&LSCOMMIT_LIST=FOC_NONE&LSCOMMIT=FOC_NONE&OUTPUT=HTML&LSMYEAR_DISPLAY=%(year)s&COMP_CODE_DISPLAY=%%E4%%EE%%F9%%F8%%E3+%%EC%%E1%%E8%%E7%%E5%%EF+%%F4%%F0%%E9%%ED&NOTTYPE_DISPLAY=%%E4%%EB%%EC&LSCOMMIT_DISPLAY=%%E4%%EB%%EC&OUTPUT_DISPLAY=%%E4%%F6%%E2%%FA+%%E3%%E5%%E7&SW_INITIAL=N&RAND=0.9564581164158881"
CodesRE = re.compile('set1\("([0-9]+)',re.M)
g = []
for year in range(2012,2015):
do_stuff(year)
#gevent.joinall(g)
| [
2,
12685,
7656,
25,
3384,
69,
23,
198,
198,
6738,
4903,
1151,
1330,
21657,
26,
21657,
13,
17147,
62,
44971,
3419,
198,
11748,
4903,
1151,
198,
11748,
308,
13344,
198,
11748,
640,
198,
198,
40148,
796,
685,
198,
19203,
1959,
405,
3256,... | 1.430904 | 5,695 |
# coding: utf-8
from __future__ import print_function, unicode_literals
import tarfile
import threading
from .sutil import errdesc
from .util import Queue, fsenc
from .bos import bos
class QFile(object):
"""file-like object which buffers writes into a queue"""
class StreamTar(object):
"""construct in-memory tar file from the given path"""
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
13422,
7753,
198,
11748,
4704,
278,
198,
198,
6738,
764,
82,
22602,
1330,
1931,
4372,
3798,
198,
6... | 3.317757 | 107 |
import pandas as pd

# Fixture rows for tests: each tuple is one raw record
# (id, flag, weight, code_a, roman, item, f1, f2, f3, code_b, r1, r2, label).
test_records = [
    (1228, False, 1.0, 'QQ', 'VII', 'Pullover', 27.341917760765206, 617.516272605143, 2896.437068349862, 'AB', 0.00036614625001438027, 6.790803554896343e-05, True),
    (1792, False, 1.0, 'AAT', 'X', 'Trouser', 92.06323912407882, 214.45262374753824, 2090.9883380607143, 'CP', 0.0006170868045364592, 0.00013976126468283044, False),
    (1072, True, 1.0, 'WKN', 'VII', 'Bag', 53.559076271648806, 632.5399019492289, 1267.726492026083, 'HU', 0.04305724111608195, 0.00011397806633107171, False),
]

# Expected one-hot-style feature frame: integer columns 0..9, three rows;
# column 0 is [1, 0, 0], every other column is all zeros.
test_formated_x = pd.DataFrame(
    {col: ([1, 0, 0] if col == 0 else [0, 0, 0]) for col in range(10)}
)

# Expected labels: only the first record is positive.
test_formated_y = pd.Series([True, False, False])
11748,
19798,
292,
355,
279,
67,
198,
9288,
62,
8344,
3669,
796,
220,
685,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
357,
1065,
2078,
11,
10352,
11,
352,
13,
15,
11,
705,
48,
48,
3256,
705,
... | 1.672691 | 498 |
from .utils import prettify, highlight
| [
6738,
764,
26791,
1330,
46442,
1958,
11,
7238,
628
] | 4.444444 | 9 |
#!/usr/bin/env python
"""
demo_leds -- demo of blink1 library showing independent LED access

Fades LED1 and LED2 of a blink(1) device through a short colour
sequence, then closes the connection.  Exits early when no device
is attached.
"""
import time
import sys

from blink1.blink1 import Blink1

try:
    blink1 = Blink1()
except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    print("no blink1 found")
    sys.exit()

# fade_to_rgb(millis, r, g, b, led) / fade_to_color(millis, color, led):
# led=1 and led=2 address the two LEDs independently; led=0 (also the
# default when the argument is omitted) targets both LEDs at once.
print("fading to 255,0,0 on LED1" )
blink1.fade_to_rgb(500, 255, 0, 0, 1)
print("fading to 0,0,255 on LED2" )
blink1.fade_to_rgb(500, 0, 0, 255, 2)
time.sleep(1.0)

print("fading to blue on LED1" )
blink1.fade_to_color(500, 'blue', 1)
print("fading to red on LED2" )
blink1.fade_to_color(500, 'red', 2)
time.sleep(1.0)

print("fading to black on both LEDs")
blink1.fade_to_color(1000, 'black', 0)
time.sleep(1.0)

print("fading to green on both LEDs")
blink1.fade_to_color(500, '#00FF00')
time.sleep(1.0)

print("fading to black on both LEDs")
blink1.fade_to_color(500, 'black')

print("closing connection to blink(1)")
blink1.close()
print("done")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
9536,
78,
62,
992,
82,
1377,
13605,
286,
21019,
16,
5888,
4478,
4795,
12365,
1895,
198,
198,
37811,
198,
11748,
640,
11,
17597,
198,
6738,
21019,
16,
13,
2436,
676,
... | 2.295337 | 386 |
import torch
from metrics.base_metric import BaseMetric
| [
11748,
28034,
198,
198,
6738,
20731,
13,
8692,
62,
4164,
1173,
1330,
7308,
9171,
1173,
628,
198
] | 3.470588 | 17 |
import unittest
from typing import Callable
import numpy as np
from l5kit.random import GaussianRandomGenerator, LambdaRandomGenerator, ReplayRandomGenerator
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
300,
20,
15813,
13,
25120,
1330,
12822,
31562,
29531,
8645,
1352,
11,
21114,
6814,
29531,
8645,
1352,
11,
23635,
29531,
86... | 3.181818 | 66 |
'''
Write a program to read 6 integers (y1, m1, d1, y2, m2, and d2)
- y1 m1 and d1 are the year, month and birthday of the first friend
- y2 m2 and d2 are the year, month and second friend's birthday
Write a program to find out which friend was born first. If the first person is born first, print “1”, if the
second friend is born first, print “2” (in case that the birthday of both of them is similar, print “equal”).
'''
# Read the two birth dates, one integer per line: year, month, day.
y1 = int(input())
m1 = int(input())
d1 = int(input())
y2 = int(input())
m2 = int(input())
d2 = int(input())

# Python compares tuples lexicographically, so (year, month, day)
# ordering is exactly chronological ordering.  This replaces the
# original three-level nested if/else chain with a single comparison.
if (y1, m1, d1) < (y2, m2, d2):
    print('1')
elif (y1, m1, d1) > (y2, m2, d2):
    print('2')
else:
    print('equal')
| [
7061,
6,
198,
16594,
257,
1430,
284,
1100,
718,
37014,
357,
88,
16,
11,
285,
16,
11,
288,
16,
11,
331,
17,
11,
285,
17,
11,
290,
288,
17,
8,
198,
12,
331,
16,
285,
16,
290,
288,
16,
389,
262,
614,
11,
1227,
290,
10955,
286,
... | 2.364486 | 321 |
from datetime import datetime
from investing_algorithm_framework.core.models.model_extension import \
ModelExtension
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
14771,
62,
282,
42289,
62,
30604,
13,
7295,
13,
27530,
13,
19849,
62,
2302,
3004,
1330,
3467,
198,
220,
220,
220,
9104,
11627,
3004,
628
] | 3.617647 | 34 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 13:06:14 2021
@author: Denise Lanzieri
"""
import bigfile
from lenstools.simulations.nbody import NbodySnapshot
import numpy as np
a2z = lambda a: 1 / a - 1
class FastPMSnapshot(NbodySnapshot):
"""
A class that handles FastPM simulation snapshots
"""
_header_keys = [
'masses', 'num_particles_file', 'num_particles_total', 'box_size',
'num_files', 'Om0', 'Ode0', 'h'
]
############################
#Open the file with bigfile#
############################
@classmethod
###################################################################################
######################Abstract method implementation###############################
###################################################################################
@classmethod
@classmethod
###########################################################################################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
1526,
2579,
1511,
25,
3312,
25,
1415,
33448,
198,
198,
31,
9800,
25,
43596,
14730,
... | 3.465035 | 286 |
'''
Aula 25 - Operadores de comparação
(Lesson 25 - comparison operators)
'''
# Each comparison evaluates to a bool; bind them to names first so the
# printed sequence (False, True, True, True) is easy to follow.
equal_check = 20 == 21
not_equal_check = 20 != 21
greater_check = 21 > 1
combined_check = (20 == 20) and (20 != 21)

print(equal_check)     # False
print(not_equal_check) # True
print(greater_check)   # True
print(combined_check)  # True
7061,
6,
198,
220,
220,
220,
317,
4712,
1679,
532,
6564,
324,
2850,
390,
552,
3301,
16175,
28749,
198,
7061,
6,
198,
198,
4798,
7,
1238,
6624,
2310,
8,
1303,
10352,
198,
4798,
7,
1238,
14512,
2310,
8,
1303,
6407,
198,
4798,
7,
248... | 2.412698 | 63 |
'''Loan modelling script: read the raw loan file, clean it, one-hot encode
the loan purpose, split train/test chronologically, and grid-search an SGD
logistic-regression classifier for the binary "rejected" target.'''
import pandas as pd
import numpy as np

print('reading file')
data = pd.read_csv('../data/input_file_1.csv.zip', sep = ',', index_col=0, compression='zip')
print('file shape', data.shape)

# Parse the issue date column to a proper datetime dtype.
print('date encoding')
data['issue_d'] = pd.to_datetime(data['issue_d'])

# Check for and remove datapoints with null values.
print(data['issue_d'].isnull().any(), data['purpose'].isnull().any())
print('remove null datapoints to see if it helps...')
data = data.loc[data['purpose'].notnull()]  # was `.isnull() == False` (anti-idiom)

# Drop purpose categories with too few examples to learn from.
print('eliminating small count categories')
threshold = 19000
counts = data['purpose'].value_counts()
keep_list = counts[counts > threshold].index
data = data[data['purpose'].isin(keep_list)]

# Replace the human-readable labels with snake_case identifiers so they
# can be used as column names from pandas and TensorFlow.
print('replacing labels')
to_replace = {
    'Debt consolidation': 'debt_consolidation',
    'Home improvement': 'home_improvement',
    'Credit card refinancing': 'credit_card',
    'Other': 'other',
    'Vacation': 'vacation',
    'Medical expenses': 'medical',
    'Car financing': 'car',
    'Major purchase': 'major_purchase',
    'Moving and relocation': 'moving',
    'Home buying': 'house'
}
data['purpose'] = data['purpose'].replace(to_replace)
print(data['purpose'].value_counts())

# One-hot encode the categorical purpose column.
print('hot encoding')
data = pd.get_dummies(data, columns=['purpose'], drop_first=False)
print('data columns AFTER hot encoding ', data.columns)

# Chronological split by the 90th percentile of the issue date:
# oldest 90% of loans train, newest 10% test (avoids look-ahead leakage).
data_train = data.loc[data['issue_d'] < data['issue_d'].quantile(0.9)]
data_test = data.loc[data['issue_d'] >= data['issue_d'].quantile(0.9)]
print('Number of loans in the partition: ', data_train.shape[0] + data_test.shape[0])
print('Number of loans in the full dataset:', data.shape[0])

# Drop the date column -- it was only needed for the split.
data_train.drop('issue_d', axis=1, inplace=True)
data_test.drop('issue_d', axis=1, inplace=True)

# Split features and labels.
y_train = data_train['rejected']
y_test = data_test['rejected']
X_train = data_train.drop('rejected', axis=1)
X_test = data_test.drop('rejected', axis=1)

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
try:
    # `Imputer` was removed from sklearn.preprocessing in scikit-learn 0.22;
    # SimpleImputer has the same mean-imputation default used below.
    from sklearn.impute import SimpleImputer as Imputer
except ImportError:
    from sklearn.preprocessing import Imputer  # scikit-learn < 0.20
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import SGDClassifier

# Preprocessing + model pipeline: impute, scale, then SGD logistic regression.
# NOTE(review): loss='log' was renamed to 'log_loss' in scikit-learn 1.1 and
# removed in 1.3 -- update when pinning a newer sklearn.
pipeline_sgdlogreg = Pipeline([
    ('imputer', Imputer(copy=False)),  # Mean imputation by default
    ('scaler', StandardScaler(copy=False)),
    ('model', SGDClassifier(
        class_weight='balanced',
        loss='log',
        max_iter=1000,
        tol = 1e-3,
        random_state=1,
        n_jobs=10,
        warm_start=True
        )
    )
])

# Hyperparameter grid: regularization strength and penalty type.
param_grid_sgdlogreg = {
    'model__alpha': [10**-3, 10**-2, 10**1],
    'model__penalty': ['l1', 'l2']
}

# Grid search, scored by ROC AUC with 5-fold cross-validation.
grid_sgdlogreg = GridSearchCV(
    estimator=pipeline_sgdlogreg,
    param_grid=param_grid_sgdlogreg,
    scoring='roc_auc',
    pre_dispatch=3,
    n_jobs=5,
    cv=5,
    verbose=5,
    return_train_score=False
)

# Fit the model.
print('fitting')
grid_sgdlogreg.fit(X_train, y_train)

# Report parameters, best parameters and best score.
# NOTE(review): _get_param_iterator() is a private sklearn API and may
# disappear in future versions; kept for output parity.
print('parameters ', grid_sgdlogreg._get_param_iterator())
print(grid_sgdlogreg.best_params_, grid_sgdlogreg.best_score_)

from sklearn.metrics import roc_auc_score, recall_score

# Make predictions on the held-out test set.
y_score = grid_sgdlogreg.predict_proba(X_test)[:,1]
y_score_flag = [int(round(i)) for i in y_score]

# Two ways of deriving hard labels (rounding probabilities vs. predict());
# the printed metrics should match -- discrepancies indicate a bug.
print('LOOK FOR DISCREPANCIES HERE...')
print(roc_auc_score(y_test, y_score), recall_score(y_test, y_score_flag, pos_label=1), recall_score(y_test, y_score_flag, pos_label=0))
y_score_flag = grid_sgdlogreg.predict(X_test)
print(roc_auc_score(y_test, y_score), recall_score(y_test, y_score_flag, pos_label=1), recall_score(y_test, y_score_flag, pos_label=0))
| [
7061,
6,
11748,
262,
2672,
10392,
290,
1100,
262,
2393,
2637,
7061,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4798,
10786,
25782,
2393,
11537,
198,
198,
7890,
796,
279,
67,
13,
961,
62,
... | 2.68285 | 1,586 |
import yaml
import os
from sys import exit
from psypose.utils import MEVA_CFG_DIR, MEVA_DATA_DIR
if __name__ == '__main__':
cfg = Config('1212')
pass
| [
11748,
331,
43695,
198,
11748,
28686,
198,
6738,
25064,
1330,
8420,
198,
6738,
17331,
3455,
13,
26791,
1330,
11948,
11731,
62,
22495,
38,
62,
34720,
11,
11948,
11731,
62,
26947,
62,
34720,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834... | 2.596774 | 62 |
from fenics import *
import numpy as np
import matplotlib.pyplot as plt
parameters["form_compiler"]["quadrature_degree"] = 4
parameters["form_compiler"]["cpp_optimize"] = True
def load_ellipsoid_data():
    """Load the ellipsoid mesh, boundary markers and fiber fields from ``data/``.

    Returns 4-tuple:
        mesh - the dolfin Mesh (read from data/mesh.xml),
        mf - MeshFunction defining boundary markers,
        numbering - dict of marking numbers for the BASE/ENDO/EPI surfaces,
        fibers - list of functions defining microstructure
                 (fiber, sheet and cross-sheet direction fields)
    """
    # Local import: dolfin is only required when this loader is actually called.
    import dolfin
    mesh = dolfin.Mesh(dolfin.MPI.comm_world, "data/mesh.xml")
    mf = dolfin.MeshFunction("size_t", mesh, "data/facet_function.xml")
    # Marker ids; assumed to match the values stored in facet_function.xml
    # -- TODO confirm against the mesh-generation script.
    numbering = {
        "BASE": 10,
        "ENDO": 30,
        "EPI": 40
    }
    # load fibers, sheet, cross_sheet data
    # Quadrature-space vector element: the fiber fields are defined at
    # quadrature points.  NOTE(review): degree=4 matches the
    # form_compiler "quadrature_degree" set at module top -- keep in sync.
    fiber_element = dolfin.VectorElement(family="Quadrature",
                                         cell=mesh.ufl_cell(),
                                         degree=4,
                                         quad_scheme="default")
    fiber_space = dolfin.FunctionSpace(mesh, fiber_element)
    fiber = dolfin.Function(fiber_space, "data/fiber.xml")
    sheet = dolfin.Function(fiber_space, "data/sheet.xml")
    cross_sheet = dolfin.Function(fiber_space, "data/cross_sheet.xml")
    fibers = [fiber,sheet, cross_sheet]
    return mesh, mf, numbering, fibers
# Define parameters
# N compartments; K[i] = diffusion/permeability coefficient per compartment,
# beta[i][j] = inter-compartment exchange coefficient (symmetric matrix),
# f[i] = source term per compartment.  Physical units not shown here --
# TODO confirm against the model documentation.
N = 3
K = [Constant(1), Constant(0.5), Constant(0.25)]
beta = [[0., 0.2, 0.], [0.2, 0., 0.3], [0., 0.3, 0.]]
f = [Constant(2.), Constant(2.), Constant(0.)]
#Define mesh and function spaces
mesh, boundary_markers, numbering, fibers = load_ellipsoid_data()
# NOTE(review): mixed_function_space is not defined in this file; presumably
# provided by the star-import above or a project helper -- verify.
P = mixed_function_space(mesh, N) # mixed pressure space
#Define boundary conditions
# Dirichlet pressures: 10500 on the epicardium (marker 40) for compartment 0,
# 3300 on the endocardium (marker 30) for compartment 1.
bc_epi = DirichletBC(P.sub(0), Constant(10500), boundary_markers, 40)
bc_endo = DirichletBC(P.sub(1), Constant(3300), boundary_markers, 30)
bcs = [bc_epi, bc_endo]
# Define variational problem
# Bilinear form: per-compartment diffusion plus pairwise exchange terms
# between neighbouring compartments i and i+1.
p = TrialFunction(P)
q = TestFunction(P)
a = sum([K[i] * inner(grad(p[i]), grad(q[i])) for i in range(N)])*dx +\
    sum([beta[i][i+1]*(p[i]-p[i+1])*q[i] for i in range(N-1)])*dx +\
    sum([beta[i+1][i]*(p[i+1]-p[i])*q[i+1] for i in range(N-1)])*dx
L = sum([f[i]*q[i] for i in range(N)])*dx
#Define storage file
p0_file = XDMFFile("teste_LV3/p0.xdmf")
p1_file = XDMFFile("teste_LV3/p1.xdmf")
p2_file = XDMFFile("teste_LV3/p2.xdmf")
# Solve
# Note: rebinds `p` from the TrialFunction above to the solution Function.
p = Function(P)
solve(a == L, p, bcs=bcs)
#Find solutions for each function space
# split(True) returns deep-copied sub-functions, one per compartment.
(p0,p1, p2) = p.split(True)
p0_file.write_checkpoint(p0, 'p0')
p1_file.write_checkpoint(p1, 'p1')
p2_file.write_checkpoint(p2, 'p2')
# Plot solution p0
plot(mesh, alpha=0.1, edgecolor='k', color='w')
fig = plot(p0)
plt.colorbar(fig)
ax = plt.gca()
# Fixed 3D camera angle shared by all three figures below.
ax.view_init(elev=-67, azim=-179)
ax.set_axis_off()
plt.savefig('teste_LV3/p0.png')
# Plot solution p1
plot(mesh, alpha=0.1, edgecolor='k', color='w')
fig = plot(p1)
plt.colorbar(fig)
ax = plt.gca()
ax.view_init(elev=-67, azim=-179)
ax.set_axis_off()
plt.savefig('teste_LV3/p1.png')
# Plot solution p2
plot(mesh, alpha=0.1, edgecolor='k', color='w')
fig = plot(p2)
plt.colorbar(fig)
ax = plt.gca()
ax.view_init(elev=-67, azim=-179)
ax.set_axis_off()
plt.savefig('teste_LV3/p2.png')
| [
6738,
277,
268,
873,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
17143,
7307,
14692,
687,
62,
5589,
5329,
1,
7131,
1,
421,
41909,
1300,
62,
16863,
8973,
796,
... | 2.143357 | 1,430 |
# Python stubs generated by omniidl from ..\..\..\..\..\idl\COS\CosNotification.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
#
# Start of module "CosNotification"
#
__name__ = "CosNotification"
_0_CosNotification = omniORB.openModule("CosNotification", r"..\..\..\..\..\idl\COS\CosNotification.idl")
_0_CosNotification__POA = omniORB.openModule("CosNotification__POA", r"..\..\..\..\..\idl\COS\CosNotification.idl")
# typedef ... Istring
_0_CosNotification.Istring = Istring
_0_CosNotification._d_Istring = (omniORB.tcInternal.tv_string,0)
_0_CosNotification._ad_Istring = (omniORB.tcInternal.tv_alias, Istring._NP_RepositoryId, "Istring", (omniORB.tcInternal.tv_string,0))
_0_CosNotification._tc_Istring = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_Istring)
omniORB.registerType(Istring._NP_RepositoryId, _0_CosNotification._ad_Istring, _0_CosNotification._tc_Istring)
del Istring
# typedef ... PropertyName
_0_CosNotification.PropertyName = PropertyName
_0_CosNotification._d_PropertyName = omniORB.typeMapping["IDL:omg.org/CosNotification/Istring:1.0"]
_0_CosNotification._ad_PropertyName = (omniORB.tcInternal.tv_alias, PropertyName._NP_RepositoryId, "PropertyName", omniORB.typeCodeMapping["IDL:omg.org/CosNotification/Istring:1.0"]._d)
_0_CosNotification._tc_PropertyName = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_PropertyName)
omniORB.registerType(PropertyName._NP_RepositoryId, _0_CosNotification._ad_PropertyName, _0_CosNotification._tc_PropertyName)
del PropertyName
# typedef ... PropertyValue
_0_CosNotification.PropertyValue = PropertyValue
_0_CosNotification._d_PropertyValue = omniORB.tcInternal.tv_any
_0_CosNotification._ad_PropertyValue = (omniORB.tcInternal.tv_alias, PropertyValue._NP_RepositoryId, "PropertyValue", omniORB.tcInternal.tv_any)
_0_CosNotification._tc_PropertyValue = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_PropertyValue)
omniORB.registerType(PropertyValue._NP_RepositoryId, _0_CosNotification._ad_PropertyValue, _0_CosNotification._tc_PropertyValue)
del PropertyValue
# struct Property
_0_CosNotification.Property = omniORB.newEmptyClass()
_0_CosNotification.Property = Property
_0_CosNotification._d_Property = (omniORB.tcInternal.tv_struct, Property, Property._NP_RepositoryId, "Property", "name", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyName:1.0"], "value", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyValue:1.0"])
_0_CosNotification._tc_Property = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_Property)
omniORB.registerType(Property._NP_RepositoryId, _0_CosNotification._d_Property, _0_CosNotification._tc_Property)
del Property
# typedef ... PropertySeq
_0_CosNotification.PropertySeq = PropertySeq
_0_CosNotification._d_PropertySeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/Property:1.0"], 0)
_0_CosNotification._ad_PropertySeq = (omniORB.tcInternal.tv_alias, PropertySeq._NP_RepositoryId, "PropertySeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/Property:1.0"], 0))
_0_CosNotification._tc_PropertySeq = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_PropertySeq)
omniORB.registerType(PropertySeq._NP_RepositoryId, _0_CosNotification._ad_PropertySeq, _0_CosNotification._tc_PropertySeq)
del PropertySeq
# typedef ... OptionalHeaderFields
_0_CosNotification.OptionalHeaderFields = OptionalHeaderFields
_0_CosNotification._d_OptionalHeaderFields = omniORB.typeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]
_0_CosNotification._ad_OptionalHeaderFields = (omniORB.tcInternal.tv_alias, OptionalHeaderFields._NP_RepositoryId, "OptionalHeaderFields", omniORB.typeCodeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]._d)
_0_CosNotification._tc_OptionalHeaderFields = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_OptionalHeaderFields)
omniORB.registerType(OptionalHeaderFields._NP_RepositoryId, _0_CosNotification._ad_OptionalHeaderFields, _0_CosNotification._tc_OptionalHeaderFields)
del OptionalHeaderFields
# typedef ... FilterableEventBody
_0_CosNotification.FilterableEventBody = FilterableEventBody
_0_CosNotification._d_FilterableEventBody = omniORB.typeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]
_0_CosNotification._ad_FilterableEventBody = (omniORB.tcInternal.tv_alias, FilterableEventBody._NP_RepositoryId, "FilterableEventBody", omniORB.typeCodeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]._d)
_0_CosNotification._tc_FilterableEventBody = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_FilterableEventBody)
omniORB.registerType(FilterableEventBody._NP_RepositoryId, _0_CosNotification._ad_FilterableEventBody, _0_CosNotification._tc_FilterableEventBody)
del FilterableEventBody
# typedef ... QoSProperties
_0_CosNotification.QoSProperties = QoSProperties
_0_CosNotification._d_QoSProperties = omniORB.typeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]
_0_CosNotification._ad_QoSProperties = (omniORB.tcInternal.tv_alias, QoSProperties._NP_RepositoryId, "QoSProperties", omniORB.typeCodeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]._d)
_0_CosNotification._tc_QoSProperties = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_QoSProperties)
omniORB.registerType(QoSProperties._NP_RepositoryId, _0_CosNotification._ad_QoSProperties, _0_CosNotification._tc_QoSProperties)
del QoSProperties
# typedef ... AdminProperties
_0_CosNotification.AdminProperties = AdminProperties
_0_CosNotification._d_AdminProperties = omniORB.typeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]
_0_CosNotification._ad_AdminProperties = (omniORB.tcInternal.tv_alias, AdminProperties._NP_RepositoryId, "AdminProperties", omniORB.typeCodeMapping["IDL:omg.org/CosNotification/PropertySeq:1.0"]._d)
_0_CosNotification._tc_AdminProperties = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_AdminProperties)
omniORB.registerType(AdminProperties._NP_RepositoryId, _0_CosNotification._ad_AdminProperties, _0_CosNotification._tc_AdminProperties)
del AdminProperties
# struct EventType
_0_CosNotification.EventType = omniORB.newEmptyClass()
_0_CosNotification.EventType = EventType
_0_CosNotification._d_EventType = (omniORB.tcInternal.tv_struct, EventType, EventType._NP_RepositoryId, "EventType", "domain_name", (omniORB.tcInternal.tv_string,0), "type_name", (omniORB.tcInternal.tv_string,0))
_0_CosNotification._tc_EventType = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_EventType)
omniORB.registerType(EventType._NP_RepositoryId, _0_CosNotification._d_EventType, _0_CosNotification._tc_EventType)
del EventType
# typedef ... EventTypeSeq
_0_CosNotification.EventTypeSeq = EventTypeSeq
_0_CosNotification._d_EventTypeSeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/EventType:1.0"], 0)
_0_CosNotification._ad_EventTypeSeq = (omniORB.tcInternal.tv_alias, EventTypeSeq._NP_RepositoryId, "EventTypeSeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/EventType:1.0"], 0))
_0_CosNotification._tc_EventTypeSeq = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_EventTypeSeq)
omniORB.registerType(EventTypeSeq._NP_RepositoryId, _0_CosNotification._ad_EventTypeSeq, _0_CosNotification._tc_EventTypeSeq)
del EventTypeSeq
# struct PropertyRange
_0_CosNotification.PropertyRange = omniORB.newEmptyClass()
_0_CosNotification.PropertyRange = PropertyRange
_0_CosNotification._d_PropertyRange = (omniORB.tcInternal.tv_struct, PropertyRange, PropertyRange._NP_RepositoryId, "PropertyRange", "low_val", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyValue:1.0"], "high_val", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyValue:1.0"])
_0_CosNotification._tc_PropertyRange = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_PropertyRange)
omniORB.registerType(PropertyRange._NP_RepositoryId, _0_CosNotification._d_PropertyRange, _0_CosNotification._tc_PropertyRange)
del PropertyRange
# struct NamedPropertyRange
_0_CosNotification.NamedPropertyRange = omniORB.newEmptyClass()
_0_CosNotification.NamedPropertyRange = NamedPropertyRange
_0_CosNotification._d_NamedPropertyRange = (omniORB.tcInternal.tv_struct, NamedPropertyRange, NamedPropertyRange._NP_RepositoryId, "NamedPropertyRange", "name", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyName:1.0"], "range", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyRange:1.0"])
_0_CosNotification._tc_NamedPropertyRange = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_NamedPropertyRange)
omniORB.registerType(NamedPropertyRange._NP_RepositoryId, _0_CosNotification._d_NamedPropertyRange, _0_CosNotification._tc_NamedPropertyRange)
del NamedPropertyRange
# typedef ... NamedPropertyRangeSeq
_0_CosNotification.NamedPropertyRangeSeq = NamedPropertyRangeSeq
_0_CosNotification._d_NamedPropertyRangeSeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/NamedPropertyRange:1.0"], 0)
_0_CosNotification._ad_NamedPropertyRangeSeq = (omniORB.tcInternal.tv_alias, NamedPropertyRangeSeq._NP_RepositoryId, "NamedPropertyRangeSeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/NamedPropertyRange:1.0"], 0))
_0_CosNotification._tc_NamedPropertyRangeSeq = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_NamedPropertyRangeSeq)
omniORB.registerType(NamedPropertyRangeSeq._NP_RepositoryId, _0_CosNotification._ad_NamedPropertyRangeSeq, _0_CosNotification._tc_NamedPropertyRangeSeq)
del NamedPropertyRangeSeq
# enum QoSError_code
_0_CosNotification.UNSUPPORTED_PROPERTY = omniORB.EnumItem("UNSUPPORTED_PROPERTY", 0)
_0_CosNotification.UNAVAILABLE_PROPERTY = omniORB.EnumItem("UNAVAILABLE_PROPERTY", 1)
_0_CosNotification.UNSUPPORTED_VALUE = omniORB.EnumItem("UNSUPPORTED_VALUE", 2)
_0_CosNotification.UNAVAILABLE_VALUE = omniORB.EnumItem("UNAVAILABLE_VALUE", 3)
_0_CosNotification.BAD_PROPERTY = omniORB.EnumItem("BAD_PROPERTY", 4)
_0_CosNotification.BAD_TYPE = omniORB.EnumItem("BAD_TYPE", 5)
_0_CosNotification.BAD_VALUE = omniORB.EnumItem("BAD_VALUE", 6)
_0_CosNotification.QoSError_code = omniORB.Enum("IDL:omg.org/CosNotification/QoSError_code:1.0", (_0_CosNotification.UNSUPPORTED_PROPERTY, _0_CosNotification.UNAVAILABLE_PROPERTY, _0_CosNotification.UNSUPPORTED_VALUE, _0_CosNotification.UNAVAILABLE_VALUE, _0_CosNotification.BAD_PROPERTY, _0_CosNotification.BAD_TYPE, _0_CosNotification.BAD_VALUE,))
_0_CosNotification._d_QoSError_code = (omniORB.tcInternal.tv_enum, _0_CosNotification.QoSError_code._NP_RepositoryId, "QoSError_code", _0_CosNotification.QoSError_code._items)
_0_CosNotification._tc_QoSError_code = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_QoSError_code)
omniORB.registerType(_0_CosNotification.QoSError_code._NP_RepositoryId, _0_CosNotification._d_QoSError_code, _0_CosNotification._tc_QoSError_code)
# struct PropertyError
_0_CosNotification.PropertyError = omniORB.newEmptyClass()
_0_CosNotification.PropertyError = PropertyError
_0_CosNotification._d_PropertyError = (omniORB.tcInternal.tv_struct, PropertyError, PropertyError._NP_RepositoryId, "PropertyError", "code", omniORB.typeMapping["IDL:omg.org/CosNotification/QoSError_code:1.0"], "name", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyName:1.0"], "available_range", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyRange:1.0"])
_0_CosNotification._tc_PropertyError = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_PropertyError)
omniORB.registerType(PropertyError._NP_RepositoryId, _0_CosNotification._d_PropertyError, _0_CosNotification._tc_PropertyError)
del PropertyError
# typedef ... PropertyErrorSeq
_0_CosNotification.PropertyErrorSeq = PropertyErrorSeq
_0_CosNotification._d_PropertyErrorSeq = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyError:1.0"], 0)
_0_CosNotification._ad_PropertyErrorSeq = (omniORB.tcInternal.tv_alias, PropertyErrorSeq._NP_RepositoryId, "PropertyErrorSeq", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyError:1.0"], 0))
_0_CosNotification._tc_PropertyErrorSeq = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_PropertyErrorSeq)
omniORB.registerType(PropertyErrorSeq._NP_RepositoryId, _0_CosNotification._ad_PropertyErrorSeq, _0_CosNotification._tc_PropertyErrorSeq)
del PropertyErrorSeq
# exception UnsupportedQoS
_0_CosNotification.UnsupportedQoS = omniORB.newEmptyClass()
_0_CosNotification.UnsupportedQoS = UnsupportedQoS
_0_CosNotification._d_UnsupportedQoS = (omniORB.tcInternal.tv_except, UnsupportedQoS, UnsupportedQoS._NP_RepositoryId, "UnsupportedQoS", "qos_err", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyErrorSeq:1.0"])
_0_CosNotification._tc_UnsupportedQoS = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_UnsupportedQoS)
omniORB.registerType(UnsupportedQoS._NP_RepositoryId, _0_CosNotification._d_UnsupportedQoS, _0_CosNotification._tc_UnsupportedQoS)
del UnsupportedQoS
# exception UnsupportedAdmin
_0_CosNotification.UnsupportedAdmin = omniORB.newEmptyClass()
_0_CosNotification.UnsupportedAdmin = UnsupportedAdmin
_0_CosNotification._d_UnsupportedAdmin = (omniORB.tcInternal.tv_except, UnsupportedAdmin, UnsupportedAdmin._NP_RepositoryId, "UnsupportedAdmin", "admin_err", omniORB.typeMapping["IDL:omg.org/CosNotification/PropertyErrorSeq:1.0"])
_0_CosNotification._tc_UnsupportedAdmin = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_UnsupportedAdmin)
omniORB.registerType(UnsupportedAdmin._NP_RepositoryId, _0_CosNotification._d_UnsupportedAdmin, _0_CosNotification._tc_UnsupportedAdmin)
del UnsupportedAdmin
# struct FixedEventHeader
_0_CosNotification.FixedEventHeader = omniORB.newEmptyClass()
_0_CosNotification.FixedEventHeader = FixedEventHeader
_0_CosNotification._d_FixedEventHeader = (omniORB.tcInternal.tv_struct, FixedEventHeader, FixedEventHeader._NP_RepositoryId, "FixedEventHeader", "event_type", omniORB.typeMapping["IDL:omg.org/CosNotification/EventType:1.0"], "event_name", (omniORB.tcInternal.tv_string,0))
_0_CosNotification._tc_FixedEventHeader = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_FixedEventHeader)
omniORB.registerType(FixedEventHeader._NP_RepositoryId, _0_CosNotification._d_FixedEventHeader, _0_CosNotification._tc_FixedEventHeader)
del FixedEventHeader
# struct EventHeader
_0_CosNotification.EventHeader = omniORB.newEmptyClass()
_0_CosNotification.EventHeader = EventHeader
_0_CosNotification._d_EventHeader = (omniORB.tcInternal.tv_struct, EventHeader, EventHeader._NP_RepositoryId, "EventHeader", "fixed_header", omniORB.typeMapping["IDL:omg.org/CosNotification/FixedEventHeader:1.0"], "variable_header", omniORB.typeMapping["IDL:omg.org/CosNotification/OptionalHeaderFields:1.0"])
_0_CosNotification._tc_EventHeader = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_EventHeader)
omniORB.registerType(EventHeader._NP_RepositoryId, _0_CosNotification._d_EventHeader, _0_CosNotification._tc_EventHeader)
del EventHeader
# struct StructuredEvent
_0_CosNotification.StructuredEvent = omniORB.newEmptyClass()
_0_CosNotification.StructuredEvent = StructuredEvent
_0_CosNotification._d_StructuredEvent = (omniORB.tcInternal.tv_struct, StructuredEvent, StructuredEvent._NP_RepositoryId, "StructuredEvent", "header", omniORB.typeMapping["IDL:omg.org/CosNotification/EventHeader:1.0"], "filterable_data", omniORB.typeMapping["IDL:omg.org/CosNotification/FilterableEventBody:1.0"], "remainder_of_body", omniORB.tcInternal.tv_any)
_0_CosNotification._tc_StructuredEvent = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_StructuredEvent)
omniORB.registerType(StructuredEvent._NP_RepositoryId, _0_CosNotification._d_StructuredEvent, _0_CosNotification._tc_StructuredEvent)
del StructuredEvent
# typedef ... EventBatch
_0_CosNotification.EventBatch = EventBatch
_0_CosNotification._d_EventBatch = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/StructuredEvent:1.0"], 0)
_0_CosNotification._ad_EventBatch = (omniORB.tcInternal.tv_alias, EventBatch._NP_RepositoryId, "EventBatch", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNotification/StructuredEvent:1.0"], 0))
_0_CosNotification._tc_EventBatch = omniORB.tcInternal.createTypeCode(_0_CosNotification._ad_EventBatch)
omniORB.registerType(EventBatch._NP_RepositoryId, _0_CosNotification._ad_EventBatch, _0_CosNotification._tc_EventBatch)
del EventBatch
_0_CosNotification.EventReliability = "EventReliability"
_0_CosNotification.BestEffort = 0
_0_CosNotification.Persistent = 1
_0_CosNotification.ConnectionReliability = "ConnectionReliability"
_0_CosNotification.Priority = "Priority"
_0_CosNotification.LowestPriority = -32767
_0_CosNotification.HighestPriority = 32767
_0_CosNotification.DefaultPriority = 0
_0_CosNotification.StartTime = "StartTime"
_0_CosNotification.StopTime = "StopTime"
_0_CosNotification.Timeout = "Timeout"
_0_CosNotification.OrderPolicy = "OrderPolicy"
_0_CosNotification.AnyOrder = 0
_0_CosNotification.FifoOrder = 1
_0_CosNotification.PriorityOrder = 2
_0_CosNotification.DeadlineOrder = 3
_0_CosNotification.DiscardPolicy = "DiscardPolicy"
_0_CosNotification.LifoOrder = 4
_0_CosNotification.MaximumBatchSize = "MaximumBatchSize"
_0_CosNotification.PacingInterval = "PacingInterval"
_0_CosNotification.StartTimeSupported = "StartTimeSupported"
_0_CosNotification.StopTimeSupported = "StopTimeSupported"
_0_CosNotification.MaxEventsPerConsumer = "MaxEventsPerConsumer"
# interface QoSAdmin
_0_CosNotification._d_QoSAdmin = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNotification/QoSAdmin:1.0", "QoSAdmin")
omniORB.typeMapping["IDL:omg.org/CosNotification/QoSAdmin:1.0"] = _0_CosNotification._d_QoSAdmin
_0_CosNotification.QoSAdmin = omniORB.newEmptyClass()
_0_CosNotification.QoSAdmin = QoSAdmin
_0_CosNotification._tc_QoSAdmin = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_QoSAdmin)
omniORB.registerType(QoSAdmin._NP_RepositoryId, _0_CosNotification._d_QoSAdmin, _0_CosNotification._tc_QoSAdmin)
# QoSAdmin operations and attributes
QoSAdmin._d_get_qos = ((), (omniORB.typeMapping["IDL:omg.org/CosNotification/QoSProperties:1.0"], ), None)
QoSAdmin._d_set_qos = ((omniORB.typeMapping["IDL:omg.org/CosNotification/QoSProperties:1.0"], ), (), {_0_CosNotification.UnsupportedQoS._NP_RepositoryId: _0_CosNotification._d_UnsupportedQoS})
QoSAdmin._d_validate_qos = ((omniORB.typeMapping["IDL:omg.org/CosNotification/QoSProperties:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosNotification/NamedPropertyRangeSeq:1.0"], ), {_0_CosNotification.UnsupportedQoS._NP_RepositoryId: _0_CosNotification._d_UnsupportedQoS})
# QoSAdmin object reference
omniORB.registerObjref(QoSAdmin._NP_RepositoryId, _objref_QoSAdmin)
_0_CosNotification._objref_QoSAdmin = _objref_QoSAdmin
del QoSAdmin, _objref_QoSAdmin
# QoSAdmin skeleton
__name__ = "CosNotification__POA"
QoSAdmin._omni_skeleton = QoSAdmin
_0_CosNotification__POA.QoSAdmin = QoSAdmin
omniORB.registerSkeleton(QoSAdmin._NP_RepositoryId, QoSAdmin)
del QoSAdmin
__name__ = "CosNotification"
_0_CosNotification.MaxQueueLength = "MaxQueueLength"
_0_CosNotification.MaxConsumers = "MaxConsumers"
_0_CosNotification.MaxSuppliers = "MaxSuppliers"
_0_CosNotification.RejectNewEvents = "RejectNewEvents"
# interface AdminPropertiesAdmin
_0_CosNotification._d_AdminPropertiesAdmin = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNotification/AdminPropertiesAdmin:1.0", "AdminPropertiesAdmin")
omniORB.typeMapping["IDL:omg.org/CosNotification/AdminPropertiesAdmin:1.0"] = _0_CosNotification._d_AdminPropertiesAdmin
_0_CosNotification.AdminPropertiesAdmin = omniORB.newEmptyClass()
_0_CosNotification.AdminPropertiesAdmin = AdminPropertiesAdmin
_0_CosNotification._tc_AdminPropertiesAdmin = omniORB.tcInternal.createTypeCode(_0_CosNotification._d_AdminPropertiesAdmin)
omniORB.registerType(AdminPropertiesAdmin._NP_RepositoryId, _0_CosNotification._d_AdminPropertiesAdmin, _0_CosNotification._tc_AdminPropertiesAdmin)
# AdminPropertiesAdmin operations and attributes
AdminPropertiesAdmin._d_get_admin = ((), (omniORB.typeMapping["IDL:omg.org/CosNotification/AdminProperties:1.0"], ), None)
AdminPropertiesAdmin._d_set_admin = ((omniORB.typeMapping["IDL:omg.org/CosNotification/AdminProperties:1.0"], ), (), {_0_CosNotification.UnsupportedAdmin._NP_RepositoryId: _0_CosNotification._d_UnsupportedAdmin})
# AdminPropertiesAdmin object reference
omniORB.registerObjref(AdminPropertiesAdmin._NP_RepositoryId, _objref_AdminPropertiesAdmin)
_0_CosNotification._objref_AdminPropertiesAdmin = _objref_AdminPropertiesAdmin
del AdminPropertiesAdmin, _objref_AdminPropertiesAdmin
# AdminPropertiesAdmin skeleton
__name__ = "CosNotification__POA"
AdminPropertiesAdmin._omni_skeleton = AdminPropertiesAdmin
_0_CosNotification__POA.AdminPropertiesAdmin = AdminPropertiesAdmin
omniORB.registerSkeleton(AdminPropertiesAdmin._NP_RepositoryId, AdminPropertiesAdmin)
del AdminPropertiesAdmin
__name__ = "CosNotification"
#
# End of module "CosNotification"
#
__name__ = "CosNotification_idl"
_exported_modules = ( "CosNotification", )
# The end.
| [
2,
11361,
17071,
82,
7560,
416,
39030,
8461,
312,
75,
422,
11485,
59,
492,
59,
492,
59,
492,
59,
492,
59,
312,
75,
59,
34,
2640,
59,
36734,
3673,
2649,
13,
312,
75,
201,
198,
2,
8410,
5626,
48483,
12680,
45811,
0,
201,
198,
201,... | 2.696475 | 8,029 |
import threading
import logging
logger = logging.getLogger('opentsdb-py')
| [
11748,
4704,
278,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
404,
658,
9945,
12,
9078,
11537,
628
] | 3.166667 | 24 |
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.views.generic import TemplateView, RedirectView
from devroom.models import Devroom, DevroomParticipants
from exhibitor.models import Exhibitor, ExhibitorParticipants
from sabot.multiYear import getActiveYear
from sponsor.models import Sponsoring
| [
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
11,
2297,
1060,
7680,
198,
198,
6738,
1614,
... | 3.862069 | 87 |
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
from typing import Optional
import curlify
from requests.exceptions import (
ChunkedEncodingError,
ConnectionError,
ConnectTimeout,
ContentDecodingError,
HTTPError,
ProxyError,
ReadTimeout,
RequestException,
Timeout,
)
__all__ = [
"ChunkedEncodingError",
"ConnectionError",
"HTTPError",
"ProxyError",
"ReadTimeout",
"ConnectTimeout",
"ContentDecodingError",
"Timeout",
"BKAPIError",
"UserNotAuthenticated",
"ResponseError",
"APIGatewayResponseError",
"HTTPResponseError",
"JSONResponseError",
]
class UserNotAuthenticated(BKAPIError):
"""User not authenticated"""
class ResponseError(RequestException, BKAPIError):
"""Response has error"""
@property
@property
@property
@property
class APIGatewayResponseError(ResponseError):
"""Request apigateway error, and the response is generated by the apigateway"""
class HTTPResponseError(ResponseError):
"""HTTP request status code error"""
class JSONResponseError(ResponseError):
"""The response content is not a valid json"""
class EndpointNotSetError(BKAPIError):
"""Endpoint is not set"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1635,
9368,
1087,
14573,
15708,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
5525,
241,
251,
165,
110,
116,
162,
247,
118,
12859,
239,
12,
164,
... | 3.14845 | 613 |
from selenium import webdriver
from selenium.webdriver.common.by import By
import random
import time
# Caminho para webDriver
PATH = r'C:\Users\Lucas\Documents\Meus-projetos\Jogo-da-Forca\chromedriver.exe'
print('você pode errar seis vezes a letra.')
print('Escolha o tipo de palavra: ')
print('\n1 - Filmes\n')
print('\n2 - Musicas\n')
print('\n3 - Jogos\n')
tipo_de_palavra = int(input('Escreva a opção: '))
palavra_oculta=[]
letras_corretas = []
if tipo_de_palavra:
navegador = chrome()
if tipo_de_palavra == 1:
palavras = buscarFilmes(navegador)
elif tipo_de_palavra == 2:
palavras = buscarMusicas(navegador)
elif tipo_de_palavra == 3:
palavras = buscarJogos(navegador)
else:
print('caractere invalido')
time.sleep(10)
palavra = palavras[random.randint(0, len(palavras)-1)]
print(palavra)
for x in range(len(palavra)):
if palavra[x] != ' ':
palavra_oculta.append("*")
else:
palavra_oculta.append(" ")
letras_corretas.append(palavra[x])
print(palavra_oculta)
print(letras_corretas)
letrasTentadas = []
letras_mostradas = []
tentativas = 6
while True:
print('palavra: ', end=(''))
for x in range(len(palavra_oculta)):
print(palavra_oculta[x], end=(''))
print()
print('Letras tendadas: ',end='')
for y in range(len(letrasTentadas)):
print(letrasTentadas[y], end=('| '))
print()
letra = input('Digite uma letra: ').lower()
if letra in letrasTentadas:
print('Essa letra já foi tentada.')
else:
letrasTentadas.append(letra)
print()
if letra in letras_corretas:
print('A letra ',letra,' está na palavra.')
letras_mostradas.append(letra)
trocarCaractere()
else:
print('A letra ',letra,' não está na palavra.')
tentativas -= 1
print('você ainda tem', tentativas, 'tentativas' )
if tentativas == 0:
print('A palavra era',palavra)
print('Você perdeu')
break
elif palavra_oculta == letras_corretas:
print('A palavra era',palavra)
print('Você venceu')
break
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
11748,
4738,
198,
11748,
640,
198,
198,
2,
327,
5669,
8873,
31215,
3992,
32103,
198,
34219,
796,
374,
6,
3... | 2.010036 | 1,096 |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from scheduler.utils.validation import Validation
from panopto_client.remote_recorder import RemoteRecorderManagement
import re
| [
2,
15069,
33448,
33436,
12,
2043,
11,
2059,
286,
2669,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
6038,
18173,
13,
26791,
13,
12102,
341,
1330,
3254,
24765,
198,
6738,
3425,
404,
1462,
... | 3.65 | 60 |
# -*- coding: utf-8 -*-
"""
Whiley Test
~~~~~~~~~~~
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from pygments.lexers import WhileyLexer
from pygments.token import Token
@pytest.fixture(scope='module')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
2893,
88,
6208,
198,
220,
220,
220,
220,
15116,
4907,
93,
628,
220,
220,
220,
1058,
22163,
4766,
25,
15069,
4793,
12,
1238,
2481,
416,
262,
... | 2.738739 | 111 |
import warnings
import param
import numpy as np
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
import datashader as ds
from PIL import Image, ImageDraw
from holoviews.core.operation import Operation
from holoviews.core.util import pd
from holoviews.element.util import split_path
from holoviews.operation.datashader import ResamplingOperation, rasterize, regrid
from holoviews.operation import contours
from holoviews.streams import Stream, FreehandDraw
class rasterize_polygon(ResamplingOperation):
"""
Rasterizes Polygons elements to a boolean mask using PIL
"""
class extract_foreground(Operation):
"""
Uses Grabcut algorithm to extract the foreground from an image given
path or polygon types.
"""
foreground = param.ClassSelector(class_=hv.Path)
background = param.ClassSelector(class_=hv.Path)
iterations = param.Integer(default=5, bounds=(0, 20), doc="""
Number of iterations to run the GrabCut algorithm for.""")
class GrabCutDashboard(Stream):
"""
Defines a Dashboard for extracting contours from an Image.
"""
crs = param.ClassSelector(default=ccrs.PlateCarree(), class_=ccrs.Projection,
precedence=-1, doc="""
Projection the inputs and output paths are defined in.""")
path_type = param.ClassSelector(default=gv.Path, class_=hv.Path,
precedence=-1, is_instance=False, doc="""
The element type to draw into.""")
update_contour = param.Action(default=lambda self: self.event(), precedence=1,
doc="""Button triggering GrabCut.""")
filter_contour = param.Action(default=lambda self: self.filter_stream.event(filter=True), precedence=1,
doc="""Button triggering GrabCut.""")
width = param.Integer(default=500, precedence=-1, doc="""
Width of the plot""")
height = param.Integer(default=500, precedence=-1, doc="""
Height of the plot""")
downsample = param.Magnitude(default=1., doc="""
Amount to downsample image by before applying grabcut.""")
iterations = param.Integer(default=5, bounds=(0, 20), doc="""
Number of iterations to run the GrabCut algorithm for.""")
minimum_size = param.Integer(default=10)
| [
11748,
14601,
198,
198,
11748,
5772,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
6039,
709,
769,
82,
355,
289,
85,
198,
11748,
4903,
709,
769,
82,
355,
308,
85,
198,
11748,
6383,
11081,
13,
66,
3808,
355,
36624,
3808,
198,
11748,
... | 2.73857 | 853 |
## Wrangle CCdata_merged and add previous grocery trip, spend and previous restaurant trip, spend or each transaction
# Change amount to float datatype and remove , if any, in the amount value
print(CCdata['Amount'].dtype)
CCdata['Amount'] = CCdata['Amount'].str.replace('\,','') # removing all , in the amount values
CCdata['Amount'] = pd.to_numeric(CCdata['Amount'])
#a. Number of days since last grocery shopping trip and b. The amount of last grocery shopping
# filter rows with category groceries, shopping walmart and store walmart
# extract the date and amount as series from the above filtered df
# offset the value by 1 row join with the CC merged data to get the previous grocery trip date and amount
CCdata_merged['Description'] = CCdata_merged['Description'].str.lower()
CCdata_merged['Grocery Flag'] = 0
CCdata_merged.loc[CCdata_merged['Category'].isin(["Groceries","Shopping-Walmart"]),'Grocery Flag'] = 1
CCdata_merged.loc[CCdata_merged['Description'].str.contains(r'wal(?=[\*\-mart]+)',regex=True),'Grocery Flag'] = 1
groceryData = CCdata_merged[(CCdata_merged['Grocery Flag']==1) & (CCdata_merged['Amount']>0)]
groceryData = groceryData[['Amount','Grocery Flag']]
groceryData['Amount'] = pd.to_numeric(groceryData['Amount'])
# Treating duplicates - it's possible to go to multiple grocery stores in a day
print(groceryData.shape)
print(len(pd.unique(groceryData.index))) # Unique dates in groceryData < no. of rows in groceryData
# Groupby date to create unique values of date and grocery spend - for each date, find sum of grocery spend and grocery flag
# (to flag multiple grocery store visits in a day).
groceryData = groceryData.groupby(['Date']).sum().reset_index()
# Create month, year and day columns in groceryData and set Date as index
groceryData['Month'] = groceryData['Date'].dt.strftime("%b")
groceryData['Year'] = groceryData['Date'].dt.year.astype(int)
groceryData['Day'] = groceryData['Date'].dt.day.astype(int)
groceryData = groceryData.set_index('Date')
print(groceryData.shape)
print(groceryData.dtypes)
print(groceryData.head(10))
# Creating the columns for last grocery trip date and amount
grocery_temp = pd.DataFrame()
grocery_temp['Date'] = pd.date_range(start=CCdata.index.min(), end=CCdata.index.max(), freq='D')
grocery_temp = grocery_temp.set_index('Date')
print(grocery_temp.head(10))
grocery_temp = grocery_temp.join(groceryData, how='left')
print(grocery_temp.shape)
print(grocery_temp.head(10))
grocery_temp = grocery_temp.shift(1)
grocery_temp = grocery_temp.ffill()
print(grocery_temp.head(10))
#Combine day, month and year to get the last grocery trip date
grocery_temp = grocery_temp.reset_index()
print(grocery_temp.shape)
grocery_temp['Year'] = grocery_temp['Year'].fillna(grocery_temp['Date'].dt.year).astype(int)
# grocery_temp['Month'] = grocery_temp['Month'].fillna(grocery_temp['Date'].dt.month)
# dt.month doesn't work coz it gives the month number ex.2, not the month name
grocery_temp['Month'] = grocery_temp['Month'].fillna(grocery_temp['Date'].dt.strftime("%b"))
grocery_temp['Day'] = grocery_temp['Day'].fillna(grocery_temp['Date'].dt.day).astype(int)
grocery_temp['Amount'] = grocery_temp['Amount'].fillna(0.0)
grocery_temp['Grocery Flag'] = grocery_temp['Grocery Flag'].fillna(0).astype(int)
grocery_temp = grocery_temp.rename({'Amount':'PrevGroceryAmount',
'Grocery Flag':'PrevGroceryFlag'}, axis=1)
grocery_temp = grocery_temp.set_index('Date')
print(grocery_temp.head(10))
cols = ['Day','Month','Year']
grocery_temp['PrevGroceryDate'] = grocery_temp[cols].astype(str).apply('-'.join, axis=1)
grocery_temp['PrevGroceryDate'] = pd.to_datetime(grocery_temp['PrevGroceryDate'], format="%d-%b-%Y")
print(grocery_temp.dtypes)
print(grocery_temp.head(10))
## Join prev grocery amount and prev grocery date onto CC data_merged
grocery_temp = grocery_temp.drop(['Month','Year','Day'],axis=1)
print(CCdata_merged.shape)
print(CCdata_merged.columns)
print(grocery_temp.shape)
print(grocery_temp.columns)
CCdata_merged = CCdata_merged.join(grocery_temp, how='left')
# dropping duplicates in CCdata_merged
CCdata_merged.drop_duplicates(subset=None,keep='first',inplace=True)
print(CCdata_merged.shape)
print(CCdata_merged.columns)
# getting value counts of year, month and date
print(CCdata_merged.groupby('Year')['Month'].unique())
print(CCdata_merged['Day'].unique())
print(CCdata_merged['Grocery Flag'].unique())
## c. Number of days since you last ordered from a restaurant and d. The amount of last restaurant order
# Filter rows of restaurant transactions
CCdata_merged['Restaurant Flag']=0
CCdata_merged.loc[CCdata_merged['Category'].isin(["Restaurant","Food - Take out"]), 'Restaurant Flag'] = 1
restaurantData = CCdata_merged[CCdata_merged['Restaurant Flag']==1]
restaurantData = restaurantData[['Amount','Restaurant Flag']]
restaurantData['Amount'] = pd.to_numeric(restaurantData['Amount'])
print(restaurantData.shape)
print(restaurantData.head(10))
print(len(pd.unique(restaurantData.index))) ## If unique length of index < no. of rows of restaurantData, groupby date and sum of restaurant spend
# Unique dates in restaurantData < no. of rows in restaurantData
# Groupby date to create unique values of date and restaurant spend - for each date, find sum of restaurant spend,res flag
restaurantData = restaurantData.groupby(['Date']).sum().reset_index()
# Create month, year and day columns in restaurantData and set Date as index
restaurantData['Month'] = restaurantData['Date'].dt.strftime("%b")
restaurantData['Year'] = restaurantData['Date'].dt.year.astype(int)
restaurantData['Day'] = restaurantData['Date'].dt.day.astype(int)
restaurantData = restaurantData.set_index('Date')
print(restaurantData.shape)
# Creating the columns for last restaurant trip date and amount
restaurant_temp = pd.DataFrame()
restaurant_temp['Date'] = pd.date_range(start=CCdata.index.min(), end=CCdata.index.max(), freq='D')
restaurant_temp = restaurant_temp.set_index('Date')
print(restaurant_temp.head(10))
restaurant_temp = restaurant_temp.join(restaurantData, how='left')
print(restaurant_temp.shape)
print(restaurant_temp.head(10))
restaurant_temp = restaurant_temp.shift(1)
restaurant_temp = restaurant_temp.ffill()
print(restaurant_temp.head(10))
#Combine day, month and year to get the last restaurant trip date
restaurant_temp = restaurant_temp.reset_index()
print(restaurant_temp.shape)
restaurant_temp['Year'] = restaurant_temp['Year'].fillna(restaurant_temp['Date'].dt.year).astype(int)
# restaurant_temp['Month'] = restaurant_temp['Month'].fillna(restaurant_temp['Date'].dt.month)
# dt.month doesn't work coz it gives the month number ex.2, not the month name
restaurant_temp['Month'] = restaurant_temp['Month'].fillna(restaurant_temp['Date'].dt.strftime("%b"))
restaurant_temp['Day'] = restaurant_temp['Day'].fillna(restaurant_temp['Date'].dt.day).astype(int)
restaurant_temp['Amount'] = restaurant_temp['Amount'].fillna(0.0)
restaurant_temp['Restaurant Flag'] = restaurant_temp['Restaurant Flag'].fillna(0).astype(int)
restaurant_temp = restaurant_temp.rename({'Amount':'PrevRestaurantAmount',
'Restaurant Flag':'PrevRestaurantFlag'}, axis=1)
restaurant_temp = restaurant_temp.set_index('Date')
print(restaurant_temp.head(10))
cols = ['Day','Month','Year']
restaurant_temp['PrevRestaurantDate'] = restaurant_temp[cols].astype(str).apply('-'.join, axis=1)
print(restaurant_temp.head(20))
restaurant_temp['PrevRestaurantDate'] = pd.to_datetime(restaurant_temp['PrevRestaurantDate'], format="%d-%b-%Y")
print(restaurant_temp.dtypes)
## Join prev restaurant amount and prev restaurant date onto CC data_merged
restaurant_temp = restaurant_temp.drop(['Month','Year','Day'],axis=1)
print(CCdata_merged.shape)
print(restaurant_temp.shape)
CCdata_merged = CCdata_merged.join(restaurant_temp, how='left')
# dropping duplicates in CCdata_merged
CCdata_merged.drop_duplicates(subset=None,keep='first',inplace=True)
print(CCdata_merged.shape)
print(CCdata_merged.columns)
print(CCdata_merged['Restaurant Flag'].unique())
| [
2235,
15634,
9248,
12624,
7890,
62,
647,
2004,
290,
751,
2180,
16918,
5296,
11,
4341,
290,
2180,
7072,
5296,
11,
4341,
393,
1123,
8611,
198,
198,
2,
9794,
2033,
284,
12178,
4818,
265,
2981,
290,
4781,
837,
611,
597,
11,
287,
262,
20... | 2.77868 | 2,955 |
"""Tokens for one-time use"""
# Built-in
from datetime import datetime, timedelta, timezone
from secrets import token_urlsafe
# Django
from django.contrib.auth import get_user_model
from django.db.models import CharField, DateTimeField, Index
# Personal
from jklib.django.db.fields import ActiveField, ForeignKeyCascade, RequiredField
from jklib.django.db.models import LifeCycleModel
from jklib.django.db.queries import get_object_or_none
# --------------------------------------------------------------------------------
# > Models
# --------------------------------------------------------------------------------
class SecurityToken(LifeCycleModel):
"""
Tokens are OTP linked to users, to allow for special actions like password reset
Only 1 active token per user/type
Token have a limited duration/lifespan
Can only be used once
The expected workflow of the model API is:
create_new_token --> Creates a new token for a user/type
fetch_token_instance --> Fetches the Token instance linked to your token value
consume_token --> Token won't be usable anymore
Other utilities for clean up and security are also available
"""
# ----------------------------------------
# Constants
# ----------------------------------------
MIN_DURATION = 300 # 5 minutes
MAX_DURATION = 604800 # 7 days
TYPE_MAX_LENGTH = 50
# ----------------------------------------
# Fields
# ----------------------------------------
user = RequiredField(
ForeignKeyCascade, get_user_model(), related_name="tokens", verbose_name="User"
)
type = RequiredField(CharField, max_length=TYPE_MAX_LENGTH, verbose_name="Type")
value = RequiredField(
CharField, unique=True, max_length=1000, verbose_name="Token value"
)
expired_at = RequiredField(DateTimeField, verbose_name="Expires at")
used_at = DateTimeField(null=True, blank=True, verbose_name="Used at")
is_active_token = ActiveField()
# ----------------------------------------
# Behavior (meta, str, save)
# ----------------------------------------
def __str__(self):
"""
:return: Returns the token value
:rtype: str
"""
return f"{self.value}"
# ----------------------------------------
# Properties
# ----------------------------------------
@property
def can_be_used(self):
"""
:return: Checks if the token is active, not used, and not expired
:rtype: bool
"""
return self.is_active_token and (not self.is_used) and (not self.is_expired)
@property
def is_expired(self):
"""
:return: Whether the token has expired
:rtype: bool
"""
now = datetime.now(timezone.utc)
return self.expired_at < now
@property
def is_used(self):
"""
:return: Whether the token has been used
:rtype: bool
"""
return self.used_at is not None
# ----------------------------------------
# Public API
# ----------------------------------------
def consume_token(self):
"""Deactivates the token and stores its used timestamp"""
self.used_at = datetime.now(timezone.utc)
self.deactivate_token()
@classmethod
def create_new_token(cls, user, token_type, token_duration):
"""
Creates a new token for the user/type, and deactivates the previous ones
:param User user: Instance from the User model
:param str token_type: Type of the token
:param int token_duration: Token lifespan in seconds
:return: The token instance and its value
:rtype: SecurityToken
"""
token_value = cls._generate_unique_token()
token_params = cls._get_valid_token_params(
user, token_value, token_type, token_duration
)
cls.deactivate_user_tokens(user, token_params["type"])
token_instance = cls.objects.create(**token_params)
return token_instance
def deactivate_token(self):
"""Marks a token as not being the active one anymore"""
self.is_active_token = False
self.save()
@classmethod
def deactivate_user_tokens(cls, user, token_type=None):
"""
Deactivates all tokens for a user. Can be narrowed down to a specific type.
:param User user: The user whose tokens must be deactivated
:param str token_type: Type of the token. Defaults to None
"""
tokens = cls.objects.filter(user=user, is_active_token=True)
if token_type is not None:
tokens = tokens.filter(type=token_type)
for token in tokens:
token.deactivate_token()
@classmethod
def fetch_token_instance(cls, token_value, token_type):
"""
Tries to fetch an ACTIVE Token instance using a token value and type
:param str token_value: Value of the token
:param str token_type: Type of the token
:return: The valid token instance or None
:rtype: Token or None
"""
token = get_object_or_none(cls, value=token_value, type=token_type)
if token is not None and token.can_be_used:
return token
else:
return None
# ----------------------------------------
# Cron jobs
# ----------------------------------------
@classmethod
def cleanup_expired_unused_tokens(cls):
"""Deletes all the tokens that are expired and haven't been used"""
now = datetime.now(timezone.utc)
expired_unused_tokens = cls.objects.filter(used_at=None, expired_at__lt=now)
expired_unused_tokens.delete()
# ----------------------------------------
# Private
# ----------------------------------------
@classmethod
def _generate_unique_token(cls):
"""
:return: The unique value to be used for creating a new token
:rtype: str
"""
while True:
token_value = token_urlsafe(50)
results = cls.objects.filter(value=token_value)
if len(results) == 0:
break
return token_value
@classmethod
def _get_valid_token_params(cls, user, token_value, token_type, token_duration):
"""
Validates (and replaces if necessary) the parameters for creating a new token
:param User user: Instance of the User model
:param str token_value: Value of the token, which should be unique
:param str token_type: Type of the token
:param int token_duration: Token lifespan
:return: Parameters to be used for creating a new token
:rtype: dict
"""
token_type = cls._validate_token_type(token_type)
token_duration = cls._validate_token_duration(token_duration)
expiration_date = datetime.now(timezone.utc) + timedelta(seconds=token_duration)
return {
"user": user,
"type": token_type,
"value": token_value,
"expired_at": expiration_date,
"used_at": None,
"is_active_token": True,
}
@classmethod
def _validate_token_duration(cls, value):
"""
Returns the initial duration is a valid integer, else raises an error
:param int value: Duration of the token in seconds
:raise TypeError: When the provided value is not an integer
:raise ValueError: When the provided value is out of bounds
:return: The initial value, if valid
:rtype: int
"""
if type(value) != int:
raise TypeError("Token duration must be an integer")
if value < cls.MIN_DURATION or value > cls.MAX_DURATION:
raise ValueError(
f"Token duration must be between {cls.MIN_DURATION} and {cls.MAX_DURATION} seconds"
)
return value
@classmethod
def _validate_token_type(cls, value):
"""
Returns the initial type if it is a non-empty string, else raises an error
:param str value: Type of the token
:raise TypeError: When the provided value is a string
:raise ValueError: When the provided value is empty
:return: The trimmed value, if valid
:rtype: str
"""
if type(value) != str:
raise TypeError("Token type must be a string")
value = value.strip()
if value == "":
raise ValueError("Token type cannot be empty")
return value
| [
37811,
22906,
329,
530,
12,
2435,
779,
37811,
198,
198,
2,
28477,
12,
259,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
11,
640,
11340,
198,
6738,
13141,
1330,
11241,
62,
6371,
21230,
198,
198,
2,
37770,
198,
6738,
42625... | 2.615737 | 3,279 |
# encoding:utf-8
"""
爬取快代理的所有的ip:port,并验证是否可用
"""
from selenium import webdriver
import urllib2
import time
for i in range(10):
url = "https://www.kuaidaili.com/free/inha/"
driver = webdriver.Chrome(r"C:\Users\19663\Desktop\chromedriver.exe")
url = url + str(i + 1) + "/"
driver.get(url)
time.sleep(1)
data = driver.find_elements_by_xpath("//tbody//tr")
http_list = []
for da in data:
ip = da.find_element_by_xpath("./td[1]").text
port = da.find_element_by_xpath("./td[2]").text
http = ip + ":" + port
http_list.append(http)
driver.close()
for http in http_list:
httpproxy = urllib2.ProxyHandler({"http": http})
opener = urllib2.build_opener(httpproxy)
request = urllib2.Request("https://www.baidu.com")
try:
response = opener.open(request, timeout=10)
print(http + u"可以使用")
except:
print(http + u"无效") | [
2,
21004,
25,
40477,
12,
23,
198,
37811,
198,
163,
230,
105,
20998,
244,
33232,
104,
47987,
49426,
228,
21410,
33699,
222,
17312,
231,
21410,
541,
25,
634,
11,
33176,
114,
165,
103,
234,
46237,
223,
42468,
28938,
99,
20998,
107,
18796... | 1.98125 | 480 |
# gemato: CLI routines
# vim:fileencoding=utf-8
# (c) 2017-2019 Michał Górny
# Licensed under the terms of 2-clause BSD license
from __future__ import print_function
import argparse
import datetime
import io
import logging
import multiprocessing
import os.path
import sys
import timeit
import gemato.exceptions
import gemato.find_top_level
import gemato.hash
import gemato.manifest
import gemato.openpgp
import gemato.profile
import gemato.recursiveloader
class GematoCommand(object):
"""
Base class for commands supported by gemato.
"""
@property
def name(self):
"""
Command name. Used on the command-line
"""
pass
@property
def help(self):
"""
Command description for --help.
"""
pass
def add_options(self, subp):
"""
Add options specific to the command to subparser @subp.
"""
subp.add_argument('--debug', action='store_true',
help='Enable debugging output')
def parse_args(self, args, argp):
"""
Process command-line arguments @args. @argp is the argparse
instance provided for error reporting.
"""
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
def __call__(self):
"""
Perform the command. Returns the exit status.
"""
pass
def cleanup(self):
"""
Perform any cleanups necessary. Called on program termination.
"""
pass
class BaseOpenPGPMixin(object):
"""
A base mixin that adds logic to load and use OpenPGP keys.
"""
class VerifyingOpenPGPMixin(BaseOpenPGPMixin):
"""
Verification-class OpenPGP mixin. Additionally refreshes keys.
"""
class BaseManifestLoaderMixin(object):
"""
Mixin for commands using RecursiveManifestLoader class.
"""
class BaseUpdateMixin(BaseManifestLoaderMixin, BaseOpenPGPMixin):
"""
A mixin that adds common bits for update-class commands.
"""
| [
2,
16840,
5549,
25,
43749,
31878,
198,
2,
43907,
25,
7753,
12685,
7656,
28,
40477,
12,
23,
198,
2,
357,
66,
8,
2177,
12,
23344,
38844,
41615,
402,
10205,
81,
3281,
198,
2,
49962,
739,
262,
2846,
286,
362,
12,
565,
682,
347,
10305,... | 2.564617 | 797 |
"""KafkaUtils"""
import logging
import kafka
from retrying import retry
logger = logging.getLogger()
class KafkaUtils:
    """Common kafka utils"""

    @staticmethod
    @retry(stop_max_attempt_number=8, wait_fixed=15000)
    def wait_for_topic_status(bootstrap_servers, topic_name,
                              exists=True) -> None:
        """Wait for topic to exist or to be deleted.

        Polls the cluster metadata; on a mismatch an AssertionError is
        raised, which the @retry decorator catches and retries (up to
        8 attempts, 15 s apart) before finally propagating.

        :param bootstrap_servers: kafka bootstrap servers to connect to
        :param topic_name: name of the topic to check
        :param exists: True to wait until the topic exists,
                       False to wait until it has been deleted
        :raises AssertionError: if the topic status never matches
        """
        client = kafka.KafkaClient(bootstrap_servers)
        logger.info("Check for topic %s status exists=%s client topics=%s",
                    topic_name, exists, str(client.topics))
        # Explicit raise instead of `assert`: asserts are stripped under
        # `python -O`, which would silently turn this wait into a no-op.
        # AssertionError is kept so @retry sees the same exception type.
        present = topic_name in client.topics
        if present != exists:
            raise AssertionError(
                "topic %r exists=%s, expected exists=%s"
                % (topic_name, present, exists))
| [
37811,
42,
1878,
4914,
18274,
4487,
37811,
198,
11748,
18931,
198,
198,
11748,
479,
1878,
4914,
198,
6738,
1005,
14992,
1330,
1005,
563,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
628,
198,
4871,
46906,
18274,
4487,
... | 2.250784 | 319 |
# %% [markdown]
# # Python Workshop: Basics I
#
# [If you are looking for the **Python, Pycharm & packages installation guide**, [it's here.](https://www.aiismath.com/pages/python_pycharm_installation/python_pycharm_installation/)]
#
# [](https://colab.research.google.com/github/YoniChechik/AI_is_Math/blob/master/c_01_intro_to_CV_and_Python/basic_python_tutorial.ipynb)
# <hr>
#
# Based on:
#
# this [git](https://github.com/zhiyzuo/python-tutorial) of Zhiya Zuo
#
# &
#
# tutorials from [tutorialspoint](https://www.tutorialspoint.com/python)
#
# <hr>
#
# ## Introduction
#
# Python is an interpreted, high-level, general-purpose programming language. Created by Guido van Rossum and first released in 1991, Python's design philosophy emphasizes code readability. The language construction and object-oriented approach aims to help programmers write clear, logical code for small and large-scale projects. [Wikipedia]
#
# - Python is Interpreted - Python is processed at runtime by the interpreter. You do not need to compile your program before executing it. This is similar to PERL, PHP and MATLAB.
#
# - Python is Interactive - You can actually sit at a Python prompt and interact with the interpreter directly to write your programs.
#
# - Python is Object-Oriented - Python supports Object-Oriented style or technique of programming that encapsulates code within objects.
#
# - Popular Language for Data Analysis - Most of the time, you will need external packages to assist data analyses.
#
# 
#
# ### PyCharm
#
# Pycharm is currently (2019) the most commonly used IDE (Integrated development environment) for programing in python.
#
# for complete tutorial on **installation** of Python + Pycharm + additional packages please refer to [this page](https://www.aiismath.com/pages/python_pycharm_installation/python_pycharm_installation/).
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/a/a1/PyCharm_Logo.svg" alt="pycharm logo" width="200"/>
#
# ### Jupyter notebook
#
# Jupyter is an easy way to merge code and explanations in a beautiful way.
#
# The easiest way to interact with such notebook (.ipynb) is with [google colab](https://colab.research.google.com). There you can run each cell independently or all cells combined through 'Runtime' section or the play button.
#
# the main disadvantage of google colab is that debugging there is problematic.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/3/38/Jupyter_logo.svg" alt="jupyter logo" width="200"/>
#
# ### Naming convention
#
# There are two commonly used styles in programming:
#
# 1. __camelCase__
# 2. __snake_case__ or __lower_case_with_underscore__
#
# Always make sure you use one convention consistently across one project.
#
# All variable (function and class) names must start with a letter or underscore (_). You can include numbers, but it can't be the first char.
# %%
myStringHere = 'my string' # valid
x = 3 # valid
x_3 = "xyz" # valid
# 3_x = "456" # invalid. Numbers cannot be in the first position.
# %% [markdown]
# ### Lines and indentation
# Python doesn't need braces to indicate blocks of code for class and function definitions or flow control.
# Blocks of code are denoted by line indentation (whitespace, conventionally 4 spaces), which is rigidly enforced.
# %%
if True:
    print("True")
else:
    print("False")
# %% [markdown]
# ### Indexing
#
# Indexing in python starts from 0 (like c, unlike Matlab). Accessing a range of a list/array is, by convention, `some_list[start_ind:end_ind_minus_one]`
#
# %%
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y = x[0] # get element 0
print("y = " + str(y))
y = x[2] # get element 2
print("y = " + str(y))
y = x[0:2] # get elements 0,1
print("y = " + str(y))
y = x[:2] # same as [0:2]
print("y = " + str(y))
y = x[3:10] # get elements 3..9
print("y = " + str(y))
y = x[3:] # same as above
print("y = " + str(y))
# %% [markdown]
# You can also go to last element easily:
# %%
y = x[-1] # last element
print("y = " + str(y))
y = x[:-1] # all until the last element - noninclusive
print("y = " + str(y))
y = x[-3:] # last three elements
print("y = " + str(y))
# %% [markdown]
# Step size is also an option:
# %%
y = x[0:3:2] # only evens until 3 noninclusive
print("y = " + str(y))
y = x[1:5:2] # only odds until 5 noninclusive
print("y = " + str(y))
y = x[::3] # +3 step size - entire list
print("y = " + str(y))
y = x[::-1] # reverse!
print("y = " + str(y))
# %% [markdown]
# ## Primitives
# In this section, we go over some common primitive data types in Python.
# While the word _primitive_ looks obscure, we can think of it as the most basic data type that cannot be further decomposed into simpler ones.
# ### Numbers
# numbers without fractional parts are called ___integer___. In Python, they are abbreviated as `int`
# %%
x = 3
type(x)
# %% [markdown]
# numbers with fractional part are floating point numbers. They are named `float` in Python.
# %%
y = 3.0
type(y)
# %% [markdown]
# We can apply arithmetic to these numbers. However, one thing we need to be careful about is ___type conversion___. See the example below.
# %%
z = 2 * x # int times int
type(z)
# %%
z = y ** x # float to the power int — the result stays a float
print(z)
type(z)
# %%
z = x / 2 # what will happen when dividing two ints?
z
# %% [markdown]
# ### Boolean
# Boolean type comes in handy when we need to check conditions. For example:
# %%
my_error = 1.6
compare_result = my_error < 0.1
print(compare_result)
print(type(compare_result))
# %% [markdown]
# There are two valid Boolean values: `True` and `False`. We can also think of them as `1` and `0`, respectively.
# %%
my_error > 0
# %% [markdown]
# When we use Boolean values for arithmetic operations, they will become `1/0` automatically
# %%
(my_error > 0) + 2
# %% [markdown]
# ### Strings
# In Python, we use `str` type for storing letters, words, and any other characters.
#
# To initialize a string variable, you can use either double or single quotes.
# %%
my_str1 = "see you"
print(my_str1)
print(type(my_str1))
my_str2 = 'see you later'
print(my_str2)
print(type(my_str2))
# %% [markdown]
# We can also use `+` to _concatenate_ different strings
# %%
my_str1 + ' tomorrow'
# %% [markdown]
# One way of formatting strings is equivalent to c language:
# %%
print("1/3 is approximately %.2f" % (1/3)) # %f for floating point number
print(" '%s' != '%s'" % (my_str1, my_str2)) # %s for string
# %% [markdown]
# you can also simply do string concatenation:
# %%
print("Printing a string: " + my_str1 + ", and printing a number: " + str(3))
# %% [markdown]
# `str` is an **iterable object,** meaning that we can iterate through each individual character:
# %%
print(my_str1[0])
print(my_str1[2:6])
# %% [markdown]
# ## Data Structures
# In this section, we discuss some ___nonprimitive___ data structures in Python.
# ### List
# Initialize a list with brackets. You can store anything in a list, even if they are different types
#
# %%
a_list = [1, 2, 3] # commas to separate elements
print("Length of a_list is: %i" % (len(a_list)))
print("The 3rd element of a_list is: %s" %
      (a_list[2])) # Remember Python starts with 0
print("The last element of a_list is: %s" % (a_list[-1])) # -1 means the end
print("The sum of a_list is %.2f" % (sum(a_list)))
# %% [markdown]
# We can put different types in a list
# %%
b_list = [20, True, "good", "good"]
print(b_list)
# %% [markdown]
# Update a list: __pop__, __remove__, __append__, __extend__
# %%
print(a_list)
print("Pop %i out of a_list" % a_list.pop(1)) # pop the value of an index
print(a_list)
# %%
print("Remove the string good from b_list:")
b_list.remove("good") # remove a specific value (the first one in the list)
print(b_list)
# %%
a_list.append(10)
print("After appending a new value, a_list is now: %s" % (str(a_list)))
# %% [markdown]
# merge `a_list` and `b_list`:
# %%
a_list.extend(b_list)
print("Merging a_list and b_list: %s" % (str(a_list)))
# %% [markdown]
# We can also use `+` to concatenate two lists
# %%
a_list + b_list
# %% [markdown]
# ### Tuple
# Tuple is a special case of list whose elements cannot be changed (immutable).
#
# Initialize a tuple with parenthesis:
# %%
a_tuple = (1, 2, 3, 10)
print(a_tuple)
print("First element of a_tuple: %i" % a_tuple[0])
# %% [markdown]
# You can't change the values of a tuple:
# %%
# a_tuple[0] = 5
# %% [markdown]
# In order to create a single value tuple, you need to add a ','
# %%
a_tuple = (1) # this would create an int type
print(type(a_tuple))
b_tuple = (1,) # this would create a tuple type, take note of the comma.
print(type(b_tuple))
# %% [markdown]
# ### Dictionary
# Dictionary: key-value pairs
#
# Initialize a dict by curly brackets `{}`
# %%
d = {} # empty dictionary
# add a key-value by using bracket (key). You can put anything in key/value.
d[1] = "1 value"
print(d)
# %%
# Use for loop to add values
for index in range(2, 10):
    d[index] = "%i value" % index
print(d)
print("All the keys: " + str(d.keys()))
print("All the values: " + str(d.values()))
# %%
for key in d:
    print("Key is: %i, Value is : %s" % (key, d[key]))
# %% [markdown]
# ### Side note: mutable vs. immutable objects
#
# **Everything in Python is an object**, and an object can be either mutable (changeable after creation) or immutable.
# Almost all Python objects are mutable, except for the primitives (numbers, booleans, strings) and tuples
#
# Why is this interesting? Because assignment always creates a soft link (like a shortcut in Windows) rather than a copy, so if you change a mutable object, every name referencing it sees the change too! Something that can cause big bugs!
# %%
# immutable object (str): no problem — `b + ' World!'` builds a brand-new
# string, so `a` keeps pointing at the original
a = 'Hello'
b = a
b = b + ' World!'
print(a)
print(b)
# %%
# mutable object (list): big problem — `a` and `b` are two names for the
# SAME list, so mutating through `b` is visible through `a`
a = ['Hello']
b = a
b[0] = 'World!'
print(a)
print(b)
# %%
# use .copy() to overcome this:
c = a.copy()
c[0] = "other world"
print(a)
print(c)
| [
2,
43313,
685,
4102,
2902,
60,
198,
2,
1303,
11361,
26701,
25,
45884,
314,
198,
2,
198,
2,
685,
1532,
345,
389,
2045,
329,
262,
12429,
37906,
11,
9485,
354,
1670,
1222,
10392,
9988,
5698,
1174,
11,
685,
270,
338,
994,
8183,
7,
545... | 2.901568 | 3,444 |
"""Overlap Technical Analysis"""
# Docstring-format marker read by docutils/PEP 258 tooling; the recognized
# name is the double-underscore `__docformat__` (the original had three).
__docformat__ = "numpy"
import pandas as pd
import pandas_ta as ta
def ema(s_interval: str, df_stock: pd.DataFrame, window_length: int) -> pd.DataFrame:
    """Gets exponential moving average (EMA) for stock

    Parameters
    ----------
    s_interval: str
        Data interval ("1440min" denotes daily data)
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    window_length: int
        Length of EMA window

    Returns
    ----------
    df_ta: pd.DataFrame
        Dataframe containing prices and EMA
    """
    # Daily data carries an adjusted close; intraday data only a raw close.
    price_col = "Adj Close" if s_interval == "1440min" else "Close"
    ema_series = ta.ema(df_stock[price_col], length=window_length).dropna()
    return pd.DataFrame(ema_series)
def sma(s_interval: str, df_stock: pd.DataFrame, window_length: int) -> pd.DataFrame:
    """Gets simple moving average (SMA) for stock

    Parameters
    ----------
    s_interval: str
        Data interval ("1440min" denotes daily data)
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    window_length: int
        Length of SMA window

    Returns
    ----------
    df_ta: pd.DataFrame
        Dataframe containing prices and SMA
    """
    # Daily data carries an adjusted close; intraday data only a raw close.
    price_col = "Adj Close" if s_interval == "1440min" else "Close"
    df_ta = ta.sma(df_stock[price_col], length=window_length).dropna()
    return pd.DataFrame(df_ta)
def wma(s_interval: str, df_stock: pd.DataFrame, window_length: int) -> pd.DataFrame:
    """Gets weighted moving average (WMA) for stock

    Parameters
    ----------
    s_interval: str
        Data interval ("1440min" denotes daily data)
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    window_length: int
        Length of WMA window

    Returns
    ----------
    df_ta: pd.DataFrame
        Dataframe containing prices and WMA
    """
    # Daily data carries an adjusted close; intraday data only a raw close.
    price_col = "Adj Close" if s_interval == "1440min" else "Close"
    df_ta = ta.wma(df_stock[price_col], length=window_length).dropna()
    return pd.DataFrame(df_ta)
def hma(s_interval: str, df_stock: pd.DataFrame, window_length: int) -> pd.DataFrame:
    """Gets hull moving average (HMA) for stock

    Parameters
    ----------
    s_interval: str
        Data interval ("1440min" denotes daily data)
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    window_length: int
        Length of HMA window

    Returns
    ----------
    df_ta: pd.DataFrame
        Dataframe containing prices and HMA
    """
    # Daily data carries an adjusted close; intraday data only a raw close.
    price_col = "Adj Close" if s_interval == "1440min" else "Close"
    df_ta = ta.hma(df_stock[price_col], length=window_length).dropna()
    return pd.DataFrame(df_ta)
def zlma(s_interval: str, df_stock: pd.DataFrame, window_length: int) -> pd.DataFrame:
    """Gets zero-lagged exponential moving average (ZLEMA) for stock

    Parameters
    ----------
    s_interval: str
        Data interval ("1440min" denotes daily data)
    df_stock: pd.DataFrame
        Dataframe of dates and prices
    window_length: int
        Length of ZLEMA window

    Returns
    ----------
    df_ta: pd.DataFrame
        Dataframe containing prices and ZLEMA
    """
    # Daily data carries an adjusted close; intraday data only a raw close.
    price_col = "Adj Close" if s_interval == "1440min" else "Close"
    df_ta = ta.zlma(df_stock[price_col], length=window_length).dropna()
    return pd.DataFrame(df_ta)
def vwap(day_df: pd.DataFrame) -> pd.DataFrame:
    """Gets volume weighted average price (VWAP)

    Parameters
    ----------
    day_df: pd.DataFrame
        Dataframe of dates and prices for the last trading day

    Returns
    ----------
    df_vwap: pd.DataFrame
        Dataframe with VWAP data
    """
    vwap_series = ta.vwap(
        high=day_df["High"],
        low=day_df["Low"],
        close=day_df["Close"],
        volume=day_df["Volume"],
    )
    return pd.DataFrame(vwap_series)
| [
37811,
5886,
37796,
20671,
14691,
37811,
198,
17569,
15390,
18982,
834,
796,
366,
77,
32152,
1,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
19798,
292,
62,
8326,
355,
20486,
628,
198,
4299,
795,
64,
7,
82,
62,
3849,
2100,
... | 2.236884 | 1,887 |