content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, MagicMock
from pathlib import Path
from src.utils.generic_utils import (str_to_class, is_gpu_available,
generate_file_path)
from src.readers.opencv_reader import OpenCVReader
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
12,
42334,
8696,
32,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.238462 | 260 |
import numpy as np
import torch
from utils.training_utils import build_between_sentence_data
def build_relation_list(option = 'scidtb'):
    """Return the full list of discourse relation labels for a corpus.

    Args:
        option: corpus selector; 'scidtb' yields the English SciDTB label
            set, any other value yields the Chinese (CDTB-style) label set.

    Returns:
        A list of relation-label strings, with 'ROOT' always first.
    """
    scidtb_labels = [
        'ROOT', 'elab-aspect', 'elab-addition', 'enablement', 'same-unit',
        'contrast', 'attribution', 'evaluation', 'bg-goal', 'manner-means',
        'elab-enum_member', 'joint', 'null', 'elab-definition', 'bg-compare',
        'elab-example', 'cause', 'result', 'progression', 'temporal',
        'bg-general', 'condition', 'exp-reason', 'summary', 'comparison',
        'exp-evidence', 'elab-process_step',
    ]
    cdtb_labels = [
        'ROOT', '因果关系', '背景关系', '转折关系', '并列关系', '目的关系',
        '例证关系', '解说关系', '条件关系', '总分关系', '假设关系',
        '顺承关系', '对比关系', '递进关系', '评价关系', '推断关系',
        '让步关系', '选择关系',
    ]
    return scidtb_labels if option == 'scidtb' else cdtb_labels
#tokenize the relation data
#input a list of feature pairs
def tokenize_relation_data(features, tokenizer, SEQ_LEN = 40):
    """Tokenize EDU pairs into one concatenated id tensor per pair.

    Each feature is a tuple/list of two EDUs (or None for an absent side,
    e.g. the ROOT pairing). Both sentences are tokenized to exactly SEQ_LEN
    ids; the second sentence's leading [CLS] is dropped so the pair reads as
    a single [CLS] s1 [SEP] s2 [SEP] sequence.

    :param features: list of 2-element EDU pairs
    :param tokenizer: a transformers-style tokenizer with encode_plus
    :param SEQ_LEN: per-sentence padded/truncated length
    :return: LongTensor of shape (len(features), 2 * SEQ_LEN - 1)
    """
    new_features = []
    for pair in features:
        pieces = []
        for i, edu in enumerate(pair):
            # Fix: identity comparison with None (was `edu == None`).
            sentence = '' if edu is None else edu.sentence
            tokens = tokenizer.encode_plus(
                sentence, max_length=SEQ_LEN,
                truncation=True, padding="max_length",
                add_special_tokens=True, return_token_type_ids=False,
                return_attention_mask=False, return_tensors='pt')
            Xids = tokens['input_ids'].numpy()
            # Discard the [CLS] of the second component of the pair.
            if i == 1:
                Xids = Xids[:, 1:]
            pieces.append(torch.Tensor(Xids))
        new_features.append(torch.cat(pieces, dim=-1).reshape(1, -1))
    return torch.cat(new_features, dim=0).long()
def transform_heads_simple(heads, edus, simple_relation_net, relation_list, tokenizer):
    """Predict a relation label for every EDU via direct pair classification.

    Uses the baseline BERT classifier: each EDU is paired with its head EDU
    (document order preserved; a root EDU is paired with an empty left side)
    and classified independently.

    :param heads: mapping from EDU id (1-based) to its head id (0 = root)
    :param edus: list of EDU objects, indexed by id - 1
    :param simple_relation_net: classifier network (moved to GPU here)
    :param relation_list: list of relation names indexed by class id
    :param tokenizer: tokenizer passed through to tokenize_relation_data
    :return: dict mapping EDU id -> predicted relation name
    """
    simple_relation_net = simple_relation_net.cuda()
    predictions = {}
    for edu_id in range(1, len(heads) + 1):
        head_id = heads[edu_id]
        if head_id == 0:
            # Root EDU: pair with an empty left context.
            pair = [[None, edus[edu_id - 1]]]
        elif head_id < edu_id:
            pair = [[edus[head_id - 1], edus[edu_id - 1]]]
        else:
            pair = [[edus[edu_id - 1], edus[head_id - 1]]]
        batch = tokenize_relation_data(pair, tokenizer)
        scores = simple_relation_net(batch.cuda()).cpu().detach().numpy()
        predictions[edu_id] = relation_list[np.argmax(scores)]
    return predictions
#assemble post processing to transform heads
def assembled_transform_heads(heads, edus, relation_bert, lstm_tagger, between_relation_bert,
                between_tagger, relation_list, tokenizer):
    """
    Predict relations from the given dependency structure.
    Corresponds to the BERT + Stacked BiLSTM model: a first-level tagger
    labels every EDU, then a second-level tagger overrides the labels of
    EDUs whose head lies in a different sentence (or is the root).
    Args:
        - :param: `heads` (mapping int -> int): the id of the head for each edu
                (keyed 1..n; 0 means root)
        - :param: `edus` (list of EDU): a list of edus (indexed by id - 1)
        - :param: `relation_bert` (transformer model): the transformer model used to get
                the embeddings for first-level sequence labeling
        - :param: `lstm_tagger` (sequence labeling model): the sequence labeling model
                for first-level (all EDUs) labeling
        - :param: `between_relation_bert` (transformer model): the transformer model used
                to get the embeddings for second-level sequence labeling
        - :param: `between_tagger` (sequence labeling model): the sequence labeling model
                for second-level (between-sentence) labeling
        - :param: `relation_list` (list of str): a list of all relations
    Return:
        - `relations` (dictionary): key is the id of the edu, value is the
                predicted relation name
    """
    relations = {}
    relation_features = []
    # Sinusoidal position encodings over the 768-dim BERT [CLS] vector.
    position = np.array([i for i in range(768)])
    position_enc = np.power(10000,position/768)
    n = len(heads)
    between_heads = {}
    between_edus = []
    #between features is the feature for post tagging between sentences
    between_features = []
    for edu in range(1,1+n):
        #edu is the id of this edu
        #if it is root, label it as ROOT relation
        head = heads[edu]
        if head == 0:
            feature = [[None, edus[edu -1]]]
        else:
            # Keep the two EDUs in document order inside the pair.
            if head < edu:
                feature = [[edus[head - 1], edus[edu - 1]]]
            else:
                feature = [[edus[edu -1], edus[head - 1]]]
        feature = tokenize_relation_data(feature, tokenizer)
        # Position encoding mixes the EDU id (sin) with its sentence id (cos).
        # NOTE(review): `.sentenceID` here vs `.sentenceNo` below -- presumably
        # distinct attributes of EDU; confirm against the EDU class definition.
        sin_enc = np.sin(edus[edu -1].id/position_enc)+np.cos(edus[edu-1].sentenceID/position_enc)
        # Collect a second feature set for EDUs whose head is in another
        # sentence (or the root); these get re-labeled by between_tagger.
        if head == 0 or edus[edu - 1].sentenceNo != edus[head - 1].sentenceNo:
            between_heads[edu] = head
            between_edus.append(edu)
            between_features.append(between_relation_bert(feature.cuda())[0][:,0,:].cpu().detach() + sin_enc)
        # [CLS] embedding from the first-level BERT, plus position encoding.
        feature = relation_bert(feature.cuda())[0][:,0,:].cpu().detach() + sin_enc
        relation_features.append(feature)
    relation_features = torch.cat(relation_features, dim = 0)
    # First pass: label every EDU with the stacked BiLSTM tagger.
    all_relations = np.argmax(lstm_tagger(relation_features.cuda()).cpu().detach().numpy(), axis = 1)
    for i, relation_index in enumerate(all_relations):
        relations[i+1] = relation_list[relation_index]
    between_features = torch.cat(between_features, dim = 0)
    # Second pass: override labels of between-sentence EDUs.
    between_relations = np.argmax(between_tagger(between_features.cuda()).cpu().detach().numpy(), axis = 1)
    for i, relation_index in enumerate(between_relations):
        relations[between_edus[i]] = relation_list[relation_index]
    return relations
def build_paired_data(data, option = 'scidtb', between_sentence = False):
    """Build (EDU pair, relation index) examples for direct classification.

    Each feature pairs an EDU with its head EDU, kept in the order they
    appear in the discourse; a root EDU is paired with None on the left.
    When between_sentence is True, only EDUs listed in the corresponding
    between-sentence set are kept.

    :param data: list of documents, each a list of EDU objects
    :param option: corpus name, 'scidtb' or 'cdtb'
    :return: (features, labels) -- list of [left, right] pairs and a float
        torch.Tensor of relation-list indices
    """
    relation_list = build_relation_list(option)
    between_sentence_data = build_between_sentence_data(data)
    features, labels = [], []
    for edus, between_edus in zip(data, between_sentence_data):
        for edu in edus:
            if between_sentence and edu not in between_edus:
                continue
            if edu.head == 0:
                pair = [None, edu]
            elif edu.id > edu.head:
                pair = [edus[edu.head - 1], edu]
            else:
                pair = [edu, edus[edu.head - 1]]
            features.append(pair)
            labels.append(relation_list.index(edu.relation))
    return features, torch.Tensor(labels)
def prepare_finetune_dataloader(data, tokenizer, option = 'scidtb',
                                SEQ_LEN = 40, between_sentence = False):
    """Build a shuffled DataLoader of tokenized EDU pairs for finetuning.

    :param data: list of documents (lists of EDU objects)
    :param tokenizer: tokenizer passed to tokenize_relation_data
    :param option: corpus name, 'scidtb' or 'cdtb'
    :param SEQ_LEN: per-sentence token length
    :param between_sentence: restrict to between-sentence EDUs if True
    :return: a torch DataLoader of (token_ids, label) batches
    """
    pair_features, pair_labels = build_paired_data(data, option, between_sentence)
    token_ids = tokenize_relation_data(pair_features, tokenizer, SEQ_LEN)
    dataset = torch.utils.data.TensorDataset(token_ids.long(), pair_labels.long())
    # drop_last keeps every batch at exactly 32 examples.
    return torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, drop_last=True)
#a dataset structure for sequence labeling
def prepare_seq_label_dataloader(data, tokenizer, relation_bert ,option = 'scidtb', SEQ_LEN = 40,\
                                 between_sentence = False):
    """
    Prepare the dataloader used to train sequence labeling models.
    Encodes every (EDU, head) pair with relation_bert, adds sinusoidal
    position encodings, and groups the per-document sequences of
    representations and relation labels into an LSTMDataset.
    """
    relation_bert = relation_bert.cuda()
    edu2tokens = {}
    relation_list = build_relation_list(option)
    # Tokenize each EDU paired with its head (document order preserved;
    # root EDUs pair with None on the left).
    for edus in data:
        for edu in edus:
            if edu.head == 0:
                features = [[None, edu]]
            else:
                if edu.head < edu.id:
                    features = [[edus[edu.head - 1], edu]]
                else:
                    features = [[edu, edus[edu.head - 1]]]
            edu2tokens[edu]= tokenize_relation_data(features, tokenizer, SEQ_LEN).long().reshape(1,-1)
    #turn pair tokens to bert representations
    edu2representations = {}
    if between_sentence:
        # Narrow the documents to their between-sentence EDUs only.
        data = build_between_sentence_data(data)
    for j,edus in enumerate(data):
        token_features = torch.cat([edu2tokens[edu] for edu in edus], dim = 0)
        # [CLS] embedding per EDU pair.
        token_features = relation_bert(token_features.cuda())[0][:,0,:].detach().cpu()
        for i,edu in enumerate(edus):
            edu2representations[edu] = token_features[i]
    #add position encodings
    position = np.array([i for i in range(len(edu2representations[data[0][0]]))])
    position_enc = np.power(10000,position/768)
    for edus in data:
        for edu in edus:
            # Mixes EDU id (sin) with sentence id (cos), as in
            # assembled_transform_heads.
            sin_enc = np.sin(edu.id/position_enc)+np.cos(edu.sentenceID/position_enc)
            edu2representations[edu] = edu2representations[edu] + sin_enc
    #use ed2representations to build sequence tagging of lstm
    features = []
    labels = []
    for edus in data:
        features.append(torch.cat([edu2representations[edu].reshape(1,-1) for edu in edus], dim = 0))
        labels.append(torch.Tensor([relation_list.index(edu.relation)for edu in edus]).long() )
    # NOTE(review): LSTMDataset is defined elsewhere in the original module;
    # it presumably wraps per-document (features, labels) sequences.
    train_dataloader = LSTMDataset(features, labels)
    return train_dataloader
#training for sequence labeling
#validate the model, return the loss
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
3384,
4487,
13,
34409,
62,
26791,
1330,
1382,
62,
23395,
62,
34086,
594,
62,
7890,
198,
198,
4299,
1382,
62,
49501,
62,
4868,
7,
18076,
796,
705,
1416,
312,
83,
65,
6,
259... | 2.423772 | 3,929 |
"""
935
medium
knight dialer
"""
sol = Solution()
print(sol.knightDialer(2))
| [
37811,
198,
24,
2327,
198,
24132,
198,
74,
3847,
5980,
263,
198,
37811,
628,
198,
34453,
796,
28186,
3419,
198,
4798,
7,
34453,
13,
74,
3847,
24400,
263,
7,
17,
4008,
628,
628,
628,
198
] | 2.428571 | 35 |
from aiohttp import web
import asyncio
import logging
host = '127.0.0.1'
port = 9000
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
| [
6738,
257,
952,
4023,
1330,
3992,
198,
11748,
30351,
952,
198,
11748,
18931,
628,
198,
4774,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
198,
634,
796,
50138,
628,
628,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
... | 2.69697 | 66 |
__author__ = 'mehdibenchoufi'
| [
834,
9800,
834,
796,
705,
1326,
31298,
571,
24421,
280,
12463,
6,
198
] | 2.307692 | 13 |
"""Trains DGP models.
You can use this to create checkpoints which you can analyze with sample_gradients.py
Adapted from
https://github.com/hughsalimbeni/DGPs_with_IWVI/blob/master/experiments/run_conditional_density_estimation.py
"""
import csv
import os
import random
import warnings
from typing import Dict
import numpy as np
import tensorflow as tf
from gpflow.training.monitor import (
CallbackTask,
CheckpointTask,
LogdirWriter,
ModelToTensorBoardTask,
Monitor,
MonitorContext,
MonitorTask,
PeriodicIterationCondition,
PrintTimingsTask,
restore_session,
)
from tensorflow_core.core.framework.summary_pb2 import Summary
import experiment_common
import metrics
import sample_gradients
from build_models import build_model
from dgps_with_iwvi.dreg_optimizer import DregModel
from dgps_with_iwvi.models import DGP_VI
import demo_dataset
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=DeprecationWarning)
if __name__ == "__main__":
main()
| [
37811,
2898,
1299,
360,
16960,
4981,
13,
198,
198,
1639,
460,
779,
428,
284,
2251,
36628,
543,
345,
460,
16602,
351,
6291,
62,
9744,
2334,
13,
9078,
198,
198,
48003,
276,
422,
198,
5450,
1378,
12567,
13,
785,
14,
71,
6724,
21680,
32... | 3.048851 | 348 |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import numpy as np
import plyfile
import skimage.measure
import time
import torch
import deep_sdf.utils
def convert_sdf_samples_to_npy(
    pytorch_2d_sdf_tensor,
    pixel_grid_origin,
    N,
    npy_filename_out,
    offset=None,
    scale=None,
    *,
    number_sample,
):
    """
    Convert 2-D SDF samples to blank-shape contour and image .npy files.

    :param pytorch_2d_sdf_tensor: a torch.FloatTensor of shape (n,n)
    :param pixel_grid_origin: a list of two floats: the left, down origin of the pixel grid
    :param N: grid resolution (not used in this function)
    :param npy_filename_out: string, path prefix of the files to save to
    :param offset: reserved; currently unused
    :param scale: reserved; currently unused
    :param number_sample: total number of contour points for the sampled
        blank shape. Keyword-only: the original declaration placed this
        non-default parameter after defaulted ones, a SyntaxError.

    This function adapted from: https://github.com/RobotLocomotion/spartan
    """
    start_time = time.time()

    numpy_2d_sdf_array = pytorch_2d_sdf_tensor.numpy()

    # Marching-squares contour extraction at the zero level set.
    # Fix: the scikit-image API is find_contours (plural), which returns a
    # list of (M, 2) arrays; the original called nonexistent find_contour.
    coords = skimage.measure.find_contours(
        numpy_2d_sdf_array, 0.0
    )
    # Keep the largest contour as the blank outline.
    # NOTE(review): `longest` is defined elsewhere in the original module --
    # presumably returns the index of the longest array in coords.
    blank = coords[longest(coords)]

    # Binarise the SDF: inside (sdf <= 0) -> 1, outside -> 0.
    # Fix: compute the mask once up front. The original set sdf<=0 entries
    # to 1 and then zeroed everything > 0, which also zeroed the freshly
    # written 1s, leaving the whole array 0.
    inside = numpy_2d_sdf_array <= 0
    numpy_2d_sdf_array[inside] = 1
    numpy_2d_sdf_array[~inside] = 0

    # Save pixel-based blank images to *.npy files.
    logging.debug("saving blank images to %s" % (npy_filename_out + 'image.npy'))
    np.save(npy_filename_out + 'image.npy', numpy_2d_sdf_array)

    # Save the coordinates of blank shapes to *.npy files.
    logging.debug("saving blank shapes to %s" % (npy_filename_out + 'shape.npy'))
    np.save(npy_filename_out + 'shape.npy', blank)

    # Generate more sampled points on the blank shapes.
    # NOTE(review): `Sample` is defined elsewhere in the original module and
    # appears to densify blank_sample in place -- confirm against full source.
    blank_sample = blank.copy()
    Sample(blank_sample, number_sample - blank_sample.shape[0])

    # Save the coordinates of sampled blank shapes to *.npy files.
    logging.debug("saving blank shapes to %s" % (npy_filename_out + 'sample.npy'))
    np.save(npy_filename_out + 'sample.npy', blank_sample)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
5472,
12,
25579,
3203,
13,
1439,
6923,
33876,
13,
198,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
35960,
7753,
198,
11748,
1341,
9060,
13,
1326,
5... | 2.637349 | 830 |
import os
import torch
from transformers import BertModel, BertTokenizer
from bert_ner.aux import get_data_from_tuples, get_all_sentences, batchify, test, bioes_classes
from bert_ner.model import NERModel
_path = os.path.dirname(__file__)
_test_filename = os.path.join(_path, '../data/test.conll')
_save_filename = os.path.join(_path, '../data/ontonotes.model')
MODEL = (BertModel, BertTokenizer, 'bert-base-uncased')
if __name__ == '__main__':
    # Unpack the (model class, tokenizer class, weights name) triple.
    model_class, tokenizer_class, pretrained_weights = MODEL
    tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
    language_model = model_class.from_pretrained(pretrained_weights)
    print('Loading test data')
    # max_lines=-1 reads the whole CoNLL test file.
    sentences = get_all_sentences(_test_filename, max_lines=-1)
    test_data = get_data_from_tuples(sentences, tokenizer)
    # Batches of 10 sentences each.
    test_batches = batchify(test_data, 10)
    # NER head on top of the BERT encoder; one output per BIOES class.
    model = NERModel(language_model, nout=len(bioes_classes))
    # Restore trained weights from the saved checkpoint.
    checkpoint = torch.load(_save_filename)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.cuda()
    model.eval()
    test(model, test_batches, tokenizer)
| [
11748,
28686,
198,
198,
11748,
28034,
198,
6738,
6121,
364,
1330,
22108,
17633,
11,
22108,
30642,
7509,
198,
198,
6738,
275,
861,
62,
1008,
13,
14644,
1330,
651,
62,
7890,
62,
6738,
62,
28047,
2374,
11,
651,
62,
439,
62,
34086,
3007,
... | 2.756345 | 394 |
import pytest
from ansiblemetrics.playbook.num_uri import NumUri
script_0_1 = '---\n-host: localhost'
script_0_2 = '- oasis_roles.rhsm\n- oasis_roles.molecule_openstack_ci\n- oasis_roles.molecule_docker_ci'
script_1 = '- name: Check that you can connect (GET) to a page and it returns a status 200\n\turi:\n\t\turl: ' \
'http://www.example.com '
TEST_DATA = [
(script_0_1, 0),
(script_0_2, 0),
(script_1, 1)
]
@pytest.mark.parametrize('script, expected', TEST_DATA)
| [
11748,
12972,
9288,
198,
6738,
9093,
856,
4164,
10466,
13,
1759,
2070,
13,
22510,
62,
9900,
1330,
31835,
52,
380,
198,
198,
12048,
62,
15,
62,
16,
796,
705,
6329,
59,
77,
12,
4774,
25,
1957,
4774,
6,
198,
12048,
62,
15,
62,
17,
... | 2.251142 | 219 |
from nimba.core.exceptions import ImproperlyRoute
| [
6738,
299,
320,
7012,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
43401,
628
] | 3.4 | 15 |
#!/usr/bin/env python3
# Copyright (c) 2021 Silas Cutler, silas.cutler@gmail.com (https://silascutler.com/)
# See the file 'COPYING' for copying permission.
import os
import re
import sys
import guestfs
if __name__ == "__main__":
if len(sys.argv) < 2:
print("inspect_vm: missing disk image to inspect", file=sys.stderr)
sys.exit(1)
disk = sys.argv[1]
h = HostAnalzer_Linux(disk)
#h.get_root_info()
h.hash_filesystem()
h.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
33448,
4243,
292,
37194,
11,
3313,
292,
13,
8968,
1754,
31,
14816,
13,
785,
357,
5450,
1378,
18217,
3372,
315,
1754,
13,
785,
34729,
198,
2,
4091,
262,
23... | 2.386935 | 199 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/21 13:56
# @Author : GUO Ziyao
import os
from easyw.bootstrap import app
# Port is configurable via the EASYW_PORT environment variable (default 9715).
port = int(os.environ.get('EASYW_PORT', 9715))
# Bind on all interfaces; debug=True enables the reloader/debugger (dev only).
app.run(host='0.0.0.0', port=port, debug=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2864,
14,
18,
14,
2481,
1511,
25,
3980,
198,
2,
2488,
13838,
220,
1058,
19348,
... | 2.194444 | 108 |
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
# URL routing table: landing/signup at the root, plus home, profile and
# search views.
urlpatterns=[
    url('home',views.welcome,name = 'welcome'),
    # url(r'^search/', views.search_results, name='search_results')
    url(r'^$',views.signup, name='sign'),
    url(r'^profile/$',views.profile, name='profile'),
    url(r'^search/$',views.search_results, name='search'),
]
# Serve uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
... | 2.784946 | 186 |
"""Addition circuits"""
from computer.gates import AND, OR, XOR
from computer.configuration import PRECISION
# TODO Refactor to stateless classes?
def half_adder(a: str, b: str):
    """Combine two bits into a (sum, carry) pair.

    The sum bit is XOR of the inputs, the carry bit is AND of the inputs,
    so the maximum total is binary 10 (sum=0, carry=1).

    ```
    a  b  sum  carry
    0  0  0    0
    1  0  1    0
    0  1  1    0
    1  1  0    1
    ```
    """
    return XOR(a, b), AND(a, b)
def full_adder(a: str, b: str, c: str):
    """Combine three bits into a (sum, carry) pair.

    Chains two half adders; the carry is set when either stage carries,
    so the maximum total is binary 11 (sum=1, carry=1).

    ```
    a  b  c  sum  carry
    0  0  0  0    0
    0  1  0  1    0
    0  0  1  1    0
    0  1  1  0    1
    1  0  0  1    0
    1  1  0  0    1
    1  0  1  0    1
    1  1  1  1    1
    ```
    """
    partial_sum, first_carry = half_adder(a, b)
    total_sum, second_carry = half_adder(partial_sum, c)
    return total_sum, OR(first_carry, second_carry)
def ripple_carry_adder(a: str, b: str, precision: int = PRECISION):
    """Add two numbers represented as bit strings; return (sum, carry).

    :param a: first operand, a bit string at least `precision` long
    :param b: second operand, same representation
    :param precision: number of bit positions to add
    :return: (sum_bits, carry) -- sum_bits is a string of `precision` bits,
        carry is the final carry-out bit ("1" on overflow)
    """
    # result_bits renamed from `sum` to avoid shadowing the builtin.
    result_bits = ["0"] * precision
    carry = "0"
    # From the right-most (least significant) bit, run the full adder and
    # propagate the carry to the next position.
    # Fix: removed a leftover debug print() that fired on every bit.
    for i in range(1, precision + 1):
        result_bits[-i], carry = full_adder(a[-i], b[-i], carry)
    return "".join(result_bits), carry
| [
37811,
4550,
653,
24907,
37811,
628,
198,
6738,
3644,
13,
70,
689,
1330,
5357,
11,
6375,
11,
1395,
1581,
198,
6738,
3644,
13,
11250,
3924,
1330,
22814,
34,
42446,
628,
198,
2,
16926,
46,
6524,
11218,
284,
1181,
1203,
6097,
30,
628,
... | 2.188791 | 678 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import numpy as np
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198
] | 3.666667 | 30 |
from http.server import BaseHTTPRequestHandler, HTTPServer
import bpy
import json
import sys
argv = sys.argv
argv = argv[argv.index("--") + 1:]
outputFolder=argv[0]
blendFolder=argv[1]
with HTTPServer(('', 8000), handler) as server:
server.serve_forever()
| [
6738,
2638,
13,
15388,
1330,
7308,
40717,
18453,
25060,
11,
38288,
18497,
198,
11748,
275,
9078,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
853,
85,
796,
25064,
13,
853,
85,
198,
853,
85,
796,
1822,
85,
58,
853,
85,
13,
9630,
7... | 2.739583 | 96 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import random
from tensorflow import keras
import cv2
import numpy as np
def ShuffleArray(feature, label):
    """
    Shuffle two aligned arrays with one shared row permutation.
    :param feature: 2-D feature array; rows are examples.
    :param label: 2-D label array, aligned row-by-row with feature.
    :return: (feature, label) reindexed by the same random permutation, so
        the row correspondence between the two arrays is preserved.
    """
    order = list(range(feature.shape[0]))
    random.shuffle(order)
    return feature[order, :], label[order, :]
def ShuffleList(feature, label):
    """
    Shuffle two aligned lists with one shared permutation.
    :param feature: list of feature items.
    :param label: list of labels, aligned element-by-element with feature.
    :return: (feature, label) reordered by the same random permutation, so
        each feature keeps its label.
    """
    order = list(range(len(feature)))
    random.shuffle(order)
    return [feature[i] for i in order], [label[i] for i in order]
def RepeatArray(x, num):
    """
    Repeat the rows of an array (or subsample them when num < 1).
    :param x: the original feature, a 2-D array.
    :param num: the repeat factor. If num < 1, a random subset of
        round(rows * num) rows is returned instead of a repetition.
    :return: the augmented feature array.
    """
    if num < 1:
        # Subsample: keep a random fraction of the rows.
        randIndex = [a for a in range(x.shape[0])]
        random.shuffle(randIndex)
        y = x[randIndex[0:int(np.round(x.shape[0] * num))], :]
    else:
        num = int(num)
        xShape = list(x.shape)
        # Fix: cast to int -- np.round returns a float and np.zeros
        # rejects a float dimension (TypeError).
        xShape[0] = int(np.round(x.shape[0] * num))
        y = np.zeros(xShape)
        # Stack `num` full copies of x on top of each other.
        for n in range(num):
            y[x.shape[0] * n:x.shape[0] * (n + 1), :] = x
    return y
def RepeatList(x, num):
    """
    Repeat a list (or subsample it when num < 1).
    :param x: the original feature, a list.
    :param num: the repeat factor. If num < 1, a random subset of
        round(len(x) * num) elements is returned instead of a repetition.
    :return: the augmented feature, a list.
    """
    if num < 1:
        # Fix: the original used x.shape and array-style fancy indexing on
        # a plain list, which raises AttributeError/TypeError. Use len()
        # and a comprehension instead.
        randIndex = [a for a in range(len(x))]
        random.shuffle(randIndex)
        y = [x[ind] for ind in randIndex[0:int(np.round(len(x) * num))]]
    else:
        y = x * int(num)
    return y
def FindIndex(x, y):
    """
    Find every index of y in x.
    :param x: a list.
    :param y: the target to find.
    :return: a list of the positions in x whose element equals y.
    """
    return [i for i, value in enumerate(x) if value == y]
def ReadTxt(txtPath):
    """
    Read a label txt file into image paths and a multi-task label array.
    Each line of the txt is "imagePath label0 [label1 ...]", e.g.
    "../img.png 0 1": the first field is the image path, the remaining
    fields are the class labels of the tasks.
    :param txtPath: the path of the txt file.
    :return: (filePathList, labelArray) -- a list of image paths and a
        float array with one row per line and one column per task.
    """
    # Fix: dropped the redundant f.close() -- the with-block already
    # closes the file.
    with open(txtPath) as f:
        lines = f.readlines()
    fields = [line.strip().split(' ') for line in lines]
    filePathList = [row[0] for row in fields]
    labelList = [row[1:] for row in fields]
    labelArray = np.zeros([len(labelList), len(labelList[0])])
    for i, row in enumerate(labelList):
        for j in range(len(labelList[0])):
            labelArray[i, j] = int(row[j])
    return filePathList, labelArray
def ObtainTaskDict(actionDefine):
    """
    Group action definitions by task id.
    :param actionDefine: a list of action-definition dicts (from
        imitationLearning.json); each has a "task" id list and a "name".
    :return: (taskList, taskActionDict, actionNameDict) -- the distinct
        task ids, a dict mapping task id to its action definitions, and a
        dict mapping task id to the corresponding action names.
    """
    # Collect the distinct task ids across all definitions (default is 0).
    all_tasks = []
    for action in actionDefine:
        all_tasks += action["task"]
    taskList = list(set(all_tasks))

    # Attach each action definition to every task it belongs to.
    taskActionDict = {task: [] for task in taskList}
    for action in actionDefine:
        for task in action["task"]:
            taskActionDict[task].append(action)

    # Derive the per-task name lists from the grouped definitions.
    actionNameDict = {
        task: [action["name"] for action in actions]
        for task, actions in taskActionDict.items()
    }
    return taskList, taskActionDict, actionNameDict
def DataGenerator(actionSpaceList, imgFiles=None, labels=None, batchSize=64, dim=150):
    """
    Generator for fit_generator (runs forever; one `while` pass = one epoch).
    :param actionSpaceList: action number for different tasks, which is a list
        of length 1 or 2.
    :param imgFiles: list of image file name.
    :param labels: the classes for different tasks, which is an array.
    :param batchSize: the image number for one batch.
    :param dim: the size of input image. The image is resized to [dim, dim]
    :return: the batch of input images, the one-hot label array for different tasks.
    """
    m = len(imgFiles)
    # Leftover examples beyond a full batch are dropped each epoch.
    numMinibatches = int(m / batchSize)
    while 1:
        # Fresh random order for every pass over the data.
        permutation = list(np.random.permutation(m))
        for i in range(numMinibatches):
            minibatchesX = np.empty((batchSize, dim, dim, 3))
            minibatchesY0 = np.empty((batchSize), dtype=int)
            minibatchesY1 = np.empty((batchSize), dtype=int)
            index = permutation[i * batchSize:(i + 1) * batchSize]
            for j, ind in enumerate(index):
                # NOTE(review): cv2.imread returns None on a failed load and
                # BGR channel order on success; assumes every path loads and
                # is already dim x dim -- confirm against the caller.
                img = cv2.imread(imgFiles[ind]).astype('float')
                imgOut = PreprocessImage(img)
                minibatchesX[j,] = imgOut
                if len(actionSpaceList) == 1:
                    minibatchesY0[j] = labels[ind, 0]
                else:
                    minibatchesY0[j] = labels[ind, 0]
                    minibatchesY1[j] = labels[ind, 1]
            # One-hot encode the task labels.
            minibatchesY0 = keras.utils.to_categorical(minibatchesY0,
                                                       num_classes=actionSpaceList[0])
            if len(actionSpaceList) == 2:
                minibatchesY1 = keras.utils.to_categorical(minibatchesY1,
                                                           num_classes=actionSpaceList[1])
            if len(actionSpaceList) == 1:
                yield minibatchesX, minibatchesY0
            else:
                yield minibatchesX, [minibatchesY0, minibatchesY1]
def PreprocessImage(img):
    """
    Normalize an input image.
    :param img: the image, with pixel values in [0, 255].
    :return: the image scaled into [0, 1].
    """
    return img / 255.
def GetFeatureAndLabel(feature, label, fileName, actionSpaceList):
    """
    Get feature and label arrays aligned to a list of file names.
    :param feature: the original feature for each image, which is a dictionary
        keyed by image file name.
    :param label: the label of each feature, which is a dictionary keyed by
        image file name.
    :param fileName: file name of image, which is a list.
    :param actionSpaceList: action number for different tasks, which is a list.
    :return: the feature array and one-hot label list for multi-task, or
        (None, None) when no fileName entry is found in feature.
    """
    # Probe an arbitrary entry for the per-example shapes.
    # NOTE(review): assumes every entry of feature/label shares the same
    # (timeStep, featureDim) / taskNum shapes -- confirm with the producer.
    for key in feature.keys():
        timeStep = feature[key].shape[0]
        featureDim = feature[key].shape[1]
        taskNum = label[key].shape[0]
        break
    featureMulti = np.zeros([len(fileName), timeStep, featureDim])
    labelMulti = np.zeros([len(fileName), taskNum])
    # Rows actually filled (file names present in the feature dict).
    index = list()
    for n, fileNameTmp in enumerate(fileName):
        if fileNameTmp in feature.keys():
            featureMulti[n, :] = feature[fileNameTmp]
            labelMulti[n, :] = label[fileNameTmp]
            index.append(n)
        else:
            continue
    if len(index) == 0:
        return None, None
    # Keep only the rows that were filled.
    featureMultiOut = featureMulti[index, :]
    labelMultiOut = labelMulti[index, :]
    # One-hot encode each task's labels.
    labelMultiVecOut = list()
    for n in range(taskNum):
        labelMultiVecOut.append(keras.utils.to_categorical(labelMultiOut[:, n],
                                                           num_classes=actionSpaceList[n]))
    return featureMultiOut, labelMultiVecOut
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
3776,
32,
1797,
48510,
1695,
13,
198,
198,
1212,
2723,
2438,
2393,
318,
11971,
739,
262... | 2.441313 | 3,442 |
import argparse
from pydub import AudioSegment
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--flag", dest="flag", help="Flag to Use")
args = parser.parse_args()
flag = args.flag
if args.flag is None:
print("Please specify a flag with -f")
exit(1)
else:
flag = args.flag
flag = flag.upper()
outputfilename = "./output.wav"
output_data = AudioSegment.empty()
for char in flag:
output_data += add_letter_to_output(char)
output_data.export(outputfilename, format="wav")
| [
11748,
1822,
29572,
198,
6738,
279,
5173,
549,
1330,
13491,
41030,
434,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
12,
69,
1600,
366,
438,
32109,
1600,
2244,
2625,
32109,
1600... | 2.849162 | 179 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from pandapower.control.controller.characteristic_control import CharacteristicControl
from pandapower.control.util.characteristic import Characteristic
class USetTapControl(CharacteristicControl):
    """
    Controller that adjusts the setpoint of a local tap changer voltage control based on a load flow result (e.g. p_lv_mw, i_lv_ka etc.)
    according to a defined characteristic.

    INPUT:
        **net** (attrdict) - Pandapower net

        **cid** (int) - ID of the tap changer controller, an attribute of which is controlled

        **variable** (float) - Variable from the result table that is used for the characteristic

    OPTIONAL:
        **in_service** (bool, True) - Indicates if the controller is currently in_service

        **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
    """
    # NOTE(review): only the class docstring is visible in this chunk; the
    # control behaviour is presumably inherited from CharacteristicControl or
    # defined further down -- confirm against the full source file.
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1584,
12,
1238,
2481,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
6682,
18963,
198,
2,
290,
6682,
4482,
8987,
357,
... | 3.363095 | 336 |
""" Loading strategies for Query
* `QueryLoaderBase`: base class
* `PrimaryQueryLoader` for the primary query (top level)
* `RelatedQueryLoader` for related queries (joined queries)
"""
from __future__ import annotations
from collections import abc
import sqlalchemy as sa
from jessiql.query_object import SelectedRelation
from jessiql.typing import SAModelOrAlias, SARowDict
from .jselectinloader import JSelectInLoader
class QueryLoaderBase:
    """ Loader base

    Base for classes that implement:

    * Prepare an SQL statement for loading objects
    * Execute this statement
    * Populate existing objects with loaded related fields (applicable to related loaders)

    Operations, such as select, filter, sort, skip/limit, are out of scope here.
    """
    # No instance state in the base class.
    __slots__ = ()

    def prepare_statement(self, stmt: sa.sql.Select) -> sa.sql.Select:
        """ Hook: prepare the SELECT statement before any operation is applied

        Use it to add columns & conditions that the loader will need.

        NOTE: called before any operation had a chance to modify the statement.

        Args:
            stmt: A boilerplate SELECT statement against the current model
        """
        # Default: pass the statement through unchanged.
        return stmt

    def for_states(self, source_states: list[SARowDict]) -> None:
        """ Associate the loader with a list of "states": objects loaded by the parent loader

        Only makes sense for related loaders
        """
        # Intentionally a no-op in the base class.

    def load_results(self, stmt: sa.sql.Select, connection: sa.engine.Connection) -> abc.Iterator[SARowDict]:
        """ Actually execute the query and handle result rows fetched from it.

        NOTE: called when all operations have already been applied to the statement.
        NOTE: for_states() has already been called.

        Args:
            stmt: The statement to execute
            connection: The connection to execute the statement with

        Returns:
            Iterator of result dicts
        """
        # Subclasses must provide the actual execution strategy.
        raise NotImplementedError
class PrimaryQueryLoader(QueryLoaderBase):
    """ Primary loader: for the top-level model

    This loader is used for the primary model: the one at the top.
    """
    # No overrides visible in this chunk: the top-level loader relies on the
    # QueryLoaderBase interface as-is.
class RelatedQueryLoader(QueryLoaderBase):
""" Related loader: for related models
This loader is used to populate loaded models with related fields.
"""
__slots__ = 'loader',
| [
37811,
12320,
10064,
329,
43301,
198,
198,
9,
4600,
20746,
17401,
14881,
63,
25,
2779,
1398,
198,
9,
4600,
35170,
20746,
17401,
63,
329,
262,
4165,
12405,
357,
4852,
1241,
8,
198,
9,
4600,
9819,
20746,
17401,
63,
329,
3519,
20743,
357... | 3.112732 | 754 |
"""
TAPnx setup script.
See license in LICENSE.txt.
"""
import os
from setuptools import setup
# provide a long description using reStructuredText
LONG_DESCRIPTION = r"""
**TAPnx** is a Python package that lets you...
Read the `docs`_ or see usage examples and demos on `GitHub`_.
.. _GitHub: https://github.com/tapnx/tapnx-examples
.. _docs: https://tapnx.readthedocs.io
"""
# list of classifiers from the PyPI classifiers trove
CLASSIFIERS = [
#"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Network Assignment",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Traffic",
"Topic :: Scientific/Engineering :: Mathematics"
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
DESC = (
"Short Description"
)
# only specify install_requires if not in RTD environment
if os.getenv("READTHEDOCS") == "True":
INSTALL_REQUIRES = []
else:
with open("requirements.txt") as f:
INSTALL_REQUIRES = [line.strip() for line in f.readlines()]
# now call setup
setup(
name="tapnx",
version="0.0.1",
description=DESC,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
url="https://github.com/tapnx/tapnx",
author="Sam O'Neill",
author_email="sam.t.oneill@googlemail.com",
license="MIT",
platforms="any",
packages=["tapnx"],
python_requires=">=3.6",
install_requires=INSTALL_REQUIRES,
# extras_require={
# "folium": ["folium>=0.11"],
# "kdtree": ["scipy>=1.5"],
# "balltree": ["scikit-learn>=0.24"],
# },
) | [
37811,
198,
51,
2969,
77,
87,
9058,
4226,
13,
198,
198,
6214,
5964,
287,
38559,
24290,
13,
14116,
13,
198,
37811,
198,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
2148,
257,
890,
6764,
1262,
302,
4... | 2.691034 | 725 |
"""Module for defining types used in neumann package."""
import numpy as np
boolnp = np.uint8 # np.bool
intnp = int # np.int
longnp = np.int64 # np.long
doublenp = np.float64 # np.double
complexnp = np.complex128
| [
37811,
26796,
329,
16215,
3858,
973,
287,
497,
40062,
5301,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
30388,
37659,
796,
45941,
13,
28611,
23,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
45941,
13,
30388,
1... | 2.2 | 115 |
# Generated by Django 3.1.4 on 2021-09-07 10:57
from django.db import migrations, models
import report.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
2931,
12,
2998,
838,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
989,
13,
27530,
628
] | 3.027027 | 37 |
#!/usr/bin/python3
import dbus
import time
import sys
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
import json
DBUS_BUS_ENOCEAN_NAME = 'com.devicehive.enocean'
DBUS_BUS_BLE_NAME = 'com.devicehive.bluetooth'
SWITCH_ADDRESS = '00:2A:1A:B8'
BULB_ADDRESS = '20:C3:8F:F5:49:B4'
bulb_on_value = '0f0d0300ffffffc800c800c8000059ffff'
bulb_off_value = '0f0d0300ffffff0000c800c8000091ffff'
bulb_handle = 0x002b
DBusGMainLoop(set_as_default=True)
bulb = init_bulb()
if __name__ == '__main__':
main() | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
288,
10885,
198,
11748,
640,
198,
11748,
25064,
198,
6738,
288,
10885,
13,
12417,
26268,
13,
4743,
571,
1330,
360,
16286,
38,
13383,
39516,
198,
6738,
308,
72,
13,
260,
1930,
... | 2.143426 | 251 |
import uuid
import graphql
import github
__all__ = [
"HTTPClient",
]
| [
11748,
334,
27112,
198,
198,
11748,
4823,
13976,
198,
198,
11748,
33084,
628,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
40717,
11792,
1600,
198,
60,
198
] | 2.516129 | 31 |
from setuptools import setup, find_packages
setup(name="qtstyles",
version="0.0.3",
install_requires=[
"QtPy>=1.4.1"
],
description="A collection of Qt style sheets and helpful classes for applying them.",
long_description=open("README.md").read(),
# https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
package_data={"qtstyles": ["style_sheets/*.qss"]}, # include style sheets
author="Simon Garisch",
author_email="gatman946@gmail.com",
url="https://github.com/simongarisch/qtstyles",
packages=find_packages()
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
201,
198,
201,
198,
40406,
7,
3672,
2625,
39568,
47720,
1600,
201,
198,
220,
220,
220,
220,
220,
2196,
2625,
15,
13,
15,
13,
18,
1600,
201,
198,
220,
220,
220,
220,
220,
27... | 2.399254 | 268 |
import numpy as np
import scipy as sp
import scipy.misc
'''
im = scipy.misc.lena()
factors = [2, 4, 8, 16, 32]
'''
im = imread('hummingbird.png')
#im = im[0:120, 0:120]
#factors = [2, 3, 4, 5, 6]
im = im[54:, 103:-104]
factors = [2, 3, 8, 17, 32]
im = sum(im, 2)
figure('image'), clf()
subplot(2, 3, 1)
imshow(im)
for i, b in enumerate(factors):
subplot(2, 3, i+2)
i = rebin(im, (b, b))
imshow(i)
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
629,
541,
88,
13,
44374,
198,
198,
7061,
6,
198,
320,
796,
629,
541,
88,
13,
44374,
13,
75,
8107,
3419,
198,
22584,
669,
796,
685,
17,
11,
604,
... | 1.975728 | 206 |
# -*- coding: utf-8 -*-
# This is a test file intended to be used with pytest
# pytest automatically runs all the function starting with "test_"
# see https://docs.pytest.org for more information
from dummy_module import dummy_function
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
770,
318,
257,
1332,
2393,
5292,
284,
307,
973,
351,
12972,
9288,
198,
2,
12972,
9288,
6338,
4539,
477,
262,
2163,
3599,
351,
366,
9288,
62,
1,
198,
2,
766,
3740,... | 3.552239 | 67 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CLIP model configuration """
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json",
# See all CLIP models at https://huggingface.co/models?filter=clip
}
class CLIPTextConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPModel`]. It is used to
instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`CLIPModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
`"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float``, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import CLIPTextModel, CLIPTextConfig
>>> # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPTextConfig()
>>> # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clip_text_model"
class CLIPVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPModel`]. It is used to
instantiate an CLIP model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the CLIP
[openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
`"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float``, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import CLIPVisionModel, CLIPVisionConfig
>>> # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration
>>> configuration = CLIPVisionConfig()
>>> # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration
>>> model = CLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clip_vision_model"
class CLIPConfig(PretrainedConfig):
r"""
[`CLIPConfig`] is the configuration class to store the configuration of a
[`CLIPModel`]. It is used to instantiate CLIP model according to the specified arguments,
defining the text model and vision model configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model
outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
text_config_dict (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPTextConfig`].
vision_config_dict (`dict`, *optional*):
Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
Dimentionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
"""
model_type = "clip"
is_composition = True
@classmethod
def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
r"""
Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and
clip vision model configuration.
Returns:
[`CLIPConfig`]: An instance of a configuration object
"""
return cls(text_config_dict=text_config.to_dict(), vision_config_dict=vision_config.to_dict(), **kwargs)
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default
[`~PretrainedConfig.to_dict`].
Returns:
`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
12905,
2667,
32388,
3457,
13,
1074,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
34... | 2.935771 | 3,254 |
import gym
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
import torch.nn as nn
import random
#Initializing models and hyper parameters
env = gym.envs.make("CartPole-v1")
env.reset()
#Initializing policy parameters
numStates = 4
numActions = 2
hiddenDim = 50
lr = 0.0005
model = DQN(numStates, hiddenDim, numActions)
optim = torch.optim.Adam(list(model.parameters()), lr)
#Initialzing parameters
criterion = nn.L1Loss()
numEpisodes = 200
gamma=0.9
epsilon=0.3
eps_decay=0.99
# Training
optimize()
#Saving the model
| [
11748,
11550,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,... | 2.456067 | 239 |
import json
from json import JSONDecodeError
| [
11748,
33918,
198,
6738,
33918,
1330,
19449,
10707,
1098,
12331,
628,
628,
198
] | 3.769231 | 13 |
from rest_framework import status
from rest_framework.response import Response
from threepio import logger
from api.v2.exceptions import failure_response
from api.v2.serializers.details import UserAllocationSourceSerializer
from api.v2.views.base import AuthModelViewSet
from core.models import (
AllocationSource, UserAllocationSource, AtmosphereUser, EventTable)
class UserAllocationSourceViewSet(AuthModelViewSet):
"""
API endpoint that allows scripts to be viewed or edited.
"""
queryset = UserAllocationSource.objects.all()
serializer_class = UserAllocationSourceSerializer
search_fields = ("^title")
lookup_fields = ("allocation_source__uuid", "id")
http_method_names = ['options', 'head', 'get', 'post', 'delete']
def get_queryset(self):
"""
Get user allocation source relationship
"""
# user = self.get_object()
return UserAllocationSource.objects.all() # filter(user__uuid=user)
# @detail_route(methods=['get'])
# def user(self,request,pk=None):
# user = AtmosphereUser.objects.filter(uuid=pk).last()
# return Response([AllocationSourceSerializer(i.allocation_source,context={'request':request}).data for i in UserAllocationSource.objects.filter(user=user)])
#
# helper methods
# validations
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
294,
260,
538,
952,
1330,
49706,
198,
198,
6738,
40391,
13,
85,
17,
13,
1069,
11755,
1330,
5287,
62,
26209,
198,
6738,
40391,
13,
85,
... | 2.959821 | 448 |
from django.urls import path, include
from .views import (
UserListAPIView, ClientListAPIView, ClientFolderListAPIView,
DocumentListAPIView, UserClientAPIView, UserClientDetailAPIView,
FolderClientAPIView, FolderAPIView, DocumentAPIView, DocumentDetailAPIView,
user_by_name
)
urlpatterns = [
path('all', UserListAPIView.as_view(), name='all_clients'),
path('clients/', user_by_name, name='clients_by_name'),
path('clients', UserClientAPIView.as_view(), name='clients'),
path(
'clients/<int:pk>/',
UserClientDetailAPIView.as_view(),
name='clients_detail'
),
path('clients/all', ClientListAPIView.as_view(), name='client_list'),
path('folders/', FolderAPIView.as_view(), name='folders'),
path(
'folders/<int:pk>/',
FolderClientAPIView.as_view(),
name='folders_detail'
),
path('folders/all', ClientFolderListAPIView.as_view(), name='folder_list'),
path('documents/', DocumentAPIView.as_view(), name='documents'),
path(
'documents/<int:pk>',
DocumentDetailAPIView.as_view(),
name='documents_detail'
),
path('documents/all', DocumentListAPIView.as_view(), name='documents_list'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
11787,
8053,
2969,
3824,
769,
11,
20985,
8053,
2969,
3824,
769,
11,
20985,
41092,
8053,
2969,
3824,
769,
11,
198,
220,
220,
... | 2.396078 | 510 |
import concurrent.futures
from typing import Optional
from discord import Guild, Role
from discord import utils
from discord.ext import commands, tasks
from requests.exceptions import HTTPError
from twitch import TwitchClient
from SECRET import twitch_client_id
from models import UserData
from utils.database import get_guild_settings
client: TwitchClient = TwitchClient(client_id=twitch_client_id)
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
streamerids = {}
| [
11748,
24580,
13,
69,
315,
942,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
36446,
1330,
16446,
11,
20934,
198,
6738,
36446,
1330,
3384,
4487,
198,
6738,
36446,
13,
2302,
1330,
9729,
11,
8861,
198,
6738,
7007,
13,
1069,
11755,
1330,
... | 3.804688 | 128 |
version = "3.40"
| [
9641,
796,
366,
18,
13,
1821,
1,
198
] | 2.125 | 8 |
# main.py -- Chapter 7 - Gesture Controller
##################################################################################
# Title : Gesture Controller Test Application
# Filename : main.py
# Author : JWB
# Origin Date : 01/07/2019
# Version : 1.0.0
# Copyright : Jacob Beningo
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY BENINGO EMBEDDED GROUP "AS IS" AND ANY EXPRESSED
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL BENINGO EMBEDDED GROUP OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
##################################################################################
import micropython # For emergency exception buffer
from pyb import I2C # For i2c bus access
from APDS_9960 import APDS_9960 # Gesture control driver
import utime
# Buffer for interrupt error messages
micropython.alloc_emergency_exception_buf(100)
############################################################
# Application Constants
############################################################
############################################################
# Application Variables
############################################################
# Setup the MCU and application code to starting conditions
# The blue LED will start on, the yellow LED will be off
############################################################
#
# Start script execution ...
#
############################################################
# Initialize the system
System_Init()
# Create a uart object, uart4, and setup the serial parameters
i2c = I2C(1) # create on bus 1
i2c = I2C(1, I2C.MASTER) # create and init as a master
i2c.init(I2C.MASTER, baudrate=400000) # init as a master
# Initialize the pins that will be used for LED control
LED_Forward = pyb.Pin('PD14', pyb.Pin.OUT_PP)
LED_Backward = pyb.Pin('PB0', pyb.Pin.OUT_PP)
LED_Left = pyb.Pin('PB4', pyb.Pin.OUT_PP)
LED_Right = pyb.Pin('PA3', pyb.Pin.OUT_PP)
# Set the LED's initial state to off
LED_Forward.value(1)
LED_Backward.value(1)
LED_Left.value(1)
LED_Right.value(1)
# Initialize the gesture driver and disable debug messages
Gesture = APDS_9960(i2c, False)
GestureDetected = False
GestureDetectedTime = utime.ticks_ms()
# Main application loop
while True:
Result = Gesture.Detect()
# Determine if there has been a validated gesture, if so tell us!
if Result == APDS_9960.GESTURE_LEFT:
GestureDetected = True
GestureDetectedTime = utime.ticks_ms()
LED_Left.low()
print("Gesture Left!")
elif Result == APDS_9960.GESTURE_RIGHT:
GestureDetected = True
GestureDetectedTime = utime.ticks_ms()
LED_Right.low()
print("Gesture Right!")
elif Result == APDS_9960.GESTURE_FORWARD:
GestureDetected = True
GestureDetectedTime = utime.ticks_ms()
LED_Forward.low()
print("Gesture Forward!")
elif Result == APDS_9960.GESTURE_BACKWARD:
GestureDetected = True
GestureDetectedTime = utime.ticks_ms()
LED_Backward.low()
print("Gesture Backward!")
if GestureDetected is True:
if (utime.ticks_ms() - GestureDetectedTime) > 5000:
GestureDetected = False
LED_Backward.high()
LED_Forward.high()
LED_Right.high()
LED_Left.high() | [
2,
1388,
13,
9078,
1377,
7006,
767,
532,
44641,
495,
22741,
198,
29113,
29113,
14468,
2235,
198,
2,
11851,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
220,
220,
44641,
495,
22741,
6208,
1567... | 2.83878 | 1,377 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api as driver_api
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from neutron.common import constants as neutron_const
from networking_arista._i18n import _, _LI, _LE
from networking_arista.common import db
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2 import arista_ml2
from networking_arista.ml2 import sec_group_callback
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('ml2_arista', 'networking_arista.common.config')
# Messages
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
UNABLE_TO_DELETE_PORT_MSG = _('Unable to delete port from EOS')
UNABLE_TO_DELETE_DEVICE_MSG = _('Unable to delete device')
# Constants
INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID'
PORT_BINDING_HOST = 'binding:host_id'
MECHANISM_DRV_NAME = 'arista'
class AristaDriver(driver_api.MechanismDriver):
"""Ml2 Mechanism driver for Arista networking hardware.
Remembers all networks and VMs that are provisioned on Arista Hardware.
Does not send network provisioning request if the network has already been
provisioned before for the given port.
"""
def create_network_precommit(self, context):
"""Remember the tenant, and network information."""
network = context.current
segments = context.network_segments
if not self.rpc.hpb_supported():
# Hierarchical port binding is not supported by CVX, only
# allow VLAN network type.
if(segments and
segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
return
network_id = network['id']
tenant_id = network['tenant_id'] or INTERNAL_TENANT_ID
with self.eos_sync_lock:
db_lib.remember_tenant(tenant_id)
for segment in segments:
db_lib.remember_network_segment(tenant_id,
network_id,
segment.get('segmentation_id'),
segment.get('id'))
def create_network_postcommit(self, context):
"""Provision the network on the Arista Hardware."""
network = context.current
network_id = network['id']
network_name = network['name']
tenant_id = network['tenant_id'] or INTERNAL_TENANT_ID
segments = context.network_segments
shared_net = network['shared']
with self.eos_sync_lock:
if db_lib.is_network_provisioned(tenant_id, network_id):
try:
network_dict = {
'network_id': network_id,
'segments': segments,
'network_name': network_name,
'shared': shared_net}
self.rpc.create_network(tenant_id, network_dict)
except arista_exc.AristaRpcError as err:
LOG.error(_LE("create_network_postcommit: Did not create "
"network %(name)s. Reason: %(err)s"),
{'name': network_name, 'err': err})
else:
LOG.info(_LI('Network %s is not created as it is not found in '
'Arista DB'), network_id)
def update_network_precommit(self, context):
"""At the moment we only support network name change
Any other change in network is not supported at this time.
We do not store the network names, therefore, no DB store
action is performed here.
"""
new_network = context.current
orig_network = context.original
if new_network['name'] != orig_network['name']:
LOG.info(_LI('Network name changed to %s'), new_network['name'])
def update_network_postcommit(self, context):
"""At the moment we only support network name change
If network name is changed, a new network create request is
sent to the Arista Hardware.
"""
new_network = context.current
orig_network = context.original
if ((new_network['name'] != orig_network['name']) or
(new_network['shared'] != orig_network['shared'])):
network_id = new_network['id']
network_name = new_network['name']
tenant_id = new_network['tenant_id'] or INTERNAL_TENANT_ID
shared_net = new_network['shared']
with self.eos_sync_lock:
if db_lib.is_network_provisioned(tenant_id, network_id):
try:
network_dict = {
'network_id': network_id,
'segments': context.network_segments,
'network_name': network_name,
'shared': shared_net}
self.rpc.create_network(tenant_id, network_dict)
except arista_exc.AristaRpcError as err:
LOG.error(_LE('update_network_postcommit: Did not '
'update network %(name)s. '
'Reason: %(err)s'),
{'name': network_name, 'err': err})
else:
LOG.info(_LI('Network %s is not updated as it is not found'
' in Arista DB'), network_id)
def delete_network_precommit(self, context):
"""Delete the network information from the DB."""
network = context.current
network_id = network['id']
tenant_id = network['tenant_id'] or INTERNAL_TENANT_ID
with self.eos_sync_lock:
if db_lib.is_network_provisioned(tenant_id, network_id):
if db_lib.are_ports_attached_to_network(network_id):
db_lib.forget_all_ports_for_network(network_id)
LOG.info(_LI('Deleting all ports on network %s'),
network_id)
db_lib.forget_network_segment(tenant_id, network_id)
def delete_network_postcommit(self, context):
"""Send network delete request to Arista HW."""
network = context.current
segments = context.network_segments
if not self.rpc.hpb_supported():
# Hierarchical port binding is not supported by CVX, only
# send the request if network type is VLAN.
if (segments and
segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
# If network type is not VLAN, do nothing
return
# No need to pass segments info when calling delete_network as
# HPB is not supported.
segments = []
network_id = network['id']
tenant_id = network['tenant_id'] or INTERNAL_TENANT_ID
with self.eos_sync_lock:
# Succeed deleting network in case EOS is not accessible.
# EOS state will be updated by sync thread once EOS gets
# alive.
try:
self.rpc.delete_network(tenant_id, network_id, segments)
# if necessary, delete tenant as well.
self.delete_tenant(tenant_id)
except arista_exc.AristaRpcError as err:
LOG.error(_LE('delete_network_postcommit: Did not delete '
'network %(network_id)s. Reason: %(err)s'),
{'network_id': network_id, 'err': err})
def create_port_precommit(self, context):
"""Remember the information about a VM and its ports
A VM information, along with the physical host information
is saved.
"""
# Returning from here, since the update_port_precommit is performing
# same operation, and also need of port binding information to decide
# whether to react to a port create event which is not available when
# this method is called.
return
def bind_port(self, context):
"""Bind port to a network segment.
Provisioning request to Arista Hardware to plug a host
into appropriate network is done when the port is created
this simply tells the ML2 Plugin that we are binding the port
"""
host_id = context.host
port = context.current
physnet_info = self.eapi.get_physical_network(host_id)
physnet = physnet_info.get('physnet')
switch_id = physnet_info.get('switch_id')
if not physnet or not switch_id:
if port.get('binding:vnic_type') == portbindings.VNIC_BAREMETAL:
# Find physnet using link_local_information in baremetal case
physnet = self._get_physnet_from_link_info(port, physnet_info)
else:
LOG.debug("The host %(host)s not connected to arista "
"switches. Physical Network info = %(pi)s",
{'host': host_id, 'pi': physnet_info})
return
if not physnet or not self._is_in_managed_physnets(physnet):
LOG.debug("bind_port for port %(port)s: physical_network "
"%(physnet)s is not managed by Arista "
"mechanism driver", {'port': port.get('id'),
'physnet': physnet})
return
LOG.debug("bind_port for port %(port)s: physical_network=%(physnet)s,"
"switch_id=%(swid)s", {'port': port.get('id'),
'physnet': physnet,
'swid': switch_id})
for segment in context.segments_to_bind:
if not self._is_in_managed_physnets(
segment.get(driver_api.PHYSICAL_NETWORK)):
continue
if segment[driver_api.NETWORK_TYPE] == n_const.TYPE_VXLAN:
# Check if CVX supports HPB
if not self.rpc.hpb_supported():
LOG.debug("bind_port: HPB is not supported")
return
# The physical network is connected to arista switches,
# allocate dynamic segmentation id to bind the port to
# the network that the port belongs to.
try:
next_segment = context.allocate_dynamic_segment(
{'id': context.network.current['id'],
'network_type': n_const.TYPE_VLAN,
'physical_network': physnet})
except Exception as exc:
LOG.error(_LE("bind_port for port %(port)s: Failed to "
"allocate dynamic segment for physnet "
"%(physnet)s. %(exc)s"),
{'port': port.get('id'), 'physnet': physnet,
'exc': exc})
return
LOG.debug("bind_port for port %(port)s: "
"current_segment=%(current_seg)s, "
"next_segment=%(next_seg)s",
{'port': port.get('id'), 'current_seg': segment,
'next_seg': next_segment})
context.continue_binding(segment['id'], [next_segment])
else:
# The network_type is vlan, try binding process for baremetal.
self._bind_port_to_baremetal(context, segment, physnet_info)
def create_port_postcommit(self, context):
"""Plug a physical host into a network.
Send provisioning request to Arista Hardware to plug a host
into appropriate network.
"""
# Returning from here, since the update_port_postcommit is performing
# same operation, and also need of port binding information to decide
# whether to react to a port create event which is not available when
# this method is called.
return
def _bound_segments(self, context):
"""Check if a given port is managed by the mechanism driver.
It returns bound segment dictionary, if physical network in the bound
segment is included in the managed physical network list.
"""
if not self.managed_physnets:
return [
binding_level.get(driver_api.BOUND_SEGMENT)
for binding_level in (context.binding_levels or [])
]
bound_segments = []
for binding_level in (context.binding_levels or []):
bound_segment = binding_level.get(driver_api.BOUND_SEGMENT)
if (bound_segment and
self._is_in_managed_physnets(
bound_segment.get(driver_api.PHYSICAL_NETWORK))):
bound_segments.append(bound_segment)
return bound_segments
def _handle_port_migration_precommit(self, context):
"""Handles port migration in precommit
It updates the port's new host in the DB
"""
orig_port = context.original
orig_host = context.original_host
new_host = context.host
new_port = context.current
port_id = orig_port['id']
if new_host and orig_host and new_host != orig_host:
LOG.debug("Handling port migration for: %s " % orig_port)
network_id = orig_port['network_id']
tenant_id = orig_port['tenant_id'] or INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id,
tenant_id)
device_id = new_port['device_id']
with self.eos_sync_lock:
port_provisioned = db_lib.is_port_provisioned(port_id,
orig_host)
if port_provisioned:
db_lib.update_port(device_id, new_host, port_id,
network_id, tenant_id)
return True
def _handle_port_migration_postcommit(self, context):
"""Handles port migration in postcommit
In case of port migration, it removes the port from the original host
and also it release the segment id if no port is attached to the same
segment id that the port is attached to.
"""
orig_port = context.original
orig_host = context.original_host
new_host = context.host
if new_host and orig_host and new_host != orig_host:
self._try_to_release_dynamic_segment(context, migration=True)
# Handling migration case.
# 1. The port should be unplugged from network
# 2. If segment_id is provisioned and it not bound to any port it
# should be removed from EOS.
network_id = orig_port['network_id']
tenant_id = orig_port['tenant_id'] or INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id,
tenant_id)
for binding_level in context._original_binding_levels or []:
if self._network_provisioned(
tenant_id, network_id,
segment_id=binding_level.segment_id):
with self.eos_sync_lock:
# Removing the port form original host
self._delete_port(orig_port, orig_host, tenant_id)
# If segment id is not bound to any port, then
# remove it from EOS
segment = self.ndb.get_segment_by_id(
context._plugin_context,
binding_level.segment_id)
if not segment:
try:
segment_info = [{
'id': binding_level.segment_id,
'network_id': network_id,
}]
LOG.debug("migration_postcommit:"
"deleting segment %s", segment_info)
self.rpc.delete_network_segments(tenant_id,
segment_info)
# Remove the segment from the provisioned
# network DB.
db_lib.forget_network_segment(
tenant_id, network_id,
binding_level.segment_id)
except arista_exc.AristaRpcError:
LOG.info(EOS_UNREACHABLE_MSG)
return True
def update_port_precommit(self, context):
"""Update the name of a given port.
At the moment we only support port name change.
Any other change to port is not supported at this time.
We do not store the port names, therefore, no DB store
action is performed here.
"""
new_port = context.current
orig_port = context.original
if new_port['name'] != orig_port['name']:
LOG.info(_LI('Port name changed to %s'), new_port['name'])
device_id = new_port['device_id']
host = context.host
pretty_log("update_port_precommit: new", new_port)
pretty_log("update_port_precommit: orig", orig_port)
if not self._supported_device_owner(new_port['device_owner']):
return
# Check if it is port migration case
if self._handle_port_migration_precommit(context):
return
# Check if the port is part of managed physical network
seg_info = self._bound_segments(context)
if not seg_info:
# Ignoring the update as the port is not managed by
# arista mechanism driver.
return
# device_id and device_owner are set on VM boot
port_id = new_port['id']
network_id = new_port['network_id']
tenant_id = new_port['tenant_id'] or INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id, tenant_id)
for seg in seg_info:
if not self._network_provisioned(tenant_id, network_id,
seg[driver_api.SEGMENTATION_ID],
seg[driver_api.ID]):
LOG.info(
_LI("Adding %s to provisioned network database"), seg)
with self.eos_sync_lock:
db_lib.remember_tenant(tenant_id)
db_lib.remember_network_segment(
tenant_id, network_id,
seg[driver_api.SEGMENTATION_ID],
seg[driver_api.ID])
with self.eos_sync_lock:
port_down = False
if(new_port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# We care about port status only for DVR ports because
# for DVR, a single port exists on multiple hosts. If a port
# is no longer needed on a host then the driver gets a
# port_update notification for that <port, host> with the
# port status as PORT_STATUS_DOWN.
port_down = context.status == n_const.PORT_STATUS_DOWN
if host and not port_down:
port_host_filter = None
if(new_port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# <port, host> uniquely identifies a DVR port. Other
# ports are identified by just the port id
port_host_filter = host
port_provisioned = db_lib.is_port_provisioned(
port_id, port_host_filter)
if not port_provisioned:
LOG.info("Remembering the port")
# Create a new port in the DB
db_lib.remember_tenant(tenant_id)
db_lib.remember_vm(device_id, host, port_id,
network_id, tenant_id)
else:
if(new_port['device_id'] != orig_port['device_id'] or
context.host != context.original_host or
new_port['network_id'] != orig_port['network_id'] or
new_port['tenant_id'] != orig_port['tenant_id']):
LOG.info("Updating the port")
# Port exists in the DB. Update it
db_lib.update_port(device_id, host, port_id,
network_id, tenant_id)
else: # Unbound or down port does not concern us
orig_host = context.original_host
LOG.info("Forgetting the port on %s" % str(orig_host))
db_lib.forget_port(port_id, orig_host)
def _port_updated(self, context):
"""Returns true if any port parameters have changed."""
new_port = context.current
orig_port = context.original
return (new_port['device_id'] != orig_port['device_id'] or
context.host != context.original_host or
new_port['network_id'] != orig_port['network_id'] or
new_port['tenant_id'] != orig_port['tenant_id'])
def update_port_postcommit(self, context):
"""Update the name of a given port in EOS.
At the moment we only support port name change
Any other change to port is not supported at this time.
"""
port = context.current
orig_port = context.original
device_id = port['device_id']
device_owner = port['device_owner']
host = context.host
is_vm_boot = device_id and device_owner
if not self._supported_device_owner(device_owner):
return
vnic_type = port['binding:vnic_type']
binding_profile = port['binding:profile']
bindings = []
if binding_profile:
bindings = binding_profile.get('local_link_information', [])
port_id = port['id']
port_name = port['name']
network_id = port['network_id']
tenant_id = port['tenant_id'] or INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id, tenant_id)
sg = port['security_groups']
orig_sg = orig_port['security_groups']
pretty_log("update_port_postcommit: new", port)
pretty_log("update_port_postcommit: orig", orig_port)
# Check if it is port migration case
if self._handle_port_migration_postcommit(context):
# Return from here as port migration is already handled.
return
seg_info = self._bound_segments(context)
if not seg_info:
LOG.debug("Ignoring the update as the port is not managed by "
"Arista switches.")
return
with self.eos_sync_lock:
hostname = self._host_name(host)
port_host_filter = None
if(port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# <port, host> uniquely identifies a DVR port. Other
# ports are identified by just the port id
port_host_filter = host
port_provisioned = db_lib.is_port_provisioned(port_id,
port_host_filter)
# If network does not exist under this tenant,
# it may be a shared network. Get shared network owner Id
net_provisioned = self._network_provisioned(
tenant_id, network_id)
for seg in seg_info:
if not self._network_provisioned(
tenant_id, network_id,
segmentation_id=seg[driver_api.SEGMENTATION_ID]):
net_provisioned = False
segments = []
if net_provisioned and self.rpc.hpb_supported():
segments = seg_info
all_segments = self.ndb.get_all_network_segments(
network_id, context=context._plugin_context)
try:
self.rpc.create_network_segments(
tenant_id, network_id,
context.network.current['name'], all_segments)
except arista_exc.AristaRpcError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to create network segments"))
try:
orig_host = context.original_host
port_down = False
if(port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE):
# We care about port status only for DVR ports
port_down = context.status == n_const.PORT_STATUS_DOWN
if orig_host and (port_down or host != orig_host or
device_id == neutron_const.DEVICE_ID_RESERVED_DHCP_PORT):
LOG.info("Deleting the port %s" % str(orig_port))
# The port moved to a different host or the VM
# connected to the port was deleted or its in DOWN
# state. So delete the old port on the old host.
self._delete_port(orig_port, orig_host, tenant_id)
if(port_provisioned and net_provisioned and hostname and
is_vm_boot and not port_down and
device_id != neutron_const.DEVICE_ID_RESERVED_DHCP_PORT):
LOG.info(_LI("Port plugged into network"))
# Plug port into the network only if it exists in the db
# and is bound to a host and the port is up.
self.rpc.plug_port_into_network(device_id,
hostname,
port_id,
network_id,
tenant_id,
port_name,
device_owner,
sg, orig_sg,
vnic_type,
segments=segments,
switch_bindings=bindings)
else:
LOG.info(_LI("Port not plugged into network"))
except arista_exc.AristaRpcError as err:
LOG.error(_LE('update_port_postcommit: Did not update '
'port %(port_id)s. Reason: %(err)s'),
{'port_id': port_id, 'err': err})
def delete_port_precommit(self, context):
"""Delete information about a VM and host from the DB."""
port = context.current
pretty_log("delete_port_precommit:", port)
port_id = port['id']
host_id = context.host
with self.eos_sync_lock:
if db_lib.is_port_provisioned(port_id, host_id):
db_lib.forget_port(port_id, host_id)
def delete_port_postcommit(self, context):
"""Unplug a physical host from a network.
Send provisioning request to Arista Hardware to unplug a host
from appropriate network.
"""
port = context.current
host = context.host
network_id = port['network_id']
tenant_id = port['tenant_id'] or INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id, tenant_id)
pretty_log("delete_port_postcommit:", port)
# If this port is the last one using dynamic segmentation id,
# and the segmentation id was allocated by this driver, it needs
# to be released.
self._try_to_release_dynamic_segment(context)
with self.eos_sync_lock:
try:
self._delete_port(port, host, tenant_id)
self._delete_segment(context, tenant_id)
except arista_exc.AristaRpcError:
# Can't do much if deleting a port failed.
# Log a warning and continue.
LOG.warning(UNABLE_TO_DELETE_PORT_MSG)
def _delete_port(self, port, host, tenant_id):
"""Deletes the port from EOS.
param port: Port which is to be deleted
param host: The host on which the port existed
param tenant_id: The tenant to which the port belongs to. Some times
the tenant id in the port dict is not present (as in
the case of HA router).
"""
device_id = port['device_id']
port_id = port['id']
network_id = port['network_id']
device_owner = port['device_owner']
if not self._supported_device_owner(device_owner):
return
vnic_type = port['binding:vnic_type']
binding_profile = port['binding:profile']
switch_bindings = []
if binding_profile:
switch_bindings = binding_profile.get('local_link_information', [])
sg = port['security_groups']
if not device_id or not host:
LOG.warning(UNABLE_TO_DELETE_DEVICE_MSG)
return
try:
if not self._network_provisioned(tenant_id, network_id):
# If we do not have network associated with this, ignore it
return
hostname = self._host_name(host)
self.rpc.unplug_port_from_network(device_id, device_owner,
hostname, port_id, network_id,
tenant_id, sg, vnic_type,
switch_bindings=switch_bindings)
self.rpc.remove_security_group(sg, switch_bindings)
# if necessary, delete tenant as well.
self.delete_tenant(tenant_id)
except arista_exc.AristaRpcError:
LOG.info(EOS_UNREACHABLE_MSG)
def _delete_segment(self, context, tenant_id):
"""Deletes a dynamic network segment from EOS.
param context: The port context
param tenant_id: The tenant which the port belongs to
"""
if not self.rpc.hpb_supported():
# Returning as HPB not supported by CVX
return
port = context.current
network_id = port.get('network_id')
if not context._binding_levels:
return
for binding_level in context._binding_levels:
LOG.debug("deleting segment %s", binding_level.segment_id)
if self._network_provisioned(tenant_id, network_id,
segment_id=binding_level.segment_id):
segment = self.ndb.get_segment_by_id(
context._plugin_context, binding_level.segment_id)
if not segment:
# The segment is already released. Delete it from EOS
LOG.debug("Deleting segment %s", binding_level.segment_id)
try:
segment_info = {
'id': binding_level.segment_id,
'network_id': network_id,
}
self.rpc.delete_network_segments(tenant_id,
[segment_info])
# Remove the segment from the provisioned network DB.
db_lib.forget_network_segment(
tenant_id, network_id, binding_level.segment_id)
except arista_exc.AristaRpcError:
LOG.info(EOS_UNREACHABLE_MSG)
else:
LOG.debug("Cannot delete segment_id %(segid)s "
"segment is %(seg)s",
{'segid': binding_level.segment_id,
'seg': segment})
def _try_to_release_dynamic_segment(self, context, migration=False):
"""Release dynamic segment allocated by the driver
If this port is the last port using the segmentation id allocated
by the driver, it should be released
"""
host = context.original_host if migration else context.host
physnet_info = self.eapi.get_physical_network(host)
physnet = physnet_info.get('physnet')
if not physnet:
return
binding_levels = context.binding_levels
LOG.debug("_try_release_dynamic_segment: "
"binding_levels=%(bl)s", {'bl': binding_levels})
if not binding_levels:
return
segment_id = None
bound_drivers = []
for binding_level in binding_levels:
bound_segment = binding_level.get(driver_api.BOUND_SEGMENT)
driver = binding_level.get(driver_api.BOUND_DRIVER)
bound_drivers.append(driver)
if (bound_segment and
bound_segment.get('physical_network') == physnet and
bound_segment.get('network_type') == n_const.TYPE_VLAN):
segment_id = bound_segment.get('id')
break
# If the segment id is found and it is bound by this driver, and also
# the segment id is not bound to any other port, release the segment.
# When Arista driver participate in port binding by allocating dynamic
# segment and then calling continue_binding, the driver should the
# second last driver in the bound drivers list.
if (segment_id and bound_drivers[-2:-1] == [MECHANISM_DRV_NAME]):
filters = {'segment_id': segment_id}
result = db_lib.get_port_binding_level(filters)
LOG.debug("Looking for entry with filters=%(filters)s "
"result=%(result)s ", {'filters': filters,
'result': result})
if not result:
# The requested segment_id does not exist in the port binding
# database. Release the dynamic segment.
context.release_dynamic_segment(segment_id)
LOG.debug("Released dynamic segment %(seg)s allocated "
"by %(drv)s", {'seg': segment_id,
'drv': bound_drivers[-2]})
def delete_tenant(self, tenant_id):
"""delete a tenant from DB.
A tenant is deleted only if there is no network or VM configured
configured for this tenant.
"""
objects_for_tenant = (db_lib.num_nets_provisioned(tenant_id) +
db_lib.num_vms_provisioned(tenant_id))
if not objects_for_tenant:
db_lib.forget_tenant(tenant_id)
try:
self.rpc.delete_tenant(tenant_id)
except arista_exc.AristaRpcError:
with excutils.save_and_reraise_exception():
LOG.info(EOS_UNREACHABLE_MSG)
| [
2,
15069,
357,
66,
8,
2211,
4946,
25896,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,... | 2.003294 | 18,213 |
"""
python bindings for OMNeT++
"""
# this module exposes public attributes of all relevant modules
from ._ccanvas import *
from ._chistogram import *
from ._cmessage import *
from ._cobject import *
from ._coutvector import *
from ._cpacket import *
from ._cqueue import *
from ._csimplemodule import *
from ._csimtime import *
from ._logging import *
from ._stddev import *
from ._ctimestampedvalue import *
from ._ccomponenttype import *
from ._cpsquare import *
from ._cksplit import *
from ._cdataratechannel import *
from ._ctopology import *
| [
37811,
198,
29412,
34111,
329,
32468,
8199,
51,
4880,
198,
37811,
198,
198,
2,
428,
8265,
32142,
1171,
12608,
286,
477,
5981,
13103,
198,
6738,
47540,
535,
272,
11017,
1330,
1635,
198,
6738,
47540,
354,
396,
21857,
1330,
1635,
198,
6738... | 3.395062 | 162 |
import os
import logging
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor, as_completed
from .lib.saj import StreamAnalyticsJobs
from .lib.utils import chkpath, mklog
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
18931,
198,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
11,
355,
62,
785,
16838,
198,
6738,
764,
8019,
13,
82,
1228,
1330,
13860,
37702,
140... | 3.220779 | 77 |
"""Add the share/reconstruct method to the Syft Module defined here:
https://github.com/OpenMined/PySyft/blob/dev/src/syft/lib/torch/module.py
"""
# stdlib
from collections import OrderedDict
import copy
# third party
import syft as sy
import torch
import sympc.module as sympc_module
from sympc.session import Session
from .nn import Conv2d
from .nn import Linear
MAP_TORCH_TO_SYMPC = {
"Linear": Linear,
"Conv2d": Conv2d,
}
MAP_TORCH_TO_SYMPC.update({f"{k}Pointer": v for k, v in MAP_TORCH_TO_SYMPC.items()})
get = reconstruct
for method in {share, reconstruct, get}:
if getattr(sy.Module, method.__name__, None) is not None:
raise ValueError(f"Method {method.__name__} already exists in the sy.Module")
setattr(sy.Module, method.__name__, method)
| [
37811,
4550,
262,
2648,
14,
260,
41571,
2446,
284,
262,
1632,
701,
19937,
5447,
994,
25,
198,
198,
5450,
1378,
12567,
13,
785,
14,
11505,
44,
1389,
14,
20519,
13940,
701,
14,
2436,
672,
14,
7959,
14,
10677,
14,
1837,
701,
14,
8019,
... | 2.69863 | 292 |
"""utils for file manager
"""
import os
from utils import getdate_now, randkey
def store_file(filename, chunks, filetype, chunkpath=None):
""" to store a file and return file address
"""
filepath = os.path.join('/mnt/media', filetype)
filepath = os.path.join(filepath, getdate_now().strftime('%Y%m/%d/%H%M%S'))
os.makedirs(filepath)
_, ftype = os.path.splitext(filename)
filename = randkey(length=16) + ftype
pwd = os.path.join(filepath, filename)
with open(pwd, 'wb+') as dest:
for chunk in chunks:
if chunkpath is None:
dest.write(chunk)
else:
with open(os.path.join(chunkpath, chunk), 'rb') as src:
dest.write(src.read())
return filename, pwd, os.path.getsize(pwd)
def store_chunk(key, index, file):
""" to store a chunk
"""
filepath = os.path.join('/mnt/media', 'chunks')
filepath = os.path.join(filepath, key)
with open(os.path.join(filepath, 'chunk' + str(index)), 'wb') as dest:
for chunk in file.chunks():
dest.write(chunk)
def file_iterator(filename, chunk_size=8192, offset=0, length=None):
""" get a file iterator for downloading
"""
with open(filename, 'rb') as file:
file.seek(offset, os.SEEK_SET)
remaining = length
while True:
bytes_length = chunk_size if remaining is None else min(remaining, chunk_size)
data = file.read(bytes_length)
if not data:
break
if remaining:
remaining -= len(data)
yield data
| [
37811,
26791,
329,
2393,
4706,
198,
37811,
198,
11748,
28686,
198,
198,
6738,
3384,
4487,
1330,
651,
4475,
62,
2197,
11,
43720,
2539,
198,
198,
4299,
3650,
62,
7753,
7,
34345,
11,
22716,
11,
2393,
4906,
11,
16058,
6978,
28,
14202,
259... | 2.209302 | 731 |
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import requests
| [
6738,
4328,
3849,
1330,
34270,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
198,
6738,
3992,
26230,
62,
37153,
13,
46659,
1330,
13282,
32103,
13511,
198,
11748,
19798,
292,
355,
279,
67,
220,
220,
198,
11748,
7007,
62... | 3.790698 | 43 |
#!/usr/bin/env python3
### Importing
# Importing External Packages
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import exceptions, UserNotParticipant
from pyrogram.types import Update, Message
from pymongo import MongoClient
from mega import *
from mega.errors import RequestError
# Importing inbuilt
import string
import random
import time
# Importing Credentials & Required Data
try:
from testexp.config import Config
except ModuleNotFoundError:
from config import Config
finally:
mongoSTR = Config.MONGO_STR
### Global Variable
common_text = '\n\n<b><u>If you are facing any problem😫, so report📝 at @jetbots_support</u></b>'
to_login = '<b>If you are not logged in then, send login detail in this format email,password.</b>\n'
### Connecting To Database
mongo_client = MongoClient(mongoSTR)
db_login_detail = mongo_client['MegaUploader']
collection_login = db_login_detail['login_details']
### Defining some functions
#Checking User whether he joined channel and group or not joined.
# Getting Email & Password From Database
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628,
198,
21017,
17267,
278,
198,
2,
17267,
278,
34579,
6400,
1095,
198,
6738,
12972,
39529,
13,
19199,
1330,
554,
1370,
9218,
3526,
21864,
11,
554,
1370,
9218,
3526,
9704,
929,
198,
67... | 3.442006 | 319 |
from flask import jsonify
from models.Booking import BookingDAO
from models.Person import PersonDAO
from models.Room import RoomDAO
from models.AvailablePerson import AvailablePersonDAO
from models.AvailableRoom import AvailableRoomDAO
class Booking(object):
"""
This method, as the name says, communicates with the model, which then creates a new booking entry
To do this, the controller side first checks if:
a) The room exists
b) The host and invitee/s exists
c) Both the invitee and the host are available in set timeframe
d) The room is also available in set timeframe
NOTE: Certain rooms cannot be added depending of the host's role
Breaking each part...
"""
# returns a full query of all booking entries
# Returns a single booking entry according to its id
# updates a booking entry
| [
6738,
42903,
1330,
33918,
1958,
198,
198,
6738,
4981,
13,
10482,
278,
1330,
4897,
278,
5631,
46,
198,
6738,
4981,
13,
15439,
1330,
7755,
5631,
46,
198,
6738,
4981,
13,
41178,
1330,
10096,
5631,
46,
198,
198,
6738,
4981,
13,
10493,
154... | 3.549587 | 242 |
""" Inspired by https://github.com/ahoucbvtw/RegiFamily_shiny"""
""" Detecting multiple bright spots in an image with Python and OpenCV """
# import the necessary packages
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2 | [
37811,
45827,
416,
3740,
1378,
12567,
13,
785,
14,
993,
280,
21101,
85,
4246,
14,
8081,
72,
24094,
62,
1477,
3541,
37811,
198,
37811,
35874,
278,
3294,
6016,
10222,
287,
281,
2939,
351,
11361,
290,
4946,
33538,
37227,
198,
2,
1330,
26... | 3.802632 | 76 |
# coding=utf-8
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.utils.text import capfirst, force_text
from rest_framework import viewsets, mixins, status, views as rest_views
from rest_framework.decorators import action
from rest_framework.response import Response as DRFResponse
from cla_eventlog import event_registry
from cla_eventlog.constants import LOG_LEVELS
from cla_eventlog.models import ComplaintLog
from complaints.forms import ComplaintLogForm
from core.drf.mixins import FormActionMixin, NestedGenericModelMixin
from .models import Complaint, Category
from .serializers import ComplaintSerializerBase, CategorySerializerBase, ComplaintLogSerializerBase
class ComplaintFormActionMixin(FormActionMixin):
"""
This is for backward compatibility
"""
FORM_ACTION_OBJ_PARAM = "complaint"
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
19200,
12982,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
26791,
... | 3.569721 | 251 |
# by Kami Bigdely
# Extract class
full_names = ['Elizabeth Debicki', 'Jim Carrey']
birth_year = [1990, 1962]
movies = [['Tenet', 'Vita & Virgina', 'Guardians of the Galexy', 'The Great Gatsby'],\
['Ace Ventura', 'The Mask', 'Dubm and Dumber', 'The Truman Show', 'Yes Man']]
email = ['deb@makeschool.com', 'jim@makeschool.com']
for i, value in enumerate(email):
person = Person(full_names[i], birth_year[i], email[i], movies[i])
if birth_year[i] > 1985:
print(full_names[i])
print('Movies Played: ', end='')
for m in person.movies:
print(m, end=', ')
print()
person.send_hiring_email() | [
2,
416,
509,
6277,
4403,
2934,
306,
198,
2,
29677,
1398,
198,
198,
12853,
62,
14933,
796,
37250,
43568,
8965,
624,
72,
3256,
705,
18050,
1879,
4364,
20520,
198,
24280,
62,
1941,
796,
685,
19891,
11,
20033,
60,
198,
76,
20526,
796,
1... | 2.342857 | 280 |
class Vector:
"""
A class for vector calculations in an n dimensional vector space.
Supports addition and multiplying with a scalar.
"""
def add(self, z2):
"""
Adds the vector z2 to this vector and returns the result.
"""
assert(len(self.entries) == len(z2.entries))
return Vector([self.entries[i] + z2.entries[i] for i in range(len(self.entries))])
def scalar(self, a):
"""
Scales this vector by a and returns the result.
"""
return Vector([x*a for x in self.entries])
x = Vector([0, 2, -3, 5])
y = Vector([1, 2, 3, 4])
print("Vector:")
print(f"x = {x.entries}")
print(f"y = {y.entries}")
print(f"x + y = {x.add(y).entries}")
print(f"x * 3 = {x.scalar(3).entries}")
class VectorPlus(Vector): # Extend Vector class.
"""
Extension of the Vector class that implements the inner and outer vector product.
"""
def inner(self, z2):
"""
Calculates the inner vector product.
"""
assert(len(self.entries) == len(z2.entries))
return sum([self.entries[i] * z2.entries[i] for i in range(len(self.entries))])
def outer(self, z2):
"""
Calculates the outer vector product.
"""
return VectorPlus([[x * y for y in z2.entries] for x in self.entries])
a = VectorPlus([0, 2, -3, 5])
b = VectorPlus([1, 2, 3, 4])
print("\nVectorPlus:")
print(f"a = {a.entries}")
print(f"b = {b.entries}")
print(f"a inner b = {a.inner(b)}")
print(f"a outer b = {a.outer(b).entries}") | [
4871,
20650,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
1398,
329,
15879,
16765,
287,
281,
299,
38517,
15879,
2272,
13,
198,
220,
220,
220,
45267,
3090,
290,
48816,
351,
257,
16578,
283,
13,
198,
220,
220,
220,
37227,
6... | 2.29037 | 675 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from cec2013.cec2013 import *
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
import xlwt
# standard DE
# standard CDE
# NCDE
CEC_benchmark(benchmark_fun_set=range(1,21)) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
269,
721,
6390,
13,
344,
66,
6390,
1330,
1635,
198,
11748,
2603,
2... | 2.571429 | 119 |
from colorsys import rgb_to_hls, hls_to_rgb
""" USEFUL FUNCTIONS """
""" CONSTANTS """
# Background color of app
BG_COLOR = "#292929"
# Background color of Song Tables
STBG_COLOR = "#06603d"
# Widget color
W_COLOR = "#84bd00"
# White (green) color
WHITE = "#e1f0e1"
# Black (green) color
BLACK = "#0a330a"
# Font for titles
TITLE_FONT = "Georgia"
# Non-black font color used
FONT_COLOR = "#888888"
""" GLOBALS """
# Every QPushButton
BUTTON_STYLE = """
QPushButton {{
background-color: {w_color};
font-weight: 800;
font-size: 16px;
color: {black};
border: 1px solid {bg_color};
border-radius: 3px;
padding: 5px 10px 5px 10px;
}}
QPushButton:pressed {{
border-style: inset;
background-color: {pressed_color};
}}
""".format(
w_color=W_COLOR, black=BLACK, bg_color=BG_COLOR, pressed_color=darken(W_COLOR, 1.5)
)
# Every QComboBox
COMBOBOX_STYLE = """
QComboBox {{
background-color: {w_color};
selection-background-color: {select_color};
selection-color: #aaaaaa;
font-weight: 800;
color: {black};
border-radius: 5px;
height: 19px;
}}
QComboBox QAbstractItemView {{
background-color: {item_color};
selection-background-color: {select_bg_color};
selection-color: {select_color};
color: {black};
}}
QComboBox::drop-down {{
background-color: {arrow_bg_color};
border: 1px solid {arrow_border_color};
border-radius: 2px;
height: 13px;
width: 30px;
right: 3px;
top: 2px;
}}
QComboBox::down-arrow {{
image: url(down_arrow.png);
height: 9px;
}}
""".format(
w_color=W_COLOR,
item_color=WHITE,
select_bg_color=lighten(W_COLOR, 2),
select_color=darken(BLACK, 2),
black=BLACK,
arrow_bg_color=lighten(W_COLOR, 1.2),
arrow_border_color=darken(W_COLOR, 1.1),
)
# Every LineEdit
LINEEDIT_STYLE = """
QLineEdit {{
background-color: {w_color};
color: {black};
}}
""".format(
w_color=W_COLOR, black=darken(BLACK, 2)
)
# Every QTableWidget (including SongDataTableWidget objects) and a little extra
# for the SimpleFilterArtistsTable in the Simple Filter tab (bigger text)
TABLEWIDGET_STYLE = """
QTableWidget, SimpleFilterArtistsTable {{
background-color: {stbg_color};
selection-background-color: {select_color};
selection-color: {white};
font-size: 15px;
font-weight: 450;
}}
QHeaderView::section, SimpleFilterArtistsTable::section {{
background-color: {white};
font-weight: 600;
font-size: 17px;
height: 30px;
}}
SimpleFilterArtistsTable {{
font-size: 20px;
font-weight: 500;
}}
""".format(
stbg_color=STBG_COLOR, select_color=darken(STBG_COLOR, 2), white=WHITE
)
## How we do this? vvvvv
## HOW TO STYLE THE UNSELECTABLE ITEMS OF QTABLEWIDGET
## MAKE HEADERS OF QTABLEWIDGET UNSELECTABLE
""" TAB-SPECIFIC """
# Advanced Search QGroupBoxes for Filter Playlists tab
AdvFilterPlaylistStyle = """
QGroupBox::title {{
color: {white};
padding: 0 8px 6px 4px;
left: 15px;
}}
QCheckBox {{
color: {white};
}}
{button}
{combobox}
{tablewidget}
{lineedit}
""".format(
white=WHITE,
button=BUTTON_STYLE,
combobox=COMBOBOX_STYLE,
tablewidget=TABLEWIDGET_STYLE,
lineedit=LINEEDIT_STYLE,
)
# Simple Search for Filter Playlist tab
SimpleFilterPlaylistStyle = """
QLabel {{
color: {white};
font-size: 12px;
font-weight: 600;
}}
{button}
{combobox}
{tablewidget}
""".format(
white=WHITE,
button=BUTTON_STYLE,
combobox=COMBOBOX_STYLE,
tablewidget=TABLEWIDGET_STYLE,
)
# Playlist Songs tab
PlaylistSongsStyle = """
{button}
{combobox}
{tablewidget}
""".format(
button=BUTTON_STYLE, combobox=COMBOBOX_STYLE, tablewidget=TABLEWIDGET_STYLE
)
# Queue Maker tab
QueueMakerStyle = """
{button}
{tablewidget}
""".format(
button=BUTTON_STYLE, tablewidget=TABLEWIDGET_STYLE
)
# Tab bar/widgets
TabStyle = """
QTabWidget::pane {{
border-top: 3px solid #888888;
}}
QTabWidget::tab-bar {{
left: 7px;
}}
QTabWidget > QWidget {{
background-color: {bg_color};
border-radius: 4px;
}}
QTabBar::tab {{
border-top-left-radius: 4px;
border-top-right-radius: 4px;
padding: 4px;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,
stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);
}}
QTabBar::tab::selected {{
background: #aaaaaa;
}}
""".format(
bg_color=BG_COLOR, font_color=FONT_COLOR
)
# Login Popup
LoginStyle = """
QDialog {{
background-color: {bg_color};
}}
QLabel {{
font-size: 25px;
font-family: {title_font};
font-weight: 300;
color: {font_color}
}}
QLineEdit {{
background-color: #bebebe;
border-radius: 4px;
font-size: 15px;
color: {black};
}}
{button}
""".format(
bg_color=BG_COLOR,
title_font=TITLE_FONT,
font_color=FONT_COLOR,
button=BUTTON_STYLE,
black=darken(BLACK, 2),
)
NewPlaylistStyle = """
QDialog {{
background-color: {bg_color};
}}
QLabel {{
font-size: 25px;
font-family: {title_font};
font-weight: 300;
color: {font_color};
}}
{lineedit}
{button}
""".format(
bg_color=BG_COLOR,
title_font=TITLE_FONT,
font_color=FONT_COLOR,
lineedit=LINEEDIT_STYLE,
button=BUTTON_STYLE,
)
| [
6738,
7577,
893,
1330,
46140,
62,
1462,
62,
71,
7278,
11,
289,
7278,
62,
1462,
62,
81,
22296,
198,
198,
37811,
23210,
46476,
29397,
4177,
11053,
37227,
628,
628,
198,
37811,
7102,
2257,
1565,
4694,
37227,
198,
2,
25353,
3124,
286,
598... | 2.319965 | 2,269 |
from flask import Blueprint, render_template
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/') | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
198,
6738,
42903,
81,
13,
9945,
1330,
651,
62,
9945,
628,
198,
46583,
796,
39932,
10786,
14036,
3256,
11593,
3672,
834,
8,
628,
198,
31,
46583,
13,
38629,
10786,
14,
11537
] | 3.205128 | 39 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
from math import pi
from numpy import sign, nan, append, zeros, max, array, power, sqrt
from pandas import Series, DataFrame, concat
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def from_ppc(ppc, f_hz=50, validate_conversion=False):
    """
    This function converts pypower case files to pandapower net structure.
    INPUT:
        **ppc** : The pypower case file.
    OPTIONAL:
        **f_hz** (float, 50) - The frequency of the network.
        **validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
    OUTPUT:
        **net** : pandapower net.
    EXAMPLE:
        import pandapower.converter as pc
        from pypower import case4gs
        ppc_net = case4gs.case4gs()
        pp_net = pc.from_ppc(ppc_net, f_hz=60)
    """
    # --- catch common failures
    # bus column 9 holds the base voltage (baseKV); a non-positive value cannot
    # produce a valid vn_kv below.
    if Series(ppc['bus'][:, 9] <= 0).any():
        logger.info('There are false baseKV given in the pypower case file.')
    # --- general_parameters
    baseMVA = ppc['baseMVA']  # MVA
    omega = pi * f_hz  # 1/s
    # Stand-in used wherever ppc encodes "no limit" as a rating of 0 (rateA).
    MAX_VAL = 99999.
    net = pp.create_empty_network(f_hz=f_hz)
    # --- bus data -> create buses, sgen, load, shunt
    for i in range(len(ppc['bus'])):
        # create buses (ppc bus type 4 is converted as out of service)
        pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
                      zone=ppc['bus'][i, 6], in_service=bool(ppc['bus'][i, 1] != 4),
                      max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
        # create sgen, load: positive demand -> load, negative demand -> sgen,
        # pure reactive demand -> load with p = 0; MW/MVAr scaled to kW/kvar.
        if ppc['bus'][i, 2] > 0:
            pp.create_load(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
                           controllable=False)
        elif ppc['bus'][i, 2] < 0:
            pp.create_sgen(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
                           type="", controllable=False)
        elif ppc['bus'][i, 3] != 0:
            pp.create_load(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
                           controllable=False)
        # create shunt (ppc susceptance sign is flipped for q_kvar)
        if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
            pp.create_shunt(net, i, p_kw=ppc['bus'][i, 4] * 1e3,
                            q_kvar=-ppc['bus'][i, 5] * 1e3)
    # unused data of ppc: Vm, Va (partwise: in ext_grid), zone
    # --- gen data -> create ext_grid, gen, sgen
    # gen_lookup records, per ppc gen row, which pandapower element it became;
    # used later to attach gencost data to the right element.
    gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
                           index=range(len(ppc['gen'][:, 0])))
    for i in range(len(ppc['gen'])):
        # if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
        if len(ppc["gen"].shape) == 1:
            ppc["gen"] = array(ppc["gen"], ndmin=2)
        current_bus_idx = pp.get_element_index(net, 'bus', name=int(ppc['gen'][i, 0]))
        current_bus_type = int(ppc['bus'][current_bus_idx, 1])
        # create ext_grid for the slack bus (type 3); note the sign flip:
        # ppc generation (positive MW) becomes negative p_kw here.
        if current_bus_type == 3:
            # only one ext_grid per bus — any further slack-bus generator is
            # demoted to the sgen branch below by resetting current_bus_type.
            if len(pp.get_connected_elements(net, 'ext_grid', current_bus_idx)) > 0:
                logger.info('At bus %d an ext_grid already exists. ' % current_bus_idx +
                            'Because of that generator %d ' % i +
                            'is converted not as an ext_grid but as a sgen')
                current_bus_type = 1
            else:
                gen_lookup.element.loc[i] = pp.create_ext_grid(
                    net, bus=current_bus_idx, vm_pu=ppc['gen'][i, 5],
                    va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
                    max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
                    max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3)
                gen_lookup.element_type.loc[i] = 'ext_grid'
                if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
                    logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
                if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
                    logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
        # create gen for PV buses (type 2)
        elif current_bus_type == 2:
            gen_lookup.element.loc[i] = pp.create_gen(
                net, bus=current_bus_idx, vm_pu=ppc['gen'][i, 5], p_kw=-ppc['gen'][i, 1] * 1e3,
                in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
                max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
                max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3)
            gen_lookup.element_type.loc[i] = 'gen'
            if ppc['gen'][i, 1] < 0:
                logger.info('p_kw of gen %d must be less than zero but is not.' % i)
            if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
                logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
            if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
                logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
        # create sgen for PQ buses (type 1) and demoted extra slack generators
        if current_bus_type == 1:
            gen_lookup.element.loc[i] = pp.create_sgen(
                net, bus=current_bus_idx, p_kw=-ppc['gen'][i, 1] * 1e3,
                q_kvar=-ppc['gen'][i, 2] * 1e3, type="", in_service=bool(ppc['gen'][i, 7] > 0),
                max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
                max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3,
                controllable=True)
            gen_lookup.element_type.loc[i] = 'sgen'
            if ppc['gen'][i, 1] < 0:
                logger.info('p_kw of sgen %d must be less than zero but is not.' % i)
            if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
                logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
            if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
                logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
    # unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
    # Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
    # --- branch data -> create line, trafo
    for i in range(len(ppc['branch'])):
        from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
        to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
        from_vn_kv = ppc['bus'][from_bus, 9]
        to_bus_vn_kv_is_same = None  # (no-op placeholder removed — see below)
        to_vn_kv = ppc['bus'][to_bus, 9]
        # A branch is a line only if it connects equal voltage levels with a
        # ratio of 0 or 1 and no phase shift; otherwise it is a transformer.
        if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
           (ppc['branch'][i, 9] == 0):
            Zni = ppc['bus'][to_bus, 9]**2/baseMVA  # ohm
            max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
            if max_i_ka == 0.0:
                max_i_ka = MAX_VAL
                logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
                             "maximum branch flow")
            pp.create_line_from_parameters(
                net, from_bus=from_bus, to_bus=to_bus, length_km=1,
                r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
                c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
                max_i_ka=max_i_ka, type='ol',
                in_service=bool(ppc['branch'][i, 10]))
        else:
            # hv/lv orientation follows the higher base voltage; the tap side
            # is placed on the side the ppc "from" bus ended up on.
            if from_vn_kv >= to_vn_kv:
                hv_bus = from_bus
                vn_hv_kv = from_vn_kv
                lv_bus = to_bus
                vn_lv_kv = to_vn_kv
                tp_side = 'hv'
            else:
                hv_bus = to_bus
                vn_hv_kv = to_vn_kv
                lv_bus = from_bus
                vn_lv_kv = from_vn_kv
                tp_side = 'lv'
                if from_vn_kv == to_vn_kv:
                    logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
                                   ' as a transformer because of a ratio != 0 | 1 but it connects '
                                   'the same voltage level', i, ppc['branch'][i, 0],
                                   ppc['branch'][i, 1])
            rk = ppc['branch'][i, 2]
            xk = ppc['branch'][i, 3]
            zk = (rk ** 2 + xk ** 2) ** 0.5
            sn = ppc['branch'][i, 5] * 1e3
            if sn == 0.0:
                sn = MAX_VAL
                logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
                             "apparent power")
            # tap ratio expressed as percent deviation from nominal (ratio 1)
            ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
            i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA * 1e3 / sn
            if i0_percent < 0:
                logger.info('A transformer always behaves inductive consumpting but the '
                            'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
                            'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
            pp.create_transformer_from_parameters(
                net, hv_bus=hv_bus, lv_bus=lv_bus, sn_kva=sn, vn_hv_kv=vn_hv_kv,
                vn_lv_kv=vn_lv_kv, vsc_percent=sign(xk) * zk * sn / 1e3, vscr_percent=rk * sn / 1e3,
                pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
                tp_st_percent=abs(ratio_1) if ratio_1 else nan,
                tp_pos=sign(ratio_1) if ratio_1 else nan,
                tp_side=tp_side if ratio_1 else None, tp_mid=0 if ratio_1 else nan)
    # unused data of ppc: rateB, rateC
    # --- gencost -> create polynomial_cost, piecewise_cost
    if 'gencost' in ppc:
        if len(ppc['gencost'].shape) == 1:
            # reshape gencost if only one gencost is given -> no indexError
            ppc['gencost'] = ppc['gencost'].reshape((1, ppc['gencost'].shape[0]))
        # Rows up to the gen count describe P costs; extra rows (if any) are Q
        # costs for the corresponding generators.
        if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
            idx_p = range(ppc['gencost'].shape[0])
            idx_q = []
        elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
            idx_p = range(gen_lookup.shape[0])
            idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
        if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
            idx_p = range(gen_lookup.shape[0])
            idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
        # _create_costs is a module-level helper defined elsewhere in this file.
        for idx in idx_p:
            _create_costs(net, ppc, gen_lookup, 'p', idx)
        for idx in idx_q:
            _create_costs(net, ppc, gen_lookup, 'q', idx)
    # areas are unconverted
    if validate_conversion:
        logger.setLevel(logging.DEBUG)
        if not validate_from_ppc(ppc, net):
            logger.error("Validation failed.")
    return net
def validate_from_ppc(ppc_net, pp_net, max_diff_values={
        "vm_pu": 1e-6, "va_degree": 1e-5, "p_branch_kw": 1e-3, "q_branch_kvar": 1e-3, "p_gen_kw": 1e-3,
        "q_gen_kvar": 1e-3}):
    """
    This function validates the pypower case files to pandapower net structure conversion via a \
    comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
    INPUT:
        **ppc_net** - The pypower case file which already contains the pypower powerflow results.
        **pp_net** - The pandapower network.
    OPTIONAL:
        **max_diff_values** - Dict of maximal allowed difference values. The keys must be
        'vm_pu', 'va_degree', 'p_branch_kw', 'q_branch_kvar', 'p_gen_kw' and 'q_gen_kvar' and
        the values floats.
    OUTPUT:
        **conversion_success** - conversion_success is returned as False if pypower or pandapower
        cannot calculate a powerflow or if the maximum difference values (max_diff_values )
        cannot be hold.
    EXAMPLE:
        import pandapower.converter as pc
        pp_net = cv.from_ppc(ppc_net, f_hz=50)
        conversion_success = cv.validate_from_ppc(ppc_net, pp_net)
    NOTE:
        The user has to take care that the loadflow results already are included in the provided \
        ppc_net.
    """
    # NOTE(review): max_diff_values uses a mutable default dict. It is only
    # read in this function, so this is harmless, but a None default with an
    # in-function fallback would be the safer idiom.
    # --- check pypower powerflow success, if possible
    ppc_success = True
    if 'success' in ppc_net.keys():
        if ppc_net['success'] != 1:
            ppc_success = False
            logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
                         "'ppc_net['success'] != 1'")
    # branch columns 13:17 (the flow results) must exist for the comparison
    if (ppc_net['branch'].shape[1] < 17):
        ppc_success = False
        logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
    # --- try to run a pandapower powerflow
    # Escalating fallbacks: dc init -> flat init -> default solver options.
    try:
        pp.runpp(pp_net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
    except pp.LoadflowNotConverged:
        try:
            pp.runpp(pp_net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
        except pp.LoadflowNotConverged:
            try:
                pp.runpp(pp_net, trafo_model="pi")
            except pp.LoadflowNotConverged:
                logger.error('The pandapower powerflow does not converge.')
                return False
    # --- prepare powerflow result comparison by reordering pp results as they are in ppc results
    if (ppc_success) & (pp_net.converged):
        # --- store pypower powerflow results
        ppc_res_branch = ppc_net['branch'][:, 13:17]
        ppc_res_bus = ppc_net['bus'][:, 7:9]
        ppc_res_gen = ppc_net['gen'][:, 1:3]
        # --- pandapower bus result table
        pp_res_bus = array(pp_net.res_bus[['vm_pu', 'va_degree']])
        # --- pandapower gen result table
        pp_res_gen = zeros([1, 2])
        # consideration of parallel generators via storing how much generators have been considered
        # each node
        already_used_gen = Series(zeros([pp_net.bus.shape[0]]), index=pp_net.bus.index).astype(int)
        GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
        # buses whose reactive power must be compared as a sum (parallel units)
        change_q_compare = []
        for i, j in GENS.iterrows():
            current_bus_idx = pp.get_element_index(pp_net, 'bus', name=j[0])
            current_bus_type = int(ppc_net['bus'][current_bus_idx, 1])
            # ext_grid (first unit at a slack bus; extra units were converted
            # to sgen by from_ppc, hence the else branch)
            if current_bus_type == 3:
                if already_used_gen.at[current_bus_idx] == 0:
                    pp_res_gen = append(pp_res_gen, array(pp_net.res_ext_grid[
                        pp_net.ext_grid.bus == current_bus_idx][['p_kw', 'q_kvar']])[
                        already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
                    already_used_gen.at[current_bus_idx] += 1
                else:
                    pp_res_gen = append(pp_res_gen, array(pp_net.res_sgen[
                        pp_net.sgen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
                        already_used_gen.at[current_bus_idx]-1].reshape((1, 2)), 0)
                    already_used_gen.at[current_bus_idx] += 1
                    change_q_compare += [j[0]]
            # gen
            elif current_bus_type == 2:
                pp_res_gen = append(pp_res_gen, array(pp_net.res_gen[
                    pp_net.gen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
                    already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
                if already_used_gen.at[current_bus_idx] > 0:
                    change_q_compare += [j[0]]
                already_used_gen.at[current_bus_idx] += 1
            # sgen
            elif current_bus_type == 1:
                pp_res_gen = append(pp_res_gen, array(pp_net.res_sgen[
                    pp_net.sgen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
                    already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
                already_used_gen.at[current_bus_idx] += 1
        pp_res_gen = pp_res_gen[1:, :]  # delete initial zero row
        # --- pandapower branch result table
        pp_res_branch = zeros([1, 4])
        # consideration of parallel branches via storing how much branches have been considered
        # each node-to-node-connection
        init1 = concat([pp_net.line.from_bus, pp_net.line.to_bus], axis=1).drop_duplicates()
        init2 = concat([pp_net.trafo.hv_bus, pp_net.trafo.lv_bus], axis=1).drop_duplicates()
        init1['hv_bus'] = nan
        init1['lv_bus'] = nan
        init2['from_bus'] = nan
        init2['to_bus'] = nan
        already_used_branches = concat([init1, init2], axis=0)
        already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
        BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]])
        for i in BRANCHES.index:
            from_bus = pp.get_element_index(pp_net, 'bus', name=int(ppc_net['branch'][i, 0]))
            to_bus = pp.get_element_index(pp_net, 'bus', name=int(ppc_net['branch'][i, 1]))
            from_vn_kv = ppc_net['bus'][from_bus, 9]
            to_vn_kv = ppc_net['bus'][to_bus, 9]
            ratio = BRANCHES[2].at[i]
            angle = BRANCHES[3].at[i]
            # from line results (same branch classification rule as in from_ppc)
            if (from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1)) & (angle == 0):
                pp_res_branch = append(pp_res_branch, array(pp_net.res_line[
                    (pp_net.line.from_bus == from_bus) &
                    (pp_net.line.to_bus == to_bus)]
                    [['p_from_kw', 'q_from_kvar', 'p_to_kw', 'q_to_kvar']])[
                    int(already_used_branches.number.loc[
                        (already_used_branches.from_bus == from_bus) &
                        (already_used_branches.to_bus == to_bus)].values)].reshape(1, 4), 0)
                already_used_branches.number.loc[(already_used_branches.from_bus == from_bus) &
                                                 (already_used_branches.to_bus == to_bus)] += 1
            # from trafo results
            else:
                if from_vn_kv >= to_vn_kv:
                    pp_res_branch = append(pp_res_branch, array(pp_net.res_trafo[
                        (pp_net.trafo.hv_bus == from_bus) &
                        (pp_net.trafo.lv_bus == to_bus)]
                        [['p_hv_kw', 'q_hv_kvar', 'p_lv_kw', 'q_lv_kvar']])[
                        int(already_used_branches.number.loc[
                            (already_used_branches.hv_bus == from_bus) &
                            (already_used_branches.lv_bus == to_bus)].values)].reshape(1, 4), 0)
                    already_used_branches.number.loc[(already_used_branches.hv_bus == from_bus) &
                                                     (already_used_branches.lv_bus == to_bus)] += 1
                else:  # switch hv-lv-connection of pypower connection buses
                    pp_res_branch = append(pp_res_branch, array(pp_net.res_trafo[
                        (pp_net.trafo.hv_bus == to_bus) &
                        (pp_net.trafo.lv_bus == from_bus)]
                        [['p_lv_kw', 'q_lv_kvar', 'p_hv_kw', 'q_hv_kvar']])[
                        int(already_used_branches.number.loc[
                            (already_used_branches.hv_bus == to_bus) &
                            (already_used_branches.lv_bus == from_bus)].values)].reshape(1, 4), 0)
                    already_used_branches.number.loc[
                        (already_used_branches.hv_bus == to_bus) &
                        (already_used_branches.lv_bus == from_bus)] += 1
        pp_res_branch = pp_res_branch[1:, :]  # delete initial zero row
        # --- do the powerflow result comparison
        # ppc results are in MW/MVAr, pandapower's in kW/kvar — hence the 1e-3
        # factors; the '+' for gen reflects the opposite sign conventions.
        diff_res_bus = ppc_res_bus - pp_res_bus
        diff_res_branch = ppc_res_branch - pp_res_branch * 1e-3
        diff_res_gen = ppc_res_gen + pp_res_gen * 1e-3
        # comparison of buses with several generator units only as q sum
        GEN_uniq = GENS.drop_duplicates()
        for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index:
            next_is = GEN_uniq.index[GEN_uniq.index > i]
            if len(next_is) > 0:
                next_i = next_is[0]
            else:
                next_i = GENS.index[-1] + 1
            if (next_i - i) > 1:
                diff_res_gen[i:next_i, 1] = sum(diff_res_gen[i:next_i, 1])
        # logger info
        logger.debug("Maximum voltage magnitude difference between pypower and pandapower: "
                     "%.2e pu" % max(abs(diff_res_bus[:, 0])))
        logger.debug("Maximum voltage angle difference between pypower and pandapower: "
                     "%.2e degree" % max(abs(diff_res_bus[:, 1])))
        logger.debug("Maximum branch flow active power difference between pypower and pandapower: "
                     "%.2e kW" % max(abs(diff_res_branch[:, [0, 2]] * 1e3)))
        logger.debug("Maximum branch flow reactive power difference between pypower and "
                     "pandapower: %.2e kVAr" % max(abs(diff_res_branch[:, [1, 3]] * 1e3)))
        logger.debug("Maximum active power generation difference between pypower and pandapower: "
                     "%.2e kW" % max(abs(diff_res_gen[:, 0] * 1e3)))
        logger.debug("Maximum reactive power generation difference between pypower and pandapower: "
                     "%.2e kVAr" % max(abs(diff_res_gen[:, 1] * 1e3)))
        # Heuristic: bus/branch results agree but gen results differ a lot ->
        # likely a ppc-side issue; only log, do not fail.
        if (max(abs(diff_res_bus[:, 0])) < 1e-3) & (max(abs(diff_res_bus[:, 1])) < 1e-3) & \
                (max(abs(diff_res_branch[:, [0, 2]])) < 1e-3) & \
                (max(abs(diff_res_branch[:, [1, 3]])) < 1e-3) & \
                (max(abs(diff_res_gen)) > 1e-1).any():
            logger.debug("The active/reactive power generation difference possibly results "
                         "because of a pypower error. Please validate "
                         "the results via pypower loadflow.")  # this occurs e.g. at ppc case9
        # give a return
        if isinstance(max_diff_values, dict):
            if Series(['q_gen_kvar', 'p_branch_kw', 'q_branch_kvar', 'p_gen_kw', 'va_degree',
                       'vm_pu']).isin(Series(list(max_diff_values.keys()))).all():
                return (max(abs(diff_res_bus[:, 0])) < max_diff_values['vm_pu']) & \
                       (max(abs(diff_res_bus[:, 1])) < max_diff_values['va_degree']) & \
                       (max(abs(diff_res_branch[:, [0, 2]])) < max_diff_values['p_branch_kw'] /
                        1e3) & \
                       (max(abs(diff_res_branch[:, [1, 3]])) < max_diff_values['q_branch_kvar'] /
                        1e3) & \
                       (max(abs(diff_res_gen[:, 0])) < max_diff_values['p_gen_kw'] / 1e3) & \
                       (max(abs(diff_res_gen[:, 1])) < max_diff_values['q_gen_kvar'] / 1e3)
            else:
                logger.debug('Not all requried dict keys are provided.')
        else:
            logger.debug("'max_diff_values' must be a dict.")
            # NOTE(review): on the malformed-max_diff_values paths (and when the
            # powerflows disagree on success) the function falls through and
            # implicitly returns None rather than False — confirm callers only
            # test truthiness.
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1584,
12,
5539,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
3086,
6682,
290,
198,
2,
6682,
4482,
8987,
357,
40,
54,... | 1.855515 | 12,375 |
# Mapping of task names to their numeric task ids.
# NOTE(review): the ids are sparse (3, 6, 7, 9 are unused here) — presumably
# they mirror an external service's enumeration; confirm before renumbering.
TASKS = {
    "binary_classification": 1,
    "multi_class_classification": 2,
    "entity_extraction": 4,
    "extractive_question_answering": 5,
    "summarization": 8,
    "single_column_regression": 10,
    "speech_recognition": 11,
}
# Task identifiers used for the datasets-style task naming scheme.
DATASETS_TASKS = ["text-classification", "question-answering-extractive"]
| [
51,
1921,
27015,
796,
1391,
198,
220,
220,
220,
366,
39491,
62,
4871,
2649,
1298,
352,
11,
198,
220,
220,
220,
366,
41684,
62,
4871,
62,
4871,
2649,
1298,
362,
11,
198,
220,
220,
220,
366,
26858,
62,
2302,
7861,
1298,
604,
11,
198... | 2.492063 | 126 |
#!/Users/robertpoenaru/.pyenv/shims/python
"""Quick demo of reshape_matrix.Shaper: truncate and extend a 5x7 matrix."""
import reshape_matrix as sh
import numpy as np

n_rows, n_cols = 5, 7
shaper = sh.Shaper(n_rows, n_cols)

# Row-major matrix holding the values 1..35.
matrix = np.arange(n_rows * n_cols).reshape(n_rows, n_cols) + 1
print(matrix)

# Truncate with arguments (2, 2) — see reshape_matrix.Shaper for the semantics.
truncated = shaper.Truncate(matrix, 2, 2)
print(truncated)

# Extend with arguments (1, 3).
extended = shaper.Extend(matrix, 1, 3)
print(extended)
| [
2,
48443,
14490,
14,
305,
4835,
79,
6571,
11493,
11757,
9078,
24330,
14,
1477,
12078,
14,
29412,
198,
198,
11748,
27179,
1758,
62,
6759,
8609,
355,
427,
198,
11748,
299,
32152,
355,
45941,
198,
198,
45,
796,
642,
198,
44,
796,
767,
... | 2.074627 | 134 |
# Copyright (c) 2014, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from __future__ import (absolute_import, division, print_function)#, unicode_literals)
import unittest
from nix import *
| [
2,
15069,
357,
66,
8,
1946,
11,
2679,
13782,
259,
18982,
873,
19081,
357,
38,
12,
19667,
8,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
... | 3.517857 | 112 |
"""
Taken from https://github.com/maartenbreddels/ipyvolume/pull/178
"""
import numpy as np
import matplotlib.cm
import matplotlib.colors
from ipyvolume import TransferFunction
def linear_transfer_function(
    color, min_opacity=0, max_opacity=0.05, reverse_opacity=False, n_elements=256
):
    """Build a single-color transfer function with a linear opacity ramp.

    :param color: List-like RGB (components within 0-1), or a string with a
        hexadecimal or named color.
    :param min_opacity: Opacity at the start of the ramp, default 0.0
        (lowest possible value 0.0), optional.
    :param max_opacity: Opacity at the end of the ramp, default 0.05
        (highest possible value 1.0), optional.
    :param reverse_opacity: Make opacity decrease linearly instead, optional.
    :param n_elements: Number of rows in the rgba transfer-function table.
    :type color: list-like or string
    :type min_opacity: float, int
    :type max_opacity: float, int
    :type reverse_opacity: bool
    :type n_elements: int
    :return: transfer_function
    :rtype: ipyvolume TransferFunction

    :Example:
    >>> import ipyvolume as ipv
    >>> green_tf = ipv.transfer_function.linear_transfer_function('green')
    >>> ds = ipv.datasets.aquariusA2.fetch()
    >>> ipv.volshow(ds.data[::4,::4,::4], tf=green_tf)
    >>> ipv.show()
    .. seealso:: matplotlib_transfer_function()
    """
    red, green, blue = matplotlib.colors.to_rgb(color)
    alpha = np.linspace(min_opacity, max_opacity, num=n_elements)
    if reverse_opacity:
        alpha = np.flip(alpha, axis=0)
    # One rgba row per element: constant color channels, varying opacity.
    rgba = np.empty((n_elements, 4))
    rgba[:, 0] = red
    rgba[:, 1] = green
    rgba[:, 2] = blue
    rgba[:, 3] = alpha
    return TransferFunction(rgba=rgba)
def matplotlib_transfer_function(
    colormap_name,
    min_opacity=0,
    max_opacity=0.05,
    reverse_colormap=False,
    reverse_opacity=False,
    n_elements=256,
):
    """Build a transfer function from a named matplotlib colormap.

    :param colormap_name: Name of a matplotlib colormap.
    :param min_opacity: Opacity at the start of the ramp, default 0
        (lowest possible value 0), optional.
    :param max_opacity: Opacity at the end of the ramp, default 0.05
        (highest possible value 1.0), optional.
    :param reverse_colormap: Use the colormap back-to-front, optional.
    :param reverse_opacity: Make opacity decrease linearly instead, optional.
    :param n_elements: Number of rows in the rgba transfer-function table.
    :type colormap_name: str
    :type min_opacity: float, int
    :type max_opacity: float, int
    :type reverse_colormap: bool
    :type reverse_opacity: bool
    :type n_elements: int
    :return: transfer_function
    :rtype: ipyvolume TransferFunction

    :Example:
    >>> import ipyvolume as ipv
    >>> rgb = (0, 255, 0)  # RGB value for green
    >>> green_tf = ipv.transfer_function.matplotlib_transfer_function('bone')
    >>> ds = ipv.datasets.aquariusA2.fetch()
    >>> ipv.volshow(ds.data[::4,::4,::4], tf=green_tf)
    >>> ipv.show()
    .. seealso:: linear_transfer_function()
    """
    cmap = matplotlib.cm.get_cmap(name=colormap_name)
    # Sampling the colormap at n evenly spaced points yields an (n, 4) array.
    rgba = cmap(np.linspace(0, 1, n_elements))
    if reverse_colormap:
        rgba = rgba[::-1]
    alpha = np.linspace(min_opacity, max_opacity, num=n_elements)
    if reverse_opacity:
        alpha = alpha[::-1]
    # Colormaps come fully opaque; substitute our own opacity ramp.
    rgba[:, -1] = alpha
    return TransferFunction(rgba=rgba)
| [
37811,
198,
51,
1685,
422,
3740,
1378,
12567,
13,
785,
14,
2611,
23996,
65,
26504,
1424,
14,
541,
88,
29048,
14,
31216,
14,
23188,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
11215,
198,
11748,
2... | 2.611474 | 1,377 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import unittest2 as unittest
from pants.birds.duck.ttypes import Duck
from pants.birds.goose.ttypes import Goose
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
1946,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
1... | 3.007246 | 138 |
from collections import Counter
from operator import itemgetter
# Price table keyed by SKU letter:
#   price : unit price
#   offer : multi-buy price breaks, e.g. 3 x 'A' for 130
#   free  : promotion data ('sku' granted per 'cnt' bought) — presumably
#           "buy cnt, get sku free"; confirm against the checkout logic.
INVENTORY = {
    'A': {'price': 50, 'offer': [{'cnt': 3, 'price': 130}, {'cnt': 5, 'price': 200}]},
    'B': {'price': 30, 'offer': [{'cnt': 2, 'price': 45}], 'free': {'sku': 'E', 'cnt': 2}},
    'C': {'price': 20},
    'D': {'price': 15},
    'E': {'price': 40},
    'F': {'price': 10, 'free': {'sku': 'F', 'cnt': 2}},
    'G': {'price': 20},
    'H': {'price': 10, 'offer': [{'cnt': 5, 'price': 45}, {'cnt': 10, 'price': 80}]},
    'I': {'price': 35},
    'J': {'price': 60},
    'K': {'price': 70, 'offer': [{'cnt': 2, 'price': 120}]},
    'L': {'price': 90},
    'M': {'price': 15, 'free': {'sku': 'N', 'cnt': 3}},
    'N': {'price': 40},
    'O': {'price': 10},
    'P': {'price': 50, 'offer': [{'cnt': 5, 'price': 200}]},
    'Q': {'price': 30, 'offer': [{'cnt': 3, 'price': 80}], 'free': {'sku': 'R', 'cnt': 3}},
    'R': {'price': 50},
    'S': {'price': 20},
    'T': {'price': 20},
    'U': {'price': 40, 'free': {'sku': 'U', 'cnt': 3}},
    'V': {'price': 50, 'offer': [{'cnt': 2, 'price': 90}, {'cnt': 3, 'price': 130}]},
    'W': {'price': 20},
    'X': {'price': 17},
    'Y': {'price': 20},
    'Z': {'price': 21}
}
# SKUs that presumably share a group offer — confirm against checkout logic.
GROUP_SKUS = ['S', 'T', 'X', 'Y', 'Z']

# NOTE(review): `checkout` is not defined in this file as shown; this call
# raises NameError at import time unless it is defined elsewhere — confirm.
checkout('AAA')
| [
198,
6738,
17268,
1330,
15034,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
198,
1268,
53,
3525,
15513,
796,
1391,
198,
220,
220,
220,
705,
32,
10354,
1391,
6,
20888,
10354,
2026,
11,
705,
47895,
10354,
685,
90,
6,
66,
429,
10354,
... | 1.959502 | 642 |
# Generated by Django 2.2.6 on 2019-11-06 13:24
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
1157,
12,
3312,
1511,
25,
1731,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
201,
198,
201,
198
] | 2.567568 | 37 |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import sys
import unittest
from tests.common_functions import create_abstract_model, \
add_components_and_load_data
from tests.project.operations.common_functions import \
get_project_operational_timepoints
# Location of the shared gridpath test data, relative to this test file.
TEST_DATA_DIRECTORY = \
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")

# Import prerequisite modules: gridpath modules that must be loaded before
# the module under test can build its model components.
PREREQUISITE_MODULE_NAMES = [
    "temporal.operations.timepoints", "temporal.operations.horizons",
    "temporal.investment.periods", "geography.load_zones",
    "geography.load_following_up_balancing_areas", "project",
    "project.capacity.capacity"]
NAME_OF_MODULE_BEING_TESTED = \
    "project.operations.reserves.lf_reserves_up"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package='gridpath')
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)

# Import the module we'll test
try:
    MODULE_BEING_TESTED = import_module("." + NAME_OF_MODULE_BEING_TESTED,
                                        package='gridpath')
except ImportError:
    print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED +
          " to test.")
    # Exit like the prerequisite-import loop above does; continuing would
    # leave MODULE_BEING_TESTED undefined and raise a confusing NameError
    # at class-definition time below.
    sys.exit(1)
class TestLFReservesUpProvision(unittest.TestCase):
    """
    Unit tests for the lf_reserves_up module: model construction, data
    loading, and the contents of the loaded sets/params.
    """
    def test_add_model_components(self):
        """
        Test that there are no errors when adding model components
        :return:
        """
        create_abstract_model(prereq_modules=IMPORTED_PREREQ_MODULES,
                              module_to_test=MODULE_BEING_TESTED,
                              test_data_dir=TEST_DATA_DIRECTORY,
                              subproblem="",
                              stage=""
                              )

    def test_load_model_data(self):
        """
        Test that data are loaded with no errors
        :return:
        """
        add_components_and_load_data(prereq_modules=IMPORTED_PREREQ_MODULES,
                                     module_to_test=MODULE_BEING_TESTED,
                                     test_data_dir=TEST_DATA_DIRECTORY,
                                     subproblem="",
                                     stage=""
                                     )

    def test_data_loaded_correctly(self):
        """
        Test that the data loaded are as expected
        :return:
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage=""
        )
        instance = m.create_instance(data)

        # Set: LF_RESERVES_UP_PROJECTS
        # Expected values below mirror the fixtures in TEST_DATA_DIRECTORY.
        expected_projects = sorted([
            "Gas_CCGT", "Gas_CCGT_New", "Gas_CCGT_New_Binary", "Gas_CCGT_z2",
            "Battery", "Battery_Binary", "Battery_Specified", "Hydro", "Hydro_NonCurtailable"
        ])
        actual_projects = sorted([
            prj for prj in instance.LF_RESERVES_UP_PROJECTS
        ])
        self.assertListEqual(expected_projects, actual_projects)

        # Param: lf_reserves_up_zone
        expected_reserves_zone = OrderedDict(sorted(
            {"Gas_CCGT": "Zone1", "Gas_CCGT_New": "Zone1",
             "Gas_CCGT_New_Binary": "Zone1",
             "Gas_CCGT_z2": "Zone2",
             "Battery": "Zone1", "Battery_Binary": "Zone1",
             "Battery_Specified": "Zone1", "Hydro": "Zone1",
             "Hydro_NonCurtailable": "Zone1"}.items()
        )
        )
        actual_reserves_zone = OrderedDict(sorted(
            {prj: instance.lf_reserves_up_zone[prj]
             for prj in instance.LF_RESERVES_UP_PROJECTS}.items()
        )
        )
        self.assertDictEqual(expected_reserves_zone, actual_reserves_zone)

        # Set: LF_RESERVES_UP_PRJ_OPR_TMPS
        expected_prj_op_tmps = sorted(
            get_project_operational_timepoints(expected_projects)
        )
        actual_prj_op_tmps = sorted([
            (prj, tmp) for (prj, tmp) in
            instance.LF_RESERVES_UP_PRJ_OPR_TMPS
        ])
        self.assertListEqual(expected_prj_op_tmps, actual_prj_op_tmps)

        # Param: lf_reserves_up_derate (defaults to 1 if not specified)
        expected_derate = OrderedDict(sorted(
            {"Battery": 1, "Battery_Binary": 1, "Battery_Specified": 0.5,
             "Gas_CCGT": 1,
             "Gas_CCGT_New": 1, "Gas_CCGT_New_Binary": 1, "Gas_CCGT_z2": 1,
             "Hydro": 1, "Hydro_NonCurtailable": 1}.items()
        )
        )
        actual_derate = OrderedDict(sorted(
            {prj: instance.lf_reserves_up_derate[prj]
             for prj in instance.LF_RESERVES_UP_PROJECTS}.items()
        )
        )
        self.assertDictEqual(expected_derate, actual_derate)

        # Param: lf_reserves_up_reserve_to_energy_adjustment
        # (defaults to 0 if not specified)
        expected_adjustment = OrderedDict(sorted(
            {"Zone1": 0.1, "Zone2": 0}.items()
        )
        )
        actual_adjustment = OrderedDict(sorted(
            {z: instance.
                lf_reserves_up_reserve_to_energy_adjustment[z]
             for z in instance.LF_RESERVES_UP_ZONES}.items()
        )
        )
        self.assertDictEqual(expected_adjustment, actual_adjustment)
if __name__ == "__main__":
    # Run this module's tests via the unittest CLI when executed directly.
    unittest.main()
| [
2,
15069,
1584,
12,
42334,
4518,
36891,
30437,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,... | 2.057467 | 2,993 |
import f3b
import f3b.datas_pb2
import f3b.custom_params_pb2
import f3b.animations_kf_pb2
import f3b.physics_pb2
from . import Relations
from ..F3bContext import *
from ..Utils import *
from .. import Logger as log
from ..tools import F3bLod
from ..Mesh import *;
| [
11748,
277,
18,
65,
198,
11748,
277,
18,
65,
13,
19608,
292,
62,
40842,
17,
198,
11748,
277,
18,
65,
13,
23144,
62,
37266,
62,
40842,
17,
198,
11748,
277,
18,
65,
13,
11227,
602,
62,
74,
69,
62,
40842,
17,
198,
11748,
277,
18,
... | 2.394958 | 119 |
# Copyright (C) 2006-2018, Stefan Schwarzer <sschwarzer@sschwarzer.net>
# and ftputil contributors (see `doc/contributors.txt`)
# See the file LICENSE for licensing terms.
"""
ftp_stat_cache.py - cache for (l)stat data
"""
import time
import ftputil.error
import ftputil.lrucache
# This module shouldn't be used by clients of the ftputil library.
__all__ = []
class StatCache:
"""
Implement an LRU (least-recently-used) cache.
`StatCache` objects have an attribute `max_age`. After this duration after
_setting_ it a cache entry will expire. For example, if you code
my_cache = StatCache()
my_cache.max_age = 10
my_cache["/home"] = ...
the value my_cache["/home"] can be retrieved for 10 seconds. After that,
the entry will be treated as if it had never been in the cache and should
be fetched again from the remote host.
Note that the `__len__` method does no age tests and thus may include some
or many already expired entries.
"""
# Default number of cache entries
_DEFAULT_CACHE_SIZE = 5000
def enable(self):
"""
Enable storage of stat results.
"""
self._enabled = True
def disable(self):
"""
Disable the cache. Further storage attempts with `__setitem__` won't
have any visible effect.
Disabling the cache only effects new storage attempts. Values stored
before calling `disable` can still be retrieved unless disturbed by a
`resize` command or normal cache expiration.
"""
# `_enabled` is set via calling `enable` in the constructor.
# pylint: disable=attribute-defined-outside-init
self._enabled = False
def resize(self, new_size):
"""
Set number of cache entries to the integer `new_size`. If the new size
is smaller than the current cache size, relatively long-unused elements
will be removed.
"""
self._cache.size = new_size
def _age(self, path):
"""
Return the age of a cache entry for `path` in seconds. If the path
isn't in the cache, raise a `CacheMissError`.
"""
try:
return time.time() - self._cache.mtime(path)
except ftputil.lrucache.CacheKeyError:
raise ftputil.error.CacheMissError(
"no entry for path {} in cache".format(path)
)
def clear(self):
"""
Clear (invalidate) all cache entries.
"""
self._cache.clear()
def invalidate(self, path):
"""
Invalidate the cache entry for the absolute `path` if present. After
that, the stat result data for `path` can no longer be retrieved, as if
it had never been stored.
If no stat result for `path` is in the cache, do _not_ raise an
exception.
"""
# XXX: To be 100 % sure, this should be `host.sep`, but I don't want to
# introduce a reference to the `FTPHost` object for only that purpose.
assert path.startswith("/"), "{} must be an absolute path".format(path)
try:
del self._cache[path]
except ftputil.lrucache.CacheKeyError:
# Ignore errors
pass
def __getitem__(self, path):
"""
Return the stat entry for the `path`. If there's no stored stat entry
or the cache is disabled, raise `CacheMissError`.
"""
if not self._enabled:
raise ftputil.error.CacheMissError("cache is disabled")
# Possibly raise a `CacheMissError` in `_age`
if (self.max_age is not None) and (self._age(path) > self.max_age):
self.invalidate(path)
raise ftputil.error.CacheMissError(
"entry for path {} has expired".format(path)
)
else:
# XXX: I don't know if this may raise a `CacheMissError` in case of
# race conditions. I prefer robust code.
try:
return self._cache[path]
except ftputil.lrucache.CacheKeyError:
raise ftputil.error.CacheMissError(
"entry for path {} not found".format(path)
)
def __setitem__(self, path, stat_result):
"""
Put the stat data for the absolute `path` into the cache, unless it's
disabled.
"""
assert path.startswith("/")
if not self._enabled:
return
self._cache[path] = stat_result
def __contains__(self, path):
"""
Support for the `in` operator. Return a true value, if data for `path`
is in the cache, else return a false value.
"""
try:
# Implicitly do an age test which may raise `CacheMissError`.
self[path]
except ftputil.error.CacheMissError:
return False
else:
return True
#
# The following methods are only intended for debugging!
#
def __len__(self):
"""
Return the number of entries in the cache. Note that this may include
some (or many) expired entries.
"""
return len(self._cache)
def __str__(self):
"""
Return a string representation of the cache contents.
"""
lines = []
for key in sorted(self._cache):
lines.append("{}: {}".format(key, self[key]))
return "\n".join(lines)
| [
2,
15069,
357,
34,
8,
4793,
12,
7908,
11,
28842,
29726,
9107,
1279,
824,
354,
5767,
9107,
31,
824,
354,
5767,
9107,
13,
3262,
29,
198,
2,
290,
10117,
1996,
346,
20420,
357,
3826,
4600,
15390,
14,
3642,
2455,
669,
13,
14116,
63,
8,... | 2.418408 | 2,249 |
################################################
# Script to read the results of Clustering.cpp #
################################################
from ROOT import TCanvas, TFile, gStyle
from Isis.plot_tools import ColorList
gStyle.SetOptTitle( 0 )
# Gets the scatter plots from the file
ifile = TFile.Open( "Clustering.root" )
lst = ifile.GetListOfKeys()
graphs = []
for kw in lst:
graphs.append( ifile.Get( kw.GetName() + '/gr' ) )
# Minimum and maximum values for each axis
mnx, mxx = -15, 15
mny, mxy = -10, 10
graphs[ 0 ].SetMaximum( mxy )
graphs[ 0 ].SetMinimum( mny )
# Formats the different objects to be drawn
colorlst = ColorList()
for el, col in zip( graphs, colorlst ):
el.SetMarkerColor( col )
el.SetLineColor( col )
el.SetMarkerStyle( 6 )
el.GetXaxis().SetLimits( mnx, mxx )
# Draws the different cluster points in the same canvas
canvas = TCanvas()
graphs[ 0 ].Draw("AP")
for el in graphs[ 1: ]:
el.Draw("SAMEP")
| [
29113,
14468,
198,
2,
12327,
284,
1100,
262,
2482,
286,
1012,
436,
1586,
13,
20322,
1303,
198,
29113,
14468,
198,
198,
6738,
15107,
2394,
1330,
309,
6090,
11017,
11,
309,
8979,
11,
308,
21466,
198,
6738,
16530,
13,
29487,
62,
31391,
1... | 2.854599 | 337 |
import cv2
import numpy as np
import glob
import os
import argparse
def get_noise(img, value=10):
'''
#生成噪声图像
value= 大小控制雨滴的多少
'''
noise = np.random.uniform(0, 256, img.shape[0:2])
# 控制噪声水平,取浮点数,只保留最大的一部分作为噪声
v = value * 0.01
noise[np.where(noise < (256 - v))] = 0
# 噪声做初次模糊
k = np.array([[0, 0.1, 0],
[0.1, 8, 0.1],
[0, 0.1, 0]])
noise = cv2.filter2D(noise, -1, k)
return noise
def rain_blur(noise, length=10, angle=0, w=1):
'''
将噪声加上运动模糊,模仿雨滴
noise:输入噪声图,shape = img.shape[0:2]
length: 对角矩阵大小,表示雨滴的长度
angle: 倾斜的角度,逆时针为正
w: 雨滴大小
'''
# 这里由于对角阵自带45度的倾斜,逆时针为正,所以加了-45度的误差,保证开始为正
trans = cv2.getRotationMatrix2D((length / 2, length / 2), angle - 45, 1 - length / 100.0)
dig = np.diag(np.ones(length)) # 生成对焦矩阵
k = cv2.warpAffine(dig, trans, (length, length)) # 生成模糊核
k = cv2.GaussianBlur(k, (w, w), 0) # 高斯模糊这个旋转后的对角核,使得雨有宽度
# k = k / length #是否归一化
blurred = cv2.filter2D(noise, -1, k) # 用刚刚得到的旋转后的核,进行滤波
# 转换到0-255区间
cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
blurred = np.array(blurred, dtype=np.uint8)
return blurred
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SAPNet_train_test")
parser.add_argument("--input_dir", type=str, default='C:/Users/Lebron/Desktop/CUSTOM_IMAGE/CityScape150/*.png')
parser.add_argument("--output_dir", type=str, default='D:/Code/AAAI_2022/results/CityScape150_rain')
parser.add_argument("--noise", type=int, default=500)
parser.add_argument("--rain_len", type=int, default=50)
parser.add_argument("--rain_angle", type=int, default=-30) # negative means rain streaks leans left
parser.add_argument("--rain_thickness", type=int, default=3)
parser.add_argument("--alpha", type=float, default=0.7)
config = parser.parse_args()
for file in glob.glob(config.input_dir):
process(img_name = file,
out_dir = config.output_dir,
noise = config.noise,
rain_len = config.rain_len,
rain_angle = config.rain_angle,
rain_thickness = config.rain_thickness,
alpha = config.alpha
)
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
198,
4299,
651,
62,
3919,
786,
7,
9600,
11,
1988,
28,
940,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
22... | 1.610724 | 1,436 |
from .fermi import * # NOQA
| [
6738,
764,
2232,
11632,
1330,
1635,
220,
1303,
8005,
48,
32,
198
] | 2.416667 | 12 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide some examples using GMR.
See also the `gmr.py` example beforehand. In this example, we delve a bit deeper into GMR using 2D letters as training
data.
"""
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
from pyrobolearn.models.gmm import Gaussian, GMM, plot_gmm, plot_gmr
# load the training data
G = loadmat('../../data/2Dletters/G.mat') # dict
demos = G['demos'] # shape (1,N)
n_demos = demos.shape[1]
dim = demos[0, 0][0, 0][0].shape[0]
length = demos[0, 0][0, 0][0].shape[1]
# plot the training data (x,y)
X = []
xlim, ylim = [-10, 10], [-10, 10]
plt.title("Training Data")
plt.xlim(xlim)
plt.ylim(ylim)
for i in range(0, n_demos, 2):
demo = demos[0, i][0, 0][0] # shape (2, 200)
plt.plot(demo[0], demo[1])
X.append(demo.T)
plt.show()
# reshape training data (add time in addition to (x,y), thus we now have (t,x,y))
time_linspace = np.linspace(0, 2., length)
times = np.asarray([time_linspace for _ in range(len(X))]).reshape(-1, 1)
X = np.vstack(X) # shape (N*200, 2)
X = np.hstack((times, X)) # shape (N*200, 3)
print(X.shape)
# create GMM
dim, num_components = X.shape[1], 7
gmm = GMM(gaussians=[Gaussian(mean=np.concatenate((np.random.uniform(0, 2., size=1),
np.random.uniform(-8., 8., size=dim-1))),
covariance=0.1*np.identity(dim)) for _ in range(num_components)])
# init GMM
init_method = 'k-means' # 'random', 'k-means', 'uniform', 'sklearn', 'curvature'
gmm.init(X, method=init_method)
fig, ax = plt.subplots(1, 1)
plot_gmm(gmm, dims=[1, 2], X=X, ax=ax, title='GMM after ' + init_method.capitalize(), xlim=xlim, ylim=ylim)
plt.show()
# fit a GMM on it
result = gmm.fit(X, init=None, num_iters=200)
# plot EM optimization
plt.plot(result['losses'])
plt.title('EM per iteration')
plt.show()
# plot trained GMM
fig, ax = plt.subplots(1, 1)
plot_gmm(gmm, dims=[1, 2], X=X, label=True, ax=ax, title='Our Trained GMM', option=1, xlim=xlim, ylim=ylim)
plt.show()
# GMR: condition on the input variable and plot
gaussians = []
for t in time_linspace:
g = gmm.condition(t, idx_out=[1, 2], idx_in=0).approximate_by_single_gaussian()
gaussians.append(g)
# plot figures for GMR
plot_gmr(time_linspace, gaussians=gaussians, xlim=xlim, ylim=ylim)
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
15946,
485,
617,
6096,
1262,
6951,
49,
13,
198,
198,
6214,
635,
262,
4600,
39870,
81,
13,
9078,
63,
1672,
2709... | 2.222535 | 1,065 |
from mido import MidiFile
mid = MidiFile('outfile.mid')
for i,track in enumerate(mid.tracks):
# print('Track {}:{}'.format(i,track.name))
for msg in track:
if msg.type == 'key_signature':
m = str(msg)
print(m[33])
| [
6738,
3095,
78,
1330,
7215,
72,
8979,
198,
198,
13602,
220,
796,
7215,
72,
8979,
10786,
448,
7753,
13,
13602,
11537,
198,
198,
1640,
1312,
11,
11659,
287,
27056,
378,
7,
13602,
13,
46074,
2599,
198,
220,
220,
220,
220,
220,
220,
220... | 1.978571 | 140 |
import scapy.all as S
import urllib.parse as urlparse
from typing import Tuple
import codecs
WEBSITE = 'infosec.cs.tau.ac.il'
def parse_packet(packet) -> Tuple[str]:
"""
If the given packet is a login request to the course website, return the
username and password as a tuple => ('123456789', 'opensesame'). Otherwise,
return None.
Notes:
1. You can assume the entire HTTP request fits within one packet, and that
both the username and password are non-empty for login requests (if any
of the above assumptions fails, it's OK if you don't extract the
user/password - but you must still NOT crash).
2. Filter the course website using the `WEBSITE` constant from above. DO NOT
use the server IP for the filtering (as our domain may point to different
IPs later and your code should be reliable).
3. Make sure you return a tuple, not a list.
"""
# username and password will always be in the raw section
if not packet.haslayer(S.Raw):
return None
# Extract Host and Referer
raw_load = packet[S.Raw].load.decode('utf-8').split('\r\n')
host = ' '
referer = ' '
for entry in raw_load:
if entry.startswith('Host: '):
host = entry[len('Host: '):]
if entry.startswith('Referer: '):
referer = entry[len('Referer: '):]
# A login request to our WEBSITE
if host == WEBSITE and referer.endswith('login/'):
# Extract username and password
parsed = urlparse.parse_qs(raw_load[-1])
# We assume both username and password are not empty
if 'username' in parsed and 'password' in parsed:
username = ''.join(parsed['username'])
password = ''.join(parsed['password'])
# Replace \\ with \
username = codecs.decode(username, 'unicode_escape')
password = codecs.decode(password, 'unicode_escape')
return (username,password)
return None
def packet_filter(packet) -> bool:
"""
Filter to keep only HTTP traffic (port 80) from any HTTP client to any
HTTP server (not just the course website). This function should return
`True` for packets that match the above rule, and `False` for all other
packets.
Notes:
1. We are only keeping HTTP, while dropping HTTPS
2. Traffic from the server back to the client should not be kept
"""
# HTTP traffic means that the destination port is 80
if packet.haslayer(S.TCP) and packet[S.TCP].dport == 80:
return True
return False
if __name__ == '__main__':
import sys
main(sys.argv)
| [
11748,
629,
12826,
13,
439,
355,
311,
198,
11748,
2956,
297,
571,
13,
29572,
355,
19016,
29572,
198,
6738,
19720,
1330,
309,
29291,
198,
11748,
40481,
82,
628,
198,
8845,
4462,
12709,
796,
705,
10745,
577,
66,
13,
6359,
13,
83,
559,
... | 2.724458 | 969 |
# Problem Statement: https://www.hackerrank.com/challenges/validating-postalcode/problem
regex_integer_in_range = r'^[1-9]\d{5}$'
regex_alternating_repetitive_digit_pair = r'(\d)(?=\d\1)' | [
2,
20647,
21983,
25,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
12102,
803,
12,
7353,
282,
8189,
14,
45573,
198,
198,
260,
25636,
62,
41433,
62,
259,
62,
9521,
796,
374,
6,
61,
58,
16,
12,
24,
60,
59,... | 2.410256 | 78 |
# 🚨 Don't change the code below 👇
from typing import Counter
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
#Convert names to lower case
lower_name1 = name1.lower()
lower_name2 = name2.lower()
#Count TRUE
t=lower_name1.count('t')+lower_name2.count('t')
r=lower_name1.count('r')+lower_name2.count('r')
u=lower_name1.count('u')+lower_name2.count('u')
e=lower_name1.count('e')+lower_name2.count('e')
#Count LOVE
l=lower_name1.count('l')+lower_name2.count('l')
o=lower_name1.count('o')+lower_name2.count('o')
v=lower_name1.count('v')+lower_name2.count('v')
#E already counted in TRU>E<
#Dirty score calc
score = int(str(t+r+u+e)+str(l+o+v+e))
if score < 10 or score >= 90:
print(f"Your score is {score}, you go together like coke and mentos.")
elif score >= 40 and score <= 50:
print(f"Your score is {score}, you are alright together")
else:
print(f"Your score is {score}")
| [
2,
12520,
248,
101,
2094,
470,
1487,
262,
2438,
2174,
50169,
229,
198,
6738,
19720,
1330,
15034,
628,
198,
4798,
7203,
14618,
284,
262,
5896,
43597,
2474,
8,
198,
3672,
16,
796,
5128,
7203,
2061,
318,
534,
1438,
30,
3467,
77,
4943,
... | 2.569652 | 402 |
import sys, os
import numpy as np
import pickle
import argparse
# local imports
from shared import my_module as mymod
from importlib import reload
reload(mymod)
'-----------------------------------------------------------------------------------'
# Make some arrays
arr_1 = np.array([45, 6, 94, 11, 33, 79, 103, 62]) # 1D array. 8 elements.
arr_2 = np.array(np.arange(16)).reshape(8,2) # 2D array using arange. 16 integer elemets starting from 0: 8 rows, 2 columns
arr_3 = np.array(np.linspace(8, 23, 16)).reshape(2, 8) # 2D array using linspace. 14 float elements between 7 and 23 (inclusive): 2 rows, 8 columns.
arr_4 = np.zeros((4,4)) # a 2D array of 0s. 4 rows, 4 columns.
arr_5 = np.ones((2,3,4))# 3D array of ones. an array of 3 elements(?), each of which is a 4 x 5 array of 1s. 60 total elemts.
print("We made 5 arrays. Do you want to see them printed? (If so, type 'yes')")
if input('> ').lower() == 'yes':
print(f'\n1st array:\n {arr_1}, \n2nd array:\n {arr_2}, \n3rd array:\n {arr_3}, \n4th array:\n {arr_4}, \n5th array:\n {arr_5}')
else:
print("You said something other than 'Yes' so we won't print the arrays. :(")
'-----------------------------------------------------------------------------------'
# use argparse to add command-line arguments
# create the parser object
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--a_string', default='not "no"', type=str) # anything other than "no" (not case sensitive) will cause some info about the arrays to print
parser.add_argument('-b', '--b_integer', default=1, type=int) # must be an integer from from 0 to 7 because arr_3 has 8 columns
parser.add_argument('-c', '--c_float', default=900, type=float) # recommend an input such that 500 < c < 3000
# get the arguments
args = parser.parse_args()
'-----------------------------------------------------------------------------------'
# try some methods on the arrays (using the arguments from the command-line)
# Print some basic info about the arrays
if args.a_string.lower() != 'no':
print('\nYour string input for -a is', args.a_string, 'so we will print some basic info about the arrays\n',
'\tThe shape of arr_3 is:', arr_3.shape,
'\n\tThe size of arr_4 is:', arr_4.size,
'\n\tThe mean of arr_1 is:', (arr_1).mean(),
'\n\tThe minimum value in arr_2 is:', arr_2.min())
else:
print(f"\nYour string input for -a is '{args.a_string}' so we won't print the basic info about the arrays. :(")
# do some math with the arrays
# take a slice out of arr_2 (1st column) and a slice out of arr_3 ('bth' row) and add them to arr_1
arr_new = arr_1 + arr_2[:, 1] + arr_3[0, args.b_integer]
# re-shape the resulting list into an array of 4 rows, two columns, then multiply it by the transposition of arr_2
arr_new = np.reshape(arr_new, (4, 2)) @ (arr_2.T)
print(f'\nDo some math and get a new array (note you chose to use row {args.b_integer} of arr_3):\n', arr_new)
# make a new list out of the values greater than -c in array arr_new
arr_new_large_values = []
for i in range(len(arr_new)):
for j in range(len(arr_new[i])):
if arr_new[i,j] > args.c_float:
arr_new_large_values.append(arr_new[i,j])
print(f'The values in the new array that are > {args.c_float} are:\n', arr_new_large_values)
# change some of the values in arr_4
for i in range(len(arr_4)):
arr_4[i, i] = i
print('Change the values along the diagonal in arr_4:\n', arr_4)
# check if the values along the diagonal are less than 2
print('Are the values along the diagonal of arr_4 less than 2?')
for i in range(len(arr_4)):
if arr_4[i,i] < 2:
print('\tthe value at position', [i,i], 'is less than 2')
else:
print('\tthe value at position', [i,i], 'is not less than 2')
'-----------------------------------------------------------------------------------'
# save some output as a pickle file
# make an output directory if needed
this_dir = os.path.abspath('.').split('/')[-1] # path is split at each '/'. this_dir is assigned the last object [-1] in the path. not sure why the '.' is there in abspath.
print("This is the directory we're in now:", this_dir)
out_dir = '../' + this_dir + '_output/'
print(f'We create an output directory "{out_dir}" in line with "{this_dir}" if needed, then save arr_4 as a pickle file in that output directory')
mymod.make_dir(out_dir) # calling on function from mymod that creates a directory
# save it as a pickle file
out_fn = out_dir + 'pickled_output.p'
pickle.dump(arr_4, open(out_fn, 'wb')) # 'wb' is for write binary
# read the file back in
b = pickle.load(open(out_fn, 'rb')) # 'rb is for read binary
| [
11748,
25064,
11,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
1822,
29572,
198,
198,
2,
1957,
17944,
198,
6738,
4888,
1330,
616,
62,
21412,
355,
616,
4666,
198,
6738,
1330,
8019,
1330,
18126,
198,
260,
... | 2.910737 | 1,602 |
import MySQLdb
import json
import random
import string
db = MySQLdb.connect(host="localhost",user="he",passwd="REDADCTED",db="game")
cur = db.cursor()
json_data = open('/var/www/json/npc.json').read()
npcList = json.loads(json_data)
emptyDB()
for npcType in npcList:
try:
npcList[npcType]['hardware']
#MD, FBI, NSA, ISP, EVILCORP, SAFENET
add(npcList[npcType]['type'], npcList[npcType], npcType)
continue
except KeyError:
pass
try:
numType = npcList[npcType]['type']
#WHOIS, BANK, NPC, PUZZLE
for key in npcList[npcType]:
if key != 'type':
add(numType, npcList[npcType][key], npcType+'/'+key)
continue
except KeyError:
pass
#HIRER
for level in npcList[npcType]:
numType = npcList[npcType][level]['type']
if numType != 61:
for key in npcList[npcType][level]:
if key != 'type':
add(numType, npcList[npcType][level][key], npcType+'/'+level+'/'+key)
continue
db.commit()
# from subprocess import call
# call(["python","software_generator.py"])
# call(["python","software_generator_riddle.py"])
# call(["python","npc_generator_web.py"])
#precisa ser por os (e nao subprocess.call) pq npc_generator.py eh chamado de newroundupdater por os
import os
os.system('python /var/www/python/software_generator.py')
os.system('python /var/www/python/software_generator_riddle.py')
os.system('python /var/www/python/npc_generator_web.py') | [
11748,
33476,
9945,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
4731,
198,
198,
9945,
796,
33476,
9945,
13,
8443,
7,
4774,
2625,
36750,
1600,
7220,
2625,
258,
1600,
6603,
16993,
2625,
22083,
2885,
4177,
1961,
1600,
9945,
2625,
6057,
... | 2.414931 | 576 |
outcomes = ["Stay Home", "Play Golf", "Play Golf", "Stay Home", "Play Golf", "Stay Home", "Play Golf"]
groups = set(outcomes)
# Lambda is basically anonymous funcs
# The below can be converted to max(set(outcomes), lambda x: outcomes.count(x))
# 1. It's a key/value pair
# the output of set(outcomes) feeds into key=outcome.count
decision = max(set(outcomes), key=outcomes.count)
decision2 = max(outcomes, key=outcomes.count)
num_of_playgolf = outcomes.count("Play Golf")
print() | [
448,
8988,
796,
14631,
25681,
5995,
1600,
366,
11002,
19709,
1600,
366,
11002,
19709,
1600,
366,
25681,
5995,
1600,
366,
11002,
19709,
1600,
366,
25681,
5995,
1600,
366,
11002,
19709,
8973,
198,
198,
24432,
796,
900,
7,
448,
8988,
8,
19... | 3.037736 | 159 |
from sapextractor.utils.table_count import get
| [
6738,
473,
431,
742,
40450,
13,
26791,
13,
11487,
62,
9127,
1330,
651,
198
] | 3.357143 | 14 |
"""
This module contains classes that can score unlabelled objects
by usefulness of their labels for a model.
These classes have public interface in common, but rely on
different scoring functions and scoring tools internally.
A notion of so called tools is of particular importance. In context
of this module, the word 'tools' stands for something that is made
of machine learning models and can return arguments of a scoring
function. In the most simple case, tools are just a regular estimator:
e.g., a classifier which can return predicted probabilities or
a density estimator which can return estimated density at an object.
However, if a scoring function requires list of predictions made by
different estimators, tools are a committee (a list) of estimators
fitted on different folds. Also tools can be a pair of regressors
fitted to different powers of target.
@author: Nikolay Lysenko
"""
from typing import List, Dict, Union, Callable, Optional
from abc import ABC, abstractmethod
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.mixture.base import BaseMixture
from .utils import make_committee
ToolsType = Union[
BaseEstimator, List[BaseEstimator], Dict[str, BaseEstimator],
BaseMixture
]
class BaseScorer(ABC):
"""
A facade that provides unified interface for various functions
that score objects by usefulness of their labels.
:param scoring_fn:
function for scoring objects
:param revert_sign:
`False` if the most important object has the highest score
and `True` else
:param is_classification:
`True` if classification problem is studied and `False` else
"""
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
class UncertaintyScorerForClassification(BaseScorer):
"""
A scorer working with functions that measure uncertainty in
predicted class probabilities. Examples of such functions:
* `.scoring_functions.compute_confidences`,
* `.scoring_functions.compute_margins`,
* `.scoring_functions.compute_entropy`.
:param scoring_fn:
function for scoring objects
:param revert_sign:
`False` if the most important object has the highest score
and `True` else
:param clf:
classifier that has methods `fit` and `predict_proba`,
it becomes internal classifier of the scorer; it is recommended
to wrap `clf` in `sklearn.calibration.CalibratedClassifierCV`
if it does not predict well-calibrated probabilities by default
"""
def get_tools(self) -> BaseEstimator:
"""
Get internal classifier.
:return:
internal classifier
"""
return self.__clf
def set_tools(self, tools: BaseEstimator) -> type(None):
"""
Replace internal classifier with passed instance.
:param tools:
classifier that has methods `fit` and `predict_proba`,
it is assumed that `predict_proba` method returns
well-calibrated probabilities
:return:
None
"""
self.__clf = tools
def update_tools(
self,
X_train: np.ndarray,
y_train: np.ndarray,
est: Optional[BaseEstimator] = None,
*args, **kwargs
) -> type(None):
"""
Fit internal classifier to passed training data and,
optionally, before that replace classifier with a new
instance.
:param X_train:
feature representation of training objects
:param y_train:
target labels
:param est:
classifier that has methods `fit` and `predict_proba`;
if it is passed, its fitted instance becomes internal
classifier, so it is assumed that `predict_proba` method
returns well-calibrated probabilities
:return:
None
"""
if est is not None:
self.__check_classifier_before_update(est)
self.__clf = est.fit(X_train, y_train)
elif self.__clf is not None:
self.__check_classifier_before_update()
self.__clf.fit(X_train, y_train)
else:
raise RuntimeError(
"Classifier is not passed neither to initialization "
"nor to this function."
)
def score(self, X_new: np.ndarray) -> np.ndarray:
"""
Score new objects with the highest score standing for the most
important object.
:param X_new:
feature representation of new objects
:return:
uncertainty scores computed with `self.scoring_fn`
"""
self.__check_classifier_before_scoring()
predicted_probabilities = self.__clf.predict_proba(X_new)
scores = self._scoring_fn(predicted_probabilities)
if self._revert_sign:
scores = -scores
return scores
class CommitteeScorer(BaseScorer):
"""
A scorer working with functions that measure degree of disagreement
in predictions of committee members. Examples of such functions:
* `.scoring_functions.compute_committee_divergences`,
* `.scoring_functions.compute_committee_variances`.
:param scoring_fn:
function for scoring objects
:param revert_sign:
`False` if the most important object has the highest score
and `True` else
:param is_classification:
`True` if it is a classification problem or `False` if it is
a regression problem
:param committee:
list of instances of the same class fitted to different folds,
instances must have `predict_proba` method if it is
classification or `predict` method if it is regression;
if it is classification, it is assumed that `predict_proba`
returns well-calibrated probabilities, so consider to wrap
instances in `sklearn.calibration.CalibratedClassifierCV`
if they do not predict well-calibrated probabilities
by default
"""
def get_tools(self) -> List[BaseEstimator]:
"""
Get internal committee of estimators.
:return:
None
"""
return self.__committee
def set_tools(self, tools: List[BaseEstimator]) -> type(None):
"""
Replace internal committee with passed list of estimators.
:param tools:
list of instances of the same class fitted to different
folds, instances must have `predict_proba` method if it
is classification or `predict` method if it is regression;
if it is classification, it is assumed that `predict_proba`
returns well-calibrated probabilities
:return:
None
"""
self.__committee = tools
def update_tools(
self,
X_train: np.ndarray,
y_train: np.ndarray,
est: Optional[BaseEstimator] = None,
*args, **kwargs
) -> type(None):
"""
Fit internal committee to passed training data and,
optionally, before that replace members of the committee
with new instances.
:param X_train:
feature representation of training objects
:param y_train:
target
:param est:
estimator that has method `fit`, also it must have method
`predict_proba` if it is a classifier or it must have
method `predict` if it is a regressor; if it is passed,
its clones become members of the committee instead of
previous members, so it is also assumed that
`predict_proba` method returns well-calibrated
probabilities
:return:
None
"""
if est is not None:
self.__check_estimator_before_update(est)
self.__committee = make_committee(
est, X_train, y_train, *args, **kwargs
)
elif self.__committee is not None:
self.__check_estimator_before_update()
self.__committee = make_committee(
self.__committee[0], X_train, y_train, *args, **kwargs
)
else:
raise RuntimeError(
"Committee is not passed neither to initialization "
"nor to this function."
)
def score(self, X_new: np.ndarray) -> np.ndarray:
"""
Score new objects with the highest score standing for the most
important object.
:param X_new:
feature representation of new objects
:return:
discrepancy scores computed with `self.scoring_fn`
"""
self.__check_committee_before_scoring()
if self._is_classification:
list_of_predictions = [
est.predict_proba(X_new) for est in self.__committee
]
else:
list_of_predictions = [
est.predict(X_new) for est in self.__committee
]
scores = self._scoring_fn(list_of_predictions)
if self._revert_sign:
scores = -scores # pragma: no cover
return scores
class VarianceScorerForRegression(BaseScorer):
"""
A scorer working with functions that measure estimated variance.
Examples of such functions:
* `.scoring_functions.compute_estimations_of_variance`.
:param scoring_fn:
function for scoring objects
:param rgrs:
dict with keys 'target' and 'target^2' and values that
are regressors predicting target variable and squared
target variable respectively, these regressors must
have `predict` method for doing so
"""
def get_tools(self) -> Dict[str, BaseEstimator]:
"""
Get internal pair of regressors.
:return:
internal pair of regressors
"""
return self.__rgrs
def set_tools(self, tools: Dict[str, BaseEstimator]) -> type(None):
"""
Replace internal regressors with passed regressors.
:param tools:
dict with keys 'target' and 'target^2' and values that
are regressors predicting target variable and squared
target variable respectively, these regressors must
have method `predict` for doing so
:return:
None
"""
self.__rgrs = tools
def update_tools(
self,
X_train: np.ndarray,
y_train: np.ndarray,
rgr: Optional[BaseEstimator] = None,
*args, **kwargs
) -> type(None):
"""
Fit pair of regressors to passed training data and,
optionally, before that replace these regressors with new
instances.
:param X_train:
feature representation of training objects
:param y_train:
target variable
:param rgr:
regressor that has methods `fit` and `predict`; if it
is passed, it and its clone form a new pair of regressors
:return:
None
"""
if rgr is not None:
self.__check_regressor_before_update(rgr)
self.__rgrs = {
'target': rgr.fit(X_train, y_train),
'target^2': clone(rgr).fit(X_train, y_train ** 2)
}
elif self.__rgrs is not None:
self.__check_regressor_before_update()
self.__rgrs = {
'target': self.__rgrs['target'].fit(X_train, y_train),
'target^2': self.__rgrs['target^2'].fit(X_train, y_train ** 2)
}
else:
raise RuntimeError(
"Regressors is not passed neither to initialization "
"nor to this function."
)
def score(self, X_new: np.ndarray) -> np.ndarray:
"""
Score new objects with the highest score standing for the most
important object.
:param X_new:
feature representation of new objects
:return:
estimates of variance computed with `self.scoring_fn`
"""
self.__check_regressors_before_scoring()
predictions = self.__rgrs['target'].predict(X_new)
predictions_of_square = self.__rgrs['target^2'].predict(X_new)
scores = self._scoring_fn(predictions, predictions_of_square)
return scores
class RandomScorer(BaseScorer):
    """
    A scorer that assigns scores at random.

    It exists for the sake of making exploratory actions.
    """

    def get_tools(self) -> type(None):
        """
        Return `None`, because `RandomScorer` has no tools.

        :return:
            None
        """
        return None

    def set_tools(self, tools: BaseEstimator) -> type(None):
        """
        Do nothing, because `RandomScorer` has no tools.

        :param tools:
            ignored
        :return:
            None
        """
        return

    def update_tools(
            self,
            X_train: np.ndarray,
            y_train: np.ndarray,
            est: Optional[BaseEstimator] = None,
            *args, **kwargs
            ) -> type(None):
        """
        Do nothing, because `RandomScorer` has no tools.

        :param X_train:
            ignored
        :param y_train:
            ignored
        :param est:
            ignored
        :return:
            None
        """
        return

    def score(self, X_new: np.ndarray) -> np.ndarray:
        """
        Score new objects; the higher the score, the more important
        the object.

        :param X_new:
            feature representation of new objects
        :return:
            values drawn from the standard uniform distribution
        """
        n_objects = X_new.shape[0]
        return np.random.uniform(size=n_objects)
class DensityScorer(BaseScorer):
    """
    A scorer that ranks objects by density estimates. The higher the
    density is, the lower the object is ranked.

    This scorer is needed for making exploratory actions, because
    it prefers objects that look like outliers.

    :param est:
        density estimator that has methods `fit` and `score_samples`,
        it becomes internal density estimator of the scorer
    """

    def get_tools(self) -> Union[BaseEstimator, BaseMixture]:
        """
        Get internal density estimator.

        :return:
            internal density estimator
        """
        return self.__est

    def set_tools(
            self, tools: Union[BaseEstimator, BaseMixture]
            ) -> None:
        """
        Replace internal density estimator with passed instance.

        :param tools:
            density estimator that has methods `fit` and
            `score_samples`
        :return:
            None
        """
        self.__est = tools

    def update_tools(
            self,
            X_train: np.ndarray,
            y_train: np.ndarray,
            est: Optional[Union[BaseEstimator, BaseMixture]] = None,
            *args, **kwargs
            ) -> None:
        """
        Fit internal density estimator to passed training data and,
        optionally, before that replace density estimator with a new
        instance.

        :param X_train:
            feature representation of training objects
        :param y_train:
            target labels
        :param est:
            density estimator that has methods `fit` and
            `score_samples`; if it is passed, its fitted instance
            becomes internal density estimator
        :return:
            None
        :raises RuntimeError:
            if no estimator is passed here and none is stored
        """
        if est is not None:
            self.__check_estimator_before_update(est)
            self.__est = est.fit(X_train, y_train)
        elif self.__est is not None:
            self.__check_estimator_before_update()
            self.__est.fit(X_train, y_train)
        else:
            # Message rewritten: the original was an ungrammatical
            # double negative ("is not passed neither ... nor ...").
            raise RuntimeError(
                "Estimator was passed neither to initialization "
                "nor to this function."
            )

    def score(self, X_new: np.ndarray) -> np.ndarray:
        """
        Score new objects with the highest score standing for the most
        important object.

        :param X_new:
            feature representation of new objects
        :return:
            negated log-density estimates, so that outlier-like
            objects receive the highest scores
        """
        # Docstring fixed: this method does not use `self.scoring_fn`
        # (the previous claim was copy-pasted from a sibling scorer).
        self.__check_estimator_before_scoring()
        scores = -self.__est.score_samples(X_new)
        return scores
def dummy_fn() -> None:
    """
    Do nothing; a placeholder used where a callable is required.

    Annotation changed from the non-idiomatic `type(None)` to the
    canonical `None` (PEP 484 treats `None` as `NoneType`).

    :return:
        None
    """
    pass
| [
37811,
198,
1212,
8265,
4909,
6097,
326,
460,
4776,
9642,
397,
11978,
5563,
198,
1525,
37496,
286,
511,
14722,
329,
257,
2746,
13,
198,
4711,
6097,
423,
1171,
7071,
287,
2219,
11,
475,
8814,
319,
198,
39799,
9689,
5499,
290,
9689,
489... | 2.340431 | 7,138 |
# Copyright (c) 2020 Intel Corporation. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import filecmp
import os
import shutil
import tempfile
import unittest
import zipfile
from decoder.csme_unpack import main_run_for_packaged_compressed_file_input, read_ascii_huffman_table_from_file, \
clear_huffman_table_data, get_huffman_table
# Run the unit tests in this module when it is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
2,
15069,
357,
66,
8,
12131,
8180,
10501,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
116... | 3.519231 | 416 |
import helpers as h
import random
#rf is a function that takes a "locks" list (see the "formats" functions in micro.py)
#canR is a list of the indices that can be regenerated
#sf is a score function that takes a string (artifact) and returns a score, where higher is better
#s is a string (artifact)
#sett is a Settings object
#s is the initial artifact
#regenf, canRegen, scoref: see Settings __init__ (identical params)
#returns (string, score)
| [
11748,
49385,
355,
289,
198,
11748,
4738,
198,
197,
2,
41871,
318,
2163,
326,
2753,
257,
366,
28860,
1,
1351,
357,
3826,
366,
687,
1381,
1,
5499,
287,
4580,
13,
9078,
8,
198,
197,
2,
5171,
49,
318,
1351,
286,
36525,
326,
460,
307,... | 3.424 | 125 |
# constants.py
SOCKET_TIMEOUT = 5
BUFFER_SIZE = 1000
LOAD_BALANCER_QUEUE_SIZE = 50
LEADER_QUEUE_SIZE = 50
HEART_BEAT = 1
#Test constants | [
2,
38491,
13,
9078,
198,
50,
11290,
2767,
62,
34694,
12425,
796,
642,
198,
19499,
45746,
62,
33489,
796,
8576,
198,
35613,
62,
33,
1847,
20940,
1137,
62,
48,
8924,
8924,
62,
33489,
796,
2026,
198,
2538,
2885,
1137,
62,
48,
8924,
892... | 2.305085 | 59 |
import sys
import args
import models_small as models
import hypera2c as H
import utils
from atari_data import MultiEnvironment
import warnings
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch, os, gym, time, glob, argparse, sys
from torch.optim import Adam
from scipy.misc import imresize
from scipy.signal import lfilter
os.environ['OMP_NUM_THREADS'] = '1'
# Collapse every CUDA device name onto 'cuda:0' (CPU stays CPU).
# NOTE(review): looks like a `map_location` table for loading checkpoints
# saved on multi-GPU machines -- confirm against the code that uses it.
map_gpu = {
        'cuda:0': 'cuda:0',
        'cuda:1': 'cuda:0',
        'cuda:2': 'cuda:0',
        'cuda:3': 'cuda:0',
        'cuda:4': 'cuda:0',
        'cuda:5': 'cuda:0',
        'cuda:6': 'cuda:0',
        'cuda:7': 'cuda:0',
        'cpu': 'cpu',
        }
if __name__ == "__main__":
    # NOTE: rebinds the module name `args` to the parsed-arguments object.
    args = args.load_args()
    # One save directory per environment, e.g. 'pong/'.
    args.save_dir = '{}/'.format(args.env.lower())
    if args.render:
        # Rendering implies a single interactive test process.
        args.processes = 1
        args.test = True
    if args.test:
        # Zero learning rate: evaluation only, no parameter updates.
        args.lr = 0
    if args.scratch:
        print ('training on server; saving to /scratch/eecs-share')
    # Size of the discrete action space for the chosen Atari game.
    args.n_actions = gym.make(args.env).action_space.n
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    # print ('=> Multienvironment settings')
    envs = MultiEnvironment(args.env, args.batch_size, args.frame_skip)
    torch.manual_seed(args.seed)
    torch.cuda.device(args.gpu)
    print ('=> Loading Hyperagent')
    # HyperNetwork / load_optim / train_hyperagent are not imported above --
    # presumably defined elsewhere in this file; TODO confirm.
    hypernet = HyperNetwork(args)
    print ('=> Loading Optimizers')
    optim = load_optim(args, hypernet)
    if args.pretrain_e:
        print ('==> pretraining encoder to cover pz')
        hypernet, optim = H.pretrain_encoder(args, hypernet, optim)
    print ('=> Starting Training')
    train_hyperagent()
| [
11748,
25064,
198,
11748,
26498,
198,
11748,
4981,
62,
17470,
355,
4981,
198,
11748,
8718,
64,
17,
66,
355,
367,
198,
11748,
3384,
4487,
198,
6738,
379,
2743,
62,
7890,
1330,
15237,
31441,
198,
198,
11748,
14601,
198,
11748,
299,
32152,... | 2.272352 | 727 |
import pytest
import numpy as np
import pandas as pd
from sklearn.exceptions import NotFittedError
from feature_engine.discretisers import EqualFrequencyDiscretiser, EqualWidthDiscretiser, DecisionTreeDiscretiser
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
1069,
11755,
1330,
1892,
37,
2175,
12331,
198,
198,
6738,
3895,
62,
18392,
13,
15410,
1186,
21572,
1330,
28701,
37... | 3.677966 | 59 |
"""saml sp views"""
from urllib.parse import ParseResult, parse_qsl, urlparse, urlunparse
from django.contrib.auth import logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpRequest, HttpResponse
from django.http.response import HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.utils.http import urlencode
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from structlog.stdlib import get_logger
from xmlsec import InternalError, VerificationError
from authentik.flows.challenge import (
PLAN_CONTEXT_ATTRS,
PLAN_CONTEXT_TITLE,
PLAN_CONTEXT_URL,
AutosubmitChallenge,
Challenge,
ChallengeResponse,
ChallengeTypes,
)
from authentik.flows.models import in_memory_stage
from authentik.flows.planner import (
PLAN_CONTEXT_REDIRECT,
PLAN_CONTEXT_SOURCE,
PLAN_CONTEXT_SSO,
FlowPlanner,
)
from authentik.flows.stage import ChallengeStageView
from authentik.flows.views.executor import NEXT_ARG_NAME, SESSION_KEY_GET, SESSION_KEY_PLAN
from authentik.lib.utils.urls import redirect_with_qs
from authentik.lib.views import bad_request_message
from authentik.providers.saml.utils.encoding import nice64
from authentik.sources.saml.exceptions import MissingSAMLResponse, UnsupportedNameIDFormat
from authentik.sources.saml.models import SAMLBindingTypes, SAMLSource
from authentik.sources.saml.processors.metadata import MetadataProcessor
from authentik.sources.saml.processors.request import RequestProcessor
from authentik.sources.saml.processors.response import ResponseProcessor
from authentik.stages.consent.stage import (
PLAN_CONTEXT_CONSENT_HEADER,
PLAN_CONTEXT_CONSENT_TITLE,
ConsentStageView,
)
LOGGER = get_logger()
class AutosubmitStageView(ChallengeStageView):
    """In-memory wrapper stage that builds an autosubmit challenge out of
    plan context variables."""

    # No response validation needed: `ak-stage-autosubmit` redirects off site.
class InitiateView(View):
    """Serve the form with the SAML request, which sends the user to the IdP."""

    def handle_login_flow(self, source: SAMLSource, *stages_to_append, **kwargs) -> HttpResponse:
        """Prepare an authentication flow plan and redirect the user to the
        FlowExecutor.

        :param source: the SAML source being logged in with
        :param stages_to_append: extra in-memory stages added to the plan
        :param kwargs: additional plan context entries
        """
        # Ensure redirect is carried through when user was trying to
        # authorize application
        final_redirect = self.request.session.get(SESSION_KEY_GET, {}).get(
            NEXT_ARG_NAME, "authentik_core:if-user"
        )
        kwargs.update(
            {
                PLAN_CONTEXT_SSO: True,
                PLAN_CONTEXT_SOURCE: source,
                PLAN_CONTEXT_REDIRECT: final_redirect,
            }
        )
        # We run the Flow planner here so we can pass the Pending user in the context
        planner = FlowPlanner(source.pre_authentication_flow)
        planner.allow_empty_flows = True
        plan = planner.plan(self.request, kwargs)
        for stage in stages_to_append:
            plan.append_stage(stage)
        # Stash the plan in the session for the FlowExecutor to pick up.
        self.request.session[SESSION_KEY_PLAN] = plan
        return redirect_with_qs(
            "authentik_core:if-flow",
            self.request.GET,
            flow_slug=source.pre_authentication_flow.slug,
        )

    def get(self, request: HttpRequest, source_slug: str) -> HttpResponse:
        """Reply with an XHTML SSO request (or a direct redirect for the
        Redirect binding)."""
        source: SAMLSource = get_object_or_404(SAMLSource, slug=source_slug)
        if not source.enabled:
            raise Http404
        relay_state = request.GET.get("next", "")
        auth_n_req = RequestProcessor(source, request, relay_state)
        # If the source is configured for Redirect bindings, we can just redirect there
        if source.binding_type == SAMLBindingTypes.REDIRECT:
            # Parse the initial SSO URL
            sso_url = urlparse(source.sso_url)
            # Parse the querystring into a dict...
            url_kwargs = dict(parse_qsl(sso_url.query))
            # ... and update it with the SAML args
            url_kwargs.update(auth_n_req.build_auth_n_detached())
            # Encode it back into a string
            res = ParseResult(
                scheme=sso_url.scheme,
                netloc=sso_url.netloc,
                path=sso_url.path,
                params=sso_url.params,
                query=urlencode(url_kwargs),
                fragment=sso_url.fragment,
            )
            # and merge it back into a URL
            final_url = urlunparse(res)
            return redirect(final_url)
        # As POST Binding we show a form
        try:
            saml_request = nice64(auth_n_req.build_auth_n())
        except InternalError as exc:
            # xmlsec failed while building/signing the request.
            LOGGER.warning(str(exc))
            return bad_request_message(request, str(exc))
        injected_stages = []
        plan_kwargs = {
            PLAN_CONTEXT_TITLE: f"Redirecting to {source.name}...",
            PLAN_CONTEXT_CONSENT_TITLE: f"Redirecting to {source.name}...",
            PLAN_CONTEXT_ATTRS: {
                "SAMLRequest": saml_request,
                "RelayState": relay_state,
            },
            PLAN_CONTEXT_URL: source.sso_url,
        }
        # For just POST we add a consent stage,
        # otherwise we default to POST_AUTO, with direct redirect
        if source.binding_type == SAMLBindingTypes.POST:
            injected_stages.append(in_memory_stage(ConsentStageView))
            plan_kwargs[PLAN_CONTEXT_CONSENT_HEADER] = f"Continue to {source.name}"
        injected_stages.append(in_memory_stage(AutosubmitStageView))
        return self.handle_login_flow(
            source,
            *injected_stages,
            **plan_kwargs,
        )
@method_decorator(csrf_exempt, name="dispatch")
class ACSView(View):
    """AssertionConsumerService: consume the SAML assertion and sign the
    user in."""

    def post(self, request: HttpRequest, source_slug: str) -> HttpResponse:
        """Handle a POSTed SSO assertion and log the user in."""
        source: SAMLSource = get_object_or_404(SAMLSource, slug=source_slug)
        if not source.enabled:
            raise Http404
        response_processor = ResponseProcessor(source)
        # Both parse failures produce the same 400 response.
        try:
            response_processor.parse(request)
        except (MissingSAMLResponse, VerificationError) as exc:
            return bad_request_message(request, str(exc))
        try:
            return response_processor.prepare_flow(request)
        except UnsupportedNameIDFormat as exc:
            return bad_request_message(request, str(exc))
class SLOView(LoginRequiredMixin, View):
    """Single-Logout view."""

    def dispatch(self, request: HttpRequest, source_slug: str) -> HttpResponse:
        """End the local session, then send the user to the IdP's SLO URL."""
        source: SAMLSource = get_object_or_404(SAMLSource, slug=source_slug)
        if not source.enabled:
            raise Http404
        logout(request)
        return redirect(source.slo_url)
class MetadataView(View):
    """Serve the XML metadata (SPSSODescriptor) for the IdP."""

    def dispatch(self, request: HttpRequest, source_slug: str) -> HttpResponse:
        """Build and return the XML metadata document."""
        source: SAMLSource = get_object_or_404(SAMLSource, slug=source_slug)
        descriptor = MetadataProcessor(source, request).build_entity_descriptor()
        return HttpResponse(descriptor, content_type="text/xml")
| [
37811,
37687,
75,
599,
5009,
37811,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2547,
325,
23004,
11,
21136,
62,
80,
6649,
11,
19016,
29572,
11,
19016,
403,
29572,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
2604,
44... | 2.405457 | 3,115 |
import numpy as np
import torch
from collections import namedtuple
from torch.nn.parallel import DistributedDataParallel as DDP
from rlpyt.agents.base import BaseAgent, AgentStep
from rlpyt.models.qpg.conv import PiConvTiedModel, QDoubleConvTiedModel
from rlpyt.utils.quick_args import save__init__args
from rlpyt.distributions.gaussian import Gaussian, DistInfoStd
from rlpyt.utils.buffer import buffer_to
from rlpyt.utils.logging import logger
from rlpyt.models.utils import update_state_dict
from rlpyt.utils.collections import namedarraytuple
# Clamp bounds for the policy's log standard deviation.
# NOTE(review): not referenced in the methods visible here -- presumably
# enforced inside the model or algorithm; confirm where they are applied.
MIN_LOG_STD = -10
MAX_LOG_STD = 2

# Structured containers: per-step agent output and the pair of networks.
AgentInfo = namedarraytuple("AgentInfo", ["dist_info"])
Models = namedtuple("Models", ["pi", "q"])
class SacNewAgent(BaseAgent):
"""Agent for SAC algorithm, including action-squashing, using twin Q-values."""
    def __init__(
            self,
            ModelCls=PiConvTiedModel,
            QModelCls=QDoubleConvTiedModel,
            model_kwargs=None,
            q_model_kwargs=None,
            initial_model_state_dict=None,  # All models.
            load_q_model=True,
            tie_weights=True,
            load_model_after_min_steps=False,
            action_squash=2.,  # Max magnitude (or None).
            pretrain_std=0.75,  # With squash 0.75 is near uniform.
            saliency_dir=None,
            ):
        """
        Save input arguments; network hyperparameter defaults are set here.

        NOTE: ``save__init__args(locals())`` stores every local name in
        scope at that point as an instance attribute, so no extra locals
        should be introduced before that call.
        """
        # Mutable defaults are created per call, never shared between instances.
        if model_kwargs is None:
            model_kwargs = dict(hidden_sizes=[256, 256])
        if q_model_kwargs is None:
            q_model_kwargs = dict(hidden_sizes=[256, 256])
        super().__init__(ModelCls=ModelCls, model_kwargs=model_kwargs,
            initial_model_state_dict=initial_model_state_dict)
        save__init__args(locals())
        self.min_itr_learn = 0  # Get from algo.
def q(self, observation, prev_action, prev_reward, action, detach_encoder=False):
"""Compute twin Q-values for state/observation and input action
(with grad)."""
model_inputs = buffer_to((observation, prev_action, prev_reward,
action), device=self.device)
q1, q2 = self.q_model(*model_inputs, detach_encoder=detach_encoder)
return q1.cpu(), q2.cpu()
def target_q(self, observation, prev_action, prev_reward, action):
"""Compute twin target Q-values for state/observation and input
action."""
model_inputs = buffer_to((observation, prev_action,
prev_reward, action), device=self.device)
target_q1, target_q2 = self.target_q_model(*model_inputs)
return target_q1.cpu(), target_q2.cpu()
def pi(self, observation, prev_action, prev_reward, detach_encoder=False):
"""Compute action log-probabilities for state/observation, and
sample new action (with grad). Uses special ``sample_loglikelihood()``
method of Gaussian distriution, which handles action squashing
through this process."""
model_inputs = buffer_to((observation, prev_action, prev_reward),
device=self.device)
mean, log_std = self.model(*model_inputs, detach_encoder=detach_encoder)
dist_info = DistInfoStd(mean=mean, log_std=log_std)
action, log_pi = self.distribution.sample_loglikelihood(dist_info)
log_pi, dist_info = buffer_to((log_pi, dist_info), device="cpu")
return action, log_pi, dist_info # Action stays on device for q models.
@torch.no_grad()
@property
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
28034,
13,
20471,
13,
1845,
29363,
1330,
4307,
6169,
6601,
10044,
29363,
355,
360,
6322,
198,
198,
6738,
374,
75,
9078,
83,
13,
... | 2.393876 | 1,437 |
from django.http import response
from django.shortcuts import redirect, render
from django.http import HttpResponse
import requests
from .forms import New_Test, Update_Patient_info
from django.views.decorators.csrf import csrf_exempt
from diagnose.models import Diagnose
import os
import time
from django.contrib import messages
BASE_API_URL = 'http://api.fyp.anilpoudyal.com.np'
# BASE_API_URL = 'http://127.0.0.1:5000/'
@csrf_exempt
@csrf_exempt
| [
6738,
42625,
14208,
13,
4023,
1330,
2882,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
11748,
7007,
198,
6738,
764,
23914,
1330,
968,
62,
14402,
11,
1... | 2.942308 | 156 |
import atexit
import contextlib
import os
import pathlib
import requests
import shlex
import subprocess
import sys
@contextlib.contextmanager
# Read GitHub credentials from the environment; fail fast if they are absent.
try:
    user = os.environ['GH_USER']
    token = os.environ['GH_TOKEN']
except KeyError:
    raise RuntimeError('You must set GH_USER and GH_TOKEN environ vars')
else:
    # Materialise a real auth config by replacing the 40-character 'f'
    # placeholder in the template with the actual token.
    # `config` is defined elsewhere in this file -- presumably it returns
    # a pathlib.Path; TODO confirm.
    config('auth.real.cfg').write_text(
        config('auth.fff.cfg').read_text().replace(40 * 'f', token)
    )
    # Delete the file holding the real token when the process exits.
    atexit.register(config('auth.real.cfg').unlink)
| [
11748,
379,
37023,
198,
11748,
4732,
8019,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
7007,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
11748,
25064,
628,
628,
198,
198,
31,
22866,
8019,
13,
22866,
37153,
628,
198,
2... | 2.755682 | 176 |
from ..parser import parse
# Regression test for weird edge case in which hr gets moved outside of quote
| [
6738,
11485,
48610,
1330,
21136,
628,
628,
628,
628,
198,
2,
3310,
2234,
1332,
329,
7650,
5743,
1339,
287,
543,
39436,
3011,
3888,
2354,
286,
9577,
198
] | 4.185185 | 27 |
#!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.logger
@file ion/agents/platform/rsn/simulator/logger.py
@author Carlos Rueda
@brief Logger configuration for the OMS simulator.
"""
__author__ = 'Carlos Rueda'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
31,
26495,
22088,
13,
49638,
13,
24254,
13,
3808,
77,
13,
14323,
8927,
13,
6404,
1362,
198,
31,
7753,
220,
220,
220,
22088,
14,
49638,
14,
24254,
14,
3808,
77,
14,
... | 2.666667 | 90 |
# Generated by Django 2.0.6 on 2018-06-18 20:50
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
21,
319,
2864,
12,
3312,
12,
1507,
1160,
25,
1120,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from elastic import ask_delete_index, get_indices, parse_indices
host = "localhost:9200"
products = { \
'glads2_mm' : ['dev', 'test', 'prod'], \
'glads2_vk' : ['dev', 'test', 'prod'], \
'glads2_ok' : ['dev', 'test', 'prod'], \
'glads2_gp' : ['dev', 'test', 'prod'], \
'glads2_fb' : ['dev', 'test', 'prod'], \
'glads2_win' : ['dev', 'test', 'prod'], \
'glads2_ios' : ['dev', 'test', 'prod'], \
'glads2_ndr' : ['dev', 'test', 'prod'] \
}
clear()
| [
6738,
27468,
1330,
1265,
62,
33678,
62,
9630,
11,
651,
62,
521,
1063,
11,
21136,
62,
521,
1063,
628,
198,
4774,
796,
366,
36750,
25,
24,
2167,
1,
198,
29498,
796,
1391,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.866667 | 285 |
# Read the student's two grades, average them, and report the outcome:
# >= 7 passed, < 5 failed, otherwise recovery exam.
nota1 = float(input('Digite a 1° nota do aluno: '))
nota2 = float(input('Digite a 2° nota do aluno: '))
media = (nota1 + nota2) / 2
if media >= 7:
    print('Você foi aprovado, sua média foi de {}'.format(media))
elif media < 5:
    print('Sua média foi de {}, REPROVADO!'.format(media))
else:
    print('Sua média foi de {}, RECUPERAÇÃO!'.format(media))
| [
64,
796,
12178,
7,
15414,
19203,
19511,
578,
257,
352,
7200,
407,
64,
466,
435,
36909,
25,
705,
4008,
198,
65,
796,
12178,
7,
15414,
19203,
19511,
578,
257,
362,
7200,
407,
64,
466,
435,
36909,
25,
705,
4008,
198,
76,
796,
357,
64... | 2.084416 | 154 |
# Catalogue items: name, list price, and discount.
sepatu = {"nama": "Sepatu Niko", "harga": 150000, "diskon": 30000}
baju = {"nama": "Baju Unikloh", "harga": 80000, "diskon": 8000}
celana = {"nama": "Celana Lepis", "harga": 200000, "diskon": 60000}

# Net price of each item after its discount.
harga_sepatu = sepatu["harga"] - sepatu["diskon"]
harga_baju = baju["harga"] - baju["diskon"]
harga_celana = celana["harga"] - celana["diskon"]

# Grand total: discounted sum with a 10% surcharge applied.
total_harga = sum((harga_sepatu, harga_baju, harga_celana)) * 1.1
print(total_harga)
| [
325,
8071,
84,
796,
19779,
77,
1689,
1298,
366,
19117,
33419,
11271,
78,
1600,
366,
71,
853,
64,
1298,
1315,
2388,
11,
366,
39531,
261,
1298,
513,
2388,
92,
198,
65,
1228,
84,
796,
19779,
77,
1689,
1298,
366,
33,
1228,
84,
791,
11... | 2.043269 | 208 |
from flask import Flask, jsonify, request
from multiprocessing import Value
counter = Value('i', 0)
app = Flask(__name__)
a = []
help_message = """
API Usage:
- GET /api/list
- POST /api/add data={"key": "value"}
- GET /api/get/<id>
- PUT /api/update/<id> data={"key": "value_to_replace"}
- DELETE /api/delete/<id>
"""
@app.route('/api', methods=['GET'])
@app.route('/api/list', methods=['GET'])
@app.route('/api/add', methods=['POST'])
@app.route('/api/get', methods=['GET'])
@app.route('/api/get/<int:_id>', methods=['GET'])
@app.route('/api/update', methods=['PUT'])
@app.route('/api/update/<int:_id>', methods=['PUT'])
@app.route('/api/delete/<int:_id>', methods=['DELETE'])
if __name__ == '__main__':
app.run()
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
2581,
198,
6738,
18540,
305,
919,
278,
1330,
11052,
198,
198,
24588,
796,
11052,
10786,
72,
3256,
657,
8,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
64,
796,
17635,
198,
167... | 2.286585 | 328 |
"""Moontracker package initialization."""
| [
37811,
16632,
756,
81,
10735,
5301,
37588,
526,
15931,
198
] | 4.2 | 10 |
import io
from contextlib import redirect_stderr, redirect_stdout
from dataclasses import dataclass, field
from typing import Generator, List
from ward.errors import FixtureError
from ward.fixtures import FixtureCache
from ward.models import Scope
from ward.test_result import TestOutcome, TestResult
from ward.testing import Test
@dataclass
| [
11748,
33245,
198,
6738,
4732,
8019,
1330,
18941,
62,
301,
1082,
81,
11,
18941,
62,
19282,
448,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
35986,
11,
7343,
198,
198,
6738,
15305,
13,
48277,
... | 3.833333 | 90 |
import hashlib
from typing import Iterable, Callable
from mabooia.collections import Stream, LinkedList
| [
11748,
12234,
8019,
198,
6738,
19720,
1330,
40806,
540,
11,
4889,
540,
198,
198,
6738,
285,
397,
2238,
544,
13,
4033,
26448,
1330,
13860,
11,
7502,
276,
8053,
628,
628,
628
] | 3.548387 | 31 |
# https://www.quora.com/Why-do-we-use-the-Ornstein-Uhlenbeck-Process-in-the-exploration-of-DDPG
import random
import copy
import numpy as np
class OUNoise:
    """
    Ornstein-Uhlenbeck process: temporally correlated noise that drifts
    back towards a mean.
    """

    def __init__(self, size, mu=0., theta=0.15, sigma=0.2):
        """
        Store the process parameters and initialise the internal state.
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def sample(self):
        """
        Advance the process one step and return the new state as a
        noise sample.
        """
        current = self.state
        delta = self.theta * (self.mu - current) + self.sigma * np.random.randn(len(current))
        self.state = current + delta
        return self.state

    def reset(self):
        """
        Snap the internal state (= noise) back to the mean (= mu).
        """
        self.state = copy.copy(self.mu)
| [
2,
3740,
1378,
2503,
13,
421,
5799,
13,
785,
14,
5195,
12,
4598,
12,
732,
12,
1904,
12,
1169,
12,
5574,
77,
5714,
12,
34653,
11925,
27343,
12,
18709,
12,
259,
12,
1169,
12,
20676,
6944,
12,
1659,
12,
35,
6322,
38,
198,
198,
1174... | 2.066667 | 420 |
#!/usr/bin/env python3
"""
The data types for an Ocean Salinity DTBXY
"""
import numpy as np
# data type list of lists. The inner lists are there for each file sub-block.
datatype = [
[('MaxValid', 'float32')],
[('MinValid', 'float32')],
# REGION
[('region_count', 'uint32')],
# Region ID, start and stop snap time, start and stop snap id
[('Region_ID', 'uint32'), ('Days', 'int32'), ('Seconds', 'uint32'), ('Microseconds', 'uint32'),
('stop_Days', 'int32'), ('stop_Seconds', 'uint32'), ('stop_Microseconds', 'uint32'),
('Start_Snapshot_ID', 'uint32'), ('Stop_Snapshot_ID', 'uint32')],
# stats (just one number) repeated for the 3 models, 8 pols, 12 fov zones
[('mean', 'float32'), ('median', 'float32'), ('min', 'float32'), ('max', 'float32'), ('std', 'float32')],
# counts, dTb, std_dTb, flags repeated 129 x 129
[('count_deltaTB', 'uint32'), ('deltaTB', 'float32'), ('std_deltaTB', 'float32'), ('flags', 'ushort')],
# SNAPSHOTS
[('snap_count', 'uint32')],
# snapshot general info
[('Snapshot_ID', 'uint32'), ('Snapshot_OBET', 'uint64'), ('Snapshot_Latitude', 'float32'),
('Snapshot_Longitude', 'float32'), ('Snapshot_Altitude', 'float32'), ('Snapshot_Flags', 'ushort'),
('L1c_TEC', 'int16')],
[('measurement_count', 'ushort')],
# measured Tb mean and std
[('L1cTB', 'ushort'), ('std_L1cTB', 'ushort')],
# BOA fwd model components
[('atmosTB', 'int16'), ('std_atmosTB', 'ushort'), ('flatSeaTB', 'int16'), ('std_flatSeaTB', 'ushort'),
('roughTB', 'int16'), ('std_roughTB', 'ushort'), ('galTB', 'int16'), ('std_galTB', 'ushort'),
('sunTB', 'int16'), ('std_sunTB', 'ushort'), ('sumTB', 'int16'), ('std_sumTB', 'ushort')],
# TOA fwd model components with L1c TEC
[('atmosTB', 'int16'), ('std_atmosTB', 'ushort'), ('flatSeaTB', 'int16'), ('std_flatSeaTB', 'ushort'),
('roughTB', 'int16'), ('std_roughTB', 'ushort'), ('galTB', 'int16'), ('std_galTB', 'ushort'),
('sunTB', 'int16'), ('std_sunTB', 'ushort'), ('sumTB', 'int16'), ('std_sumTB', 'ushort')],
# TOA fwd model components with A3 TEC
[('atmosTB', 'int16'), ('std_atmosTB', 'ushort'), ('flatSeaTB', 'int16'), ('std_flatSeaTB', 'ushort'),
('roughTB', 'int16'), ('std_roughTB', 'ushort'), ('galTB', 'int16'), ('std_galTB', 'ushort'),
('sunTB', 'int16'), ('std_sunTB', 'ushort'), ('sumTB', 'int16'), ('std_sumTB', 'ushort')],
# geophysics
[('SSS', 'int16'), ('std_SSS', 'ushort'), ('SST', 'int16'), ('std_SST', 'ushort'), ('WS', 'int16'),
('std_WS', 'ushort'), ('A3TEC', 'int16'), ('std_A3TEC', 'ushort'), ('Tair', 'int16'),
('std_Tair', 'ushort'), ('SP', 'int16'), ('std_SP', 'ushort'), ('TCWV', 'int16'), ('std_TCWV', 'ushort'),
('HS', 'int16'), ('std_HS', 'ushort')],
# flags
[('coast', 'ushort'), ('sun_point', 'ushort'), ('sun_tails', 'ushort'), ('rfi', 'ushort'),
('rain', 'ushort'), ('ice', 'ushort')],
[('gp_count', 'uint32')],
# grid points
[('Grid_Point_ID', 'uint32'), ('Grid_Point_Latitude', 'float32'), ('Grid_Point_Longitude', 'float32')],
[('measurement_count', 'ushort')],
[('Snapshot_Index', 'ushort'), ('Zone_Bits', 'ushort')]
] | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
464,
1366,
3858,
329,
281,
10692,
4849,
6269,
360,
22737,
34278,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
1366,
2099,
1351,
286,
8341,
13,
383,... | 2.112751 | 1,694 |
# python code to find out what set of dividers give an accurate set of N=12 frequencies for a top
# octave musical note generator.
# there are a bunch of print statements that can be uncommented to output various things
# this script is optimized to show the master clock frequencies that give the lowest percent error over the 11
# note frequncies that are derived from it.
# arguments are scan_master_freq_start, scan_master_freq_end, [octave_number]
import math
import numpy as np
import sys
ESC_HON = '\33[1m'
ESC_HOFF = '\33[22m'
ESC_RED = '\33[31m'
ESC_BLUE = '\33[34m'
ESC_PINK = '\33[35m'
ESC_DEF = '\33[39m'
SEPLINE = '-------------------------------------------------------------'
print()
print(ESC_HON + "Divide chain optimizer for top octave musical note generation" + ESC_HOFF)
f_start = 400000
f_end = f_start + 200000
f_delta = 10 # search step
if len(sys.argv)==2:
f_start = int(sys.argv[1])
f_end = f_start + f_delta
if len(sys.argv)>2:
f_start = int(sys.argv[1])
f_end = int(sys.argv[2])
if f_end<f_start:
print("Error: start and end frequencies are invalid")
exit(1)
elif f_end==f_start:
f_end = f_start + f_delta
start_octave = 6
if len(sys.argv)>3:
so = int(sys.argv[3])
if so<0 or so>10:
print("Error: start octave is out of range, default =", start_octave)
exit(1)
start_octave = so
# The 12 semitone names of one octave, starting from A.
notes = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
N = len(notes)
note_first_hz = 1760.0 * pow(2.0, start_octave - 6) # frequency of note 'A' in Hz
f_target = np.empty(N)
# here we generate the equal interval note frequencies above note_first_hz
# (equal temperament: each semitone is a factor of 2^(1/12) above the last)
for index, name in enumerate(notes):
    # Notes C and above belong to the next octave number (display only).
    if index>2:
        octave = start_octave+1
    else:
        octave = start_octave
    freq = note_first_hz * math.pow(2.0, index / float(N))
    #print(index, name + str(octave), "\t", round(freq,2))
    f_target[index] = freq
low_err = 1.0
print(SEPLINE)
print("Scanning from f_osc =",f_start,"Hz to f_osc =",f_end,"Hz")
for f_osc in range(f_start,f_end,f_delta):
max_err_ratio = 0
max_bits = 0
for f in f_target:
div = f_osc / f
divint = int(round(div))
bits = int(math.ceil(math.log(divint,2.0)))
max_bits = max(max_bits, bits)
actual_f = f_osc/float(divint)
delta_f = abs(actual_f - f)
err_ratio = delta_f/f
#print(f, divint, actual_f, delta_f, round(err_ratio*100,6),'%')
max_err_ratio = max(max_err_ratio, err_ratio)
#print(f_osc,"Error =", round(max_err_ratio*100,6),"%, Divide =" , max_bits)
#print(f_osc, max_err_ratio*100)
if low_err > max_err_ratio:
low_err = max_err_ratio
best_f = f_osc
print(ESC_RED+"Lowest error found =",round(low_err*100,6),"%, using f_osc =",best_f,"Hz"+ESC_DEF)
print(SEPLINE)
print("Summary")
print(SEPLINE)
f_osc = best_f # here you can just assign f_osc to whatever you like to get the info about that source freq
# f_osc = 10000000 # 10MHz clock example
divide_set = np.empty(N, dtype=int)
for index, f in enumerate(f_target):
if index>2:
octave = start_octave+1
else:
octave = start_octave
div = f_osc / f
divint = int(round(div))
divide_set[index] = divint
actual_f = f_osc/float(divint)
delta_f = abs(actual_f - f)
err_ratio = delta_f/f
print(ESC_PINK + notes[index] + str(octave) + ESC_DEF,"\tdesired =",round(f,2), "Hz\t"+ESC_BLUE+"divide = ",divint, ESC_DEF+"\toutput =",round(actual_f,2), "Hz\terror =", round(err_ratio*100,6),'%')
print(SEPLINE)
print("Divider Factorization")
print(SEPLINE)
for index, divint in enumerate(divide_set):
print(index,"\t"+ESC_BLUE+"divide =",divint,end=ESC_DEF+"\t")
print_factors(divint)
print(SEPLINE)
exit(0)
| [
2,
21015,
2438,
284,
1064,
503,
644,
900,
286,
13576,
364,
1577,
281,
7187,
900,
286,
399,
28,
1065,
19998,
329,
257,
1353,
198,
2,
19318,
1015,
10530,
3465,
17301,
13,
198,
2,
612,
389,
257,
7684,
286,
3601,
6299,
326,
460,
307,
... | 2.238123 | 1,705 |
"""Constants for physical simulation"""
flowRate = 10 # the amount of water flowing (in ml) from the mixture vessel per second when tap is open
heatRate = 1 # the amount of heat increase (in degrees celsius) in mixture vessel when heater is on
temperatureDecay = 0.1 # amount of temperature loss (in degrees celsius) in mixture vessel when heater is off
pressureRampUp = 3 # amount of seconds to build up enough pressure to make liquid flow from storage vessels
pressureRampDown = 3 # amount of time before liquid stops flowing after the air pump has been switched off
liquidMax = 2000 # amount (in ml) of liquid that can maximally go into the containers (pi * 10 * 10 * 6.5; diameter 10cm, height ~6.5cm)
environmentTemp = 20 # environmentalTemperature
tempConversion = 0.05 # 0.00V = 0.0 degrees celsius; steps of 0.05V per degree celsius above 0
levelConversion = 0.5 # V/cm; 0V is empty
colourConversion = 0.033 # 0.00V = value 0 (pitch black) - 3.3 V = value 100 (bright white); value/lightness score (HSV)
"""Set Points: these indicate the desired values for the dimensions of the resulting mixture"""
levelSetPoint = 1.6 # cm liquid (0.8 = 500ml)
colourSetPoint = 1.65 # % value
tempSetPoint = 2.0 # degrees celsius
"""Reaction difference: the amount of points of divergence allowed before the controller reacts"""
tempReaction = 0.05
levelReaction = 0.07
colourReaction = 0.05 | [
37811,
34184,
1187,
329,
3518,
18640,
37811,
198,
11125,
32184,
796,
838,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
262,
2033,
286,
1660,
17609,
357,
259,
25962,
8,
422,
262,
11710,
8837,
583,
1218,
618,
9814,
318,
1280,
198,... | 3.36534 | 427 |
#!/usr/bin/env python
#
# Upgrade the OpenStack with Clustering Status Control.
# Usage: $ ./do-upgrade.py -o cloud:xenial-newton [application-name]
#
import argparse
import logging
import six
import subprocess
import time
import yaml
# Full DEBUG trace goes to os-upgrade.log (including the function name of
# each record via %(funcName)s).
logging.basicConfig(
    filename='os-upgrade.log',
    level=logging.DEBUG,
    format=('%(asctime)s %(levelname)s '
            '(%(funcName)s) %(message)s'))
log = logging.getLogger('os_upgrader')
# Mirror this script's log records to the console as well, with a shorter
# format than the file log.
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
# Juju major version this script targets — TODO confirm whether juju 2 is
# also supported (see the TODO in is_rollable below).
JUJU_VERSION = 1
# The 15.10 charm versions support the big bang upgrade scenario
# or the rollinng upgrade within a specific service (e.g. all
# units of a given service are upgraded at the same time).
# Services are listed in the order they should be upgraded.
SERVICES = [
    # Identity and Image
    'keystone',
    'glance',
    # Upgrade nova
    'nova-cloud-controller',
    'nova-compute',
    # Neutron upgrades
    'neutron-api',
    'neutron-gateway',
    # Backend block-storage upgrade.
    # Note: just upgrade cinder service.
    'cinder',
    # Upgrade dashboard
    'openstack-dashboard',
]
# Not all charms use the openstack-origin. The openstack specific
# charms do, but some of the others use an alternate origin key
# depending on who the author was.
ORIGIN_KEYS = {
    'ceph': 'source',
    'ceph-osd': 'source',
    'ceph-mon': 'source',
    'ceph-radosgw': 'source',
}
def is_rollable(service):
    """Determines if the service provided is eligible for a rolling
    upgrade or not.

    A service is rollable when it exposes the openstack-upgrade action,
    has more than one unit, is not a ceph charm (ceph drives its own
    upgrade via the 'source' config option) and can be switched into
    action-managed-upgrade mode.

    :param service <Service>: the service object describing the service
        that should be tested for rollable upgrades
    :return <bool>: True if the service is rollable, false if not.
    """
    if 'openstack-upgrade' not in Juju.enumerate_actions(service.name):
        # If the service does not have an openstack-upgrade action,
        # then the service cannot be upgraded in a rollable fashion.
        return False
    if len(service.units()) <= 1:
        # If there's not multiple units, no need to do the rolling
        # upgrade. Go for the big bang.
        return False
    if 'ceph' in service.name.lower():
        # The ceph charms incorporate their own upgrade process by
        # simply setting the source so let it do the "big-bang" style
        # upgrade.
        # Bug fix: the original test was .find('ceph') > 0, which is
        # False when the name *starts* with 'ceph' (find returns 0), so
        # ceph charms were never actually excluded.
        # TODO(wolsen) this should check the charm in juju 2 rather
        # than rely on the service/application name.
        return False
    if not service.set_config('action-managed-upgrade', True):
        log.warning('Failed to enable action-managed-upgrade mode.')
        return False
    return True
def order_units(service, units):
    """Orders the units by ensuring that the leader is the first unit.

    Queries Juju in order to determine which unit is the leader, and
    places that unit at the top of the list.

    :param service <Service>: the service to order the units by
    :param units list<Unit>: the list of units to sort
    :return list<Unit>: the sorted list of units.
    :raises StopIteration: if no unit reports itself as the leader.
    """
    log.info('Determining ordering for service: %s' % service.name)
    ordered = []
    is_leader_data = Juju.run_on_service(service.name, 'is-leader')
    # Bug fix: filter() returns a lazy iterator on Python 3, so the
    # original leader_info[0] subscript raised TypeError there. This
    # module imports six (2/3 compatibility intent), so use next(),
    # which works identically on both.
    leader_info = next(u for u in is_leader_data
                       if u['Stdout'].strip() == 'True')
    leader_unit = leader_info['UnitId']
    for unit in units:
        if unit.name == leader_unit:
            ordered.insert(0, unit)
        else:
            ordered.append(unit)
    log.info('Upgrade order is: %s' % [unit.name for unit in ordered])
    return ordered
def perform_rolling_upgrade(service):
    """Upgrade the given service one unit at a time.

    Sets the charm's origin config, then for each unit: optionally
    pauses the hacluster subordinate and the unit itself, runs the
    juju openstack-upgrade action, and resumes both in reverse order,
    waiting for each unit to finish before moving on to the next.

    :param service <Service>: the service object describing the juju service
        that should be upgraded.
    """
    log.info('Performing a rolling upgrade for service: %s' % service.name)
    actions = Juju.enumerate_actions(service.name)
    origin_key = ORIGIN_KEYS.get(service.name, 'openstack-origin')
    service.set_config(origin_key, args.origin)
    for unit in order_units(service, service.units()):
        log.info('Upgrading unit: %s' % unit.name)
        ha_unit = unit.get_hacluster_subordinate_unit()
        # TODO(wolsen) Temporary workaround: pause the script so the
        # operator can evacuate a compute node (or take other manual
        # action) before its upgrade. Ideally the script would evacuate
        # the node automatically when requested.
        if args.evacuate and service.name == 'nova-compute':
            six.moves.input('Preparing to upgrade %s. Perform any additional '
                            'admin actions desired. Press ENTER to proceed.' %
                            unit.name)
        if args.pause and ha_unit:
            ha_unit.pause()
        if args.pause and 'pause' in actions:
            unit.pause()
        if 'openstack-upgrade' in actions:
            unit.upgrade_openstack()
        if args.pause and 'resume' in actions:
            unit.resume()
        if args.pause and ha_unit:
            ha_unit.resume()
        log.info(' Unit %s has finished the upgrade.' % unit.name)
def perform_bigbang_upgrade(service):
    """Upgrade every unit of the service at once ("big-bang" style).

    Setting the origin config value triggers the charm's config-changed
    hook, which upgrades all units simultaneously. This function then
    polls Juju until no unit reports an upgrade in progress.

    :param service <Service>: the service object describing the juju service
        that should be upgraded.
    """
    log.info('Performing a big-bang upgrade for service: %s' % service.name)
    origin_key = ORIGIN_KEYS.get(service.name, 'openstack-origin')
    service.set_config(origin_key, args.origin)
    # Give the service a moment to invoke the config-changed hook
    # before the first completion poll.
    time.sleep(5)
    while True:
        refreshed = Juju.current().get_service(service.name)
        still_upgrading = [u.is_upgrading() for u in refreshed.units()]
        if not any(still_upgrading):
            break
        time.sleep(5)
if __name__ == '__main__':
    # Script entry point; argument parsing and dispatch happen in main(),
    # which is defined elsewhere in this module.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
24236,
262,
4946,
25896,
351,
1012,
436,
1586,
12678,
6779,
13,
198,
2,
29566,
25,
720,
24457,
4598,
12,
929,
9526,
13,
9078,
532,
78,
6279,
25,
87,
268,
498,
12,
3605,
... | 2.684189 | 2,454 |
import pytorch_lightning as pl
import torch
import torch.nn as nn
class GPT2(pl.LightningModule):
    """
    GPT-2 from `language Models are Unsupervised Multitask Learners <https://d4mucfpksywv.cloudfront.net/
    better-language-models/language-models.pdf>`_
    Paper by: Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever
    Implementation contributed by:
    - `Teddy Koker <https://github.com/teddykoker>`_
    Example::
        from pl_bolts.models import GPT2
        seq_len = 17
        batch_size = 32
        vocab_size = 16
        x = torch.randint(0, vocab_size, (seq_len, batch_size))
        model = GPT2(embed_dim=32, heads=2, layers=2, num_positions=seq_len, vocab_size=vocab_size, num_classes=4)
        results = model(x)
    """
    def forward(self, x: torch.Tensor, classify: bool = False) -> torch.Tensor:
        """
        Expect input as shape [sequence len, batch]
        If classify, return classification logits
        """
        # NOTE(review): relies on attributes created in __init__ (token_embeddings,
        # sos, position_embeddings, layers, head, clf_head, hparams) which are not
        # visible in this chunk — confirm against the full class definition.
        length, batch = x.shape
        h = self.token_embeddings(x.long())
        # prepend sos token and drop the last input embedding, shifting the
        # sequence right by one so position t predicts token t (next-token setup)
        sos = torch.ones(1, batch, self.hparams.embed_dim, device=x.device) * self.sos
        h = torch.cat([sos, h[:-1, :, :]], axis=0)  # 'axis' is accepted as an alias of 'dim' by torch.cat
        # add positional embeddings
        positions = torch.arange(length, device=x.device).unsqueeze(-1)
        h = h + self.position_embeddings(positions).expand_as(h)
        # run the stack of transformer layers
        for layer in self.layers:
            h = layer(h)
        if not classify:
            # return per-position language-model logits
            return self.head(h)
        h = torch.mean(h, dim=0)  # average pool over sequence
        return self.clf_head(h)  # return classification logits
| [
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198,
198,
4871,
402,
11571,
17,
7,
489,
13,
15047,
768,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
22... | 2.275068 | 738 |