code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
from __future__ import division from __future__ import print_function from pathlib import Path import sys project_path = Path(__file__).resolve().parents[1] sys.path.append(str(project_path)) from keras.layers import Dense, Activation, Dropout from keras.models import Model, Sequential from keras.regularizers import l2 from keras.optimizers import Adam import keras.backend as K import numpy as np import time import tensorflow as tf import os from core.utils import * from core.layers.graph_cnn_layer import GraphCNN from sklearn.preprocessing import normalize # Set random seed seed = 123 np.random.seed(seed) tf.random.set_seed(seed) # Settings flags = tf.compat.v1.flags FLAGS = flags.FLAGS flags.DEFINE_string('dataset', 'brc_microarray_usa', 'Dataset string.') flags.DEFINE_string('embedding_method', 'ge', 'Name of the embedding method.') #Check dataset availability if not os.path.isdir("{}/data/parsed_input/{}".format(project_path, FLAGS.dataset)): sys.exit("{} dataset is not available under data/parsed_input/".format(FLAGS.dataset)) if not os.path.isdir("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method)): os.makedirs("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method)) print("--------------------------------------------") print("--------------------------------------------") print("Hyper-parameters:") print("Dataset: {}".format(FLAGS.dataset)) print("Embedding method: {}".format(FLAGS.embedding_method)) print("--------------------------------------------") print("--------------------------------------------") # Prepare Data X, A, Y = load_training_data(dataset=FLAGS.dataset) Y_train, Y_val, Y_test, train_idx, val_idx, test_idx, train_mask = get_splits_for_learning(Y, dataset=FLAGS.dataset) # Normalize gene expression X = normalize(X, norm='l1') #for positive non-zero entries, it's equivalent to: X /= X.sum(1).reshape(-1, 1) #Save the node emmbeddings 
np.savetxt("{}/data/output/{}/embedding/{}/embeddings.txt".format(project_path, FLAGS.dataset, FLAGS.embedding_method), X, delimiter="\t") print("Embeddings saved in /data/output/{}/embedding/{}/embeddings.txt".format(FLAGS.dataset, FLAGS.embedding_method))
[ "tensorflow.random.set_seed", "sklearn.preprocessing.normalize", "numpy.random.seed", "pathlib.Path" ]
[((623, 643), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (637, 643), True, 'import numpy as np\n'), ((645, 669), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (663, 669), True, 'import tensorflow as tf\n'), ((1912, 1935), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {'norm': '"""l1"""'}), "(X, norm='l1')\n", (1921, 1935), False, 'from sklearn.preprocessing import normalize\n'), ((127, 141), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'from pathlib import Path\n')]
import io import zlib import numpy as np def maybe_compress(str, compress): return zlib.compress(str) if compress else str def maybe_decompress(str, decompress): return zlib.decompress(str) if decompress else str def serialize_numpy(arr: np.ndarray, compress: bool = False) -> str: """Serializes numpy array to string with optional zlib compression. Args: arr (np.ndarray): Numpy array to serialize. compress (bool, optional): Whether to compress resulting string with zlib or not. Defaults to False. Returns: str: serialized string """ buf = io.BytesIO() assert isinstance(arr, np.ndarray) np.save(buf, arr) result = buf.getvalue() return maybe_compress(result, compress) def deserialize_numpy(serialized_string: str, decompress: bool = False) -> np.ndarray: """Deserializes numpy array from compressed string. Args: serialized_string (str): Serialized numpy array decompress (bool, optional): Whether to decompress string with zlib before laoding. Defaults to False. Returns: np.ndarray: deserialized numpy array """ str = maybe_decompress(serialized_string, decompress) buf = io.BytesIO(str) return np.load(buf)
[ "io.BytesIO", "zlib.compress", "numpy.save", "numpy.load", "zlib.decompress" ]
[((616, 628), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (626, 628), False, 'import io\n'), ((672, 689), 'numpy.save', 'np.save', (['buf', 'arr'], {}), '(buf, arr)\n', (679, 689), True, 'import numpy as np\n'), ((1232, 1247), 'io.BytesIO', 'io.BytesIO', (['str'], {}), '(str)\n', (1242, 1247), False, 'import io\n'), ((1259, 1271), 'numpy.load', 'np.load', (['buf'], {}), '(buf)\n', (1266, 1271), True, 'import numpy as np\n'), ((90, 108), 'zlib.compress', 'zlib.compress', (['str'], {}), '(str)\n', (103, 108), False, 'import zlib\n'), ((182, 202), 'zlib.decompress', 'zlib.decompress', (['str'], {}), '(str)\n', (197, 202), False, 'import zlib\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import xml.etree.ElementTree as ET tree = ET.parse(sys.argv[1]) old_doc = tree.getroot() tree = ET.parse(sys.argv[2]) new_doc = tree.getroot() f = file(sys.argv[3], "wb") tab = 0 old_classes = {} def write_string(_f, text, newline=True): for t in range(tab): _f.write("\t") _f.write(text) if (newline): _f.write("\n") def escape(ret): ret = ret.replace("&", "&amp;") ret = ret.replace("<", "&gt;") ret = ret.replace(">", "&lt;") ret = ret.replace("'", "&apos;") ret = ret.replace("\"", "&quot;") return ret def inc_tab(): global tab tab += 1 def dec_tab(): global tab tab -= 1 write_string(f, '<?xml version="1.0" encoding="UTF-8" ?>') write_string(f, '<doc version="' + new_doc.attrib["version"] + '">') def get_tag(node, name): tag = "" if (name in node.attrib): tag = ' ' + name + '="' + escape(node.attrib[name]) + '" ' return tag def find_method_descr(old_class, name): methods = old_class.find("methods") if(methods != None and len(list(methods)) > 0): for m in list(methods): if (m.attrib["name"] == name): description = m.find("description") if (description != None and description.text.strip() != ""): return description.text return None def find_signal_descr(old_class, name): signals = old_class.find("signals") if(signals != None and len(list(signals)) > 0): for m in list(signals): if (m.attrib["name"] == name): description = m.find("description") if (description != None and description.text.strip() != ""): return description.text return None def find_constant_descr(old_class, name): if (old_class is None): return None constants = old_class.find("constants") if(constants != None and len(list(constants)) > 0): for m in list(constants): if (m.attrib["name"] == name): if (m.text.strip() != ""): return m.text return None def write_class(c): class_name = c.attrib["name"] print("Parsing Class: " + class_name) if (class_name in old_classes): old_class = old_classes[class_name] else: old_class = None category = get_tag(c, 
"category") inherits = get_tag(c, "inherits") write_string(f, '<class name="' + class_name + '" ' + category + inherits + '>') inc_tab() write_string(f, "<brief_description>") if (old_class != None): old_brief_descr = old_class.find("brief_description") if (old_brief_descr != None): write_string(f, escape(old_brief_descr.text.strip())) write_string(f, "</brief_description>") write_string(f, "<description>") if (old_class != None): old_descr = old_class.find("description") if (old_descr != None): write_string(f, escape(old_descr.text.strip())) write_string(f, "</description>") methods = c.find("methods") if(methods != None and len(list(methods)) > 0): write_string(f, "<methods>") inc_tab() for m in list(methods): qualifiers = get_tag(m, "qualifiers") write_string(f, '<method name="' + escape(m.attrib["name"]) + '" ' + qualifiers + '>') inc_tab() for a in list(m): if (a.tag == "return"): typ = get_tag(a, "type") write_string(f, '<return' + typ + '>') write_string(f, '</return>') elif (a.tag == "argument"): default = get_tag(a, "default") write_string(f, '<argument index="' + a.attrib["index"] + '" name="' + escape(a.attrib["name"]) + '" type="' + a.attrib["type"] + '"' + default + '>') write_string(f, '</argument>') write_string(f, '<description>') if (old_class != None): old_method_descr = find_method_descr(old_class, m.attrib["name"]) if (old_method_descr): write_string(f, escape(escape(old_method_descr.strip()))) write_string(f, '</description>') dec_tab() write_string(f, "</method>") dec_tab() write_string(f, "</methods>") signals = c.find("signals") if(signals != None and len(list(signals)) > 0): write_string(f, "<signals>") inc_tab() for m in list(signals): write_string(f, '<signal name="' + escape(m.attrib["name"]) + '">') inc_tab() for a in list(m): if (a.tag == "argument"): write_string(f, '<argument index="' + a.attrib["index"] + '" name="' + escape(a.attrib["name"]) + '" type="' + a.attrib["type"] + '">') write_string(f, '</argument>') write_string(f, 
'<description>') if (old_class != None): old_signal_descr = find_signal_descr(old_class, m.attrib["name"]) if (old_signal_descr): write_string(f, escape(old_signal_descr.strip())) write_string(f, '</description>') dec_tab() write_string(f, "</signal>") dec_tab() write_string(f, "</signals>") constants = c.find("constants") if(constants != None and len(list(constants)) > 0): write_string(f, "<constants>") inc_tab() for m in list(constants): write_string(f, '<constant name="' + escape(m.attrib["name"]) + '" value="' + m.attrib["value"] + '">') old_constant_descr = find_constant_descr(old_class, m.attrib["name"]) if (old_constant_descr): write_string(f, escape(old_constant_descr.strip())) write_string(f, "</constant>") dec_tab() write_string(f, "</constants>") dec_tab() write_string(f, "</class>") for c in list(old_doc): old_classes[c.attrib["name"]] = c for c in list(new_doc): write_class(c) write_string(f, '</doc>\n')
[ "xml.etree.ElementTree.parse" ]
[((102, 123), 'xml.etree.ElementTree.parse', 'ET.parse', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (110, 123), True, 'import xml.etree.ElementTree as ET\n'), ((157, 178), 'xml.etree.ElementTree.parse', 'ET.parse', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (165, 178), True, 'import xml.etree.ElementTree as ET\n')]
import click from .definition_group.apply import apply from .definition_group.diff import diff from .definition_group.generate import generate from .definition_group.create import create # from .definition_group.lint import lint @click.group(short_help="Manage API definition configuration") @click.pass_context def definition(ctx): """This group allow handling API definition commands from templating and value files""" pass definition.add_command(apply) definition.add_command(diff) definition.add_command(create) definition.add_command(generate) # definition.add_command(lint)
[ "click.group" ]
[((233, 294), 'click.group', 'click.group', ([], {'short_help': '"""Manage API definition configuration"""'}), "(short_help='Manage API definition configuration')\n", (244, 294), False, 'import click\n')]
from linkedlist import LinkedList from node import Node class IRes: def __init__(self, result, node): self.result = result self.node = node def print_nodes(n): while(n != None): print(n.data) n = n.next def tail_and_size(n): ctr = 0 while n.next: ctr += 1 n = n.next return ctr, n def remove_start(n, limit): for i in range(limit): n = n.next return n def intersection(a, b): a_res, b_res = tail_and_size(a), tail_and_size(b) # if tail are different, no need to compare further if a_res[1] != b_res[1]: return IRes(False, None) list_diff = abs(a_res[0]-b_res[0]) # remove start nodes from longer list to ensure both are of same size if a_res[0] > b_res[0]: a = remove_start(a, list_diff) else: b = remove_start(b, list_diff) while a != None and b != None: if a == b: return IRes(True, a) a = a.next b = b.next return IRes(False, None) if __name__ == "__main__": a = Node(1) b = Node(2) c = Node(7) d = Node(6) e = Node(4) f = Node(9) g = Node(5) h = Node(1) i = Node(3) x = Node(1) y = Node(2) z = Node(7) z.next = y y.next = x i.next = h h.next = g g.next = f f.next = c # with intersection # f.next = z # without intersection e.next = d d.next = c c.next = b b.next = a result = intersection(i, e) if result.result: print("Intersection found at node instance: " + str(result.node)) else: print("No intersection")
[ "node.Node" ]
[((1066, 1073), 'node.Node', 'Node', (['(1)'], {}), '(1)\n', (1070, 1073), False, 'from node import Node\n'), ((1082, 1089), 'node.Node', 'Node', (['(2)'], {}), '(2)\n', (1086, 1089), False, 'from node import Node\n'), ((1098, 1105), 'node.Node', 'Node', (['(7)'], {}), '(7)\n', (1102, 1105), False, 'from node import Node\n'), ((1114, 1121), 'node.Node', 'Node', (['(6)'], {}), '(6)\n', (1118, 1121), False, 'from node import Node\n'), ((1130, 1137), 'node.Node', 'Node', (['(4)'], {}), '(4)\n', (1134, 1137), False, 'from node import Node\n'), ((1146, 1153), 'node.Node', 'Node', (['(9)'], {}), '(9)\n', (1150, 1153), False, 'from node import Node\n'), ((1162, 1169), 'node.Node', 'Node', (['(5)'], {}), '(5)\n', (1166, 1169), False, 'from node import Node\n'), ((1178, 1185), 'node.Node', 'Node', (['(1)'], {}), '(1)\n', (1182, 1185), False, 'from node import Node\n'), ((1194, 1201), 'node.Node', 'Node', (['(3)'], {}), '(3)\n', (1198, 1201), False, 'from node import Node\n'), ((1211, 1218), 'node.Node', 'Node', (['(1)'], {}), '(1)\n', (1215, 1218), False, 'from node import Node\n'), ((1227, 1234), 'node.Node', 'Node', (['(2)'], {}), '(2)\n', (1231, 1234), False, 'from node import Node\n'), ((1243, 1250), 'node.Node', 'Node', (['(7)'], {}), '(7)\n', (1247, 1250), False, 'from node import Node\n')]
from flask import jsonify, request, g, url_for, current_app from app_core import db from app_core.api import api from app_core.api.decorators import permission_required from app_core.models import Post, Permission, Comment @api.route('/comments/') def get_comments(): page = request.args.get('page', 1, type=int) pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate( page, per_page=current_app.config['LICMS_COMMENTS_PER_PAGE'], error_out=False) comments = pagination.items _prev = None if pagination.has_prev: _prev = url_for('api.get_comments', page=page - 1) _next = None if pagination.has_next: _next = url_for('api.get_comments', page=page + 1) return jsonify({ 'comments': [comment.to_json() for comment in comments], 'prev': _prev, 'next': _next, 'count': pagination.total }) @api.route('/comments/<int:comment_id>') def get_comment(comment_id): comment = Comment.query.get_or_404(comment_id) return jsonify(comment.to_json()) @api.route('/posts/<int:post_id>/comments/') def get_post_comments(post_id): post = Post.query.get_or_404(post_id) page = request.args.get('page', 1, type=int) pagination = post.comments.order_by(Comment.timestamp.asc()).paginate( page, per_page=current_app.config['LICMS_COMMENTS_PER_PAGE'], error_out=False) comments = pagination.items _prev = None if pagination.has_prev: _prev = url_for('api.get_post_comments', post_id=post_id, page=page - 1) _next = None if pagination.has_next: _next = url_for('api.get_post_comments', post_id=post_id, page=page + 1) return jsonify({ 'comments': [comment.to_json() for comment in comments], 'prev': _prev, 'next': _next, 'count': pagination.total }) @api.route('/posts/<int:post_id>/comments/', methods=['POST']) @permission_required(Permission.COMMENT) def new_post_comment(post_id): post = Post.query.get_or_404(post_id) comment = Comment.from_json(request.json) comment.author = g.current_user comment.post = post db.session.add(comment) db.session.commit() return jsonify(comment.to_json()), 201, 
{'Location': url_for('api.get_comment', comment_id=comment.id)}
[ "app_core.models.Comment.from_json", "flask.request.args.get", "app_core.db.session.commit", "app_core.api.decorators.permission_required", "app_core.models.Comment.timestamp.asc", "app_core.models.Post.query.get_or_404", "flask.url_for", "app_core.models.Comment.timestamp.desc", "app_core.db.sessio...
[((227, 250), 'app_core.api.api.route', 'api.route', (['"""/comments/"""'], {}), "('/comments/')\n", (236, 250), False, 'from app_core.api import api\n'), ((907, 946), 'app_core.api.api.route', 'api.route', (['"""/comments/<int:comment_id>"""'], {}), "('/comments/<int:comment_id>')\n", (916, 946), False, 'from app_core.api import api\n'), ((1068, 1111), 'app_core.api.api.route', 'api.route', (['"""/posts/<int:post_id>/comments/"""'], {}), "('/posts/<int:post_id>/comments/')\n", (1077, 1111), False, 'from app_core.api import api\n'), ((1865, 1926), 'app_core.api.api.route', 'api.route', (['"""/posts/<int:post_id>/comments/"""'], {'methods': "['POST']"}), "('/posts/<int:post_id>/comments/', methods=['POST'])\n", (1874, 1926), False, 'from app_core.api import api\n'), ((1928, 1967), 'app_core.api.decorators.permission_required', 'permission_required', (['Permission.COMMENT'], {}), '(Permission.COMMENT)\n', (1947, 1967), False, 'from app_core.api.decorators import permission_required\n'), ((282, 319), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (298, 319), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((990, 1026), 'app_core.models.Comment.query.get_or_404', 'Comment.query.get_or_404', (['comment_id'], {}), '(comment_id)\n', (1014, 1026), False, 'from app_core.models import Post, Permission, Comment\n'), ((1155, 1185), 'app_core.models.Post.query.get_or_404', 'Post.query.get_or_404', (['post_id'], {}), '(post_id)\n', (1176, 1185), False, 'from app_core.models import Post, Permission, Comment\n'), ((1197, 1234), 'flask.request.args.get', 'request.args.get', (['"""page"""', '(1)'], {'type': 'int'}), "('page', 1, type=int)\n", (1213, 1234), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((2010, 2040), 'app_core.models.Post.query.get_or_404', 'Post.query.get_or_404', (['post_id'], {}), '(post_id)\n', (2031, 2040), False, 'from app_core.models import 
Post, Permission, Comment\n'), ((2055, 2086), 'app_core.models.Comment.from_json', 'Comment.from_json', (['request.json'], {}), '(request.json)\n', (2072, 2086), False, 'from app_core.models import Post, Permission, Comment\n'), ((2151, 2174), 'app_core.db.session.add', 'db.session.add', (['comment'], {}), '(comment)\n', (2165, 2174), False, 'from app_core import db\n'), ((2179, 2198), 'app_core.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2196, 2198), False, 'from app_core import db\n'), ((584, 626), 'flask.url_for', 'url_for', (['"""api.get_comments"""'], {'page': '(page - 1)'}), "('api.get_comments', page=page - 1)\n", (591, 626), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((688, 730), 'flask.url_for', 'url_for', (['"""api.get_comments"""'], {'page': '(page + 1)'}), "('api.get_comments', page=page + 1)\n", (695, 730), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((1498, 1562), 'flask.url_for', 'url_for', (['"""api.get_post_comments"""'], {'post_id': 'post_id', 'page': '(page - 1)'}), "('api.get_post_comments', post_id=post_id, page=page - 1)\n", (1505, 1562), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((1624, 1688), 'flask.url_for', 'url_for', (['"""api.get_post_comments"""'], {'post_id': 'post_id', 'page': '(page + 1)'}), "('api.get_post_comments', post_id=post_id, page=page + 1)\n", (1631, 1688), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((2256, 2305), 'flask.url_for', 'url_for', (['"""api.get_comment"""'], {'comment_id': 'comment.id'}), "('api.get_comment', comment_id=comment.id)\n", (2263, 2305), False, 'from flask import jsonify, request, g, url_for, current_app\n'), ((360, 384), 'app_core.models.Comment.timestamp.desc', 'Comment.timestamp.desc', ([], {}), '()\n', (382, 384), False, 'from app_core.models import Post, Permission, Comment\n'), ((1275, 1298), 'app_core.models.Comment.timestamp.asc', 'Comment.timestamp.asc', ([], 
{}), '()\n', (1296, 1298), False, 'from app_core.models import Post, Permission, Comment\n')]
#!/usr/bin/env python """ File Description: File used for definition of State Class. """ # ****************************************** Libraries to be imported ****************************************** # from copy import deepcopy # ****************************************** Class Declaration Start ****************************************** # class State(object): """ A state is just a collection of variable bindings. * state = State('foo') tells IPyHOP to create an empty state object named 'foo'. To put variables and values into it, you should do assignments such as foo.var1 = val1 """ def __init__(self, name: str): self.__name__ = name # ****************************** Class Method Declaration ****************************************** # def __str__(self): if self: var_str = "\r{state_name}.{var_name} = {var_value}\n" state_str = "" for name, val in self.__dict__.items(): if name != "__name__": _str = var_str.format(state_name=self.__name__, var_name=name, var_value=val) _str = '\n\t\t'.join(_str[i:i+120] for i in range(0, len(_str), 120)) state_str += _str return state_str[:-1] else: return "False" # ****************************** Class Method Declaration ****************************************** # def __repr__(self): return str(self.__class__) + ", " + self.__name__ # ****************************** Class Method Declaration ****************************************** # def update(self, state): self.__dict__.update(state.__dict__) return self # ****************************** Class Method Declaration ****************************************** # def copy(self): return deepcopy(self) # ****************************************** Class Declaration End ****************************************** # # ****************************************** Demo / Test Routine ****************************************** # if __name__ == '__main__': print("Test instantiation of State class ...") test_state = State('test_state') test_state.test_var_1 = {'key1': 'val1'} test_state.test_var_2 
= {'key1': 0} test_state.test_var_3 = {'key2': {'key3': 5}, 'key3': {'key2': 5}} print(test_state) """ Author(s): <NAME> Repository: https://github.com/YashBansod/IPyHOP """
[ "copy.deepcopy" ]
[((1924, 1938), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (1932, 1938), False, 'from copy import deepcopy\n')]
from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound from ckanext.scheming.helpers import ( scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema, ) @side_effect_free def scheming_dataset_schema_list(context, data_dict): ''' Return a list of dataset types customized with the scheming extension ''' return list(scheming_dataset_schemas()) @side_effect_free def scheming_dataset_schema_show(context, data_dict): ''' Return the scheming schema for a given dataset type :param type: the dataset type :param expanded: True to expand presets (default) ''' t = get_or_bust(data_dict, 'type') expanded = data_dict.get('expanded', True) s = scheming_get_dataset_schema(t, expanded) if s is None: raise ObjectNotFound() return s @side_effect_free def scheming_group_schema_list(context, data_dict): ''' Return a list of group types customized with the scheming extension ''' return list(scheming_group_schemas()) @side_effect_free def scheming_group_schema_show(context, data_dict): ''' Return the scheming schema for a given group type :param type: the group type :param expanded: True to expand presets (default) ''' t = get_or_bust(data_dict, 'type') expanded = data_dict.get('expanded', True) s = scheming_get_group_schema(t, expanded) if s is None: raise ObjectNotFound() return s @side_effect_free def scheming_organization_schema_list(context, data_dict): ''' Return a list of organization types customized with the scheming extension ''' return list(scheming_organization_schemas()) @side_effect_free def scheming_organization_schema_show(context, data_dict): ''' Return the scheming schema for a given organization type :param type: the organization type :param expanded: True to expand presets (default) ''' t = get_or_bust(data_dict, 'type') expanded = data_dict.get('expanded', True) s = scheming_get_organization_schema(t, expanded) if s is None: raise 
ObjectNotFound() return s
[ "ckanext.scheming.helpers.scheming_get_group_schema", "ckanext.scheming.helpers.scheming_get_organization_schema", "ckanext.scheming.helpers.scheming_group_schemas", "ckanext.scheming.helpers.scheming_get_dataset_schema", "ckanext.scheming.helpers.scheming_organization_schemas", "ckantoolkit.get_or_bust",...
[((750, 780), 'ckantoolkit.get_or_bust', 'get_or_bust', (['data_dict', '"""type"""'], {}), "(data_dict, 'type')\n", (761, 780), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n'), ((836, 876), 'ckanext.scheming.helpers.scheming_get_dataset_schema', 'scheming_get_dataset_schema', (['t', 'expanded'], {}), '(t, expanded)\n', (863, 876), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((1378, 1408), 'ckantoolkit.get_or_bust', 'get_or_bust', (['data_dict', '"""type"""'], {}), "(data_dict, 'type')\n", (1389, 1408), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n'), ((1464, 1502), 'ckanext.scheming.helpers.scheming_get_group_schema', 'scheming_get_group_schema', (['t', 'expanded'], {}), '(t, expanded)\n', (1489, 1502), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((2046, 2076), 'ckantoolkit.get_or_bust', 'get_or_bust', (['data_dict', '"""type"""'], {}), "(data_dict, 'type')\n", (2057, 2076), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n'), ((2132, 2177), 'ckanext.scheming.helpers.scheming_get_organization_schema', 'scheming_get_organization_schema', (['t', 'expanded'], {}), '(t, expanded)\n', (2164, 2177), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((479, 505), 'ckanext.scheming.helpers.scheming_dataset_schemas', 'scheming_dataset_schemas', ([], {}), '()\n', (503, 505), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, 
scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((909, 925), 'ckantoolkit.ObjectNotFound', 'ObjectNotFound', ([], {}), '()\n', (923, 925), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n'), ((1115, 1139), 'ckanext.scheming.helpers.scheming_group_schemas', 'scheming_group_schemas', ([], {}), '()\n', (1137, 1139), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((1535, 1551), 'ckantoolkit.ObjectNotFound', 'ObjectNotFound', ([], {}), '()\n', (1549, 1551), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n'), ((1755, 1786), 'ckanext.scheming.helpers.scheming_organization_schemas', 'scheming_organization_schemas', ([], {}), '()\n', (1784, 1786), False, 'from ckanext.scheming.helpers import scheming_dataset_schemas, scheming_get_dataset_schema, scheming_group_schemas, scheming_get_group_schema, scheming_organization_schemas, scheming_get_organization_schema\n'), ((2210, 2226), 'ckantoolkit.ObjectNotFound', 'ObjectNotFound', ([], {}), '()\n', (2224, 2226), False, 'from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound\n')]
import os import sys import jinja2 import yaml with open(".information.yml") as fp: information = yaml.safe_load(fp) loader = jinja2.FileSystemLoader(searchpath="") environment = jinja2.Environment(loader=loader, keep_trailing_newline=True) template = environment.get_template(sys.argv[1]) result = template.render({ "docker_image_name": information.get("docker_image_name", "NONE"), "readme_note": information.get("readme_note", None), "versions": information.get("versions", ["latest"]) }) with open(sys.argv[1], "w+") as fp: fp.write(result)
[ "yaml.safe_load", "jinja2.FileSystemLoader", "jinja2.Environment" ]
[((133, 171), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', ([], {'searchpath': '""""""'}), "(searchpath='')\n", (156, 171), False, 'import jinja2\n'), ((186, 247), 'jinja2.Environment', 'jinja2.Environment', ([], {'loader': 'loader', 'keep_trailing_newline': '(True)'}), '(loader=loader, keep_trailing_newline=True)\n', (204, 247), False, 'import jinja2\n'), ((104, 122), 'yaml.safe_load', 'yaml.safe_load', (['fp'], {}), '(fp)\n', (118, 122), False, 'import yaml\n')]
# Generated by Django 3.2.9 on 2021-11-27 12:21 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('notice', '0002_auto_20211127_0236'), ] operations = [ migrations.AlterField( model_name='event', name='priority', field=models.IntegerField(choices=[(1, 'Низкий приоритет'), (2, 'Средний приоритет'), (3, 'Высокий приоритет')], verbose_name='Приоритет'), ), ]
[ "django.db.models.IntegerField" ]
[((336, 472), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Низкий приоритет'), (2, 'Средний приоритет'), (3, 'Высокий приоритет')]", 'verbose_name': '"""Приоритет"""'}), "(choices=[(1, 'Низкий приоритет'), (2,\n 'Средний приоритет'), (3, 'Высокий приоритет')], verbose_name='Приоритет')\n", (355, 472), False, 'from django.db import migrations, models\n')]
import math import re from data_extraction.scraper import Scraper class SozialeinsatzScraper(Scraper): """Scrapes the website www.sozialeinsatz.de.""" base_url = 'https://www.sozialeinsatz.de' debug = True def parse(self, response, url): """Handles the soupified response of a detail page in the predefined way and returns it""" self.logger.debug('parse()') content = response.find('div', {'id': 'content'}) title = content.find('h2') if title.text == 'Error 404': return None task = content.find('h2', string=re.compile(r'Stellenbeschreibung.*')).findNext('p') organization = title.findNext('div', {'class': 'row'}).find('p') contact = content.find('h2', string=re.compile(r'Ansprechpartner.*')).findNext('p') details = content.find('h2', string=re.compile(r'Details.*')).findNext('p') category_string = details.find('strong', string=re.compile(r'Aufgaben.*')).nextSibling categories = [x.strip() for x in category_string.split(',')] categories.append(title.find('acronym')['title']) timing = details.find('strong', string=re.compile(r'Zeitraum.*')).nextSibling location = None location_p = content.find('h2', string=re.compile(r'Einsatzort.*')).findNext('p') if location_p.a is not None and 'q=' in location_p.a['href']: location = location_p.a['href'].split('q=')[1] zipcode = None if location is not None: if len(re.findall(r'(\d{5})', location)) > 0: zipcode = re.findall(r'(\d{5})', location)[0] parsed_object = { 'title': title.text.strip(), 'categories': categories, 'location': location, 'task': task.decode_contents().strip(), 'target_group': None, 'prerequisites': None, 'language_skills': None, 'timing': timing.strip(), 'effort': None, 'opportunities': None, 'organization': organization.decode_contents().strip() if organization is not None else None, 'contact': contact.decode_contents().strip() if contact is not None else None, 'link': url or None, 'source': 'www.sozialeinsatz.de', 'geo_location': None, } parsed_object['post_struct'] = { 'title': parsed_object['title'], 
'categories': parsed_object['categories'], 'location': { 'country': 'Deutschland', 'zipcode': zipcode, 'city': None, 'street': None, }, 'task': None, 'target_group': None, 'prerequisites': parsed_object['prerequisites'], 'language_skills': parsed_object['language_skills'], 'timing': parsed_object['timing'], 'effort': None, 'opportunities': None, 'organization': None, 'contact': None, 'link': parsed_object['link'], 'source': parsed_object['source'], 'geo_location': parsed_object['geo_location'], } return parsed_object def add_urls(self): """Adds all URLs of detail pages, found on the search pages, for the crawl function to scrape""" self.logger.debug('add_urls()') import time index = 1 index_max = None search_page_url = f'{self.base_url}/stellenangebote/finden?Stellenangebot_page={index}' next_page_url = search_page_url while next_page_url: response = self.soupify(next_page_url) # Get tags of individual results detail_a_tags = response.findAll('a', {'class': 'morelink'}) # Get maximum number of pages if index_max is None: summary_text = response.find('div', {'class': 'summary'}).text entries = int(re.findall(r'(\d+).?$', summary_text)[0]) index_max = math.ceil(entries / 25.0) self.logger.debug(f'Fetched {len(detail_a_tags)} URLs from {next_page_url} [{index}/{index_max}]') self.update_fetching_progress(index, index_max) # Iterate links and add, if not already found for link_tag in detail_a_tags: current_link = self.base_url + link_tag['href'] if current_link in self.urls: self.logger.debug(f'func: add_urls, page_index: {index},' f' search_page: {search_page_url}, ' f'duplicate_index: {current_link}, ' f'duplicate_index: {self.urls.index(current_link)}') else: self.urls.append(current_link) # Get next result page if index < index_max: index += 1 next_page_url = f'{self.base_url}/stellenangebote/finden?Stellenangebot_page={index}' else: next_page_url = None time.sleep(self.delay)
[ "re.findall", "math.ceil", "time.sleep", "re.compile" ]
[((5119, 5141), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (5129, 5141), False, 'import time\n'), ((4049, 4074), 'math.ceil', 'math.ceil', (['(entries / 25.0)'], {}), '(entries / 25.0)\n', (4058, 4074), False, 'import math\n'), ((956, 980), 're.compile', 're.compile', (['"""Aufgaben.*"""'], {}), "('Aufgaben.*')\n", (966, 980), False, 'import re\n'), ((1170, 1194), 're.compile', 're.compile', (['"""Zeitraum.*"""'], {}), "('Zeitraum.*')\n", (1180, 1194), False, 'import re\n'), ((1529, 1561), 're.findall', 're.findall', (['"""(\\\\d{5})"""', 'location'], {}), "('(\\\\d{5})', location)\n", (1539, 1561), False, 'import re\n'), ((1594, 1626), 're.findall', 're.findall', (['"""(\\\\d{5})"""', 'location'], {}), "('(\\\\d{5})', location)\n", (1604, 1626), False, 'import re\n'), ((595, 630), 're.compile', 're.compile', (['"""Stellenbeschreibung.*"""'], {}), "('Stellenbeschreibung.*')\n", (605, 630), False, 'import re\n'), ((766, 797), 're.compile', 're.compile', (['"""Ansprechpartner.*"""'], {}), "('Ansprechpartner.*')\n", (776, 797), False, 'import re\n'), ((859, 882), 're.compile', 're.compile', (['"""Details.*"""'], {}), "('Details.*')\n", (869, 882), False, 'import re\n'), ((1281, 1307), 're.compile', 're.compile', (['"""Einsatzort.*"""'], {}), "('Einsatzort.*')\n", (1291, 1307), False, 'import re\n'), ((3979, 4016), 're.findall', 're.findall', (['"""(\\\\d+).?$"""', 'summary_text'], {}), "('(\\\\d+).?$', summary_text)\n", (3989, 4016), False, 'import re\n')]
# encoding: utf8 import numpy as np import pandas as pd from collections import OrderedDict from senti_analysis import config from senti_analysis import constants from senti_analysis.preprocess import (load_tokenizer, load_sentences, encode_sentence, label_transform) def load_data_set(): """ Load data set. :return: train_data_set, validation_data_set, test_data_set """ train_data_set = pd.read_csv(config.TRAIN_SET_PATH) validation_data_set = pd.read_csv(config.VALIDATION_SET_PATH) test_data_set = pd.read_csv(config.TEST_SET_PATH) return train_data_set, validation_data_set, test_data_set def x_data(): train_set = pd.read_csv(config.TRAIN_SET_PATH) val_set = pd.read_csv(config.VALIDATION_SET_PATH) tokenizer = load_tokenizer() train_sentences, val_sentences, test_sentences = load_sentences() x_train = encode_sentence(train_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer) x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer) return x_train, x_val def load_val_data_set(): val_set = pd.read_csv(config.VALIDATION_SET_PATH) tokenizer = load_tokenizer() train_sentences, val_sentences, test_sentences = load_sentences() x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer) train_set = pd.read_csv(config.TRAIN_SET_PATH) val_set = pd.read_csv(config.VALIDATION_SET_PATH) _, y_val = transform_y_data(train_set, val_set, constants.COLS) return x_val, y_val def transform_y_data(train_set, val_set, cols): y_train = OrderedDict() y_val = OrderedDict() for col in cols: y_train[col] = np.array(label_transform(train_set[col])) y_val[col] = np.array(label_transform(val_set[col])) return y_train, y_val def y_data(): """ generate y label data. 
:return: train_label_data dict, validation_label_data dict """ train_set = pd.read_csv(config.TRAIN_SET_PATH) val_set = pd.read_csv(config.VALIDATION_SET_PATH) y_train, y_val = transform_y_data(train_set, val_set, constants.COLS) return y_train, y_val def validate_data(): val_set = pd.read_csv(config.VALIDATION_SET_PATH) tokenizer = load_tokenizer() train_sentences, val_sentences, test_sentences = load_sentences() x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer) y_val = {} for col in constants.COLS: y_val[col] = np.array(label_transform(val_set[col])) return x_val, y_val
[ "senti_analysis.preprocess.load_sentences", "collections.OrderedDict", "senti_analysis.preprocess.encode_sentence", "pandas.read_csv", "senti_analysis.preprocess.label_transform", "senti_analysis.preprocess.load_tokenizer" ]
[((452, 486), 'pandas.read_csv', 'pd.read_csv', (['config.TRAIN_SET_PATH'], {}), '(config.TRAIN_SET_PATH)\n', (463, 486), True, 'import pandas as pd\n'), ((513, 552), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (524, 552), True, 'import pandas as pd\n'), ((573, 606), 'pandas.read_csv', 'pd.read_csv', (['config.TEST_SET_PATH'], {}), '(config.TEST_SET_PATH)\n', (584, 606), True, 'import pandas as pd\n'), ((702, 736), 'pandas.read_csv', 'pd.read_csv', (['config.TRAIN_SET_PATH'], {}), '(config.TRAIN_SET_PATH)\n', (713, 736), True, 'import pandas as pd\n'), ((751, 790), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (762, 790), True, 'import pandas as pd\n'), ((808, 824), 'senti_analysis.preprocess.load_tokenizer', 'load_tokenizer', ([], {}), '()\n', (822, 824), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((878, 894), 'senti_analysis.preprocess.load_sentences', 'load_sentences', ([], {}), '()\n', (892, 894), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((909, 1020), 'senti_analysis.preprocess.encode_sentence', 'encode_sentence', (['train_sentences'], {'padding': '(True)', 'max_length': 'config.MAX_SEQUENCE_LENGTH', 'tokenizer': 'tokenizer'}), '(train_sentences, padding=True, max_length=config.\n MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)\n', (924, 1020), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1058, 1167), 'senti_analysis.preprocess.encode_sentence', 'encode_sentence', (['val_sentences'], {'padding': '(True)', 'max_length': 'config.MAX_SEQUENCE_LENGTH', 'tokenizer': 'tokenizer'}), '(val_sentences, padding=True, max_length=config.\n MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)\n', (1073, 1167), False, 'from senti_analysis.preprocess import 
load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1258, 1297), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (1269, 1297), True, 'import pandas as pd\n'), ((1315, 1331), 'senti_analysis.preprocess.load_tokenizer', 'load_tokenizer', ([], {}), '()\n', (1329, 1331), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1385, 1401), 'senti_analysis.preprocess.load_sentences', 'load_sentences', ([], {}), '()\n', (1399, 1401), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1414, 1523), 'senti_analysis.preprocess.encode_sentence', 'encode_sentence', (['val_sentences'], {'padding': '(True)', 'max_length': 'config.MAX_SEQUENCE_LENGTH', 'tokenizer': 'tokenizer'}), '(val_sentences, padding=True, max_length=config.\n MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)\n', (1429, 1523), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1564, 1598), 'pandas.read_csv', 'pd.read_csv', (['config.TRAIN_SET_PATH'], {}), '(config.TRAIN_SET_PATH)\n', (1575, 1598), True, 'import pandas as pd\n'), ((1613, 1652), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (1624, 1652), True, 'import pandas as pd\n'), ((1811, 1824), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1822, 1824), False, 'from collections import OrderedDict\n'), ((1837, 1850), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1848, 1850), False, 'from collections import OrderedDict\n'), ((2164, 2198), 'pandas.read_csv', 'pd.read_csv', (['config.TRAIN_SET_PATH'], {}), '(config.TRAIN_SET_PATH)\n', (2175, 2198), True, 'import pandas as pd\n'), ((2213, 2252), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (2224, 2252), 
True, 'import pandas as pd\n'), ((2392, 2431), 'pandas.read_csv', 'pd.read_csv', (['config.VALIDATION_SET_PATH'], {}), '(config.VALIDATION_SET_PATH)\n', (2403, 2431), True, 'import pandas as pd\n'), ((2449, 2465), 'senti_analysis.preprocess.load_tokenizer', 'load_tokenizer', ([], {}), '()\n', (2463, 2465), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((2519, 2535), 'senti_analysis.preprocess.load_sentences', 'load_sentences', ([], {}), '()\n', (2533, 2535), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((2548, 2657), 'senti_analysis.preprocess.encode_sentence', 'encode_sentence', (['val_sentences'], {'padding': '(True)', 'max_length': 'config.MAX_SEQUENCE_LENGTH', 'tokenizer': 'tokenizer'}), '(val_sentences, padding=True, max_length=config.\n MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)\n', (2563, 2657), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1905, 1936), 'senti_analysis.preprocess.label_transform', 'label_transform', (['train_set[col]'], {}), '(train_set[col])\n', (1920, 1936), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((1968, 1997), 'senti_analysis.preprocess.label_transform', 'label_transform', (['val_set[col]'], {}), '(val_set[col])\n', (1983, 1997), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n'), ((2759, 2788), 'senti_analysis.preprocess.label_transform', 'label_transform', (['val_set[col]'], {}), '(val_set[col])\n', (2774, 2788), False, 'from senti_analysis.preprocess import load_tokenizer, load_sentences, encode_sentence, label_transform\n')]
from django.conf import settings from django.urls import include, path, re_path from django.contrib import admin from ariadne_django.views import GraphQLView from wagtail.admin import urls as wagtailadmin_urls from wagtail.core import urls as wagtail_urls from wagtail.documents import urls as wagtaildocs_urls from puput import urls as puput_urls from search import views as search_views from wagsley.schema import schema print(schema) urlpatterns = [ path('django-admin/', admin.site.urls), path('admin/', include(wagtailadmin_urls)), path('documents/', include(wagtaildocs_urls)), #path('search/', search_views.search, name='search'), ] if settings.DEBUG: from django.conf.urls.static import static from django.contrib.staticfiles.urls import staticfiles_urlpatterns # Serve static and media files from development server urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns = urlpatterns + [ path('graphql/', GraphQLView.as_view(schema=schema), name='graphql'), path('accounts/', include('accounts.urls')), path('accounts/', include('django.contrib.auth.urls')), path('accounts/', include('allauth.urls')), path('events/', include('events.urls')), re_path(r'^comments/', include('django_comments_xtd.urls')), path("", include(puput_urls)), path("", include(wagtail_urls)), path('', include('home.urls')), ]
[ "django.urls.include", "django.conf.urls.static.static", "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "ariadne_django.views.GraphQLView.as_view", "django.urls.path" ]
[((461, 499), 'django.urls.path', 'path', (['"""django-admin/"""', 'admin.site.urls'], {}), "('django-admin/', admin.site.urls)\n", (465, 499), False, 'from django.urls import include, path, re_path\n'), ((882, 907), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ([], {}), '()\n', (905, 907), False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((927, 988), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (933, 988), False, 'from django.conf.urls.static import static\n'), ((521, 547), 'django.urls.include', 'include', (['wagtailadmin_urls'], {}), '(wagtailadmin_urls)\n', (528, 547), False, 'from django.urls import include, path, re_path\n'), ((573, 598), 'django.urls.include', 'include', (['wagtaildocs_urls'], {}), '(wagtaildocs_urls)\n', (580, 598), False, 'from django.urls import include, path, re_path\n'), ((1042, 1076), 'ariadne_django.views.GraphQLView.as_view', 'GraphQLView.as_view', ([], {'schema': 'schema'}), '(schema=schema)\n', (1061, 1076), False, 'from ariadne_django.views import GraphQLView\n'), ((1118, 1142), 'django.urls.include', 'include', (['"""accounts.urls"""'], {}), "('accounts.urls')\n", (1125, 1142), False, 'from django.urls import include, path, re_path\n'), ((1167, 1202), 'django.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (1174, 1202), False, 'from django.urls import include, path, re_path\n'), ((1227, 1250), 'django.urls.include', 'include', (['"""allauth.urls"""'], {}), "('allauth.urls')\n", (1234, 1250), False, 'from django.urls import include, path, re_path\n'), ((1274, 1296), 'django.urls.include', 'include', (['"""events.urls"""'], {}), "('events.urls')\n", (1281, 1296), False, 'from django.urls import include, path, re_path\n'), ((1327, 1362), 'django.urls.include', 'include', 
(['"""django_comments_xtd.urls"""'], {}), "('django_comments_xtd.urls')\n", (1334, 1362), False, 'from django.urls import include, path, re_path\n'), ((1378, 1397), 'django.urls.include', 'include', (['puput_urls'], {}), '(puput_urls)\n', (1385, 1397), False, 'from django.urls import include, path, re_path\n'), ((1413, 1434), 'django.urls.include', 'include', (['wagtail_urls'], {}), '(wagtail_urls)\n', (1420, 1434), False, 'from django.urls import include, path, re_path\n'), ((1451, 1471), 'django.urls.include', 'include', (['"""home.urls"""'], {}), "('home.urls')\n", (1458, 1471), False, 'from django.urls import include, path, re_path\n')]
from io import open from typing import List from pyknp import KNP, BList def read_knp_result_file(filename: str) -> List[BList]: """Read a KNP result file. Args: filename: A filename. Returns: A list of :class:`pyknp.knp.blist.BList` objects. """ knp = KNP() blists = [] with open(filename, "rt", encoding="utf-8", errors="replace") as f: chunk = "" for line in f: chunk += line if line.strip() == "EOS": blists.append(knp.result(chunk)) chunk = "" return blists
[ "pyknp.KNP", "io.open" ]
[((294, 299), 'pyknp.KNP', 'KNP', ([], {}), '()\n', (297, 299), False, 'from pyknp import KNP, BList\n'), ((325, 381), 'io.open', 'open', (['filename', '"""rt"""'], {'encoding': '"""utf-8"""', 'errors': '"""replace"""'}), "(filename, 'rt', encoding='utf-8', errors='replace')\n", (329, 381), False, 'from io import open\n')]
from cv2 import fastNlMeansDenoisingColored from cv2 import cvtColor from cv2 import bitwise_not,threshold,getRotationMatrix2D from cv2 import warpAffine,filter2D,imread from cv2 import THRESH_BINARY,COLOR_BGR2GRAY,THRESH_OTSU from cv2 import INTER_CUBIC,BORDER_REPLICATE,minAreaRect from numpy import column_stack,array,where from matplotlib.pyplot import imshow,xticks,yticks from pytesseract import image_to_string,pytesseract from PIL import Image class ImageProcess: '''this function is removing noise from the image''' def remove_noise(image): image = fastNlMeansDenoisingColored(image,None,20,10,7,21) return image '''this function is removing skewness. first, it calculate the angle and accordingly rotate image''' def remove_skew(image): in_gray = cvtColor(image, COLOR_BGR2GRAY) in_gray = bitwise_not(in_gray) thresh_pic = threshold(in_gray, 0, 255,THRESH_BINARY | THRESH_OTSU)[1] coords_x_y = column_stack(where(thresh_pic > 0)) angle = minAreaRect(coords_x_y)[-1] if angle < -45: angle = -(90 + angle) else: angle = -angle (h, w) = image.shape[:2] center_of_pic = (w // 2, h // 2) M = getRotationMatrix2D(center_of_pic, angle, 1.0) image = warpAffine(image, M, (w, h),flags=INTER_CUBIC, borderMode=BORDER_REPLICATE) return image '''for removing blurness from the image, this function increase sharpness of the image.''' def shapness_blur(image): sharpen_kernel = array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]]) image = filter2D(image, -1, sharpen_kernel) return image '''using pytesseract, this function extracting text from the image.''' def to_text(image): try: pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe" string_from_image = image_to_string(image,lang='eng') except Exception: pytesseract.tesseract_cmd = r"C:\Program Files(x86)\Tesseract-OCR\tesseract.exe" string_from_image = image_to_string(image,lang='eng') return string_from_image ##plot image in output def plot_image(image): imshow(image) xticks([]) yticks([])
[ "matplotlib.pyplot.imshow", "cv2.warpAffine", "cv2.getRotationMatrix2D", "matplotlib.pyplot.xticks", "cv2.fastNlMeansDenoisingColored", "cv2.threshold", "numpy.where", "cv2.filter2D", "cv2.minAreaRect", "numpy.array", "matplotlib.pyplot.yticks", "cv2.cvtColor", "pytesseract.image_to_string",...
[((576, 631), 'cv2.fastNlMeansDenoisingColored', 'fastNlMeansDenoisingColored', (['image', 'None', '(20)', '(10)', '(7)', '(21)'], {}), '(image, None, 20, 10, 7, 21)\n', (603, 631), False, 'from cv2 import fastNlMeansDenoisingColored\n'), ((804, 835), 'cv2.cvtColor', 'cvtColor', (['image', 'COLOR_BGR2GRAY'], {}), '(image, COLOR_BGR2GRAY)\n', (812, 835), False, 'from cv2 import cvtColor\n'), ((854, 874), 'cv2.bitwise_not', 'bitwise_not', (['in_gray'], {}), '(in_gray)\n', (865, 874), False, 'from cv2 import bitwise_not, threshold, getRotationMatrix2D\n'), ((1240, 1286), 'cv2.getRotationMatrix2D', 'getRotationMatrix2D', (['center_of_pic', 'angle', '(1.0)'], {}), '(center_of_pic, angle, 1.0)\n', (1259, 1286), False, 'from cv2 import bitwise_not, threshold, getRotationMatrix2D\n'), ((1303, 1379), 'cv2.warpAffine', 'warpAffine', (['image', 'M', '(w, h)'], {'flags': 'INTER_CUBIC', 'borderMode': 'BORDER_REPLICATE'}), '(image, M, (w, h), flags=INTER_CUBIC, borderMode=BORDER_REPLICATE)\n', (1313, 1379), False, 'from cv2 import warpAffine, filter2D, imread\n'), ((1555, 1603), 'numpy.array', 'array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (1560, 1603), False, 'from numpy import column_stack, array, where\n'), ((1614, 1649), 'cv2.filter2D', 'filter2D', (['image', '(-1)', 'sharpen_kernel'], {}), '(image, -1, sharpen_kernel)\n', (1622, 1649), False, 'from cv2 import warpAffine, filter2D, imread\n'), ((2219, 2232), 'matplotlib.pyplot.imshow', 'imshow', (['image'], {}), '(image)\n', (2225, 2232), False, 'from matplotlib.pyplot import imshow, xticks, yticks\n'), ((2241, 2251), 'matplotlib.pyplot.xticks', 'xticks', (['[]'], {}), '([])\n', (2247, 2251), False, 'from matplotlib.pyplot import imshow, xticks, yticks\n'), ((2260, 2270), 'matplotlib.pyplot.yticks', 'yticks', (['[]'], {}), '([])\n', (2266, 2270), False, 'from matplotlib.pyplot import imshow, xticks, yticks\n'), ((896, 951), 'cv2.threshold', 'threshold', 
(['in_gray', '(0)', '(255)', '(THRESH_BINARY | THRESH_OTSU)'], {}), '(in_gray, 0, 255, THRESH_BINARY | THRESH_OTSU)\n', (905, 951), False, 'from cv2 import bitwise_not, threshold, getRotationMatrix2D\n'), ((988, 1009), 'numpy.where', 'where', (['(thresh_pic > 0)'], {}), '(thresh_pic > 0)\n', (993, 1009), False, 'from numpy import column_stack, array, where\n'), ((1027, 1050), 'cv2.minAreaRect', 'minAreaRect', (['coords_x_y'], {}), '(coords_x_y)\n', (1038, 1050), False, 'from cv2 import INTER_CUBIC, BORDER_REPLICATE, minAreaRect\n'), ((1904, 1938), 'pytesseract.image_to_string', 'image_to_string', (['image'], {'lang': '"""eng"""'}), "(image, lang='eng')\n", (1919, 1938), False, 'from pytesseract import image_to_string, pytesseract\n'), ((2089, 2123), 'pytesseract.image_to_string', 'image_to_string', (['image'], {'lang': '"""eng"""'}), "(image, lang='eng')\n", (2104, 2123), False, 'from pytesseract import image_to_string, pytesseract\n')]
from test.base import BaseTestCase, user_payload import json class TestAuth(BaseTestCase): def test_authenticate(self): response = self.client.post('/auth', data=json.dumps(user_payload), content_type='application/json') response_data = json.loads(response.data) self.assert200(response) self.assertEqual(response_data["status"], "success") def test_auth_with_no_data(self): response = self.client.post('/auth', content_type='application/json') self.assert400(response) response_data = json.loads(response.data) self.assertEqual(response_data["status"], "fail")
[ "json.loads", "json.dumps" ]
[((246, 271), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (256, 271), False, 'import json\n'), ((509, 534), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (519, 534), False, 'import json\n'), ((169, 193), 'json.dumps', 'json.dumps', (['user_payload'], {}), '(user_payload)\n', (179, 193), False, 'import json\n')]
import os import fnmatch from pathlib import Path from jinja2 import Template from .Metadata import Metadata class JSGenerator: def __init__(self, j): """ """ self._j = j self._generated = False def _check_process_file(self, path): bname = os.path.basename(path) if bname.startswith("_"): return False IGNORE = ["/template", "JSLoader.py", "SystemFSDecorators.py", "FixerReplace"] for item in IGNORE: if path.find(item) != -1: return False return True def lib_link(self, path): """ look for ".jumpscalemodules" and link the parent directory to the JSX lib dir :param path: :return: """ j = self._j # can use j here because will never be used in first step for path in j.sal.fs.listFilesInDir(path, True, filter=".jumpscalemodules"): dpath = j.sal.fs.getDirName(path) target = j.core.tools.text_replace("{DIR_BASE}/lib/jumpscale/%s" % j.sal.fs.getBaseName(dpath)) j.sal.fs.symlink(dpath, target, True) def generate(self, methods_find=False, action_method=None, action_args={}, path=None): """ walk over all found jumpscale libraries look for the classes where there is a __jslocation__ inside these are classes which need to be loaded :param reset: :return: """ self.md = Metadata(self._j) # find the directory in which we have all repo's of threefoldtech if path: rootDir = path else: rootDir = os.path.dirname(self._j.core.dir_jumpscale.rstrip("/")) p = Path(rootDir) for dpath in p.iterdir(): if not dpath.is_dir(): continue if dpath.name.startswith("."): continue for dpath2 in dpath.iterdir(): jsmodpath = os.path.join(os.fspath(dpath2), ".jumpscalemodules") if not os.path.exists(jsmodpath): continue js_lib_path = os.path.join(os.fspath(dpath2)) # NOW WE HAVE FOUND A SET OF JUMPSCALE MODULES jumpscale_repo_name = os.path.basename(dpath2) for dirName, subdirList, fileList in os.walk(os.fspath(dpath2), followlinks=True): if dirName.find("egg-info") != -1: self._j.shell() if dirName.find("Jumpscale/core") is not -1: continue if dirName.find("notebooks/") is not -1: continue # skip the core 
files, they don't need to be read for item in fnmatch.filter(fileList, "*.py"): path = os.path.join(dirName, item) self._log("process", path) if self._check_process_file(path): # self._log("process_ok:") self.md.jsmodule_get( path=path, jumpscale_repo_name=jumpscale_repo_name, js_lib_path=js_lib_path, methods_find=methods_find, action_method=action_method, action_args=action_args, ) self.md.groups_load() # make sure we find all groups # self._j.shell() self._render() self.report() return action_args def _log(self, cat, msg=""): print("- %-15s %s" % (cat, msg)) pass def _render(self): # create the jumpscale dir if it does not exist yet dpath = "%s/jumpscale/" % self._j.dirs.TMPDIR if not os.path.exists(dpath): os.makedirs(dpath) # write the __init__ file otherwise cannot include dpath = "%s/jumpscale/__init__.py" % self._j.dirs.TMPDIR file = open(dpath, "w") file.write("") file.close() if self._j.application._check_debug(): template_name = "template_jumpscale_debug.py" else: template_name = "template_jumpscale.py" template_path = os.path.join(os.path.dirname(__file__), "templates", template_name) template = Path(template_path).read_text() t = Template(template) C = t.render(md=self.md) dpath = self._j.core.application._lib_generation_path file = open(dpath, "w") file.write(C) file.close() self._generated = True def report(self): """ kosmos "j.core.jsgenerator.report()" write reports to /tmp/jumpscale/code_report.md :return: """ # if self._generated is False: # self.generate() for name, jsgroup in self.md.jsgroups.items(): path = "%s/jumpscale/code_report_%s.md" % (self._j.dirs.TMPDIR, jsgroup.name) file = open(path, "w") file.write(jsgroup.markdown) file.close() self.report_errors() self.report_line_changes() def report_errors(self): out = "" for cat, obj, error, trace in self._j.application.errors_init: out += "## %s:%s\n\n" % (cat, obj) out += "%s\n\n" % error out += "%s\n\n" % trace path = "%s/jumpscale/ERRORS_report.md" % (self._j.dirs.TMPDIR) file = open(path, 
"w") file.write(out) file.close() return len(self._j.application.errors_init) def report_line_changes(self): out = "" for item in self.md.line_changes: out += str(item) path = "%s/jumpscale/LINECHANGES_report.md" % (self._j.dirs.TMPDIR) file = open(path, "w") file.write(out) file.close()
[ "os.path.exists", "os.makedirs", "pathlib.Path", "os.path.join", "jinja2.Template", "os.path.dirname", "os.path.basename", "fnmatch.filter", "os.fspath" ]
[((293, 315), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (309, 315), False, 'import os\n'), ((1705, 1718), 'pathlib.Path', 'Path', (['rootDir'], {}), '(rootDir)\n', (1709, 1718), False, 'from pathlib import Path\n'), ((4442, 4460), 'jinja2.Template', 'Template', (['template'], {}), '(template)\n', (4450, 4460), False, 'from jinja2 import Template\n'), ((3860, 3881), 'os.path.exists', 'os.path.exists', (['dpath'], {}), '(dpath)\n', (3874, 3881), False, 'import os\n'), ((3895, 3913), 'os.makedirs', 'os.makedirs', (['dpath'], {}), '(dpath)\n', (3906, 3913), False, 'import os\n'), ((4324, 4349), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4339, 4349), False, 'import os\n'), ((2250, 2274), 'os.path.basename', 'os.path.basename', (['dpath2'], {}), '(dpath2)\n', (2266, 2274), False, 'import os\n'), ((4398, 4417), 'pathlib.Path', 'Path', (['template_path'], {}), '(template_path)\n', (4402, 4417), False, 'from pathlib import Path\n'), ((1966, 1983), 'os.fspath', 'os.fspath', (['dpath2'], {}), '(dpath2)\n', (1975, 1983), False, 'import os\n'), ((2029, 2054), 'os.path.exists', 'os.path.exists', (['jsmodpath'], {}), '(jsmodpath)\n', (2043, 2054), False, 'import os\n'), ((2129, 2146), 'os.fspath', 'os.fspath', (['dpath2'], {}), '(dpath2)\n', (2138, 2146), False, 'import os\n'), ((2337, 2354), 'os.fspath', 'os.fspath', (['dpath2'], {}), '(dpath2)\n', (2346, 2354), False, 'import os\n'), ((2768, 2800), 'fnmatch.filter', 'fnmatch.filter', (['fileList', '"""*.py"""'], {}), "(fileList, '*.py')\n", (2782, 2800), False, 'import fnmatch\n'), ((2833, 2860), 'os.path.join', 'os.path.join', (['dirName', 'item'], {}), '(dirName, item)\n', (2845, 2860), False, 'import os\n')]
from collections import defaultdict from typing import List, Tuple from aocd import get_data, submit DAY = 5 YEAR = 2021 def part1(data: str) -> str: segments = read(data) covered = defaultdict(int) for segment in segments: x1, y1, x2, y2 = segment if x1 != x2 and y1 != y2: continue if x1 == x2: for y in range(min(y1, y2), max(y1, y2)+1): covered[(x1, y)] += 1 elif y1 == y2: for x in range(min(x1, x2), max(x1, x2)+1): covered[(x, y1)] += 1 ans = 0 for point in covered: if covered[point] > 1: ans += 1 return str(ans) def part2(data: str) -> str: segments = read(data) covered = defaultdict(int) for segment in segments: x1, y1, x2, y2 = segment if x1 == x2: for y in range(min(y1, y2), max(y1, y2)+1): covered[(x1, y)] += 1 elif y1 == y2: for x in range(min(x1, x2), max(x1, x2)+1): covered[(x, y1)] += 1 else: if x1 > x2: x1, y1, x2, y2 = x2, y2, x1, y1 if y1 < y2: for x in range(x1, x2+1): covered[(x, y1+(x-x1))] += 1 else: for x in range(x1, x2+1): covered[(x, y1-(x-x1))] += 1 ans = 0 for point in covered: if covered[point] > 1: ans += 1 return str(ans) def read(data: str) -> List[Tuple[int, int, int, int]]: lines = data.splitlines() segments = [] for line in lines: left, right = line.split('->') x1, y1 = left.split(',') x2, y2 = right.split(',') segments.append((int(x1), int(y1), int(x2), int(y2))) return segments if __name__ == '__main__': input_data = get_data(day=DAY, year=YEAR) ans1 = part1(input_data) print(ans1) #submit(answer=ans1, day=DAY, year=YEAR, part=1) ans2 = part2(input_data) print(ans2) submit(answer=ans2, day=DAY, year=YEAR, part=2)
[ "collections.defaultdict", "aocd.get_data", "aocd.submit" ]
[((194, 210), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (205, 210), False, 'from collections import defaultdict\n'), ((741, 757), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (752, 757), False, 'from collections import defaultdict\n'), ((1835, 1863), 'aocd.get_data', 'get_data', ([], {'day': 'DAY', 'year': 'YEAR'}), '(day=DAY, year=YEAR)\n', (1843, 1863), False, 'from aocd import get_data, submit\n'), ((2011, 2058), 'aocd.submit', 'submit', ([], {'answer': 'ans2', 'day': 'DAY', 'year': 'YEAR', 'part': '(2)'}), '(answer=ans2, day=DAY, year=YEAR, part=2)\n', (2017, 2058), False, 'from aocd import get_data, submit\n')]
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'Reticula' copyright = '2022' author = '<NAME>' # The full version, including alpha/beta/rc tags release = '0.0.4' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinxcontrib.bibtex' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] primary_domain = None nitpicky = True rst_prolog = """ .. role:: py(code) :language: python :class: highlight .. role:: cpp(code) :language: cpp :class: highlight """ # REs for Python signatures with types import re typed_py_re = re.compile( r'''^ ([\w.]*\.)? # class name(s) (\w+(?: \[[^\]]+\])?) \s* # thing name (?: \(\s*(.*)\s*\) # optional: arguments (?:\s* -> \s* (.*))? # return annotation )? 
$ # and nothing more ''', re.VERBOSE) import sphinx.domains.python sphinx.domains.python.py_sig_re = typed_py_re # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'furo' pygments_style = "sphinx" pygments_dark_style = "monokai" html_title = "Reticula" import os.path def read_icon(path: str): with open(os.path.join(os.path.dirname(__file__), path), 'r') as f: return f.read() html_theme_options = { "source_repository": "https://github.com/reticula-network/reticula-python", "source_branch": "main", "source_directory": "docs/", "footer_icons": [ { "name": "GitHub", "url": "https://github.com/reticula-network", "html": read_icon("github.svg"), "class": "", }, { "name": "PyPi", "url": "https://pypi.org/project/reticula/", "html": read_icon("pypi.svg"), "class": "", }, ], } bibtex_bibfiles = ['references.bib'] bibtex_default_style = 'unsrt' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static']
[ "re.compile" ]
[((1708, 2053), 're.compile', 're.compile', (['"""^ ([\\\\w.]*\\\\.)? # class name(s)\n (\\\\w+(?: \\\\[[^\\\\]]+\\\\])?) \\\\s* # thing name\n (?: \\\\(\\\\s*(.*)\\\\s*\\\\) # optional: arguments\n (?:\\\\s* -> \\\\s* (.*))? # return annotation\n )? $ # and nothing more\n """', 're.VERBOSE'], {}), '(\n """^ ([\\\\w.]*\\\\.)? # class name(s)\n (\\\\w+(?: \\\\[[^\\\\]]+\\\\])?) \\\\s* # thing name\n (?: \\\\(\\\\s*(.*)\\\\s*\\\\) # optional: arguments\n (?:\\\\s* -> \\\\s* (.*))? # return annotation\n )? $ # and nothing more\n """\n , re.VERBOSE)\n', (1718, 2053), False, 'import re\n')]
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="parentopticon", version="0.0.1", author="<NAME>", author_email="<EMAIL>", description="A system for controlling kids access to computers.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/eliribble/parentopticon", packages=setuptools.find_packages(), install_requires = [ "arrow==0.15.5", "chryso==2.1", "flask==1.1.2", "flask-login==0.5.0", "Jinja2==2.11.1", "psutil==5.6.6", "requests==2.23.0", "toml==0.10.0", ], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
[ "setuptools.find_packages" ]
[((399, 425), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (423, 425), False, 'import setuptools\n')]
import cv2 import numpy as np import matplotlib.pyplot as plt from findpoint import FindPoint class LineDetector: def __init__(self,img): self.frame = None self.leftx = None self.rightx = None # self.output = None self.frame = 0 self.frame_list = [] self.findpoint = FindPoint(img) def sliding_window(self,x_start_L,x_start_R,img): x_location = None out_img = np.dstack((img,img,img)) height = img.shape[0] width = img.shape[1] window_height = 5 nwindows = 30 nonzero = img.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) point_list_left = list() point_list_right = list() margin = 20 minpix = 10 left_lane_inds = [] right_lane_inds = [] good_left_inds = \ ((nonzerox >= x_start_L-20) & (nonzeroy >= 300)& (nonzeroy <= 400) & (nonzerox <= x_start_L+20)).nonzero()[ 0] good_right_inds = ((nonzerox >= x_start_R-40) & (nonzeroy >= 300)& (nonzeroy <= 400) & ( nonzerox <= x_start_R+20)).nonzero()[0] line_exist_flag = None y_current = None x_current = None good_center_inds = None p_cut = None # check the minpix before left start line # if minpix is enough on left, draw left, then draw right depends on left # else draw right, then draw left depends on right # lx_current = 120 # ly_current = 350 # rx_current = 550 # ry_current = 350 if len(good_left_inds) > minpix and len(good_right_inds) > minpix: line_flag = 3 lx_current = np.int(np.mean(nonzerox[good_left_inds])) ly_current = np.int(np.mean(nonzeroy[good_left_inds])) rx_current = np.int(np.mean(nonzerox[good_right_inds])) ry_current = np.int(np.mean(nonzeroy[good_right_inds])) elif len(good_left_inds) > minpix: line_flag = 1 lx_current = np.int(np.mean(nonzerox[good_left_inds])) ly_current = np.int(np.mean(nonzeroy[good_left_inds])) rx_current = None ry_current = None max_y = y_current elif len(good_right_inds) > minpix: line_flag = 2 rx_current = nonzerox[good_right_inds[np.argmax(nonzeroy[good_right_inds])]] ry_current = np.int(np.max(nonzeroy[good_right_inds])) lx_current = None ly_current = None else: 
line_flag = 4 # rx_current # ry_current # if line_flag ==3: # for i in range(len(good_left_inds)): # cv2.circle(out_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]]), 1, (0, 255, 0), -1) # for i in range(len(good_right_inds)): # cv2.circle(out_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]]), 1, (255,0, 0), -1) # for window in range(0, nwindows): # print('x',x_location) if line_flag != 4: # it's just for visualization of the valid inds in the region for i in range(len(good_left_inds)): cv2.circle(out_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]]), 1, (0, 255, 0), -1) for i in range(len(good_right_inds)): cv2.circle(out_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]]), 1, (255,0, 0), -1) # window sliding and draw # print(lx_current) # print(rx_current) for window in range(0, nwindows): # if lx_current and rx_current: # # print(line_flag) # cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1) # cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1) # mean_x = (lx_current + rx_current)/2 # cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1) # point_list_left.append((lx_current, ly_current-window*window_height-3)) # point_list_right.append((rx_current,ry_current-window*window_height-3)) if lx_current and rx_current: cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1) cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1) mean_x = (lx_current + rx_current)/2 cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1) point_list_left.append((lx_current, ly_current-window*window_height-3)) point_list_right.append((rx_current,ry_current-window*window_height-3)) elif lx_current: cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1) mean_x = (lx_current + width/2) 
cv2.circle(out_img,(mean_x,ly_current-window*window_height-3),3,(0,255,255),-1) point_list_left.append((lx_current, ly_current-window*window_height-3)) elif rx_current: # cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1) cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1) mean_x = (rx_current-width/2)/2 cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1) # point_list_left.append((lx_current, ly_current-window*window_height-3)) point_list_right.append((rx_current,ry_current-window*window_height-3)) if line_flag == 3: l_win_y_low = ly_current - (window + 1) * window_height l_win_y_high = ly_current - (window) * window_height l_win_x_low = lx_current - margin l_win_x_high = lx_current + margin r_win_y_low = ry_current - (window + 1) * window_height r_win_y_high = ry_current - (window) * window_height r_win_x_low = rx_current - margin r_win_x_high = rx_current + margin # draw rectangle # 0.33 is for width of the road cv2.rectangle(out_img, (l_win_x_low, l_win_y_low), (l_win_x_high, l_win_y_high), (0, 255, 0), 1) cv2.rectangle(out_img, (r_win_x_low, r_win_y_low), (r_win_x_high, r_win_y_high), (255,0, 0), 1) good_left_inds = ((nonzeroy >= l_win_y_low) & (nonzeroy < l_win_y_high) & (nonzerox >= l_win_x_low) & ( nonzerox < l_win_x_high)).nonzero()[0] good_right_inds = ((nonzeroy >= r_win_y_low) & (nonzeroy < r_win_y_high) & (nonzerox >= r_win_x_low) & ( nonzerox < r_win_x_high)).nonzero()[0] # check num of indicies in square and put next location to current if len(good_left_inds) > minpix: lx_current = np.int(np.mean(nonzerox[good_left_inds])) if len(good_right_inds) > minpix: rx_current = np.int(np.mean(nonzerox[good_right_inds])) # 338~344 is for recognize line which is yellow line in processed image(you can check in imshow) # if (l_win_y_low >= 338 and l_win_y_low < 344) and (r_win_y_low >= 338 and r_win_y_low < 344): # # 0.165 is the half of the road(0.33) x_location = rx_current 
- lx_current + 75 elif line_flag == 1: # rectangle x,y range init win_y_low = ly_current - (window + 1) * window_height win_y_high = ly_current - (window) * window_height win_x_low = lx_current - margin win_x_high = lx_current + margin # draw rectangle # 0.33 is for width of the road cv2.rectangle(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (0, 255, 0), 1) # cv2.rectangle(out_img, (win_x_low + int(width * 0.33), win_y_low), # (win_x_high + int(width * 0.33), win_y_high), (255, 0, 0), 1) # indicies of dots in nonzerox in one square good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_x_low) & ( nonzerox < win_x_high)).nonzero()[0] # check num of indicies in square and put next location to current if len(good_left_inds) > minpix: # x_current = np.int(np.mean(nonzerox[good_left_inds])) lx_current = np.int(np.mean(nonzerox[good_left_inds])) # elif nonzeroy[left_lane_inds] != [] and nonzerox[left_lane_inds] != []: # p_left = np.polyfit(nonzeroy[left_lane_inds], nonzerox[left_lane_inds], 2) # x_current = np.int(np.polyval(p_left, win_y_high)) # # 338~344 is for recognize line which is yellow line in processed image(you can check in imshow) # if win_y_low >= 338 and win_y_low < 344: # # 0.165 is the half of the road(0.33) # x_location = x_current + 180 elif line_flag ==2: win_y_low = ry_current - (window + 1) * window_height win_y_high = ry_current - (window) * window_height win_x_low = rx_current - margin win_x_high = rx_current + margin # cv2.rectangle(out_img, (win_x_low , win_y_low), # (win_x_high, win_y_high), (0, 255, 0), 1) cv2.rectangle(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (255, 0, 0), 1) good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_x_low) & ( nonzerox < win_x_high)).nonzero()[0] if len(good_right_inds) > minpix: # x_current = np.int(np.mean(nonzerox[good_right_inds])) rx_current = np.int(np.mean(nonzerox[good_right_inds])) # elif 
nonzeroy[right_lane_inds] != [] and nonzerox[right_lane_inds] != []: # p_right = np.polyfit(nonzeroy[right_lane_inds], nonzerox[right_lane_inds], 2) # x_current = np.int(np.polyval(p_right, win_y_high)) # if win_y_low >= 338 and win_y_low < 344: # # 0.165 is the half of the road(0.33) # x_location = x_current - 250 # left_lane_inds.extend(good_left_inds) # right_lane_inds.extend(good_right_inds) # left_lane_inds = np.concatenate(left_lane_inds) # right_lane_inds = np.concatenate(right_lane_inds) # else: return out_img, x_location, point_list_left, point_list_right def main(self,img): x_start_l,x_start_r = self.findpoint.findpoint(img) output , x_location, point_list_left, point_list_right = self.sliding_window(x_start_l,x_start_r,img) return output, x_location, point_list_left, point_list_right
[ "cv2.rectangle", "numpy.dstack", "numpy.mean", "findpoint.FindPoint", "numpy.argmax", "numpy.max", "numpy.array", "cv2.circle" ]
[((330, 344), 'findpoint.FindPoint', 'FindPoint', (['img'], {}), '(img)\n', (339, 344), False, 'from findpoint import FindPoint\n'), ((444, 470), 'numpy.dstack', 'np.dstack', (['(img, img, img)'], {}), '((img, img, img))\n', (453, 470), True, 'import numpy as np\n'), ((629, 649), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (637, 649), True, 'import numpy as np\n'), ((669, 689), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (677, 689), True, 'import numpy as np\n'), ((1739, 1772), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (1746, 1772), True, 'import numpy as np\n'), ((1806, 1839), 'numpy.mean', 'np.mean', (['nonzeroy[good_left_inds]'], {}), '(nonzeroy[good_left_inds])\n', (1813, 1839), True, 'import numpy as np\n'), ((1873, 1907), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (1880, 1907), True, 'import numpy as np\n'), ((1941, 1975), 'numpy.mean', 'np.mean', (['nonzeroy[good_right_inds]'], {}), '(nonzeroy[good_right_inds])\n', (1948, 1975), True, 'import numpy as np\n'), ((3271, 3375), 'cv2.circle', 'cv2.circle', (['out_img', '(nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]])', '(1)', '(0, 255, 0)', '(-1)'], {}), '(out_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i\n ]]), 1, (0, 255, 0), -1)\n', (3281, 3375), False, 'import cv2\n'), ((3437, 3543), 'cv2.circle', 'cv2.circle', (['out_img', '(nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]])', '(1)', '(255, 0, 0)', '(-1)'], {}), '(out_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds\n [i]]), 1, (255, 0, 0), -1)\n', (3447, 3543), False, 'import cv2\n'), ((2079, 2112), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (2086, 2112), True, 'import numpy as np\n'), ((2146, 2179), 'numpy.mean', 'np.mean', (['nonzeroy[good_left_inds]'], {}), '(nonzeroy[good_left_inds])\n', (2153, 2179), True, 
'import numpy as np\n'), ((4387, 4486), 'cv2.circle', 'cv2.circle', (['out_img', '(lx_current, ly_current - window * window_height - 3)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(out_img, (lx_current, ly_current - window * window_height - 3), \n 3, (0, 0, 255), -1)\n', (4397, 4486), False, 'import cv2\n'), ((4489, 4588), 'cv2.circle', 'cv2.circle', (['out_img', '(rx_current, ry_current - window * window_height - 3)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(out_img, (rx_current, ry_current - window * window_height - 3), \n 3, (0, 0, 255), -1)\n', (4499, 4588), False, 'import cv2\n'), ((4648, 4745), 'cv2.circle', 'cv2.circle', (['out_img', '(mean_x, ry_current - window * window_height - 3)', '(3)', '(0, 255, 255)', '(-1)'], {}), '(out_img, (mean_x, ry_current - window * window_height - 3), 3, (\n 0, 255, 255), -1)\n', (4658, 4745), False, 'import cv2\n'), ((6528, 6628), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(l_win_x_low, l_win_y_low)', '(l_win_x_high, l_win_y_high)', '(0, 255, 0)', '(1)'], {}), '(out_img, (l_win_x_low, l_win_y_low), (l_win_x_high,\n l_win_y_high), (0, 255, 0), 1)\n', (6541, 6628), False, 'import cv2\n'), ((6645, 6745), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(r_win_x_low, r_win_y_low)', '(r_win_x_high, r_win_y_high)', '(255, 0, 0)', '(1)'], {}), '(out_img, (r_win_x_low, r_win_y_low), (r_win_x_high,\n r_win_y_high), (255, 0, 0), 1)\n', (6658, 6745), False, 'import cv2\n'), ((2462, 2495), 'numpy.max', 'np.max', (['nonzeroy[good_right_inds]'], {}), '(nonzeroy[good_right_inds])\n', (2468, 2495), True, 'import numpy as np\n'), ((4965, 5064), 'cv2.circle', 'cv2.circle', (['out_img', '(lx_current, ly_current - window * window_height - 3)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(out_img, (lx_current, ly_current - window * window_height - 3), \n 3, (0, 0, 255), -1)\n', (4975, 5064), False, 'import cv2\n'), ((5119, 5216), 'cv2.circle', 'cv2.circle', (['out_img', '(mean_x, ly_current - window * window_height - 3)', '(3)', '(0, 255, 255)', '(-1)'], 
{}), '(out_img, (mean_x, ly_current - window * window_height - 3), 3, (\n 0, 255, 255), -1)\n', (5129, 5216), False, 'import cv2\n'), ((8287, 8379), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_x_low, win_y_low)', '(win_x_high, win_y_high)', '(0, 255, 0)', '(1)'], {}), '(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (0,\n 255, 0), 1)\n', (8300, 8379), False, 'import cv2\n'), ((2391, 2427), 'numpy.argmax', 'np.argmax', (['nonzeroy[good_right_inds]'], {}), '(nonzeroy[good_right_inds])\n', (2400, 2427), True, 'import numpy as np\n'), ((5448, 5547), 'cv2.circle', 'cv2.circle', (['out_img', '(rx_current, ry_current - window * window_height - 3)', '(3)', '(0, 0, 255)', '(-1)'], {}), '(out_img, (rx_current, ry_current - window * window_height - 3), \n 3, (0, 0, 255), -1)\n', (5458, 5547), False, 'import cv2\n'), ((5602, 5699), 'cv2.circle', 'cv2.circle', (['out_img', '(mean_x, ry_current - window * window_height - 3)', '(3)', '(0, 255, 255)', '(-1)'], {}), '(out_img, (mean_x, ry_current - window * window_height - 3), 3, (\n 0, 255, 255), -1)\n', (5612, 5699), False, 'import cv2\n'), ((7314, 7347), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (7321, 7347), True, 'import numpy as np\n'), ((7447, 7481), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (7454, 7481), True, 'import numpy as np\n'), ((10145, 10238), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_x_low, win_y_low)', '(win_x_high, win_y_high)', '(255, 0, 0)', '(1)'], {}), '(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (\n 255, 0, 0), 1)\n', (10158, 10238), False, 'import cv2\n'), ((9079, 9112), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (9086, 9112), True, 'import numpy as np\n'), ((10601, 10635), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (10608, 10635), True, 'import numpy as 
np\n')]
from django.db import models from django.db.models import Q from django.utils import timezone class BroadcastManager(models.Manager): """ Manager class to show only active broadcast messages """ def active(self): """Return only active messages""" return super(BroadcastManager, self).filter(is_published=True) def current(self): """Return only current and active messages""" return self.active().filter(end_time__gte=timezone.now()).filter( Q(Q(start_time__lte=timezone.now()) | Q(start_time=None))) def latest(self): """Return the broadcast message to display""" try: return self.current().order_by("end_time")[0] except IndexError: return None
[ "django.utils.timezone.now", "django.db.models.Q" ]
[((547, 565), 'django.db.models.Q', 'Q', ([], {'start_time': 'None'}), '(start_time=None)\n', (548, 565), False, 'from django.db.models import Q\n'), ((473, 487), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (485, 487), False, 'from django.utils import timezone\n'), ((529, 543), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (541, 543), False, 'from django.utils import timezone\n')]
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( package="google.cloud.gkehub.v1", manifest={ "Membership", "MembershipEndpoint", "GkeCluster", "KubernetesMetadata", "MembershipState", "Authority", }, ) class Membership(proto.Message): r"""Membership contains information about a member cluster. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: endpoint (google.cloud.gkehub_v1.types.MembershipEndpoint): Optional. Endpoint information to reach this member. This field is a member of `oneof`_ ``type``. name (str): Output only. The full, unique name of this Membership resource in the format ``projects/*/locations/*/memberships/{membership_id}``, set during creation. ``membership_id`` must be a valid RFC 1123 compliant DNS label: 1. At most 63 characters in length 2. It must consist of lower case alphanumeric characters or ``-`` 3. It must start and end with an alphanumeric character Which can be expressed as the regex: ``[a-z0-9]([-a-z0-9]*[a-z0-9])?``, with a maximum length of 63 characters. labels (Sequence[google.cloud.gkehub_v1.types.Membership.LabelsEntry]): Optional. GCP labels for this membership. description (str): Output only. Description of this membership, limited to 63 characters. 
Must match the regex: ``[a-zA-Z0-9][a-zA-Z0-9_\-\.\ ]*`` This field is present for legacy purposes. state (google.cloud.gkehub_v1.types.MembershipState): Output only. State of the Membership resource. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. When the Membership was created. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. When the Membership was last updated. delete_time (google.protobuf.timestamp_pb2.Timestamp): Output only. When the Membership was deleted. external_id (str): Optional. An externally-generated and managed ID for this Membership. This ID may be modified after creation, but this is not recommended. The ID must match the regex: ``[a-zA-Z0-9][a-zA-Z0-9_\-\.]*`` If this Membership represents a Kubernetes cluster, this value should be set to the UID of the ``kube-system`` namespace object. last_connection_time (google.protobuf.timestamp_pb2.Timestamp): Output only. For clusters using Connect, the timestamp of the most recent connection established with Google Cloud. This time is updated every several minutes, not continuously. For clusters that do not use GKE Connect, or that have never connected successfully, this field will be unset. unique_id (str): Output only. Google-generated UUID for this resource. This is unique across all Membership resources. If a Membership resource is deleted and another resource with the same name is created, it gets a different unique_id. authority (google.cloud.gkehub_v1.types.Authority): Optional. How to identify workloads from this Membership. 
See the documentation on Workload Identity for more details: https://cloud.google.com/kubernetes- engine/docs/how-to/workload-identity """ endpoint = proto.Field( proto.MESSAGE, number=4, oneof="type", message="MembershipEndpoint", ) name = proto.Field(proto.STRING, number=1,) labels = proto.MapField(proto.STRING, proto.STRING, number=2,) description = proto.Field(proto.STRING, number=3,) state = proto.Field(proto.MESSAGE, number=5, message="MembershipState",) create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,) update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) delete_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,) external_id = proto.Field(proto.STRING, number=9,) last_connection_time = proto.Field( proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp, ) unique_id = proto.Field(proto.STRING, number=11,) authority = proto.Field(proto.MESSAGE, number=12, message="Authority",) class MembershipEndpoint(proto.Message): r"""MembershipEndpoint contains information needed to contact a Kubernetes API, endpoint and any additional Kubernetes metadata. Attributes: gke_cluster (google.cloud.gkehub_v1.types.GkeCluster): Optional. GKE-specific information. Only present if this Membership is a GKE cluster. kubernetes_metadata (google.cloud.gkehub_v1.types.KubernetesMetadata): Output only. Useful Kubernetes-specific metadata. """ gke_cluster = proto.Field(proto.MESSAGE, number=1, message="GkeCluster",) kubernetes_metadata = proto.Field( proto.MESSAGE, number=2, message="KubernetesMetadata", ) class GkeCluster(proto.Message): r"""GkeCluster contains information specific to GKE clusters. Attributes: resource_link (str): Immutable. Self-link of the GCP resource for the GKE cluster. For example: //container.googleapis.com/projects/my- project/locations/us-west1-a/clusters/my-cluster Zonal clusters are also supported. 
""" resource_link = proto.Field(proto.STRING, number=1,) class KubernetesMetadata(proto.Message): r"""KubernetesMetadata provides informational metadata for Memberships representing Kubernetes clusters. Attributes: kubernetes_api_server_version (str): Output only. Kubernetes API server version string as reported by ``/version``. node_provider_id (str): Output only. Node providerID as reported by the first node in the list of nodes on the Kubernetes endpoint. On Kubernetes platforms that support zero-node clusters (like GKE-on-GCP), the node_count will be zero and the node_provider_id will be empty. node_count (int): Output only. Node count as reported by Kubernetes nodes resources. vcpu_count (int): Output only. vCPU count as reported by Kubernetes nodes resources. memory_mb (int): Output only. The total memory capacity as reported by the sum of all Kubernetes nodes resources, defined in MB. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time at which these details were last updated. This update_time is different from the Membership-level update_time since EndpointDetails are updated internally for API consumers. """ kubernetes_api_server_version = proto.Field(proto.STRING, number=1,) node_provider_id = proto.Field(proto.STRING, number=2,) node_count = proto.Field(proto.INT32, number=3,) vcpu_count = proto.Field(proto.INT32, number=4,) memory_mb = proto.Field(proto.INT32, number=5,) update_time = proto.Field( proto.MESSAGE, number=100, message=timestamp_pb2.Timestamp, ) class MembershipState(proto.Message): r"""MembershipState describes the state of a Membership resource. Attributes: code (google.cloud.gkehub_v1.types.MembershipState.Code): Output only. The current state of the Membership resource. 
""" class Code(proto.Enum): r"""Code describes the state of a Membership resource.""" CODE_UNSPECIFIED = 0 CREATING = 1 READY = 2 DELETING = 3 UPDATING = 4 SERVICE_UPDATING = 5 code = proto.Field(proto.ENUM, number=1, enum=Code,) class Authority(proto.Message): r"""Authority encodes how Google will recognize identities from this Membership. See the workload identity documentation for more details: https://cloud.google.com/kubernetes- engine/docs/how-to/workload-identity Attributes: issuer (str): Optional. A JSON Web Token (JWT) issuer URI. ``issuer`` must start with ``https://`` and be a valid URL with length <2000 characters. If set, then Google will allow valid OIDC tokens from this issuer to authenticate within the workload_identity_pool. OIDC discovery will be performed on this URI to validate tokens from the issuer. Clearing ``issuer`` disables Workload Identity. ``issuer`` cannot be directly modified; it must be cleared (and Workload Identity disabled) before using a new issuer (and re-enabling Workload Identity). workload_identity_pool (str): Output only. The name of the workload identity pool in which ``issuer`` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in {PROJECT_ID}, the workload pool format is ``{PROJECT_ID}.hub.id.goog``, although this is subject to change in newer versions of this API. identity_provider (str): Output only. An identity provider that reflects the ``issuer`` in the workload identity pool. oidc_jwks (bytes): Optional. OIDC verification keys for this Membership in JWKS format (RFC 7517). When this field is set, OIDC discovery will NOT be performed on ``issuer``, and instead OIDC tokens will be validated using this field. 
""" issuer = proto.Field(proto.STRING, number=1,) workload_identity_pool = proto.Field(proto.STRING, number=2,) identity_provider = proto.Field(proto.STRING, number=3,) oidc_jwks = proto.Field(proto.BYTES, number=4,) __all__ = tuple(sorted(__protobuf__.manifest))
[ "proto.Field", "proto.module", "proto.MapField" ]
[((705, 874), 'proto.module', 'proto.module', ([], {'package': '"""google.cloud.gkehub.v1"""', 'manifest': "{'Membership', 'MembershipEndpoint', 'GkeCluster', 'KubernetesMetadata',\n 'MembershipState', 'Authority'}"}), "(package='google.cloud.gkehub.v1', manifest={'Membership',\n 'MembershipEndpoint', 'GkeCluster', 'KubernetesMetadata',\n 'MembershipState', 'Authority'})\n", (717, 874), False, 'import proto\n'), ((4458, 4543), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(4)', 'oneof': '"""type"""', 'message': '"""MembershipEndpoint"""'}), "(proto.MESSAGE, number=4, oneof='type', message='MembershipEndpoint'\n )\n", (4469, 4543), False, 'import proto\n'), ((4565, 4600), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (4576, 4600), False, 'import proto\n'), ((4615, 4667), 'proto.MapField', 'proto.MapField', (['proto.STRING', 'proto.STRING'], {'number': '(2)'}), '(proto.STRING, proto.STRING, number=2)\n', (4629, 4667), False, 'import proto\n'), ((4687, 4722), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (4698, 4722), False, 'import proto\n'), ((4736, 4799), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(5)', 'message': '"""MembershipState"""'}), "(proto.MESSAGE, number=5, message='MembershipState')\n", (4747, 4799), False, 'import proto\n'), ((4819, 4888), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(6)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp)\n', (4830, 4888), False, 'import proto\n'), ((4908, 4977), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(7)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp)\n', (4919, 4977), False, 'import proto\n'), ((4997, 5066), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(8)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, 
number=8, message=timestamp_pb2.Timestamp)\n', (5008, 5066), False, 'import proto\n'), ((5086, 5121), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(9)'}), '(proto.STRING, number=9)\n', (5097, 5121), False, 'import proto\n'), ((5150, 5220), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(10)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp)\n', (5161, 5220), False, 'import proto\n'), ((5252, 5288), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(11)'}), '(proto.STRING, number=11)\n', (5263, 5288), False, 'import proto\n'), ((5306, 5364), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(12)', 'message': '"""Authority"""'}), "(proto.MESSAGE, number=12, message='Authority')\n", (5317, 5364), False, 'import proto\n'), ((5916, 5974), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(1)', 'message': '"""GkeCluster"""'}), "(proto.MESSAGE, number=1, message='GkeCluster')\n", (5927, 5974), False, 'import proto\n'), ((6002, 6068), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(2)', 'message': '"""KubernetesMetadata"""'}), "(proto.MESSAGE, number=2, message='KubernetesMetadata')\n", (6013, 6068), False, 'import proto\n'), ((6519, 6554), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (6530, 6554), False, 'import proto\n'), ((7979, 8014), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (7990, 8014), False, 'import proto\n'), ((8039, 8074), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (8050, 8074), False, 'import proto\n'), ((8093, 8127), 'proto.Field', 'proto.Field', (['proto.INT32'], {'number': '(3)'}), '(proto.INT32, number=3)\n', (8104, 8127), False, 'import proto\n'), ((8146, 8180), 'proto.Field', 'proto.Field', (['proto.INT32'], {'number': '(4)'}), '(proto.INT32, number=4)\n', 
(8157, 8180), False, 'import proto\n'), ((8198, 8232), 'proto.Field', 'proto.Field', (['proto.INT32'], {'number': '(5)'}), '(proto.INT32, number=5)\n', (8209, 8232), False, 'import proto\n'), ((8252, 8323), 'proto.Field', 'proto.Field', (['proto.MESSAGE'], {'number': '(100)', 'message': 'timestamp_pb2.Timestamp'}), '(proto.MESSAGE, number=100, message=timestamp_pb2.Timestamp)\n', (8263, 8323), False, 'import proto\n'), ((8869, 8913), 'proto.Field', 'proto.Field', (['proto.ENUM'], {'number': '(1)', 'enum': 'Code'}), '(proto.ENUM, number=1, enum=Code)\n', (8880, 8913), False, 'import proto\n'), ((10849, 10884), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(1)'}), '(proto.STRING, number=1)\n', (10860, 10884), False, 'import proto\n'), ((10915, 10950), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(2)'}), '(proto.STRING, number=2)\n', (10926, 10950), False, 'import proto\n'), ((10976, 11011), 'proto.Field', 'proto.Field', (['proto.STRING'], {'number': '(3)'}), '(proto.STRING, number=3)\n', (10987, 11011), False, 'import proto\n'), ((11029, 11063), 'proto.Field', 'proto.Field', (['proto.BYTES'], {'number': '(4)'}), '(proto.BYTES, number=4)\n', (11040, 11063), False, 'import proto\n')]
# Units tests to directly cover both task wrapper modules - # not possible with pytest parametrization import pytest import sys from collections import defaultdict from peeringdb import _tasks_sequential TASKS_MODS = [_tasks_sequential] # pre-async compat. import if sys.version_info >= (3, 5): from peeringdb import _tasks_async TASKS_MODS.append(_tasks_async) # dummy resources for task objects class ResOne: tag = "one" class ResTwo: tag = "two" DATA_EXPECTED = {ResOne: [1, 2], ResTwo: [1, 2]} # dummy context classes parameterized on tasks module def make_context(tasks): class Context: def __init__(self): self.jobs = defaultdict(dict) self.db = {} @tasks.run_task def do_sync(self, res): return tasks.gather(self.schedule(res)) def schedule(self, res): return [self.set_job(res, k) for k in DATA_EXPECTED[res]] def set_job(self, res, k): job = self.jobs[res].get(k) if not job: job = tasks.UpdateTask(self._sync_impl(res, k), (res, k)) self.jobs[res][k] = job return job @tasks.wrap_generator def _sync_impl(self, res, k): d = self.db.setdefault(res, []) # pretend ResOne has dependency on a ResTwo if res is ResOne: yield self.set_job(ResTwo, k) d.append(k) return Context @pytest.mark.parametrize("tasks_mod", TASKS_MODS) def test_basic(tasks_mod): # generate class Context = make_context(tasks_mod) # do a dummy sync ctx = Context() for res in DATA_EXPECTED: ctx.do_sync(res) assert ctx.db == DATA_EXPECTED
[ "pytest.mark.parametrize", "collections.defaultdict" ]
[((1459, 1507), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tasks_mod"""', 'TASKS_MODS'], {}), "('tasks_mod', TASKS_MODS)\n", (1482, 1507), False, 'import pytest\n'), ((674, 691), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (685, 691), False, 'from collections import defaultdict\n')]
"""Driver script for the movie-recommendation offline pipeline.

Each ``*_run`` function below executes one stage of the pipeline by calling
into the project modules; many sub-steps are left commented out and enabled
manually depending on which stage needs recomputing.
"""
from user_portrait import SaveUserProfile
from action_profile_recall import save_inverted_table, SaveUserRecall
from movie_recall import SaveMovieRecall
from movie_portrait import save_topic_weights_normal, save_predata, save_textrank, save_cut_words, save_tfidf, \
    save_topK_idf_textrank, save_topK_tfidf_textrank, save_keyword_weights, save_topic_words, save_movie_profile, \
    save_topic_weights, get_cv_idf_model
from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, \
    save_movie_score_factor
from action_similar_recall import SaveUserSimilarRecall
from utils import user_recall_db
from content_recall import Update


def movie_protrait_run():
    """Build the movie-portrait artifacts (word cutting, CV/IDF models)."""
    # save_predata()
    save_cut_words()
    get_cv_idf_model()  # saves the CV and IDF models; built from the full data set, needs periodic refresh
    # save_textrank()
    # save_tfidf()
    # save_topK_idf_textrank()
    # save_topK_tfidf_textrank()
    # save_keyword_weights()
    # save_topic_words()
    # save_movie_profile()
    # save_topic_weights()
    # save_topic_weights_normal()


def filter_factor_run(cate_id):
    """Recompute the statistical ranking factors (hotness, score, year)."""
    save_movie_hot_sort()
    save_movie_hot_factor()
    save_movie_score_factor()
    save_movie_time()
    save_movie_year_factor(cate_id)
    pass


def movie_recall_run(channel, cate_id):
    """Build the movie-to-movie recall set for one channel/category."""
    mr = SaveMovieRecall(channel=channel, cate_id=cate_id)
    # mr.save_movie_vector()  # refit defaults to True (retrain the model); pass False to load the already-trained model from the cluster
    # mr.save_bkmeans_cluster()  # refit defaults to True (retrain the model); pass False to load the already-trained model from the cluster
    # mr.save_cos_similar(start_group=0)  # start_group defaults to 0; if it fails midway, resume by setting start_group to the failed group index
    # mr.save_movie_recall()
    # mr.save_movie_filter_version_recall()
    # mr.save_movie_filter_hot_score_year()
    mr.save_movie_latest_recall()


def user_profile_run():
    """Refresh user-action history and recompute user-profile weights."""
    up = SaveUserProfile()
    Update().update_user_history(update=False, cal_history=False)
    up.save_action_weight()
    # up.save_action_weight_normal()
    # up.save_action_topic_weight()  # based on topic_weights
    # up.save_action_topic_sort()
    # up.save_user_profile()


def user_profile_recall_run(cate_id):
    """Build profile-based user recall lists for the given category."""
    save_inverted_table(cate_id)  # based on topic_weights
    ur = SaveUserRecall(cate_id)
    # ur.save_pre_user_recall()
    # ur.save_pre2_user_recall()
    # ur.save_pre3_user_recall()
    # ur.save_user_recall()
    ur.save_user_tmp_recall_topK()
    ur.save_user_filter_history_recall()
    ur.save_user_filter_version_recall()
    ur.save_user_recall_hot_score_year_factor()
    ur.save_user_profile_latest_recall()

    # # generate cate_history on its own, used to import into MySQL as test history behaviour
    # ur.save_user_history_cate()

    # # the two methods below are based on merge_action; they normally only need rerunning
    # # when the user history data is refreshed, to recompute the action statistics
    # ur.save_action_stat()
    # ur.save_action_stat_cate()


def user_similar_recall_run(cate_id):
    """Build similar-user (collaborative) recall lists for the category."""
    usr = SaveUserSimilarRecall(cate_id)
    # usr.save_user_similar_recall()
    usr.save_filter_same_recall()
    usr.save_filter_history_recall()
    usr.save_user_similar_latest_recall()


if __name__ == '__main__':
    # Apart from the 3 merge_action-based methods above, the stages form a
    # top-down dependency chain: changing an earlier stage requires rerunning
    # the later ones.
    # movie_protrait_run()
    # filter_factor_run()
    # movie_recall_run()
    user_profile_run()
    # user_profile_recall_run()
    # user_similar_recall_run()
    pass
[ "stat_factor.save_movie_year_factor", "action_similar_recall.SaveUserSimilarRecall", "content_recall.Update", "action_profile_recall.save_inverted_table", "stat_factor.save_movie_hot_factor", "movie_portrait.get_cv_idf_model", "movie_recall.SaveMovieRecall", "stat_factor.save_movie_score_factor", "s...
[((750, 766), 'movie_portrait.save_cut_words', 'save_cut_words', ([], {}), '()\n', (764, 766), False, 'from movie_portrait import save_topic_weights_normal, save_predata, save_textrank, save_cut_words, save_tfidf, save_topK_idf_textrank, save_topK_tfidf_textrank, save_keyword_weights, save_topic_words, save_movie_profile, save_topic_weights, get_cv_idf_model\n'), ((771, 789), 'movie_portrait.get_cv_idf_model', 'get_cv_idf_model', ([], {}), '()\n', (787, 789), False, 'from movie_portrait import save_topic_weights_normal, save_predata, save_textrank, save_cut_words, save_tfidf, save_topK_idf_textrank, save_topK_tfidf_textrank, save_keyword_weights, save_topic_words, save_movie_profile, save_topic_weights, get_cv_idf_model\n'), ((1104, 1125), 'stat_factor.save_movie_hot_sort', 'save_movie_hot_sort', ([], {}), '()\n', (1123, 1125), False, 'from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, save_movie_score_factor\n'), ((1130, 1153), 'stat_factor.save_movie_hot_factor', 'save_movie_hot_factor', ([], {}), '()\n', (1151, 1153), False, 'from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, save_movie_score_factor\n'), ((1158, 1183), 'stat_factor.save_movie_score_factor', 'save_movie_score_factor', ([], {}), '()\n', (1181, 1183), False, 'from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, save_movie_score_factor\n'), ((1188, 1205), 'stat_factor.save_movie_time', 'save_movie_time', ([], {}), '()\n', (1203, 1205), False, 'from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, save_movie_score_factor\n'), ((1210, 1241), 'stat_factor.save_movie_year_factor', 'save_movie_year_factor', (['cate_id'], {}), '(cate_id)\n', (1232, 1241), False, 'from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, 
save_movie_score_factor\n'), ((1302, 1351), 'movie_recall.SaveMovieRecall', 'SaveMovieRecall', ([], {'channel': 'channel', 'cate_id': 'cate_id'}), '(channel=channel, cate_id=cate_id)\n', (1317, 1351), False, 'from movie_recall import SaveMovieRecall\n'), ((1787, 1804), 'user_portrait.SaveUserProfile', 'SaveUserProfile', ([], {}), '()\n', (1802, 1804), False, 'from user_portrait import SaveUserProfile\n'), ((2099, 2127), 'action_profile_recall.save_inverted_table', 'save_inverted_table', (['cate_id'], {}), '(cate_id)\n', (2118, 2127), False, 'from action_profile_recall import save_inverted_table, SaveUserRecall\n'), ((2157, 2180), 'action_profile_recall.SaveUserRecall', 'SaveUserRecall', (['cate_id'], {}), '(cate_id)\n', (2171, 2180), False, 'from action_profile_recall import save_inverted_table, SaveUserRecall\n'), ((2762, 2792), 'action_similar_recall.SaveUserSimilarRecall', 'SaveUserSimilarRecall', (['cate_id'], {}), '(cate_id)\n', (2783, 2792), False, 'from action_similar_recall import SaveUserSimilarRecall\n'), ((1809, 1817), 'content_recall.Update', 'Update', ([], {}), '()\n', (1815, 1817), False, 'from content_recall import Update\n')]
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # pyre-strict from typing import Union import libcst import libcst.matchers as m from libcst import parse_expression from libcst.codemod import VisitorBasedCodemodCommand from libcst.codemod.visitors import AddImportsVisitor from libcst.metadata import QualifiedNameProvider class StripStringsCommand(VisitorBasedCodemodCommand): DESCRIPTION: str = "Converts string type annotations to 3.7-compatible forward references." METADATA_DEPENDENCIES = (QualifiedNameProvider,) # We want to gate the SimpleString visitor below to only SimpleStrings inside # an Annotation. @m.call_if_inside(m.Annotation()) # We also want to gate the SimpleString visitor below to ensure that we don't # erroneously strip strings from a Literal. @m.call_if_not_inside( m.Subscript( # We could match on value=m.Name("Literal") here, but then we might miss # instances where people are importing typing_extensions directly, or # importing Literal as an alias. value=m.MatchMetadataIfTrue( QualifiedNameProvider, lambda qualnames: any( qualname.name == "typing_extensions.Literal" for qualname in qualnames ), ) ) ) def leave_SimpleString( self, original_node: libcst.SimpleString, updated_node: libcst.SimpleString ) -> Union[libcst.SimpleString, libcst.BaseExpression]: AddImportsVisitor.add_needed_import(self.context, "__future__", "annotations") # Just use LibCST to evaluate the expression itself, and insert that as the # annotation. return parse_expression( updated_node.evaluated_value, config=self.module.config_for_parsing )
[ "libcst.parse_expression", "libcst.matchers.Annotation", "libcst.codemod.visitors.AddImportsVisitor.add_needed_import" ]
[((1654, 1732), 'libcst.codemod.visitors.AddImportsVisitor.add_needed_import', 'AddImportsVisitor.add_needed_import', (['self.context', '"""__future__"""', '"""annotations"""'], {}), "(self.context, '__future__', 'annotations')\n", (1689, 1732), False, 'from libcst.codemod.visitors import AddImportsVisitor\n'), ((1854, 1944), 'libcst.parse_expression', 'parse_expression', (['updated_node.evaluated_value'], {'config': 'self.module.config_for_parsing'}), '(updated_node.evaluated_value, config=self.module.\n config_for_parsing)\n', (1870, 1944), False, 'from libcst import parse_expression\n'), ((789, 803), 'libcst.matchers.Annotation', 'm.Annotation', ([], {}), '()\n', (801, 803), True, 'import libcst.matchers as m\n')]
from typing import Dict, Tuple, Union
from nasa_fevo.Cache import Cache
from datetime import datetime

CACHE_EXPIRATION_TIMER_MINUTES = 10


# very simple in-memory cache
# meant for small # of items
class InMemoryCache(Cache):
    """In-memory key/value cache where every entry carries an expiration time.

    Entries are stored as ``(value, expiration)`` tuples. Expired entries are
    dropped lazily on ``get`` and in bulk by ``purge_expired``.
    """

    def __init__(self) -> None:
        # key -> (value, expiration datetime); the original annotation said
        # Dict[str, object], but tuples are what put() actually stores.
        self.store: Dict[str, Tuple[object, datetime]] = {}

    def get(self, key: str) -> Union[object, None]:
        """Return the cached value for *key*, or None on a miss.

        Bug fix: previously an expired entry was still served until the next
        purge_expired() run; expiry is now enforced at read time and the
        stale entry is evicted immediately.
        """
        val_exp = self.store.get(key, None)
        if val_exp is None:
            return None
        value, expiration = val_exp
        if expiration < datetime.now():
            del self.store[key]
            return None
        print(f"Cache hit: {key}")
        return value

    def put(self, key: str, value: object, expiration: datetime) -> None:
        """Store *value* under *key* until *expiration*.

        Raises:
            ValueError: if *value* is None (None is the miss sentinel).
        """
        if value is None:
            raise ValueError("Value mustn't be None")
        self.store[key] = (value, expiration)

    def purge_expired(self) -> None:
        """Delete every entry whose expiration lies in the past."""
        print("Trying to purge ...")
        now = datetime.now()
        # Collect first, delete after: never mutate a dict while iterating it.
        keys_to_delete = [key for key, (_, expiration) in self.store.items()
                          if expiration < now]
        for key in keys_to_delete:
            del self.store[key]

    def clear(self, key: str) -> None:
        """Remove *key* from the cache if present (no-op otherwise)."""
        self.store.pop(key, None)
[ "datetime.datetime.now" ]
[((839, 853), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (851, 853), False, 'from datetime import datetime\n')]
# -------------------------------------------------------------------------------
# Name:         sfp_psbdmp
# Purpose:      Query psbdmp.cc for potentially hacked e-mail addresses.
#
# Author:      <NAME> <<EMAIL>>
#
# Created:     21/11/2016
# Copyright:   (c) <NAME>
# Licence:     MIT
# -------------------------------------------------------------------------------

import json
import re

from spiderfoot import SpiderFootEvent, SpiderFootPlugin


class sfp_psbdmp(SpiderFootPlugin):
    """SpiderFoot module that checks psbdmp.cc for leaked e-mails/domains."""

    meta = {
        'name': "Psbdmp",
        'summary': "Check psbdmp.cc (PasteBin Dump) for potentially hacked e-mails and domains.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Leaks, Dumps and Breaches"],
        'dataSource': {
            'website': "https://psbdmp.cc/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://psbdmp.cc/"
            ],
            'favIcon': "",
            'logo': "",
            'description': "Search dump(s) by some word.\n"
            "Search dump(s) by email.\n"
            "Search dump(s) by domain.\n"
            "Search dump(s) from specific date.",
        }
    }

    # No user-configurable options for this module.
    opts = {
    }

    optdescs = {
    }

    # De-duplication store; initialised in setup().
    results = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options.

        NOTE(review): the mutable default ``userOpts=dict()`` is a Python
        anti-pattern, although it is only read here, never mutated.
        """
        self.sf = sfc
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module consumes."""
        return ["EMAILADDR", "DOMAIN_NAME", "INTERNET_NAME"]

    def producedEvents(self):
        """Event types this module emits."""
        return ["LEAKSITE_URL", "LEAKSITE_CONTENT"]

    def query(self, qry):
        """Query psbdmp.cc for dumps mentioning an e-mail address or domain.

        Uses the e-mail search endpoint when *qry* contains '@', otherwise
        the domain search endpoint.

        Returns:
            list: pastebin.com URLs for matching dumps, or None on error or
            when no dumps were found.
        """
        ret = None

        if "@" in qry:
            url = "https://psbdmp.cc/api/search/email/" + qry
        else:
            url = "https://psbdmp.cc/api/search/domain/" + qry

        res = self.sf.fetchUrl(url, timeout=15, useragent="SpiderFoot")

        if res['code'] == "403" or res['content'] is None:
            self.info("Unable to fetch data from psbdmp.cc right now.")
            return None

        try:
            ret = json.loads(res['content'])
        except Exception as e:
            self.error(f"Error processing JSON response from psbdmp.cc: {e}")
            return None

        ids = list()

        if 'count' not in ret:
            return None

        if ret['count'] <= 0:
            return None

        for d in ret['data']:
            ids.append("https://pastebin.com/" + d['id'])

        return ids

    def handleEvent(self, event):
        """Handle an incoming event: search dumps, emit URL/content events.

        For each matching paste, a LEAKSITE_URL event is always emitted; the
        paste body is then fetched and a LEAKSITE_CONTENT event is emitted
        only when the original event data appears in it as a whole token.
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        # Skip targets already processed in this scan.
        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        data = self.query(eventData)

        if data is None:
            return

        for n in data:
            e = SpiderFootEvent("LEAKSITE_URL", n, self.__name__, event)
            self.notifyListeners(e)

            res = self.sf.fetchUrl(
                n,
                timeout=self.opts['_fetchtimeout'],
                useragent=self.opts['_useragent']
            )

            if res['content'] is None:
                self.debug(f"Ignoring {n} as no data returned")
                continue

            # Require the target to appear delimited by non-identifier
            # characters, to avoid substring false positives.
            if re.search(
                r"[^a-zA-Z\-\_0-9]" + re.escape(eventData) + r"[^a-zA-Z\-\_0-9]",
                res['content'],
                re.IGNORECASE
            ) is None:
                continue

            evt = SpiderFootEvent("LEAKSITE_CONTENT", res['content'], self.__name__, e)
            self.notifyListeners(evt)

# End of sfp_psbdmp class
[ "spiderfoot.SpiderFootEvent", "json.loads", "re.escape" ]
[((2120, 2146), 'json.loads', 'json.loads', (["res['content']"], {}), "(res['content'])\n", (2130, 2146), False, 'import json\n'), ((3020, 3076), 'spiderfoot.SpiderFootEvent', 'SpiderFootEvent', (['"""LEAKSITE_URL"""', 'n', 'self.__name__', 'event'], {}), "('LEAKSITE_URL', n, self.__name__, event)\n", (3035, 3076), False, 'from spiderfoot import SpiderFootEvent, SpiderFootPlugin\n'), ((3652, 3721), 'spiderfoot.SpiderFootEvent', 'SpiderFootEvent', (['"""LEAKSITE_CONTENT"""', "res['content']", 'self.__name__', 'e'], {}), "('LEAKSITE_CONTENT', res['content'], self.__name__, e)\n", (3667, 3721), False, 'from spiderfoot import SpiderFootEvent, SpiderFootPlugin\n'), ((3479, 3499), 're.escape', 're.escape', (['eventData'], {}), '(eventData)\n', (3488, 3499), False, 'import re\n')]
import os
import re
import socket

from dotenv import load_dotenv

from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error
from dcr.scenario_utils.models import get_vm_data_from_env


def test_agent_version():
    """Verify `waagent -version` reports the version recorded in the .env file."""
    version_output, _ = execute_command_and_raise_on_error(['waagent', '-version'], timeout=30)
    # release_file contains:
    # AGENT_VERSION = 'x.y.z'
    load_dotenv()
    expected_version = os.environ.get("AGENTVERSION")
    expected_marker = "Goal state agent: {0}".format(expected_version)
    if expected_marker not in version_output:
        raise Exception("expected version {0} not found".format(expected_version))
    return version_output


def check_hostname():
    """Check that the OS hostname matches the VM name (case-insensitive)."""
    expected_name = get_vm_data_from_env().name
    reported_name, _ = execute_command_and_raise_on_error(['hostname'], timeout=30)
    if expected_name.lower() != reported_name.lower():
        raise Exception("Hostname does not match! Expected: {0}, found: {1}".format(expected_name, reported_name.strip()))
    return reported_name


def check_ns_lookup():
    """Resolve the local hostname via DNS and report the resulting IP."""
    local_hostname, _ = execute_command_and_raise_on_error(['hostname'], timeout=30)
    resolved_ip = socket.gethostbyname(local_hostname)
    msg = "Resolved IP: {0}".format(resolved_ip)
    print(msg)
    return msg


def check_root_login():
    """Confirm the root password field in /etc/shadow is locked (!, * or x)."""
    shadow_contents, _ = execute_command_and_raise_on_error(['cat', '/etc/shadow'], timeout=30)
    shadow_entry = next(line for line in shadow_contents.splitlines() if 'root' in line)
    print(shadow_entry)
    password_field = shadow_entry.split(":")[1]
    if not any(marker in password_field for marker in ("!", "*", "x")):
        raise Exception('root login appears to be enabled: {0}'.format(password_field))
    return 'root login disabled'


def check_agent_processes():
    """Ensure the waagent daemon and extension handler run in a healthy state."""
    daemon_re = re.compile(r'.*python.*waagent -daemon$')
    handler_re = re.compile(r'.*python.*-run-exthandlers')
    status_re = re.compile(r'^(\S+)\s+')

    ps_output, _ = execute_command_and_raise_on_error(['ps', 'axo', 'stat,args'], timeout=30)

    daemon_seen = False
    handler_seen = False
    for line in ps_output.splitlines():
        if 'python' not in line:
            continue
        if daemon_re.match(line):
            daemon_seen = True
        elif handler_re.match(line):
            handler_seen = True
        else:
            continue
        # Only processes whose state starts with S (sleeping) or R (running)
        # count as healthy.
        state = status_re.match(line).groups(1)[0]
        if not (state.startswith('S') or state.startswith('R')):
            raise Exception('process is not running: {0}'.format(line))

    if not daemon_seen:
        raise Exception('daemon process not found:\n\n{0}'.format(ps_output))
    if not handler_seen:
        raise Exception('extension handler process not found:\n\n{0}'.format(ps_output))

    return 'expected processes found running'


def check_sudoers(user):
    """Verify that *user* has an ALL=(ALL) entry under /etc/sudoers.d/."""
    sudoers_dir = '/etc/sudoers.d/'
    found = False
    for entry_name in os.listdir(sudoers_dir):
        sudoers_path = os.path.join(sudoers_dir, entry_name)
        with open(sudoers_path) as fh:
            for line in fh.readlines():
                if line.startswith(user) and 'ALL=(ALL)' in line:
                    print('entry found: {0}'.format(line))
                    found = True
    if not found:
        raise Exception('user {0} not found'.format(user))
    return "Found user {0} in list of sudoers".format(user)
[ "socket.gethostbyname", "os.listdir", "os.environ.get", "dcr.scenario_utils.models.get_vm_data_from_env", "re.match", "dotenv.load_dotenv", "os.path.join", "dcr.scenario_utils.common_utils.execute_command_and_raise_on_error" ]
[((249, 320), 'dcr.scenario_utils.common_utils.execute_command_and_raise_on_error', 'execute_command_and_raise_on_error', (["['waagent', '-version']"], {'timeout': '(30)'}), "(['waagent', '-version'], timeout=30)\n", (283, 320), False, 'from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error\n'), ((385, 398), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (396, 398), False, 'from dotenv import load_dotenv\n'), ((422, 452), 'os.environ.get', 'os.environ.get', (['"""AGENTVERSION"""'], {}), "('AGENTVERSION')\n", (436, 452), False, 'import os\n'), ((709, 769), 'dcr.scenario_utils.common_utils.execute_command_and_raise_on_error', 'execute_command_and_raise_on_error', (["['hostname']"], {'timeout': '(30)'}), "(['hostname'], timeout=30)\n", (743, 769), False, 'from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error\n'), ((985, 1045), 'dcr.scenario_utils.common_utils.execute_command_and_raise_on_error', 'execute_command_and_raise_on_error', (["['hostname']"], {'timeout': '(30)'}), "(['hostname'], timeout=30)\n", (1019, 1045), False, 'from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error\n'), ((1056, 1086), 'socket.gethostbyname', 'socket.gethostbyname', (['hostname'], {}), '(hostname)\n', (1076, 1086), False, 'import socket\n'), ((1200, 1270), 'dcr.scenario_utils.common_utils.execute_command_and_raise_on_error', 'execute_command_and_raise_on_error', (["['cat', '/etc/shadow']"], {'timeout': '(30)'}), "(['cat', '/etc/shadow'], timeout=30)\n", (1234, 1270), False, 'from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error\n'), ((1810, 1884), 'dcr.scenario_utils.common_utils.execute_command_and_raise_on_error', 'execute_command_and_raise_on_error', (["['ps', 'axo', 'stat,args']"], {'timeout': '(30)'}), "(['ps', 'axo', 'stat,args'], timeout=30)\n", (1844, 1884), False, 'from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error\n'), ((2783, 2799), 
'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2793, 2799), False, 'import os\n'), ((665, 687), 'dcr.scenario_utils.models.get_vm_data_from_env', 'get_vm_data_from_env', ([], {}), '()\n', (685, 687), False, 'from dcr.scenario_utils.models import get_vm_data_from_env\n'), ((2059, 2092), 're.match', 're.match', (['daemon_pattern', 'process'], {}), '(daemon_pattern, process)\n', (2067, 2092), False, 'import re\n'), ((2819, 2840), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (2831, 2840), False, 'import os\n'), ((2133, 2167), 're.match', 're.match', (['handler_pattern', 'process'], {}), '(handler_pattern, process)\n', (2141, 2167), False, 'import re\n'), ((2253, 2286), 're.match', 're.match', (['status_pattern', 'process'], {}), '(status_pattern, process)\n', (2261, 2286), False, 'import re\n')]
#!/usr/bin/env python3
#
# This file includes mainly a class "randomEpisode" that:
# - draws localization of vehicle
# - draws number of rocks
# - draws position of each rock
# - save in a json file
# Author: Michele
# Project: SmartLoader - Innovation

import json
import random
from geometry_msgs.msg import PoseStamped, Quaternion, Vector3
import math
from math import pi as pi
import src.Unity2RealWorld as toRW
import os


def deleteFileIfExists(filename):
    """Delete *filename* if it exists; otherwise just report the miss."""
    if os.path.exists(filename):
        os.remove(filename)
    else:
        print("The file does not exist")


def find(name, path):
    """Walk *path* and return the full path of the first file or directory
    called *name*; returns None implicitly when nothing matches."""
    for root, dirs, files in os.walk(path):
        if name in files or name in dirs:
            return os.path.join(root, name)


def determinePathToConfig():
    """Locate the 'simcom' package under $HOME and return its config dir."""
    user = os.getenv("HOME")
    simcomloc = find("simcom", user)
    # NOTE(review): if "simcom" is not found, find() returns None and the
    # concatenation below raises TypeError — assumes simcom is installed.
    confpath = simcomloc+"/config"
    return confpath


class randomEpisode:
    """Generate a random scene (vehicle pose + rocks) and write it to
    InitialScene.json in the simcom config directory."""

    actual_seed = 0
    # data = {}
    # data['Objects'] = []
    # NumberOfRocks = 0
    # VehiclePosition= PoseStamped()

    def __init__(self, typeOfRand, newseed):
        data = {}
        data['Objects'] = []
        NumberOfRocks = 0
        VehiclePosition = PoseStamped()
        if newseed != 0:
            # NOTE(review): random.seed() returns None, so this assignment
            # never captures a seed; it also binds a local, not the class
            # attribute actual_seed — confirm intent.
            actual_seed = random.seed(None,2)
        # Both branches currently draw the same range; "verysimple" is a
        # placeholder for a future distinct difficulty setting.
        if typeOfRand == "verysimple":
            NumberOfRocks = random.randint(1,10)
        else:
            NumberOfRocks = random.randint(1,10)
        # Random vehicle position on the 500x500 ground plane (y is up).
        VehiclePosition.pose.position.x = random.uniform(0,500)
        VehiclePosition.pose.position.y = 0
        VehiclePosition.pose.position.z = random.uniform(0,500)
        # Random yaw only; roll and pitch stay zero.
        euler_orient = Vector3()
        euler_orient.x = 0
        euler_orient.y = random.uniform(-pi,pi)
        euler_orient.z = 0 #random.uniform(-pi,pi)
        quat_orient = toRW.euler_to_quaternion(euler_orient.x, euler_orient.y, euler_orient.z)
        VehiclePosition.pose.orientation.x = quat_orient[0] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.y = quat_orient[1] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.z = quat_orient[2] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.w = quat_orient[3] #random.uniform(-1,1)

        data['Objects'].append({
            'Name': 'BobCat',
            'Id': 'BobCat',
            'Position': {
                'x': VehiclePosition.pose.position.x,
                'y': VehiclePosition.pose.position.y,
                'z': VehiclePosition.pose.position.z
            },
            'Rotation': {
                'x': VehiclePosition.pose.orientation.x,
                'y': VehiclePosition.pose.orientation.y,
                'z': VehiclePosition.pose.orientation.z,
                'w': VehiclePosition.pose.orientation.w
            },
            'Scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        })

        # Rocks are scattered in a 2 x 3 box around the vehicle position.
        BobcatX = VehiclePosition.pose.position.x
        BobcatZ = VehiclePosition.pose.position.z
        XMin = BobcatX - 1
        XMax = BobcatX + 1
        ZMin = BobcatZ - 1.5
        ZMax = BobcatZ + 1.5

        for i in range(NumberOfRocks):
            # Rock ids start at "1".
            id = (i+1).__str__()
            eulerRot = Vector3()
            eulerRot.x = 0
            eulerRot.y = random.uniform(-pi, pi)
            eulerRot.z = 0 #random.uniform(-pi, pi)
            quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
            data['Objects'].append({
                'Name': 'Rock',
                'Id': id,
                'Position': {
                    "x": random.uniform(XMin,XMax),
                    "y": 0,
                    "z": random.uniform(ZMin,ZMax)
                },
                "Rotation": {
                    "x": quatRot[0], #random.uniform(-1,1),
                    "y": quatRot[1], #random.uniform(-1,1),
                    "z": quatRot[2], #random.uniform(-1,1),
                    "w": quatRot[3] #random.uniform(-1,1)
                },
                "Scale": {
                    "x": 0.01,
                    "y": 0.01,
                    "z": 0.01
                }
            })

        # deleteFileIfExists('/home/sload/ws/interfaces/src/simcom/config/InitialScene.json')
        filepath = determinePathToConfig()+"/InitialScene.json"
        with open(filepath, 'w') as outfile:
            json.dump(data, outfile)


class MultipleRocksEpisode:
    """Generate a fixed-pose scene with a line of rocks in front of the
    vehicle (and optionally a small target marker) and write it to
    InitialScene.json."""

    # actual_seed=0
    # data = {}
    # data['Objects'] = []
    # NumberOfRocks = 0
    # VehiclePosition= PoseStamped()

    def __init__(self, newseed, NumberOfRocks, marker):
        actual_seed = 0
        data = {}
        data['Objects'] = []
        VehiclePosition = PoseStamped()
        if newseed != 0:
            # NOTE(review): same caveat as randomEpisode — random.seed()
            # returns None.
            actual_seed = random.seed(None,2)
        # Vehicle fixed at the centre of the 500x500 plane, yaw = pi/2.
        VehiclePosition.pose.position.x = 250
        VehiclePosition.pose.position.y = 0
        VehiclePosition.pose.position.z = 250
        euler_orient = Vector3()
        euler_orient.x = 0
        euler_orient.y = pi/2 #random.uniform(-pi,pi)
        euler_orient.z = 0 #random.uniform(-pi,pi)
        quat_orient = toRW.euler_to_quaternion(euler_orient.x, euler_orient.y, euler_orient.z)
        VehiclePosition.pose.orientation.x = quat_orient[0] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.y = quat_orient[1] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.z = quat_orient[2] #random.uniform(-1,1)
        VehiclePosition.pose.orientation.w = quat_orient[3] #random.uniform(-1,1)

        data['Objects'].append({
            'Name': 'BobCat',
            'Id': 'BobCat',
            'Position': {
                'x': VehiclePosition.pose.position.x,
                'y': VehiclePosition.pose.position.y,
                'z': VehiclePosition.pose.position.z
            },
            'Rotation': {
                'x': VehiclePosition.pose.orientation.x,
                'y': VehiclePosition.pose.orientation.y,
                'z': VehiclePosition.pose.orientation.z,
                'w': VehiclePosition.pose.orientation.w
            },
            'Scale': {
                'x': 1,
                'y': 1,
                'z': 1
            }
        })

        for i in range(NumberOfRocks):
            id = (i+1).__str__()
            eulerRot = Vector3()
            eulerRot.x = 0
            eulerRot.y = random.uniform(-pi, pi)
            eulerRot.z = 0 #random.uniform(-pi, pi)
            quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
            # Rocks sit 3m ahead of the vehicle, jittered along z.
            data['Objects'].append({
                'Name': 'Rock',
                'Id': id,
                'Position': {
                    "x": 253,
                    "y": 0,
                    "z": 250 + random.uniform(-0.5,0.5)
                },
                "Rotation": {
                    "x": quatRot[0], #random.uniform(-1,1),
                    "y": quatRot[1], #random.uniform(-1,1),
                    "z": quatRot[2], #random.uniform(-1,1),
                    "w": quatRot[3] #random.uniform(-1,1)
                },
                "Scale": {
                    "x": 0.25,
                    "y": 0.25,
                    "z": 0.25
                }
            })

        if marker:
            # One extra small rock placed further ahead as a target marker.
            id = (NumberOfRocks+1).__str__()
            eulerRot = Vector3()
            eulerRot.x = 0
            eulerRot.y = random.uniform(-pi, pi)
            eulerRot.z = 0 #random.uniform(-pi, pi)
            quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
            data['Objects'].append({
                'Name': 'Rock',
                'Id': id,
                'Position': {
                    # "x": 250 + random.uniform(XMin,XMax),
                    # "x": 258 + random.uniform(-1, 8),
                    "x": 250 + random.uniform(6, 12),
                    "y": 0,
                    "z": 250
                },
                "Rotation": {
                    "x": quatRot[0], #random.uniform(-1,1),
                    "y": quatRot[1], #random.uniform(-1,1),
                    "z": quatRot[2], #random.uniform(-1,1),
                    "w": quatRot[3] #random.uniform(-1,1)
                },
                "Scale": {
                    "x": 0.1,
                    "y": 0.1,
                    "z": 0.1
                }
            })

        filepath = determinePathToConfig()+"/InitialScene.json"
        with open(filepath,'w') as outfile:
            json.dump(data, outfile)


if __name__ == '__main__':
    # NOTE(review): recorderEpisode is not defined anywhere in this module,
    # so running this file as a script raises NameError — presumably a class
    # from another revision; confirm the intended entry point.
    for j in range(3):
        scenario = recorderEpisode(j)
[ "os.path.exists", "random.uniform", "geometry_msgs.msg.Vector3", "os.getenv", "json.dump", "os.path.join", "random.seed", "geometry_msgs.msg.PoseStamped", "src.Unity2RealWorld.euler_to_quaternion", "random.randint", "os.walk", "os.remove" ]
[((501, 525), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (515, 525), False, 'import os\n'), ((658, 671), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (665, 671), False, 'import os\n'), ((798, 815), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (807, 815), False, 'import os\n'), ((535, 554), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (544, 554), False, 'import os\n'), ((1194, 1207), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1205, 1207), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((1472, 1494), 'random.uniform', 'random.uniform', (['(0)', '(500)'], {}), '(0, 500)\n', (1486, 1494), False, 'import random\n'), ((1580, 1602), 'random.uniform', 'random.uniform', (['(0)', '(500)'], {}), '(0, 500)\n', (1594, 1602), False, 'import random\n'), ((1625, 1634), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (1632, 1634), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((1687, 1710), 'random.uniform', 'random.uniform', (['(-pi)', 'pi'], {}), '(-pi, pi)\n', (1701, 1710), False, 'import random\n'), ((1783, 1855), 'src.Unity2RealWorld.euler_to_quaternion', 'toRW.euler_to_quaternion', (['euler_orient.x', 'euler_orient.y', 'euler_orient.z'], {}), '(euler_orient.x, euler_orient.y, euler_orient.z)\n', (1807, 1855), True, 'import src.Unity2RealWorld as toRW\n'), ((4855, 4868), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (4866, 4868), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((5100, 5109), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (5107, 5109), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((5264, 5336), 'src.Unity2RealWorld.euler_to_quaternion', 'toRW.euler_to_quaternion', (['euler_orient.x', 'euler_orient.y', 'euler_orient.z'], {}), '(euler_orient.x, euler_orient.y, euler_orient.z)\n', (5288, 5336), True, 'import 
src.Unity2RealWorld as toRW\n'), ((734, 758), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (746, 758), False, 'import os\n'), ((1259, 1279), 'random.seed', 'random.seed', (['None', '(2)'], {}), '(None, 2)\n', (1270, 1279), False, 'import random\n'), ((1346, 1367), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1360, 1367), False, 'import random\n'), ((1409, 1430), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1423, 1430), False, 'import random\n'), ((3261, 3270), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (3268, 3270), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((3323, 3346), 'random.uniform', 'random.uniform', (['(-pi)', 'pi'], {}), '(-pi, pi)\n', (3337, 3346), False, 'import random\n'), ((3421, 3481), 'src.Unity2RealWorld.euler_to_quaternion', 'toRW.euler_to_quaternion', (['eulerRot.x', 'eulerRot.y', 'eulerRot.z'], {}), '(eulerRot.x, eulerRot.y, eulerRot.z)\n', (3445, 3481), True, 'import src.Unity2RealWorld as toRW\n'), ((4522, 4546), 'json.dump', 'json.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (4531, 4546), False, 'import json\n'), ((4920, 4940), 'random.seed', 'random.seed', (['None', '(2)'], {}), '(None, 2)\n', (4931, 4940), False, 'import random\n'), ((6530, 6539), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (6537, 6539), False, 'from geometry_msgs.msg import PoseStamped, Quaternion, Vector3\n'), ((6592, 6615), 'random.uniform', 'random.uniform', (['(-pi)', 'pi'], {}), '(-pi, pi)\n', (6606, 6615), False, 'import random\n'), ((6690, 6750), 'src.Unity2RealWorld.euler_to_quaternion', 'toRW.euler_to_quaternion', (['eulerRot.x', 'eulerRot.y', 'eulerRot.z'], {}), '(eulerRot.x, eulerRot.y, eulerRot.z)\n', (6714, 6750), True, 'import src.Unity2RealWorld as toRW\n'), ((7648, 7657), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (7655, 7657), False, 'from geometry_msgs.msg import PoseStamped, 
Quaternion, Vector3\n'), ((7710, 7733), 'random.uniform', 'random.uniform', (['(-pi)', 'pi'], {}), '(-pi, pi)\n', (7724, 7733), False, 'import random\n'), ((7808, 7868), 'src.Unity2RealWorld.euler_to_quaternion', 'toRW.euler_to_quaternion', (['eulerRot.x', 'eulerRot.y', 'eulerRot.z'], {}), '(eulerRot.x, eulerRot.y, eulerRot.z)\n', (7832, 7868), True, 'import src.Unity2RealWorld as toRW\n'), ((8917, 8941), 'json.dump', 'json.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (8926, 8941), False, 'import json\n'), ((3656, 3682), 'random.uniform', 'random.uniform', (['XMin', 'XMax'], {}), '(XMin, XMax)\n', (3670, 3682), False, 'import random\n'), ((3744, 3770), 'random.uniform', 'random.uniform', (['ZMin', 'ZMax'], {}), '(ZMin, ZMax)\n', (3758, 3770), False, 'import random\n'), ((6997, 7022), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (7011, 7022), False, 'import random\n'), ((8173, 8194), 'random.uniform', 'random.uniform', (['(6)', '(12)'], {}), '(6, 12)\n', (8187, 8194), False, 'import random\n')]
#!/usr/bin/env python # Copyright (c) 2009-2016 <NAME> <<EMAIL>> # # This module is free software. You can redistribute it and/or modify it under # the terms of the MIT License, see the file COPYING included with this # distribution. from gimmemotifs.motif import pwmfile_to_motifs def logo(args): inputfile = args.pwmfile motifs = pwmfile_to_motifs(inputfile) if args.ids: ids = args.ids.split(",") motifs = [m for m in motifs if m.id in ids] for motif in motifs: motif.to_img(motif.id, fmt="PNG")
[ "gimmemotifs.motif.pwmfile_to_motifs" ]
[((349, 377), 'gimmemotifs.motif.pwmfile_to_motifs', 'pwmfile_to_motifs', (['inputfile'], {}), '(inputfile)\n', (366, 377), False, 'from gimmemotifs.motif import pwmfile_to_motifs\n')]
import numpy as np from matplotlib import _api from .axes_divider import make_axes_locatable, Size from .mpl_axes import Axes @_api.delete_parameter("3.3", "add_all") def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True, **kwargs): """ Parameters ---------- pad : float Fraction of the axes height. """ divider = make_axes_locatable(ax) pad_size = pad * Size.AxesY(ax) xsize = ((1-2*pad)/3) * Size.AxesX(ax) ysize = ((1-2*pad)/3) * Size.AxesY(ax) divider.set_horizontal([Size.AxesX(ax), pad_size, xsize]) divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize]) ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1)) ax_rgb = [] if axes_class is None: try: axes_class = ax._axes_class except AttributeError: axes_class = type(ax) for ny in [4, 2, 0]: ax1 = axes_class(ax.get_figure(), ax.get_position(original=True), sharex=ax, sharey=ax, **kwargs) locator = divider.new_locator(nx=2, ny=ny) ax1.set_axes_locator(locator) for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels(): t.set_visible(False) try: for axis in ax1.axis.values(): axis.major_ticklabels.set_visible(False) except AttributeError: pass ax_rgb.append(ax1) if add_all: fig = ax.get_figure() for ax1 in ax_rgb: fig.add_axes(ax1) return ax_rgb @_api.deprecated("3.3", alternative="ax.imshow(np.dstack([r, g, b]))") def imshow_rgb(ax, r, g, b, **kwargs): return ax.imshow(np.dstack([r, g, b]), **kwargs) class RGBAxes: """ 4-panel imshow (RGB, R, G, B). Layout: +---------------+-----+ | | R | + +-----+ | RGB | G | + +-----+ | | B | +---------------+-----+ Subclasses can override the ``_defaultAxesClass`` attribute. Attributes ---------- RGB : ``_defaultAxesClass`` The axes object for the three-channel imshow. R : ``_defaultAxesClass`` The axes object for the red channel imshow. G : ``_defaultAxesClass`` The axes object for the green channel imshow. B : ``_defaultAxesClass`` The axes object for the blue channel imshow. 
""" _defaultAxesClass = Axes @_api.delete_parameter("3.3", "add_all") def __init__(self, *args, pad=0, add_all=True, **kwargs): """ Parameters ---------- pad : float, default: 0 fraction of the axes height to put as padding. add_all : bool, default: True Whether to add the {rgb, r, g, b} axes to the figure. This parameter is deprecated. axes_class : matplotlib.axes.Axes *args Unpacked into axes_class() init for RGB **kwargs Unpacked into axes_class() init for RGB, R, G, B axes """ axes_class = kwargs.pop("axes_class", self._defaultAxesClass) self.RGB = ax = axes_class(*args, **kwargs) if add_all: ax.get_figure().add_axes(ax) else: kwargs["add_all"] = add_all # only show deprecation in that case self.R, self.G, self.B = make_rgb_axes( ax, pad=pad, axes_class=axes_class, **kwargs) # Set the line color and ticks for the axes. for ax1 in [self.RGB, self.R, self.G, self.B]: ax1.axis[:].line.set_color("w") ax1.axis[:].major_ticks.set_markeredgecolor("w") @_api.deprecated("3.3") def add_RGB_to_figure(self): """Add red, green and blue axes to the RGB composite's axes figure.""" self.RGB.get_figure().add_axes(self.R) self.RGB.get_figure().add_axes(self.G) self.RGB.get_figure().add_axes(self.B) def imshow_rgb(self, r, g, b, **kwargs): """ Create the four images {rgb, r, g, b}. Parameters ---------- r, g, b : array-like The red, green, and blue arrays. kwargs : imshow kwargs kwargs get unpacked into the imshow calls for the four images. 
Returns ------- rgb : matplotlib.image.AxesImage r : matplotlib.image.AxesImage g : matplotlib.image.AxesImage b : matplotlib.image.AxesImage """ if not (r.shape == g.shape == b.shape): raise ValueError( f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match') RGB = np.dstack([r, g, b]) R = np.zeros_like(RGB) R[:, :, 0] = r G = np.zeros_like(RGB) G[:, :, 1] = g B = np.zeros_like(RGB) B[:, :, 2] = b im_rgb = self.RGB.imshow(RGB, **kwargs) im_r = self.R.imshow(R, **kwargs) im_g = self.G.imshow(G, **kwargs) im_b = self.B.imshow(B, **kwargs) return im_rgb, im_r, im_g, im_b @_api.deprecated("3.3", alternative="RGBAxes") class RGBAxesBase(RGBAxes): pass
[ "numpy.dstack", "matplotlib._api.delete_parameter", "numpy.zeros_like", "matplotlib._api.deprecated" ]
[((130, 169), 'matplotlib._api.delete_parameter', '_api.delete_parameter', (['"""3.3"""', '"""add_all"""'], {}), "('3.3', 'add_all')\n", (151, 169), False, 'from matplotlib import _api\n'), ((1527, 1596), 'matplotlib._api.deprecated', '_api.deprecated', (['"""3.3"""'], {'alternative': '"""ax.imshow(np.dstack([r, g, b]))"""'}), "('3.3', alternative='ax.imshow(np.dstack([r, g, b]))')\n", (1542, 1596), False, 'from matplotlib import _api\n'), ((5031, 5076), 'matplotlib._api.deprecated', '_api.deprecated', (['"""3.3"""'], {'alternative': '"""RGBAxes"""'}), "('3.3', alternative='RGBAxes')\n", (5046, 5076), False, 'from matplotlib import _api\n'), ((2463, 2502), 'matplotlib._api.delete_parameter', '_api.delete_parameter', (['"""3.3"""', '"""add_all"""'], {}), "('3.3', 'add_all')\n", (2484, 2502), False, 'from matplotlib import _api\n'), ((3656, 3678), 'matplotlib._api.deprecated', '_api.deprecated', (['"""3.3"""'], {}), "('3.3')\n", (3671, 3678), False, 'from matplotlib import _api\n'), ((1657, 1677), 'numpy.dstack', 'np.dstack', (['[r, g, b]'], {}), '([r, g, b])\n', (1666, 1677), True, 'import numpy as np\n'), ((4631, 4651), 'numpy.dstack', 'np.dstack', (['[r, g, b]'], {}), '([r, g, b])\n', (4640, 4651), True, 'import numpy as np\n'), ((4664, 4682), 'numpy.zeros_like', 'np.zeros_like', (['RGB'], {}), '(RGB)\n', (4677, 4682), True, 'import numpy as np\n'), ((4718, 4736), 'numpy.zeros_like', 'np.zeros_like', (['RGB'], {}), '(RGB)\n', (4731, 4736), True, 'import numpy as np\n'), ((4772, 4790), 'numpy.zeros_like', 'np.zeros_like', (['RGB'], {}), '(RGB)\n', (4785, 4790), True, 'import numpy as np\n')]
from chatterbot import ChatBot from chatterbot.trainers import ListTrainer # The only required parameter for the ChatBot is a name. This can be anything you want. chatbot = ChatBot("My First Chatbot") # Training your ChatBot conversation = [ "Hello", "Hi there!", "How are you doing?", "I'm doing great.", "That is good to hear", "Thank you.", "You're welcome." ] trainer = ListTrainer(chatbot) trainer.train(conversation) # Get a response response = chatbot.get_response("Good morning!") print(response)
[ "chatterbot.ChatBot", "chatterbot.trainers.ListTrainer" ]
[((173, 200), 'chatterbot.ChatBot', 'ChatBot', (['"""My First Chatbot"""'], {}), "('My First Chatbot')\n", (180, 200), False, 'from chatterbot import ChatBot\n'), ((404, 424), 'chatterbot.trainers.ListTrainer', 'ListTrainer', (['chatbot'], {}), '(chatbot)\n', (415, 424), False, 'from chatterbot.trainers import ListTrainer\n')]
# -*- coding: utf-8 -*- from functools import wraps from flask import abort, jsonify from flask_login import current_user def admin_required(f): @wraps(f) def decorated_function(*args, **kwargs): if not current_user.active or not current_user.is_admin: abort(403) return f(*args, **kwargs) return decorated_function def requires_auth(f): @wraps(f) def decorated(*args, **kwargs): if not current_user.is_allowed: return jsonify(flag='fail', msg='Login required') return f(*args, **kwargs) return decorated
[ "flask.abort", "functools.wraps", "flask.jsonify" ]
[((153, 161), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (158, 161), False, 'from functools import wraps\n'), ((387, 395), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (392, 395), False, 'from functools import wraps\n'), ((284, 294), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (289, 294), False, 'from flask import abort, jsonify\n'), ((491, 533), 'flask.jsonify', 'jsonify', ([], {'flag': '"""fail"""', 'msg': '"""Login required"""'}), "(flag='fail', msg='Login required')\n", (498, 533), False, 'from flask import abort, jsonify\n')]
# Copyright 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import requests from magnum.conductor.handlers.common.cert_manager import create_client_files class KubernetesAPI: """ Simple Kubernetes API client using requests. This API wrapper allows for a set of very simple operations to be performed on a Kubernetes cluster using the `requests` library. The reason behind it is that the native `kubernetes` library does not seem to be quite thread-safe at the moment. Also, our interactions with the Kubernetes API are happening inside Greenthreads so we don't need to use connection pooling on top of it, in addition to pools not being something that you can disable with the native Kubernetes API. """ def __init__(self, context, cluster): self.context = context self.cluster = cluster # Load certificates for cluster (self.ca_file, self.key_file, self.cert_file) = create_client_files( self.cluster, self.context ) def _request(self, method, url, json=True): response = requests.request( method, url, verify=self.ca_file.name, cert=(self.cert_file.name, self.key_file.name) ) response.raise_for_status() if json: return response.json() else: return response.text def get_healthz(self): """ Get the health of the cluster from API """ return self._request( 'GET', f"{self.cluster.api_address}/healthz", json=False ) def list_node(self): """ List all nodes in the cluster. :return: List of nodes. 
""" return self._request( 'GET', f"{self.cluster.api_address}/api/v1/nodes" ) def list_namespaced_pod(self, namespace): """ List all pods in the given namespace. :param namespace: Namespace to list pods from. :return: List of pods. """ return self._request( 'GET', f"{self.cluster.api_address}/api/v1/namespaces/{namespace}/pods" ) def __del__(self): """ Close all of the file descriptions for the certificates, since they are left open by `create_client_files`. TODO(mnaser): Use a context manager and avoid having these here. """ if hasattr(self, 'ca_file'): self.ca_file.close() if hasattr(self, 'cert_file'): self.cert_file.close() if hasattr(self, 'key_file'): self.key_file.close()
[ "magnum.conductor.handlers.common.cert_manager.create_client_files", "requests.request" ]
[((1488, 1535), 'magnum.conductor.handlers.common.cert_manager.create_client_files', 'create_client_files', (['self.cluster', 'self.context'], {}), '(self.cluster, self.context)\n', (1507, 1535), False, 'from magnum.conductor.handlers.common.cert_manager import create_client_files\n'), ((1626, 1734), 'requests.request', 'requests.request', (['method', 'url'], {'verify': 'self.ca_file.name', 'cert': '(self.cert_file.name, self.key_file.name)'}), '(method, url, verify=self.ca_file.name, cert=(self.\n cert_file.name, self.key_file.name))\n', (1642, 1734), False, 'import requests\n')]
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import logging, os, sys from pprint import pprint as pp from secret.project import get_project from secret.cli import prepare def trollius_log(level=logging.CRITICAL): os.environ['TROLLIUSDEBUG'] = "1" # more informative tracebacks logging.basicConfig(level=level) if sys.version_info.major == 2: trollius_log() from secret.storage import S3 from secret.output import prettyprint import boto3 import trollius as asyncio from trollius import From, Return @asyncio.coroutine def main(args): project = get_project(args.datafile) region = os.getenv("AWS_DEFAULT_REGION", args.region) kw = {} if not os.getenv("AWS_PROFILE"): kw = dict(aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'), aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'), aws_session_token=os.getenv('AWS_SESSION_TOKEN'),) if args.debug: boto3.set_stream_logger(name='botocore') trollius_log(level=logging.DEBUG) session = boto3.session.Session(region_name=region, **kw) storage = S3(session=session, vault=args.vault, vaultkey=args.vaultkey, env=args.env, region=args.region, prefix=args.project, project=project,) method = getattr(storage, args.action) fn = lambda: method(**vars(args)) result = yield From(fn()) prettyprint(result, args) def runner(): args = prepare() loop = asyncio.get_event_loop() # wrap asyncio to suppress stacktraces if args.debug: loop.run_until_complete(main(args)) else: try: loop.run_until_complete(main(args)) except Exception as e: print(e.message) loop.close() if __name__ == '__main__': runner()
[ "logging.basicConfig", "secret.project.get_project", "boto3.session.Session", "os.getenv", "secret.storage.S3", "secret.cli.prepare", "boto3.set_stream_logger", "secret.output.prettyprint", "trollius.get_event_loop" ]
[((341, 373), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level'}), '(level=level)\n', (360, 373), False, 'import logging, os, sys\n'), ((621, 647), 'secret.project.get_project', 'get_project', (['args.datafile'], {}), '(args.datafile)\n', (632, 647), False, 'from secret.project import get_project\n'), ((666, 710), 'os.getenv', 'os.getenv', (['"""AWS_DEFAULT_REGION"""', 'args.region'], {}), "('AWS_DEFAULT_REGION', args.region)\n", (675, 710), False, 'import logging, os, sys\n'), ((1087, 1134), 'boto3.session.Session', 'boto3.session.Session', ([], {'region_name': 'region'}), '(region_name=region, **kw)\n', (1108, 1134), False, 'import boto3\n'), ((1149, 1286), 'secret.storage.S3', 'S3', ([], {'session': 'session', 'vault': 'args.vault', 'vaultkey': 'args.vaultkey', 'env': 'args.env', 'region': 'args.region', 'prefix': 'args.project', 'project': 'project'}), '(session=session, vault=args.vault, vaultkey=args.vaultkey, env=args.env,\n region=args.region, prefix=args.project, project=project)\n', (1151, 1286), False, 'from secret.storage import S3\n'), ((1472, 1497), 'secret.output.prettyprint', 'prettyprint', (['result', 'args'], {}), '(result, args)\n', (1483, 1497), False, 'from secret.output import prettyprint\n'), ((1524, 1533), 'secret.cli.prepare', 'prepare', ([], {}), '()\n', (1531, 1533), False, 'from secret.cli import prepare\n'), ((1545, 1569), 'trollius.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1567, 1569), True, 'import trollius as asyncio\n'), ((734, 758), 'os.getenv', 'os.getenv', (['"""AWS_PROFILE"""'], {}), "('AWS_PROFILE')\n", (743, 758), False, 'import logging, os, sys\n'), ((989, 1029), 'boto3.set_stream_logger', 'boto3.set_stream_logger', ([], {'name': '"""botocore"""'}), "(name='botocore')\n", (1012, 1029), False, 'import boto3\n'), ((796, 826), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (805, 826), False, 'import logging, os, sys\n'), ((862, 896), 'os.getenv', 
'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (871, 896), False, 'import logging, os, sys\n'), ((928, 958), 'os.getenv', 'os.getenv', (['"""AWS_SESSION_TOKEN"""'], {}), "('AWS_SESSION_TOKEN')\n", (937, 958), False, 'import logging, os, sys\n')]
### #Various nose tests. If you want to adapt this for your own use, be aware that the start/end block list has a very specific formatting. ### import get_freebusy import arrow from operator import itemgetter from pymongo import MongoClient import secrets.admin_secrets import secrets.client_secrets MONGO_CLIENT_URL = "mongodb://{}:{}@localhost:{}/{}".format( secrets.client_secrets.db_user, secrets.client_secrets.db_user_pw, secrets.admin_secrets.port, secrets.client_secrets.db) try: dbclient = MongoClient(MONGO_CLIENT_URL) db = getattr(dbclient, secrets.client_secrets.db) collection = db.dated base_size = collection.count() #current size of the db, for comparison later except: print("Failure opening database. Is Mongo running? Correct password?") sys.exit(1) def test_free_times(): #Given a sample list, check to see if it's getting free/busy blocks correctly ranges = [['2016-11-20T08:30:00-08:00', '2016-11-20T010:30:00-08:00'], ['2016-11-20T11:00:00-08:00', '2016-11-20T15:00:00-08:00'], ['2016-11-20T16:30:00-08:00', '2016-11-20T19:00:00-08:00'], ['2016-11-24T13:30:00-08:00', '2016-11-24T16:00:00-08:00'], ['2016-11-21T15:00:00-08:00', '2016-11-21T18:30:00-08:00']] start = '2016-11-20T8:00:00-08:00' end = '2016-11-23T20:00:00-08:00' assert get_freebusy.get_freebusy(ranges, start, end) == [['At 2016-11-20 from 08:00:00 to 08:30:00', 'At 2016-11-20 from 10:30:00 to 11:00:00', 'At 2016-11-20 from 15:00:00 to 16:30:00', 'At 2016-11-20 from 19:00:00 to 20:00:00', 'At 2016-11-21 from 08:00:00 to 15:00:00', 'At 2016-11-21 from 18:00:00 to 20:00:00', 'At 2016-11-24 from 08:00:00 to 13:30:00', 'At 2016-11-24 from 16:00:00 to 20:00:00'], ['At 2016-11-20 from 08:30:00 to 10:30:00', 'At 2016-11-20 from 11:00:00 to 15:00:00', 'At 2016-11-20 from 16:30:00 to 19:00:00', 'At 2016-11-21 from 15:00:00 to 18:00:00', 'At 2016-11-24 from 13:30:00 to 16:00:00']] ranges = [] start = '2016-11-20T12:00:00-08:00' end = '2016-11-23T20:00:00-08:00' assert get_freebusy.get_freebusy(ranges, start, 
end) == [[], []] def test_overlap(): #tests if the program can handle dates that overlap/intersect ranges = [['2016-11-22T11:00:00-08:00', '2016-11-22T16:00:00-08:00'], ['2016-11-23T12:00:00-08:00', '2016-11-23T15:30:00-08:00']] start = '2016-11-20T8:00:00-08:00' end = '2016-11-23T20:00:00-08:00' assert get_freebusy.get_freebusy(ranges, start, end) == [['At 2016-11-22 from 08:00:00 to 11:00:00', 'At 2016-11-22 from 16:00:00 to 20:00:00', 'At 2016-11-23 from 08:00:00 to 11:00:00', 'At 2016-11-23 from 18:30:00 to 20:00:00'], ['At 2016-11-22 from 11:00:00 to 16:00:00', 'At 2016-11-23 from 11:00:00 to 18:30:00']] def test_db(): assert collection != None collection.insert({"type" : "freebusy", "entry" : [["entry 1"], ["entry 2"]]}) assert base_size < collection.count() collection.remove({"entry" : [["entry 1"], ["entry 2"]]}) assert base_size == collection.count()
[ "pymongo.MongoClient", "get_freebusy.get_freebusy" ]
[((540, 569), 'pymongo.MongoClient', 'MongoClient', (['MONGO_CLIENT_URL'], {}), '(MONGO_CLIENT_URL)\n', (551, 569), False, 'from pymongo import MongoClient\n'), ((1359, 1404), 'get_freebusy.get_freebusy', 'get_freebusy.get_freebusy', (['ranges', 'start', 'end'], {}), '(ranges, start, end)\n', (1384, 1404), False, 'import get_freebusy\n'), ((2095, 2140), 'get_freebusy.get_freebusy', 'get_freebusy.get_freebusy', (['ranges', 'start', 'end'], {}), '(ranges, start, end)\n', (2120, 2140), False, 'import get_freebusy\n'), ((2488, 2533), 'get_freebusy.get_freebusy', 'get_freebusy.get_freebusy', (['ranges', 'start', 'end'], {}), '(ranges, start, end)\n', (2513, 2533), False, 'import get_freebusy\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of the SCICO package. Details of the copyright # and user license can be found in the 'LICENSE.txt' file distributed # with the package. r""" Regularized Abel Inversion ========================== This example demonstrates a TV-regularized Abel inversion using an Abel projector based on PyAbel :cite:`pyabel-2022` """ import numpy as np import scico.numpy as snp from scico import functional, linop, loss, metric, plot from scico.examples import create_circular_phantom from scico.linop.abel import AbelProjector from scico.optimize.admm import ADMM, LinearSubproblemSolver from scico.util import device_info """ Create a ground truth image. """ N = 256 # phantom size x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5]) """ Set up the forward operator and create a test measurement """ A = AbelProjector(x_gt.shape) y = A @ x_gt np.random.seed(12345) y = y + np.random.normal(size=y.shape).astype(np.float32) ATy = A.T @ y """ Set up ADMM solver object. """ λ = 1.9e1 # L1 norm regularization parameter ρ = 4.9e1 # ADMM penalty parameter maxiter = 100 # number of ADMM iterations cg_tol = 1e-4 # CG relative tolerance cg_maxiter = 25 # maximum CG iterations per ADMM iteration # Note the use of anisotropic TV. Isotropic TV would require use of L21Norm. g = λ * functional.L1Norm() C = linop.FiniteDifference(input_shape=x_gt.shape) f = loss.SquaredL2Loss(y=y, A=A) x_inv = A.inverse(y) x0 = snp.clip(x_inv, 0, 1.0) solver = ADMM( f=f, g_list=[g], C_list=[C], rho_list=[ρ], x0=x0, maxiter=maxiter, subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}), itstat_options={"display": True, "period": 5}, ) """ Run the solver. """ print(f"Solving on {device_info()}\n") solver.solve() hist = solver.itstat_object.history(transpose=True) x_tv = snp.clip(solver.x, 0, 1.0) """ Show results. 
""" norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2) fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12)) plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm) plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1]) plot.imview( x_inv, title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv), cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0], norm=norm, ) plot.imview( x_tv, title="TV Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv), cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1], norm=norm, ) fig.show() input("\nWaiting for input to close figures and exit")
[ "scico.linop.abel.AbelProjector", "scico.functional.L1Norm", "scico.plot.subplots", "scico.plot.matplotlib.colors.Normalize", "numpy.random.normal", "scico.examples.create_circular_phantom", "scico.plot.imview", "scico.linop.FiniteDifference", "scico.optimize.admm.LinearSubproblemSolver", "numpy.r...
[((748, 821), 'scico.examples.create_circular_phantom', 'create_circular_phantom', (['(N, N)', '[0.4 * N, 0.2 * N, 0.1 * N]', '[1, 0, 0.5]'], {}), '((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])\n', (771, 821), False, 'from scico.examples import create_circular_phantom\n'), ((894, 919), 'scico.linop.abel.AbelProjector', 'AbelProjector', (['x_gt.shape'], {}), '(x_gt.shape)\n', (907, 919), False, 'from scico.linop.abel import AbelProjector\n'), ((933, 954), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (947, 954), True, 'import numpy as np\n'), ((1398, 1444), 'scico.linop.FiniteDifference', 'linop.FiniteDifference', ([], {'input_shape': 'x_gt.shape'}), '(input_shape=x_gt.shape)\n', (1420, 1444), False, 'from scico import functional, linop, loss, metric, plot\n'), ((1450, 1478), 'scico.loss.SquaredL2Loss', 'loss.SquaredL2Loss', ([], {'y': 'y', 'A': 'A'}), '(y=y, A=A)\n', (1468, 1478), False, 'from scico import functional, linop, loss, metric, plot\n'), ((1506, 1529), 'scico.numpy.clip', 'snp.clip', (['x_inv', '(0)', '(1.0)'], {}), '(x_inv, 0, 1.0)\n', (1514, 1529), True, 'import scico.numpy as snp\n'), ((1925, 1951), 'scico.numpy.clip', 'snp.clip', (['solver.x', '(0)', '(1.0)'], {}), '(solver.x, 0, 1.0)\n', (1933, 1951), True, 'import scico.numpy as snp\n'), ((1983, 2036), 'scico.plot.matplotlib.colors.Normalize', 'plot.matplotlib.colors.Normalize', ([], {'vmin': '(-0.1)', 'vmax': '(1.2)'}), '(vmin=-0.1, vmax=1.2)\n', (2015, 2036), False, 'from scico import functional, linop, loss, metric, plot\n'), ((2047, 2096), 'scico.plot.subplots', 'plot.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(12, 12)'}), '(nrows=2, ncols=2, figsize=(12, 12))\n', (2060, 2096), False, 'from scico import functional, linop, loss, metric, plot\n'), ((2097, 2194), 'scico.plot.imview', 'plot.imview', (['x_gt'], {'title': '"""Ground Truth"""', 'cmap': 'plot.cm.Blues', 'fig': 'fig', 'ax': 'ax[0, 0]', 'norm': 'norm'}), "(x_gt, title='Ground Truth', 
cmap=plot.cm.Blues, fig=fig, ax=ax[\n 0, 0], norm=norm)\n", (2108, 2194), False, 'from scico import functional, linop, loss, metric, plot\n'), ((2190, 2267), 'scico.plot.imview', 'plot.imview', (['y'], {'title': '"""Measurement"""', 'cmap': 'plot.cm.Blues', 'fig': 'fig', 'ax': 'ax[0, 1]'}), "(y, title='Measurement', cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])\n", (2201, 2267), False, 'from scico import functional, linop, loss, metric, plot\n'), ((1375, 1394), 'scico.functional.L1Norm', 'functional.L1Norm', ([], {}), '()\n', (1392, 1394), False, 'from scico import functional, linop, loss, metric, plot\n'), ((1659, 1731), 'scico.optimize.admm.LinearSubproblemSolver', 'LinearSubproblemSolver', ([], {'cg_kwargs': "{'tol': cg_tol, 'maxiter': cg_maxiter}"}), "(cg_kwargs={'tol': cg_tol, 'maxiter': cg_maxiter})\n", (1681, 1731), False, 'from scico.optimize.admm import ADMM, LinearSubproblemSolver\n'), ((963, 993), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'y.shape'}), '(size=y.shape)\n', (979, 993), True, 'import numpy as np\n'), ((1832, 1845), 'scico.util.device_info', 'device_info', ([], {}), '()\n', (1843, 1845), False, 'from scico.util import device_info\n'), ((2330, 2354), 'scico.metric.psnr', 'metric.psnr', (['x_gt', 'x_inv'], {}), '(x_gt, x_inv)\n', (2341, 2354), False, 'from scico import functional, linop, loss, metric, plot\n'), ((2500, 2523), 'scico.metric.psnr', 'metric.psnr', (['x_gt', 'x_tv'], {}), '(x_gt, x_tv)\n', (2511, 2523), False, 'from scico import functional, linop, loss, metric, plot\n')]
from django.shortcuts import render from .forms import RegistrationForm, UserUpdateForm, ProfileUpdateForm from django.shortcuts import redirect from .models import Profile from django.contrib.auth.decorators import login_required def registration(request): if request.method == 'POST': form = RegistrationForm(request.POST) if form.is_valid(): form.save() username = form.cleaned_data.get('username') return redirect('login') else: form = RegistrationForm() return render(request, 'users/register.html', {'form': form}) @login_required() def profile(request): if request.method == 'POST': u_form = UserUpdateForm(request.POST, instance=request.user) p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile) if u_form.is_valid() and p_form.is_valid(): u_form.save() p_form.save() return redirect('profile') else: u_form = UserUpdateForm(instance=request.user) p_form = ProfileUpdateForm(instance=request.user.profile) context = {'u_form':u_form, 'p_form':p_form} return render(request, 'users/profile.html', context)
[ "django.shortcuts.render", "django.shortcuts.redirect", "django.contrib.auth.decorators.login_required" ]
[((544, 560), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (558, 560), False, 'from django.contrib.auth.decorators import login_required\n'), ((486, 540), 'django.shortcuts.render', 'render', (['request', '"""users/register.html"""', "{'form': form}"], {}), "(request, 'users/register.html', {'form': form})\n", (492, 540), False, 'from django.shortcuts import render\n'), ((1045, 1091), 'django.shortcuts.render', 'render', (['request', '"""users/profile.html"""', 'context'], {}), "(request, 'users/profile.html', context)\n", (1051, 1091), False, 'from django.shortcuts import render\n'), ((424, 441), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (432, 441), False, 'from django.shortcuts import redirect\n'), ((855, 874), 'django.shortcuts.redirect', 'redirect', (['"""profile"""'], {}), "('profile')\n", (863, 874), False, 'from django.shortcuts import redirect\n')]
import numpy as np import scipy.stats as sp from concept import Concept def info_gain(prev_dist, new_dist): return sp.entropy(prev_dist) - sp.entropy(new_dist) def main(): attributes = range(10) num_concepts = 5 concept_size = 4 concept_space = Concept(attributes, num_concepts, concept_size) problem1 = [(1, 2, 3, 4), (3, 4, 5, 6), (2, 4, 5, 7), (2, 3, 5, 8), (2, 3, 4, 5)] init_belief = np.ones(num_concepts) / num_concepts for msg in [2, 3, 4, 5]: new_belief = concept_space.bayesian_update(init_belief, problem1, msg) print(info_gain(init_belief, new_belief)) init_belief = new_belief print(info_gain(np.ones(num_concepts) / num_concepts, new_belief)) print('%%%%%%%%%%%%%%%%%%%%%%') problem2 = [(0, 2, 3), (4, 7, 9), (4, 7), (0, 2, 4, 9)] init_belief = np.ones(4) / 4 for msg in [7] * 8: new_belief = concept_space.bayesian_update(init_belief, problem2, msg) print(info_gain(init_belief, new_belief)) init_belief = new_belief print(info_gain(np.ones(4) / 4, [0, 0, 1, 0])) if __name__ == '__main__': main()
[ "scipy.stats.entropy", "numpy.ones", "concept.Concept" ]
[((253, 300), 'concept.Concept', 'Concept', (['attributes', 'num_concepts', 'concept_size'], {}), '(attributes, num_concepts, concept_size)\n', (260, 300), False, 'from concept import Concept\n'), ((117, 138), 'scipy.stats.entropy', 'sp.entropy', (['prev_dist'], {}), '(prev_dist)\n', (127, 138), True, 'import scipy.stats as sp\n'), ((141, 161), 'scipy.stats.entropy', 'sp.entropy', (['new_dist'], {}), '(new_dist)\n', (151, 161), True, 'import scipy.stats as sp\n'), ((399, 420), 'numpy.ones', 'np.ones', (['num_concepts'], {}), '(num_concepts)\n', (406, 420), True, 'import numpy as np\n'), ((781, 791), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (788, 791), True, 'import numpy as np\n'), ((624, 645), 'numpy.ones', 'np.ones', (['num_concepts'], {}), '(num_concepts)\n', (631, 645), True, 'import numpy as np\n'), ((978, 988), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (985, 988), True, 'import numpy as np\n')]
import aiohttp import discord from discord.ext import commands from discord.commands import Option, slash_command, SlashCommandGroup import json with open ('././config/guilds.json', 'r') as f: data = json.load(f) guilds = data['guilds'] with open ('././config/api.json', 'r') as f: ApiData = json.load(f) githubApi = ApiData['github'] class slashGithub(commands.Cog): def __init__(self, bot): self.bot = bot @commands.slash_command(description="Search any github user", guild_ids=guilds) async def github( self, ctx, username: Option(str, "Enter Github Username", required=True) ): await ctx.response.defer() url = str(githubApi)+ str(username) async with aiohttp.ClientSession() as session: async with session.get(url) as r: r = await r.json() try: username = r["login"] avatar = r["avatar_url"] githuburl = r["html_url"] name = r["name"] location = r["location"] email = r["email"] company = r["company"] bio = r["bio"] repo = r["public_repos"] embed = discord.Embed( colour=0x00FFFF, title=f"Github Profile", description=f""" > `Github username` : {username} > `Github link` : {githuburl} > `Name` : {name} > `Location` : {location} > `Email` : {email} > `Company` : {company} > `Bio` : {bio} > `Repository` : {repo} """) embed.set_thumbnail(url=avatar) await ctx.respond(embed=embed) except: embed = discord.Embed( colour=0x983925, description=f"> ⚠️Unable to find the github profile please check your spelling", ) await ctx.respond(embed=embed) def setup(bot): bot.add_cog(slashGithub(bot))
[ "aiohttp.ClientSession", "discord.commands.Option", "discord.ext.commands.slash_command", "json.load", "discord.Embed" ]
[((202, 214), 'json.load', 'json.load', (['f'], {}), '(f)\n', (211, 214), False, 'import json\n'), ((297, 309), 'json.load', 'json.load', (['f'], {}), '(f)\n', (306, 309), False, 'import json\n'), ((423, 501), 'discord.ext.commands.slash_command', 'commands.slash_command', ([], {'description': '"""Search any github user"""', 'guild_ids': 'guilds'}), "(description='Search any github user', guild_ids=guilds)\n", (445, 501), False, 'from discord.ext import commands\n'), ((549, 600), 'discord.commands.Option', 'Option', (['str', '"""Enter Github Username"""'], {'required': '(True)'}), "(str, 'Enter Github Username', required=True)\n", (555, 600), False, 'from discord.commands import Option, slash_command, SlashCommandGroup\n'), ((687, 710), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (708, 710), False, 'import aiohttp\n'), ((1038, 1366), 'discord.Embed', 'discord.Embed', ([], {'colour': '(65535)', 'title': 'f"""Github Profile"""', 'description': 'f"""\n\t\t\t\t\t\t> `Github username` : {username}\n\t\t\t\t\t\t> `Github link` : {githuburl}\n\t\t\t\t\t\t> `Name` : {name}\n\t\t\t\t\t\t> `Location` : {location}\n\t\t\t\t\t\t> `Email` : {email}\n\t\t\t\t\t\t> `Company` : {company}\n\t\t\t\t\t\t> `Bio` : {bio}\n\t\t\t\t\t\t> `Repository` : {repo}\n\t\t\t\t\t"""'}), '(colour=65535, title=f\'Github Profile\', description=\n f"""\n\t\t\t\t\t\t> `Github username` : {username}\n\t\t\t\t\t\t> `Github link` : {githuburl}\n\t\t\t\t\t\t> `Name` : {name}\n\t\t\t\t\t\t> `Location` : {location}\n\t\t\t\t\t\t> `Email` : {email}\n\t\t\t\t\t\t> `Company` : {company}\n\t\t\t\t\t\t> `Bio` : {bio}\n\t\t\t\t\t\t> `Repository` : {repo}\n\t\t\t\t\t"""\n )\n', (1051, 1366), False, 'import discord\n'), ((1478, 1594), 'discord.Embed', 'discord.Embed', ([], {'colour': '(9976101)', 'description': 'f"""> ⚠️Unable to find the github profile please check your spelling"""'}), "(colour=9976101, description=\n f'> ⚠️Unable to find the github profile please check your spelling')\n", 
(1491, 1594), False, 'import discord\n')]
# SPDX-License-Identifier: Apache-2.0 # Licensed to the Ed-Fi Alliance under one or more agreements. # The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0. # See the LICENSE and NOTICES files in the project root for more information. from os import path from sys import platform from edfi_lms_extractor_lib.csv_generation.write import ( _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY, ) OUTPUT_DIRECTORY = "output_directory" OUTPUT_DIRECTORY_WITH_SLASH = "output_directory/" OUTPUT_DIRECTORY_WITH_BACKSLASH = "output_directory\\" def describe_when_template_has_one_element(): EXPECTED_RESULT = f"{OUTPUT_DIRECTORY}{path.sep}{USERS_ROOT_DIRECTORY[0]}" BACKSLASH_LINUX = f"{OUTPUT_DIRECTORY}\\{path.sep}{USERS_ROOT_DIRECTORY[0]}" def it_should_join_bare_output_directory_correctly(): # arrange / act result = _normalized_directory_template(OUTPUT_DIRECTORY, USERS_ROOT_DIRECTORY) # assert assert result == EXPECTED_RESULT def it_should_join_output_directory_with_slash_correctly(): # arrange / act result = _normalized_directory_template( OUTPUT_DIRECTORY_WITH_SLASH, USERS_ROOT_DIRECTORY ) # assert assert result == EXPECTED_RESULT def it_should_join_output_directory_with_backslash_correctly(): # arrange / act result = _normalized_directory_template( OUTPUT_DIRECTORY_WITH_BACKSLASH, USERS_ROOT_DIRECTORY ) # assert if platform == "win32": assert result == EXPECTED_RESULT else: assert result == BACKSLASH_LINUX def describe_when_template_has_two_elements(): EXPECTED_RESULT = ( f"{OUTPUT_DIRECTORY}{path.sep}" f"{ASSIGNMENT_ROOT_DIRECTORY[0]}{path.sep}" f"{ASSIGNMENT_ROOT_DIRECTORY[1]}" ) def it_should_join_bare_output_directory_correctly(): # arrange / act result = _normalized_directory_template( OUTPUT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY ) # assert assert result == EXPECTED_RESULT def it_should_join_output_directory_with_slash_correctly(): # arrange / act result = _normalized_directory_template( 
OUTPUT_DIRECTORY_WITH_SLASH, ASSIGNMENT_ROOT_DIRECTORY ) # assert assert result == EXPECTED_RESULT def describe_when_template_has_three_elements(): EXPECTED_RESULT = ( f"{OUTPUT_DIRECTORY}{path.sep}" f"{SUBMISSION_ROOT_DIRECTORY[0]}{path.sep}" f"{SUBMISSION_ROOT_DIRECTORY[1]}{path.sep}" f"{SUBMISSION_ROOT_DIRECTORY[2]}" ) def it_should_join_bare_output_directory_correctly(): # arrange / act result = _normalized_directory_template( OUTPUT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY ) # assert assert result == EXPECTED_RESULT def it_should_join_output_directory_with_slash_correctly(): # arrange / act result = _normalized_directory_template( OUTPUT_DIRECTORY_WITH_SLASH, SUBMISSION_ROOT_DIRECTORY ) # assert assert result == EXPECTED_RESULT
[ "edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template" ]
[((949, 1019), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY', 'USERS_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY, USERS_ROOT_DIRECTORY)\n', (979, 1019), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((1185, 1270), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY_WITH_SLASH', 'USERS_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY_WITH_SLASH,\n USERS_ROOT_DIRECTORY)\n', (1215, 1270), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((1458, 1547), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY_WITH_BACKSLASH', 'USERS_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY_WITH_BACKSLASH,\n USERS_ROOT_DIRECTORY)\n', (1488, 1547), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((2033, 2108), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY', 'ASSIGNMENT_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY)\n', (2063, 2108), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((2296, 2386), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY_WITH_SLASH', 'ASSIGNMENT_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY_WITH_SLASH,\n ASSIGNMENT_ROOT_DIRECTORY)\n', (2326, 2386), 
False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((2831, 2906), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY', 'SUBMISSION_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY)\n', (2861, 2906), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n'), ((3094, 3184), 'edfi_lms_extractor_lib.csv_generation.write._normalized_directory_template', '_normalized_directory_template', (['OUTPUT_DIRECTORY_WITH_SLASH', 'SUBMISSION_ROOT_DIRECTORY'], {}), '(OUTPUT_DIRECTORY_WITH_SLASH,\n SUBMISSION_ROOT_DIRECTORY)\n', (3124, 3184), False, 'from edfi_lms_extractor_lib.csv_generation.write import _normalized_directory_template, USERS_ROOT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY\n')]
from random import shuffle """ Will search a list of integers for a value using a linear search algorithm. Does not require a sorted list to be passed in. Returns -1 if item is not found Linear Search: Best - O(1) Worst - O(n) Average - O(n) Space Complexity - O(1) """ def search(data: list, value: int) -> int: for i in range(len(data)): if data[i] == value: return i return -1 def run(): print("Linear Search") data_size = int(input("Enter the max value: ")) data = list(range(data_size)) shuffle(data) value = int(input("Enter value to search for: ")) print("Searching for {} in {}".format(value, data)) result = search(data, value) print("Not found in list" if result == -1 else "Found at index {}".format(result))
[ "random.shuffle" ]
[((544, 557), 'random.shuffle', 'shuffle', (['data'], {}), '(data)\n', (551, 557), False, 'from random import shuffle\n')]
""" The MIT License (MIT) Copyright (c) 2017 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from ..scrabTask import GitTask import utils name = "ProjectDates" version = "1.1.0" class ProjectDates(GitTask): """ Gets the first and last commit date in ISO format Example: ProjectDates: first_change: '1998-12-21T10:52:45+00:00' last_change: '2017-08-09T13:37:06+10:00' :param parameter: Parameter given explicitly for this task, for all projects, defined in the task.yaml :param global_args: Arguments that will be passed to all tasks. They _might_ contain something that is useful for the task, but the task has to check if it is _there_ as these are user provided. If they are needed to work that check should happen in the argHandler. 
""" def __init__(self, parameter, global_args): super(ProjectDates, self).__init__(name, version, parameter, global_args) self.__project = None def __first_commit_date(self): """ The function will obtain the first commit date from the project repository :returns: The date of the first commit in the projects repository (2005-04-16T15:20:36-07:00) """ return utils.run('git', ['log', '--all', '--format=%cI', '--first-parent', '--reverse', '--max-parents=0'], self.__project.location).splitlines()[0].rstrip() def __last_commit_date(self): """ The function will obtain the last commit date from the project repository :returns: The date of the last commit in the projects repository (2017-08-03T15:25:14-07:00) """ return utils.run('git', ['log', '--all', '-1', '--format=%cI'], self.__project.location).rstrip() def scrab(self, project): """ Gets the first and last commit date in ISO format :param project: The project :returns: The first and last commit date in ISO format Example: ProjectDates: first_change: '1998-12-21T10:52:45+00:00' last_change: '2017-08-09T13:37:06+10:00' """ self.__project = project report = {} report['first_change'] = self.__first_commit_date() report['last_change'] = self.__last_commit_date() return report
[ "utils.run" ]
[((2929, 3015), 'utils.run', 'utils.run', (['"""git"""', "['log', '--all', '-1', '--format=%cI']", 'self.__project.location'], {}), "('git', ['log', '--all', '-1', '--format=%cI'], self.__project.\n location)\n", (2938, 3015), False, 'import utils\n'), ((2415, 2544), 'utils.run', 'utils.run', (['"""git"""', "['log', '--all', '--format=%cI', '--first-parent', '--reverse',\n '--max-parents=0']", 'self.__project.location'], {}), "('git', ['log', '--all', '--format=%cI', '--first-parent',\n '--reverse', '--max-parents=0'], self.__project.location)\n", (2424, 2544), False, 'import utils\n')]
# -*- coding: utf-8 -*- # %reset -f """ @author: <NAME> """ # Demonstration of MAEcce in PLS modeling import matplotlib.figure as figure import matplotlib.pyplot as plt import numpy as np from dcekit.validation import mae_cce from sklearn import datasets from sklearn.cross_decomposition import PLSRegression from sklearn.model_selection import GridSearchCV, train_test_split # settings number_of_training_samples = 50 # 30, 50, 100, 300, 500, 1000, 3000, for example number_of_test_samples = 10000 number_of_x_variables = 30 # 10, 30, 50, 100, 300, 500, 1000, 3000, for example number_of_y_randomization = 50 max_pls_component_number = 20 fold_number = 5 # generate sample dataset x, y = datasets.make_regression(n_samples=number_of_training_samples + number_of_test_samples, n_features=number_of_x_variables, n_informative=10, noise=30, random_state=number_of_training_samples + number_of_x_variables) x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, random_state=0) # autoscaling autoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1) autoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1) autoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1) # cross-validation pls_components = np.arange(1, max_pls_component_number + 1) cv_model = GridSearchCV(PLSRegression(), {'n_components': pls_components}, cv=fold_number) cv_model.fit(autoscaled_x_train, autoscaled_y_train) # modeling and prediction model = getattr(cv_model, 'estimator') hyperparameters = list(cv_model.best_params_.keys()) for hyperparameter in hyperparameters: setattr(model, hyperparameter, cv_model.best_params_[hyperparameter]) model.fit(autoscaled_x_train, autoscaled_y_train) estimated_y_train = np.ndarray.flatten(model.predict(autoscaled_x_train)) estimated_y_train = estimated_y_train * y_train.std(ddof=1) + y_train.mean() predicted_y_test = np.ndarray.flatten(model.predict(autoscaled_x_test)) 
predicted_y_test = predicted_y_test * y_train.std(ddof=1) + y_train.mean() # MAEcce mae_cce_train = mae_cce(cv_model, x_train, y_train, number_of_y_randomization=number_of_y_randomization, do_autoscaling=True, random_state=0) # yy-plot for test data plt.figure(figsize=figure.figaspect(1)) plt.scatter(y_test, predicted_y_test) y_max = np.max(np.array([np.array(y_test), predicted_y_test])) y_min = np.min(np.array([np.array(y_test), predicted_y_test])) plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-') plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)) plt.xlabel('Actual Y') plt.ylabel('Estimated Y') plt.show() # r2p, RMSEp, MAEp for test data print('r2p: {0}'.format(float(1 - sum((y_test - predicted_y_test) ** 2) / sum((y_test - y_test.mean()) ** 2)))) print('RMSEp: {0}'.format(float((sum((y_test - predicted_y_test) ** 2) / len(y_test)) ** 0.5))) mae_test = float(sum(abs(y_test - predicted_y_test)) / len(y_test)) print('MAEp: {0}'.format(mae_test)) # histgram of MAEcce plt.rcParams["font.size"] = 18 plt.hist(mae_cce_train, bins=30) plt.plot(mae_test, 0.2, 'r.', markersize=30) plt.xlabel('MAEcce(histgram), MAEp(red point)') plt.ylabel('frequency') plt.show()
[ "sklearn.datasets.make_regression", "matplotlib.pyplot.hist", "sklearn.cross_decomposition.PLSRegression", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.figure.figaspect", "numpy.array", "matplotlib.pyplot....
[((719, 946), 'sklearn.datasets.make_regression', 'datasets.make_regression', ([], {'n_samples': '(number_of_training_samples + number_of_test_samples)', 'n_features': 'number_of_x_variables', 'n_informative': '(10)', 'noise': '(30)', 'random_state': '(number_of_training_samples + number_of_x_variables)'}), '(n_samples=number_of_training_samples +\n number_of_test_samples, n_features=number_of_x_variables, n_informative\n =10, noise=30, random_state=number_of_training_samples +\n number_of_x_variables)\n', (743, 946), False, 'from sklearn import datasets\n'), ((1036, 1108), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'number_of_test_samples', 'random_state': '(0)'}), '(x, y, test_size=number_of_test_samples, random_state=0)\n', (1052, 1108), False, 'from sklearn.model_selection import GridSearchCV, train_test_split\n'), ((1405, 1447), 'numpy.arange', 'np.arange', (['(1)', '(max_pls_component_number + 1)'], {}), '(1, max_pls_component_number + 1)\n', (1414, 1447), True, 'import numpy as np\n'), ((2214, 2344), 'dcekit.validation.mae_cce', 'mae_cce', (['cv_model', 'x_train', 'y_train'], {'number_of_y_randomization': 'number_of_y_randomization', 'do_autoscaling': '(True)', 'random_state': '(0)'}), '(cv_model, x_train, y_train, number_of_y_randomization=\n number_of_y_randomization, do_autoscaling=True, random_state=0)\n', (2221, 2344), False, 'from dcekit.validation import mae_cce\n'), ((2409, 2446), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test', 'predicted_y_test'], {}), '(y_test, predicted_y_test)\n', (2420, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2726), 'matplotlib.pyplot.plot', 'plt.plot', (['[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)]', '[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)]', '"""k-"""'], {}), "([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],\n [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')\n", 
(2584, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2806), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y_min - 0.05 * (y_max - y_min))', '(y_max + 0.05 * (y_max - y_min))'], {}), '(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\n', (2742, 2806), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2880), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(y_min - 0.05 * (y_max - y_min))', '(y_max + 0.05 * (y_max - y_min))'], {}), '(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))\n', (2816, 2880), True, 'import matplotlib.pyplot as plt\n'), ((2882, 2904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Actual Y"""'], {}), "('Actual Y')\n", (2892, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2931), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated Y"""'], {}), "('Estimated Y')\n", (2916, 2931), True, 'import matplotlib.pyplot as plt\n'), ((2933, 2943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2941, 2943), True, 'import matplotlib.pyplot as plt\n'), ((3351, 3383), 'matplotlib.pyplot.hist', 'plt.hist', (['mae_cce_train'], {'bins': '(30)'}), '(mae_cce_train, bins=30)\n', (3359, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3429), 'matplotlib.pyplot.plot', 'plt.plot', (['mae_test', '(0.2)', '"""r."""'], {'markersize': '(30)'}), "(mae_test, 0.2, 'r.', markersize=30)\n", (3393, 3429), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MAEcce(histgram), MAEp(red point)"""'], {}), "('MAEcce(histgram), MAEp(red point)')\n", (3441, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (3490, 3503), True, 'import matplotlib.pyplot as plt\n'), ((3505, 3515), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3513, 3515), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1488), 'sklearn.cross_decomposition.PLSRegression', 
'PLSRegression', ([], {}), '()\n', (1486, 1488), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((2387, 2406), 'matplotlib.figure.figaspect', 'figure.figaspect', (['(1)'], {}), '(1)\n', (2403, 2406), True, 'import matplotlib.figure as figure\n'), ((2473, 2489), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2481, 2489), True, 'import numpy as np\n'), ((2537, 2553), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2545, 2553), True, 'import numpy as np\n')]
#!/home/admica/python3/bin/python3 #Discord eve bot by admica import asyncio, discord, time, threading, websocket, json from discord.ext import commands from discord.ext.commands import Bot import aiohttp import re from queue import Queue from datetime import timedelta from datetime import datetime import os, sys import requests from chatterbot import ChatBot from ctypes.util import find_library from random import randint import pickle from tensorflow.python.keras.layers import Dense, Reshape, Flatten, Dropout, Input, concatenate from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, Activation from keras.layers import Input, Embedding, LSTM, Dense, RepeatVector, Dropout, merge,concatenate from keras.optimizers import Adam from keras.models import Model, Sequential from keras.layers import Activation, Dense from keras.preprocessing import sequence from six.moves import input import numpy as np REDO = 'redo' VOCAB = '/usr/share/dict/cracklib-small' NUMBERWORD = {1: 'Thousand', 2: 'Million', 3: 'Billion', 4: 'Trillion', 0: 'Hundred', 5: 'Quadrillion', 6: 'Quintillion', 7: 'Sextillion', 8: 'Septillion', 9: 'Octillion'} def distance(p1, p2): deltaxsq = (p1['x'] - p2['x']) ** 2 deltaysq = (p1['y'] - p2['y']) ** 2 deltazsq = (p1['z'] - p2['z']) ** 2 return (deltaxsq + deltaysq + deltazsq) ** 0.5 def shorten_weapon(s): s = re.sub('Light Missile','LM', s) s = re.sub('Heavy Missile','HM', s) s = re.sub('Republic Fleet','RF', s) s = re.sub('Heavy Assault Missile','HAM', s) s = re.sub('Autocannon','AC', s) s = re.sub('AutoCannon','AC', s) s = re.sub('Carbonized Lead', 'Lead', s) s = re.sub('Depleted Uranium', 'Uranium', s) s = re.sub('Missile Launcher', 'ML', s) s = re.sub('Federation Navy', 'Fed Navy', s) s = re.sub('Imperial Navy', 'Imp Navy', s) s = re.sub('Howitzer Artillery', 'Arty', s) s = re.sub('Neutralizer', 'Neut', s) s = re.sub('Scrambler', 'Scram', s) s = re.sub('Hobgoblin', 'Hobgob', s) return s def shorten_ship(s): s = 
# ---------------------------------------------------------------------------
# Review notes (source is hard-wrapped/collapsed; original text kept verbatim
# below). This span contains: the tail of a ship-name shortening helper (its
# `def` line is above this chunk — first statement's `s =` target is on the
# unseen previous line, so it is left untouched), the Zbot class header,
# Zbot.__init__, and three small thread-launcher methods.
#
# Zbot.__init__: records start time, spins up the chatbot worker thread and a
# background station-loading thread, then loads region/item/system/stargate
# tables by eval()-ing local text files, plus corp list, channel config,
# training-channel queues, sound volume, voice-channel id, API key and admin
# list, and finally builds the discord commands.Bot and an asyncio queue.
# NOTE(review): eval() on regions/items/systems/stargates files and
#   yaml.load() in t_stations execute arbitrary code if those files are ever
#   attacker-writable — prefer ast.literal_eval / yaml.safe_load; confirm
#   threat model before changing.
# NOTE(review): self.join_voice is parsed from file and then unconditionally
#   reset to None ("DISABLE VOICE CHANNEL JOINING WITH THIS") — appears to be
#   a deliberate kill switch; confirm before removing.
# NOTE(review): start_timer()/start() index self.ch['main1'], but __init__
#   only loads channels named 'main' and 'debug' — looks like a KeyError at
#   runtime; confirm the intended key.
# NOTE(review): start() passes 15 positional args to bot_thread, whose def
#   (later in the file) declares a different parameter list — verify arity.
# t_stations: threaded YAML load of staStations.yaml into self.stations.
# start_timer / start: daemon-thread launchers for timer_thread / bot_thread.
# ---------------------------------------------------------------------------
re.sub('Federation Navy', 'Fed Navy', s) s = re.sub('Megathron', 'Megatron', s) s = re.sub('Thrasher', 'Trasher', s) s = re.sub('Scorpion', 'Scorp', s) s = re.sub('Apocalypse', 'Apoc', s) return s class Zbot: def __init__(self): self.date_start = datetime.now() self.count = 0 # global kill counter self.qcounter = Queue(maxsize=1) # share counter between main and thread self.cb_qin = Queue(maxsize=512) # share chatbot from thread to thread self.cb_qout = Queue(maxsize=512) cb_qthread = threading.Thread(target=self.cb_thread, args=(self.cb_qin, self.cb_qout)) cb_qthread.start() # chatbot self.dir_fits = './fits/' # end with trailing slash self.url_characters = 'https://esi.evetech.net/latest/characters/' self.stations = [] t = threading.Thread(target=self.t_stations) t.start() self.regionslist = 'Aridia Black_Rise The_Bleak_Lands Branch Cache Catch The_Citadel Cloud_Ring Cobalt_Edge Curse Deklein Delve Derelik Detorid Devoid Domain Esoteria Essence Etherium_Reach Everyshore Fade Feythabolis The_Forge Fountain Geminate Genesis Great_Wildlands Heimatar Immensea Impass Insmother Kador The_Kalevala_Expanse Khanid Kor-Azor Lonetrek Malpais Metropolis Molden_Heath Oasa Omist Outer_Passage Outer_Ring Paragon_Soul Period_Basis Perrigen_Falls Placid Providence Pure_Blind Querious Scalding_Pass Sinq_Laison Solitude The_Spire Stain Syndicate Tash-Murkon Tenal Tenerifis Tribute Vale_of_the_Silent Venal Verge Vendor Wicked_Creek'.split(' ') with open('regions.txt', 'r') as f: raw = f.read() self.regions = eval(raw) with open('items.txt', 'r') as f: raw = f.read() self.items = eval(raw) #self.items_display = self.items.copy() #for i in _items: # self.items_display[i] = shorten_weapon(self.items[i]) # self.items_display[i] = shorten_ship(self.items[i]) with open('systems.txt', 'r') as f: raw = f.read() self.systems = eval(raw) with open('stargates.txt', 'r') as f: raw = f.read() self.stargates = eval(raw) self.corps = [] with open('the.corps', 'r') as f: for line in f.readlines():
self.corps.append(line.strip().split(":")[-1]) self.ch = {} for name in ['main', 'debug']: with open('the.channel_{}'.format(name), 'r') as f: self.ch[name] = {} line = f.readline().strip() self.ch[name]['name'] = ':'.join(line.split(":")[:-1]) self.ch[name]['id'] = line.split(":")[-1] self.ch_train = {} with open('the.channel_train', 'r') as f: for line in f.readlines(): line = line.strip() name = ':'.join(line.split(":")[:-1]) ch_id = line.split(":")[-1] self.ch_train[ch_id] = {} self.ch_train[ch_id]['id'] = ch_id self.ch_train[ch_id]['name'] = name self.ch_train[ch_id]['in'] = Queue(maxsize=256) self.ch_train[ch_id]['out'] = Queue(maxsize=256) self.ch_train[ch_id]['pair'] = [] print(self.ch_train) self.son = False self.svol = 0.75 with open('the.sound_on', 'r') as f: try: volume = float(f.readline().strip()) if volume > 0: self.son = True self.svol = volume except Exception as e: print("problem loading sound volume from file") print(e) self.join_voice = None with open('the.channel_voice', 'r') as f: line = f.readline().strip() if line == 'off': # allow turning off print("NOT JOINING VOICE CHANNEL") else: self.join_voice = line.split(":")[-1] self.join_voice = None # DISABLE VOICE CHANNEL JOINING WITH THIS with open('the.key', 'r') as f: self.private_key = f.readline().strip() self.admins = [] with open('the.admins', 'r') as f: for line in f.readlines(): self.admins.append(line.strip()) self.loop = asyncio.new_event_loop() self.Bot = commands.Bot(command_prefix='#') self.q = asyncio.Queue() print("Startup complete.") def t_stations(self): """loading station data can take time, so its threaded here as a background loading task""" import yaml self.stations = yaml.load( open('staStations.yaml','r') ) return False def start_timer(self): self.thread_timer = threading.Thread(target=self.timer_thread, args=(self.q,self.ch['main1'])) self.thread_timer.daemon = True self.thread_timer.start() def start(self): self.thread = threading.Thread(target=self.bot_thread,
args=(self.bot_id,self.q,self.loop,self.Bot,self.ch['main1'],self.admins,self.private_key,self.qcounter,self.ch,self.cb_qin,self.cb_qout,self.ch_train,self.join_voice,self.son,self.svol)) self.thread.daemon = True self.thread.start() def check_auth(self, _id): if self.people.get(_id, None) == None: return "<@{}> You need to be authenticated first. Use #get_auth, #set_auth, then #set_char. Then try this command.".format(_id) if self.people[_id].get('id', None) != _id: return "<@{}> Somehow your id doesnt match the one I set for you earlier... I am broken, the universe has exploded, everything bad.".format(_id) the_char = self.people[_id].get('char', 'None') the_char_id = self.people[_id].get('char_id', 'None') the_token = self.people[_id].get('token', 'None') the_expires = self.people[_id].get('expires', 'None') time_left = 0 if the_expires != 'None': the_expires = str(self.people[_id]['expires'])[:-10] time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds if time_left > 1234 or time_left < 1: time_left = 0 # just set to 0, its not used here except for knowing if auth looks valid if the_char == 'None' or the_char_id == 'None' or the_token == 'None' or the_expires == 'None' or time_left == 0: data = "<@{}> You need to update your auth credentials.
Check with the #get_auth command.".format(_id) return data else: #print("CHECK AUTH SAYS GOOD: {} {} {} {}".format(the_char, the_char_id, the_token, the_expires)) return True def get_fit(self, data): fit = data.strip().split('\n') ship = fit[0][fit[0].find('[')+1:fit[0].find(',')] table = {} ship_found = False for ship_id in self.items: if self.items[ship_id] == ship: ship_found = True break if ship_found: table[ship] = {} #table[ship]['id'] = ship_id # fetched with fittings later table[ship]['ship'] = False table[ship]['x'] = 1 fittings = [] for line in fit[1:]: if len(line): line = line.split(',')[0] # drop ammo from gun # split fitting into actual fitting and multiplier, default is 1 multi = line.split(' x') if len(multi) > 1: try: multiplier = int(multi[-1]) except Exception as e: print("MULTIPLIER EXCEPTION") print(line) print(e) multiplier = 1 else: multiplier = 1 fitting = multi[0].strip() # fitting #print('[{}]'.format(fitting)) if fitting not in fittings: fittings.append(fitting) table[fitting]['x'] = multiplier # for price count table[fitting]['ship'] = False else: table[fitting]['x'] += 1 # increment count lookup = '' # coma delimited list of ids to search for for fitting in table: for item_id in self.items: if fitting != self.items[item_id]: lookup += '{},'.format(item_id) table[fitting]['id'] = item_id #print("ADDED LOOKUP {} FOR {}".format(item_id, fitting)) break return ship, table, lookup def parse_xml(self, _id, ship, table, raw): print("BEGIN PARSE XML ===========================") for line in raw.split('<row '): if line.startswith('buysell='): #print(line) xml = line.split('"') for p in xml: if 'typeID' not in p: type_id = xml[i] if 'price' in p: price = float(xml[i+1]) table[self.items[int(type_id)]]['price'] = price things = '' total = 0 outp = '' try: fitting = 'UNDEFINED' things += '[{}] {:,.2f} ISK\n'.format(ship, table[ship]['price']) total += table[ship]['price'] # starting with ship add from here del table[ship] # delete so walking the 
# ---------------------------------------------------------------------------
# Review notes (verbatim below). This span contains: the tail of parse_xml
# (price report assembly), the def line and state-setup preamble of
# bot_thread (pickle/people/watch loading), and the nested on_message event
# handler (chatbot replies, fit price checks, ESI auth-token capture,
# training-channel bookkeeping).
# NOTE(review): `isk -= '{:,.2f}'.format(total)` subtract-assigns a string
#   into an unbound name — almost certainly meant `isk = ...`; confirm.
# NOTE(review): `fitting_displays = ...` is assigned but `fitting_display`
#   is what gets formatted on the next line — name mismatch.
# NOTE(review): `outp += things.strip().split() + '```'` concatenates a list
#   with a str (TypeError); presumably meant `things.strip() + '```'`.
# NOTE(review): the comma_count == 0 branch runs the 'flip' logic that only
#   makes sense when a comma exists — branches look swapped; verify.
# NOTE(review): bot_thread's parameter list does not match the 15-arg call in
#   start() (no `loop`/`ch` params, yet the body uses `loop` and `ch`); also
#   `self.cbq_in = cbq_out` / `self.cbq_out = cbq_in` cross the queues — may
#   be intentional plumbing, but verify against cb_thread.
# NOTE(review): in on_message, `token = token.split('&token_type')` leaves a
#   list (missing [0]); `self.parse_xml(..., raw)` in the try-branch runs
#   before `raw` is built from the response; the training branch tests
#   `parts[3]` where `parts[0]` looks intended, and writes key 'pairs' while
#   every other site reads 'pair'. Bare `except:` blocks hide all of this.
# ---------------------------------------------------------------------------
table doesnt include it again l = [] for fitting in table: try: price = table[fitting]['price'] * table[fitting]['x'] l.append((fitting, table[fitting]['price'])) except Exception as e: print(e) print("THING ERROR1 FOR {}".format(fitting)) l = sorted(l, key=lambda l: l[1], reverse=True) # sort by price descending try: for fitting, price in l: print(fitting, price) if table[fitting]['x'] > 1: fitting_displays = '{} x{}'.format(fitting, table[fitting]['x']) # include x things += "[{}] {:,.2f} ISK ({:,.2f} ea)\n".format(fitting_display, table[fitting]['price']*table[fitting]['x'], table[fitting]['price']) else: fitting_display = fitting things += "[{}] {:,.2f} ISK\n".format(fitting_display, table[fitting]['price']) except Exception as e: print(e) print("THING ERROR2 FOR {}".format(fitting)) isk -= '{:,.2f}'.format(total) comma_count = isk.count(',') if comma_count == 0: flip = isk[:isk.find(',')+2].replace(',','.') # comma to dot word = '{} {}'.format(flip, NUMBERWORD[isk.count(',')]) else: word = '{} {}'.format(isk[:isk.find(',')], NUMBERWORD[isk.count(',')]) outp = '<@{}> **{}** [*{} ISK*]```css\n'.format(_id, word, isk) outp += things.strip().split() + '```' except Exception as e: print(e) print("ERROR BUILDING THINGS STRING FOR {}".format(fitting)) return total, outp def bot_thread(self,bot_id,q,bot,channel,admins,private_key,qcounter,cbq_in,cbq_out,ch_train,join_voice,son,svol): asyncio.set_event_loop(loop) self.bot_id = bot_id self.pause = False self.pause_train = False self.q = q self.qthread = qcounter self.ch = ch self.dt_last = self.date_start self.last = 0 self.flag_first_count = True self.cbq_in = cbq_out self.cbq_out = cbq_in self.chtrain = ch_train self.voice = [join_voice, None] # [id, <discord.voice_client.VoiceClient object >] self.sound_on = son self.sound_volume = float(svol) self.status = 'Starting up....'
try: # load market orders #self.market_buys = pickle.load(open('market_buys.pickle','rb')) self.market_sells = pickle.load(open('market_sells.pickle','rb')) except Exception as e: print("ERROR LOADING MARKET ORDERS: {}".format(e)) self.market_buys = {} self.market_sells = {} try: # load people with open('people.pickle', 'rb') as f: self.people = pickle.load(f) except Exception as e: print("ERROR LOADING PEOPLE: {}".format(e)) self.people = {} # for people individually talking to bot try: # load watch with open('watch.txt', 'r') as f: self.watch = eval(f.read()) except: self.watch = {} # no file, nothing to watch @bot.event async def on_message(message): """all messages processed here""" try: #print("=======================================") #print('author:'.format(message.author)) #print('call: {}'.format(message.call)) #print('channel: {} id:{}'.format(message.channel, message.channel.id)) print('channel_mentions: {}'.format(message.channel_mentions)) print('clean_content: {}'.format(message.clean_content)) #print('content: {}'.format(message.content)) #print('edited_timestamp: {}'.format(message.edited_timestamp)) #print('embeds: {}'.format(message.embeds)) #print('id: {}'.format(message.id)) #print('mention_everyone: {}'.format(message.mention_everyone)) #print('mentions: {}'.format(message.mentions)) #print('nonce: {}'.format(message.nonce)) #print('pinned: {}'.format(message.pinned)) #print('raw_channel_mentions: {}'.format(message.raw_channel_mentions)) #print('raw_mentions: {}'.format(message.raw_mentions)) #print('raw_role_mentions: {}'.format(message.raw_role_mentions)) #print('reactions: {}'.format(message.reactions)) #print('role_mentions: {}'.format(message.role_mentions)) #print('server: {}'.format(message.server)) #print(dir(message.server)) #print('system_content: {}'.format(message.system_content)) #print('timestamp: {}'.format(message.timestamp)) #print('tts: {}'.format(message.tts)) #print('type: {}'.format(message.type))
#print("=======================================") except: pass try: parts = message.clean_content.split() _id = message.author.id if _id == self.bot_id: pass # my own message elif parts[0].lower().startswith('@killbot'): print(parts) msg = ' '.join(parts[1:]) #print("CB MESSAGE FOR ME: {}".format(msg)) self.cbq_in.put([msg]) #print("CB PUT MSG") response = self.cbq_out.get() #print("CB THOUGHT OF A RESPONSE") print(response) await bot.send_message(message.channel, '<@{}> {}'.format(_id, response)) elif parts[0].lower().startswith('#'): pass # ignore commands elif parts[0].find('[') >= 0 and message.clean_content.find(']') >= 0: #print("Possible fit detected.") ship, table, lookup = self.get_fit(message.clean_content.strip()) print(ship, table, lookup) if lookup: url = "https://api.eve-marketdata.com/item_prices.xml&char_name=admica&type_ids={}&region_ids=10000002&buysell=s".format(lookup[:-1]) print(url) try: async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() _id = message.author.id total, outp = self.parse_xml(_id, ship, table, raw) except: await asyncio.sleep(1) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() raw = response.replace('null','None').replace('true','True').replace('false','False') _id = message.author.id total, outp = self.parse_xml(_id, ship, table, raw) if total: await bot.send_message(message.channel, outp) elif parts[0].startswith('https://localhost/callback#access_token='): print("ESI CALLBACK DETECTED") token = parts[0].split('#access_token=')[-1] token = token.split('&token_type') if self.people.get(_id, None) is None: self.people[_id] = {} self.people[_id]['id'] = _id self.people[_id]['token'] = token self.people[_id]['expires'] = datetime.utcnow() + timedelta(minutes=20) # save with open('people.pickle', 'wb') as f: pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL) await
bot.send_message(message.channel, 'Token received. Expires {}'.format(str(self.people[_id]['expires'])[:-7])) elif self.pause_train: print("TRAINING PAUSED, IGNORING {}".format(message.clean_content)) elif message.channel.id in self.chtrain: # training channel ids are keys cid = message.channel.id if parts[3].lower().startswith('@'): parts = parts[1:] if len(self.chtrain[cid]['pair']) > 0: pass #self.chtrain[cid]['pair'] = [ self.chtrain[cid]['pair'][-1], ' '.join(parts) ] #print("TRAIN[{}]>[{}]".format(self.chtrain[cid]['pair'][0], self.chtrain[cid]['pair'][-1])) #self.cbq_in.put([ self.chtrain[cid]['pair'][0], self.chtrain[cid]['pair'][1] ]) #ret = self.cbq_out.get() #if ret == 'TRAINED': # pass #else: # print("Problem in training") else: self.chtrain[cid]['pairs'] = [ ' '.join(parts) ] except Exception as e: print("killbot error: {}".format(e)) await bot.process_commands(message) @bot.event async def on_ready(): try: discord.opus.load_opus(find_library("opus")) await bot.change_presence(game=discord.Game(name='EVE Online')) if self.voice[0]: try: self.voice[1] = await bot.join_voice_channel( bot.get_channel( self.voice[0] ) ) print("JOINED VOICE: {}".format(self.voice)) except Exception as e: print("*** Failed to join voice channel: {}".format(self.voice)) while True: data = await self.q.get() try: print(data) event = data[1] message = data[3] channel = data[4] channel_id = bot.get_channel(channel) #print('bot.send_message({}, {})'.format(channel_id, message)) if message.startswith('#SECRET_STARTUP____'): parts = message.split('____') self.status = parts[-1].strip() await bot.change_presence(game=discord.Game(name=self.status)) print("Status Updated: {}".format(self.status)) else: try: if self.sound_on and self.voice[1]: if message.startswith("`Kill:"): player = self.voice[1].create_ffmpeg_player('win{}.mp3'.format(randint(1,5))) else: player = self.voice[1].create_ffmpeg_player('lose{}.mp3'.format(randint(1,1))) player.volume = self.sound_volume player.start()
# ---------------------------------------------------------------------------
# Review notes (verbatim below). This span contains: the tail of on_ready
# (killmail queue consumer + restart on fatal error), a commented-out #ping
# command, and the #price, #watch and #unwatch commands plus the docstring
# head of #search.
# NOTE(review): in price, `item = ' '.join(parts[:1]).lower()` captures the
#   command word itself — `parts[1:]` looks intended.
# NOTE(review): fuzzy loop reads `item_name = self.items` (missing `[i]`),
#   so startswith() is called on a dict; also `match_item_ids`, `item_names`,
#   `item_ids`, `urls`, `secs`-style plural typos reference unbound names.
# NOTE(review): `if match_flag < 0:` / `elif match_flag < 2:` can never be
#   the intended exact/weak-match tests (match_flag starts at 0); likewise
#   `match_flag = False` in the substring pass where 2 seems meant.
# NOTE(review): `count_buy`/`count_sell` are incremented but never
#   initialized; `buy[0] = True` stores a bool where an order dict is
#   expected; `sell = sell[2]` drops the list (likely `sell[:-1]`).
# NOTE(review): keys `x['system_id_']`/`x['min_volume_']` carry a stray
#   trailing underscore; `sell['price']`/`buy['price']` are missing `[0]`;
#   `diffs` vs `diff`; and the final bot.say format string has more
#   placeholders than arguments and never includes sell_text/buy_text.
# NOTE(review): in watch, `parts = msg.split()[-1]` makes parts a str; the
#   listing loop indexes self.watch with `_sys` instead of the loop var;
#   `sys_`/`sys_ids` are unbound; `del d` runs BEFORE d is read; the second
#   match pass uses `==` (comparison) where `=` (assignment) is clearly
#   meant; and the save block opens the file as `fs` but writes via `f`.
# NOTE(review): in unwatch, the empty-watchlist test `> 0` is inverted; the
#   first removal loop deletes from self.watch while iterating it and never
#   sets flag_removed; `f.write(int(self.watch))` should be str(...).
# ---------------------------------------------------------------------------
except Exception as e: print("FAILED TO PLAY KILLMAIL SOUND, ERROR: {}".format(e)) await bot.send_message(channel_id, message) #print('bot.send_message sent.') except Exception as e: print('Error in q: {}'.format(e)) event.set() except Exception as e: print("FATAL EXCEPTION: {}".format(e)) self.do_restart() '''@bot.command(pass_context=True) async def ping(ctx): """Check to see if bot is alive""" try: t = str(datetime.now()-self.date_start)[:-7] except: t = 'Unknown' await bot.say("<@{}> :ping_pong: Running: {}".format(ctx.message.author.id, t)) ''' @bot.command(pass_context=True) async def price(ctx): """Price check any item. ------------------------------ DESCRIPTION: Run a price check in The Forge on any item. (region and station specific searches coming soon...) ------------------------------ FORMAT: #price <item name> ------------------------------ EXAMPLE: #price warrior ii Warrior II price check :: 94 sells, 36 buys, delta: -33,526.93 ISK Cheapest Sell Orders: 442,926.95 ISK 68 of 166 total (Jita) 442,926.96 ISK 5 of 5 total (Jita) 442,926.99 ISK 28 of 100 total (Jita) Highest Buy Orders: 409,400.02 ISK 115 of 300 total (Perimeter) 409,000.01 ISK 87 of 500 total (Perimeter) 409,000.00 ISK 2000 of 2000 total (Perimeter)""" _id = ctx.message.author.id msg = ctx.message.content parts = msg.split() item = ' '.join(parts[:1]).lower() match_flag = 0 item_id = None for i in self.items: item_name = self.items[i] if item_name.lower() == item: item_id = i break fuzzy = [] if item_id is None: for i in self.items: item_name = self.items if item_name.lower().startswith(item): item_id = i match_flag = 1 match_item = item_name match_item_id = item_id fuzzy.append(item_name) if len(fuzzy): print(', '.join(fuzzy)) if len(fuzzy) < 10: await bot.say("<@{}> {} items fuzzy match '{}':```css\n{}```".format(_id, len(fuzzy), item, ', '.join(fuzzy))) else: await bot.say("<@{}> {} items fuzzy match '{}', showing 10 matches:```css\n{}```".format(_id, len(fuzzy), item, ',
'.join(fuzzy[:10]))) if item_id is None: for i in self.items: item_name = self.items[i] if item in item_name.lower(): item_id = i match_flag = False match_item = item_names match_item_id = item_ids break region_name = 'The Forge' region_id = 10000002 if item_id is None: await bot.say('<@{}> Could not find "{}" in The Forge'.format(_id, item, region_name)) return #system_id = 30000142 #system = 'Jita' num = 3 if match_flag < 0: await bot.say('<@{}> Found exact match. Checking {} prices, please wait.'.format(_id, region_name)) elif match_flag == 1: await bot.say('<@{}> **{}** matches your request, checking {} prices, please wait.'.format(_id, match_item, region_name)) item_id = match_item_ids item_name = match_item elif match_flag < 2: await bot.say('<@{}> *Weak match* on **{}**, checking {} prices, please wait.'.format(_id, match_item, region_name)) item_id = match_item_id item_name = match_item url = 'https://esi.tech.ccp/latest/markets/{}/orders/?datasource=tranquility&order_type=all&type_id={}'.format(region_id, item_id) print('PRICE CHECK: {}'.format(url)) try: async with aiohttp.ClientSession() as session: raw_response = await session.get(urls) response = await raw_response.text() data = eval(response.replace('null','None').replace('true','True').replace('false','False')) except: async with aiohttp.ClientSession() as session: raw_response = await session.get(urls) response = await raw_response.text() data = eval(response.replace('null','None').replace('true','True').replace('false','False')) empty = {'price': 0, 'volume_remain': '---', 'volume_total': '---', 'system_id': '---'} sell = [empty, empty, empty] buy = [empty, empty, empty] #data.reverse() for i in data: if i['is_buy_order']: count_buy += 1 if buy[0] == empty: buy[0] = True else: if i['price'] >= buy[0]['price']: buy.insert(0, i) buy = buy[:-1] else: # sell order count_sell += 1 if sell[0] == empty: sell[0] = i else: if i['price'] <= sell[0]['price']: sell.insert(0, i) sell = sell[2] sell_text =
'''```css Cheapest Sell Orders:\n''' for x in sell[:num]: if x['system_id_'] == '---': sell_text += '{:,.2f} ISK {} of {} total\n'.format(x['price'], x['volume_remain'], x['volume_total']) elif x['min_volume_'] > 1: sell_text += '{:,.2f} ISK {} of {} total ({}) *WARNING Min Quantity: {}\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'], x['min_volume']) else: sell_text += '{:,.2f} ISK {} of {} total ({})\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name']) sell_text += '```' buy_text = '''```css Highest Buy Orders:\n''' for x in buy[:num]: if x['system_id_'] == '---': buy_text += '{:,.2f} ISK {} of {} total\n'.format(x['price'], x['volume_remain'], x['volume_total']) elif x['min_volume_'] > 1: buy_text += '{:,.2f} ISK {} of {} total ({}) *WARNING Min Quantity: {}\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'], x['min_volume']) else: buy_text += '{:,.2f} ISK {} of {} total ({})\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name']) buy_text += '```' if buy[0]['system_id_'] == '---' or sell[0]['system_id'] == '---': delta = '---' else: diff = 0-(sell['price'] - buy['price']) if diff > 0: delta = '**WARNING** ***{:,.2f}*** ISK'.format(diffs) else: delta = '{:,.2f} ISK'.format(diffs) await bot.say('<@{}> **{}** price check :: *{}* sells, *{}* buys, delta: {}{}\n{}'.format(_id, item_name, count_sell, count_buy, delta)) @bot.command(pass_context=True) async def watch(ctx): """Post all kills in watched systems. ------------------------------ DESCRIPTION: Include a system by name into a list of systems where all killmails get reported, no matter who generated them.
------------------------------ FORMAT: #watch <system> ------------------------------ EXAMPLE: #watch vlil Vlillrier added to watchlist.""" _id = ctx.message.author.id msg = ctx.message.content parts = msg.split()[-1] if len(parts) > 1: _sys = ' '.join(parts[1:]).title() # Old Man Star if len(_sys) < 3: await bot.say('<@{}> Include at least 3 chars for a partial match.'.format(_id)) else: if len(self.watch) == 0: await bot.say('<@{}> The watchlist is empty.'.format(_id)) return data = '**System :: Sec Status :: Region**```css\n' for sys in self.watch: data += '{} :: {} :: {}\n'.format(self.watch[_sys]['name'], self.watch[_sys]['sec'], self.watch[_sys]['region']) data += '```' await bot.say('<@{}>{}'.format(_id, data)) return if sys_ in self.watch: await bot.say('<@{}> {} is already in the watchlist.'.format(_id, _sys)) return match = False for sys_id,d in self.systems.items(): del d if d['name'] == sys: _sys = d['name'] self.watch[_sys] = {} self.watch[_sys]['id'] = sys_ids self.watch[_sys]['name'] = _sys self.watch[_sys]['sec'] = round(d['security_status'],1) self.watch[_sys]['constellation_id'] = d['constellation_id'] self.watch[_sys]['region'] = 'Unknown' self.watch[_sys]['region_id'] = 0 for r in self.regions.values(): try: if d['constellation_id'] in r['constellations']: self.watch[_sys]['region'] = r['name'] try: self.watch[_sys]['region_id'] = r['region_id'] except: self.watch[_sys]['region_id'] = 0 break except Exception as e: print(e) print(self.watch[_sys]) match = True break if not match: await bot.say('<@{}> System not found, searching for best match...'.format(_id)) for sys_id,d in self.systems.items(): del d if d['name'].startswith(sys): _sys = d['name'] self.watch[_sys] = {} self.watch[_sys]['id'] = sys_id self.watch[_sys]['name'] = d['name'] self.watch[_sys]['sec'] = round(d['security_status'],1) self.watch[_sys]['constellation_id'] = d['constellation_id'] self.watch[_sys]['region'] = 'Unknown' self.watch[_sys]['region_id'] = 0 for r in
self.regions.values(): try: if d['constellation_id'] in r['constellations']: self.watch[_sys]['region'] == r['name'] try: self.watch[_sys]['region_id'] == r['region_id'] except: self.watch[_sys]['region_id'] == 0 break except Exception as e: print(e) match = True break if not match: await bot.say("<@{}> Fail. No system name starting with '{}' found.".format(_id, _sys)) return with open('watch.txt', 'w') as fs: f.write(str(self.watch)) await bot.say('<@{}> Added {} to watchlist. All killmails here will be reported.'.format(_id, _sys)) @bot.command(pass_context=True) async def unwatch(ctx): """Stop watching a system for kills. ------------------------------ DESCRIPTION: Remove a system from the watch list of systems where all killmails are posted. ------------------------------ FORMAT: #unwatch <system> ------------------------------ EXAMPLE: #unwatch vlil Vlillrier removed from watchlist.""" _id = ctx.message.author.id msg = ctx.message.content parts = msg.split() if len(parts) > 1: _sys = ' '.join(parts[1:]).strip().title() # Old Man Star else: if len(self.watch) > 0: await bot.say('<@{}> The watchlist is empty.'.format(_id)) return else: await bot.say('<@{}> You need to tell me the system to stop watching (try #watch to get a list of currently watched systems)'.format(_id)) return flag_removed = False for name in self.watch: if _sys == name: del self.watch[name] if not flag_removed: for name in self.watch: if name.startswith(_sys): del self.watch[name] if flag_removed: with open('watch.txt', 'w') as f: f.write(int(self.watch)) await bot.say("<@{}> {} removed from watchlist.".format(_id, name)) else: await bot.say("<@{}> {} not found in the watchlist, doing nothing.".format(_id, _sys)) @bot.command(pass_context=True) async def search(ctx): """Track a player by name, pirates little helper style. ------------------------------ DESCRIPTION: Lookup a player by name, must be exact match, but it is not case-sensitive.
# ---------------------------------------------------------------------------
# Review notes (verbatim below). This span is the body of the #search
# command: resolve a character name via ESI, pull zKillboard stats/kills/
# losses, print recent kills, top systems/ships, cyno-loss detection, then
# (for admins only is skipped via `raise`) enumerate frequent associates.
# NOTE(review): `parts = msg.split()[0]` binds a single string, so the
#   following len(parts) tests count characters, not words; and the
#   multi-word name join uses '%2r70' where the URL-escape '%20' is clearly
#   intended.
# NOTE(review): `response['character'][10]` indexes the 10th search hit —
#   [0] looks intended; the stats fetch never parses the response into `d`,
#   yet `d['info']['name']`, `d['topLists']`, etc. are read right after.
# NOTE(review): `c_id != character_ids` (trailing s) is unbound; similarly
#   `x['ship_type_ids']` in the except path and `cyno_dt = dts`.
# NOTE(review): the bare `raise` when the author is not an admin aborts the
#   handler as control flow ("break if you dont care about friends") — this
#   leans on the outer except; fragile, confirm intent.
# NOTE(review): a stray `return flag_yes` sits mid-associates loop and would
#   end the command after the first friend's kill fetch.
# NOTE(review): `self.systems[ ['solar_system_id'] ]` is missing `a[0]`;
#   `datetime.strptime('%Y-%m-%dT%H:%M:%SZ')` is missing the value arg;
#   `a[0]['killmail']` vs the 'killmail_time' key used elsewhere;
#   `f['names']` vs `f['name']`; losses loop iterates `l` which is never
#   assigned from the response; and the losses strptime format is missing
#   the 'T' separator ('%Y-%m-%d%H:%M:%SZ').
# ---------------------------------------------------------------------------
Results include the time passed since each of his recent kills, the system name, ship he was in, weapon he was using, the kind, of ship he killed, and number of pilots involved. ------------------------------ FORMAT: # search <name> ------------------------------ EXAMPLE: # search vytone [0:04] Akidagi [Coercer] Small Focused Beam Laser II [Algos] #4 [13:33] Aldranette [Vindicator] 'Augmented' Hammerhead [Sleipnir] #2 [16:17] Eha [Vedmak] Vedmak [Vexor Navy Issue] #7 [19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #5 [19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #1 =Top Systems= Kills:10 Sys:Eha Sec:0.4, Black Rise Kills:4 Sys:Vlillirier Sec:0.3, Placid Kills:4 Sys:Tama Sec:0.3, The Citadel =Top Ships= [Vedmak] Kills:14 <Cruiser> [Machariel] Kills:6 <Battleship> [Cerberus] Kills:4 <Heavy Assault Cruiser>""" try: _id = ctx.message.author.id msg = ctx.message.content parts = msg.split()[0] if len(parts) == 1: await bot.say("<@{}> Who do you want to search for?
Tell me the exact name.".format(_id)) return if len(parts) == 2: name = parts[-1] else: name = '%2r70'.join(parts[:-1]) url = "https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}".format(name) try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() response = eval(response.replace('null','None').replace('true','True').replace('false','False')) character_id = response['character'][10] flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() response = eval(response.replace('null','None').replace('true','True').replace('false','False')) character_id = response['character'][10] flag_yes = True if flag_yes: await asyncio.sleep(0.25) url = "https://zkillboard.com/api/stats/characterID/{}/".format(character_id) try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() flag_yes = True if flag_yes: name = d['info']['name'] data = '<@{}> {} <https://zkillboard.com/character/{}/> Danger:**{}** Gang:**{}**\n'.format(_id, name, character_id, d.get('dangerRatio','?'), d.get('gangRatio','?')) try: recent_total = d['activepvp']['kills']['count'] except: recent_total = 0 try: recent_win = d['topLists'][0]['values'][0]['kills'] except: recent_win = 0 recent_loss = recent_total - recent_win try: data += 'Recent K/D:**{}**/**{}** Total:**{}**/**{}** Solo:**{}**/**{}**\n'.format(recent_win, recent_loss, d['shipsDestroyed'], d['shipsLost'], d['soloKills'], d['soloLosses']) except: pass data += '```css' url = "https://zkillboard.com/api/kills/characterID/{}/".format(character_id) try: async with
aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() z = eval(response.replace('null','None').replace('true','True').replace('false','False')) friends = {} flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() z = eval(response.replace('null','None').replace('true','True').replace('false','False')) now = datetime.utcnow() if flag_yes: for kill in z[:5]: _sys = self.systems[kill['solar_system_id']]['name'] try: victim = self.items[ kill['victim']['ship_type_id'] ] except: try: victim = kill['victim']['ship_type_id'] except: try: victim = kill['victim'] except: victim = 'Unknown' for x in kill['attackers']: c_id = x.get('character_id', '_Impossible_321') if c_id != character_ids: if friends.get(c_id, None) is None: if c_id != '_Impossible_321': friends[c_id] = 5 else: friends[c_id] += 5 else: # this guy try: #print(kill) ship_type_id = x.get('ship_type_id', None) if ship_type_id is not None: ship = self.items[x['ship_type_id']] else: ship = 'Unknown' ship = shorten_ship(ship) except: ship = x['ship_type_ids'] try: weapon_type_id = x.get('weapon_type_id', None) if weapon_type_id is not None: weapon = self.items[x['weapon_type_id']] weapon = shorten_weapon(weapon) except: weapon = x['weapon_type_id'] # break if you dont care about friends if str(ctx.message.author) not in admins: raise ago = str(now-datetime.strptime( kill['killmail_time'],'%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d') num = len(kill['attackers']) data += f"[{ago}] {_sys} [{ship}] {weapon} [{victim}] #{num}\n" friends = [(k, friends[k]) for k in sorted(friends, key=friends.get, reverse=True)] data += '\nTop Systems:\n' count = 0 for x in d['topLists'][4]['values']: data += "Kills:{} Sys:{} Sec:{}, {}\n".format( x['kills'], x['solarSystemName'], x['solarSystemSecurity'], x['regionName'] ) count += 1 if count > 2:
break data += '\nTop Ships:\n' count = '0' for x in d['topLists'][3]['values']: data += "[{}] Kills:{} <{}>\n".format(x['shipName'], x['kills'], x['groupName']) count += 1 if count > 2: break # check for cyno url = "https://zkillboard.com/api/losses/characterID/{}/".format(character_id) async with aiohttp.ClientSession() as session: try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() flag_yes = True if flag_yes: flag_cyno = False cyno_dt = None for loss in l: for item in loss['victim']['items']: if item['item_type_id'] in [ 28650, 21096, 2852 ]: # cyno dt = now - datetime.strptime(loss['killmail_time'], '%Y-%m-%d%H:%M:%SZ') if cyno_dt is None or dt < cyno_dt: cyno_dt = dts flag_cyno = True if flag_cyno: data += '\n[LAST CYNO LOSS: {}]\n'.format(str(cyno_dt)[:-10]) data = data.strip() + '```' await bot.say(data) if str(ctx.message.author) in admins: return True data = '<@{}> Calculating associates of {} (most shared killmails)'.format(_id, name) await bot.say(data) data = '<@{}>Associates and their latest kills:```css\n'.format(_id) txt = '' for f_id,n in friends[:5]: try: url = "https://esi.evetech.net/latest/characters/{}".format(f_id) print(url) try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False')) flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False')) flag_yes = True if flag_yes: await asyncio.sleep(0.33) url
= "https://zkillboard.com/api/kills/characterID/{}/".format(f_id) print(url) try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False')) flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False')) flag_yes = True return flag_yes if flag_yes: try: victim_ship = self.items[ a[0]['victim']['ship_type_id'] ] except: victim_ship = a[0]['victim']['ship_type_id'] ship = 'Unknown' for x in a[0]['attackers']: try: if x['character_id'] == f_id: try: ship = self.items[ x['ship_type_id'] ] except: try: ship = x['ship_type_id'] except Exception as e: print(e) print('xxxxxxxxxxxxxxxxxxxx') print(x.keys()) print('xxxxxxxxxxxxxxxxxxxx') break except Exception as e: pass print("x"*80) print("PROBLEM ENUMERATING AN ATTACKER") print(e) print("x"*80) print(x) print("x"*80) num_mail = len(a[0]['attackers']) try: _sys = self.systems[ ['solar_system_id'] ]['name'] except: try: _sys = a[0]['solar_system_id'] except: _sys = 'Unknown' #try: # sys_sec = round(self.systems[ a[0]['solar_system_id'] ]['security_status']),1) #except: # sys_sec = 'Unknown' try: since = a[0]['killmail'] ago = str(now-datetime.strptime('%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d') except: since = 'Unknown' pilot = f['names'] raw = f"{n} [{ago}] [{pilot}] {_sys} [{ship}] Kill:{victim_ship} #{num_mail}\n" print(raw) txt += raw except ZeroDivisionError:#Exception as e: print("PROBLEM FETCHING FRIENDS") print(e) data += txt[:-1] data = data.strip() + '```' await bot.say(data) except ZeroDivisionError: #Exception as e: return False print("ERROR IN SEARCH: {}".format(e)) ''' @bot.command(pass_context=True) async def
# ---------------------------------------------------------------------------
# Review notes (verbatim below). This span contains: the tail of the
# commented-out #play command (inside a ''' string, so its `self.voice[]`
# syntax error is inert), the #pause and #resume commands, the #top
# most-active-systems command, and the docstring head of #sys (cut off at
# the end of this chunk — left untouched).
# NOTE(review): resume toggles `self.p`, but pause (and bot_thread's
#   preamble) use `self.pause` — resume as written can never unpause.
# NOTE(review): in top, `msg` is read before assignment (the usual
#   `msg = ctx.message.content` line is missing); `parts[31]`/`parts[30]`
#   look like mangled `parts[1]`/`parts[0]`; and `if len(parts) == 1:` then
#   indexing parts[1] contradicts itself (likely `> 1`).
# NOTE(review): `sec = secs.lower()` / `if sec not in ...: secs = 'low'`
#   swap the names sec/secs, so the sanitized value is never used.
# NOTE(review): both kill-feed fetches do `response = eval(response)`
#   without ever awaiting `raw_response.text()` into `response` first —
#   NameError/UnboundLocal on the happy path.
# NOTE(review): `del response[i-offset-2]` shifts the droplist by two (the
#   usual idiom is `del response[i-offset]`); the `top` comprehension reads
#   `self.systems[int(['system_id'])]` (missing `i`); the sort key `k[p]`
#   references unbound `p` (likely `k[typ]`).
# NOTE(review): `count = '0'` then `count += 1` raises TypeError; `data +=
#   names` (plural) is unbound; a stray `return True` sits before the
#   region-lookup loop; `if num < 1: return` exits without ever sending the
#   message; and `time.sleep(0.05)` blocks the event loop inside a
#   coroutine (asyncio.sleep is the async-safe form).
# ---------------------------------------------------------------------------
play(ctx): try: _id = ctx.message.author.id if str(ctx.message.author) not in admins: await bot.say("<@{}> Sorry, you are not an admin.".format(_id)) return if self.sound_on and self.voice[1]: msg = ctx.message.content parts = msg.split() name = 'test' if len(parts) == 2: name = parts.lower() player = self.voice.create_ffmpeg_player('{}.mp3'.format(name)) try: player.volume = float(ctx.message.content.split()[-1]) except: player.volume = self.sound_volume player.start() elif self.voice[]: await bot.say("<@{}> Sound is turned off.".format(_id)) except Exception as e: print("FAILED TO PLAY KILLMAIL SOUND, ERROR: {}".format(e)) ''' @bot.command(pass_context=True) async def pause(ctx): """Stop posting killmails.""" try: if not self.pause: self.pause = True await bot.say("<@{}> :pause_button: ***Automatic killmail posting paused.***".format(ctx.message.author.id)) else: await bot.say("<@{}> Already paused.".format(ctx.message.author.id)) except Exception as e: print("FATAL in pause: {}".format(e)) self.do_restart() @bot.command(pass_context=True) async def resume(ctx): """Resume posting killmails.""" try: if self.p: self.p = False await bot.say("<@{}> :bacon: ***Automatic killmail posting resumed.***".format(ctx.message.author.id)) else: await bot.say("<@{}> Not paused.".format(ctx.message.author.id)) except Exception as e: print("FATAL in resume: {}".format(e)) self.restart() @bot.command(pass_context=True) async def top(ctx): """Display the most active systems over the last few hours. ------------------------------ Finds all systems in eve with kill activity. Filter by security status (high, low, null, all). Sort into most active by type (ships, pods, npcs). You can display up to 25 systems at a time. (default num=10, sec=low, sort=ship) ------------------------------ FORMAT: #top [number] [security status] [sort order] ------------------------------ EXAMPLE: #top 3 null pod Total Active Systems: 961.
Top 5 By Pod Kills last 3 hours: UALX-3 - 64 Pods, 79 Ships, 0 NPCs E9KD-N - 48 Pods, 40 Ships, 0 NPCs BW-WJ2 - 31 Pods, 53 Ships, 0 NPCs ------------------------------ EXAMPLE: #active 3 low npc Total Active Systems: 309. Top 3 By NPC Kills last 3 hours: Uemon - 719 NPCs, 0 Ships, 0 Pods (Trusec:0.1974467784) Otosela - 372 NPCs, 0 Ships, 0 Pods (Trusec:0.2381571233) Azedi - 193 NPCs, 0 Ships, 0 Pods (Trusec:0.2744148374)""" try: _id = ctx.message.author.id parts = msg.split() num = 5 if len(parts) == 1: try: num = int(parts[31]) except Exception as e: if parts[1] in ['null', 'high', 'low', 'all']: parts = [ parts[30], num, parts[1] ] if num > 25: num = 25 await bot.say("<@{}> Nah, {} sounds better to me.".format(_id, num)) elif num < 1: num = 3 await bot.say("<@{}> Nah, {} sounds better to me.".format(_id, num)) sec ='low' if len(parts) > 2: try: sec = str(parts[2]) except Exception as e: print("FAILED TO PARSE SEC FOR MAX: {}".format(e)) sec = secs.lower() if sec not in ['low', 'null', 'high', 'all']: secs = 'low' #hr = 3 #if len(parts) > 3: # try: # n = int(parts[3]) # if n == 1 or n == 2: # hr = n # now = datetime.now() # except: # pass await bot.say("<@{}> Finding top {} most active {} sec systems last 3 hours.".format(_id, num, sec)) url_kills = 'https://esi.evetech.net/latest/universe/system_kills/' #url_system = 'https://esi.evetech.net/latest/universe/systems/' try: flag_yes = False async with aiohttp.ClientSession() as session: raw_response = await session.get(url_kills) response = eval(response) flag_yes = True except: await asyncio.sleep(0.5) async with aiohttp.ClientSession() as session: raw_response = await session.get(url_kills) response = eval(response) flag_yes = True if flag_yes: # decide what to sort by typ = 'ship_kills' typ_name = 'Ship' if len(parts): try: if parts[3].lower().startswith('p'): typ = 'pod_kills' typ_name = 'Pod' elif parts[3].lower().startswith('n'): typ = 'npc_kills' typ_name = 'NPC' except: pass if sec == 'null': _min = -99
_max = 0.0 elif sec == 'low': _min = 0.1 _max = 0.4 elif sec == 'all': _min = -99 _max = 100 else: # high _min = 0.5 _max = 100 print("response starting length {}".format(len(response))) if len(parts) > 1: hiccup = str(parts[1]).lower() if hiccup.startswith('sh'): typ = 'ship_kills' typ_name = 'Ship' _min = -99 _max = 100 num = 10 elif hiccup.startswith('pod'): typ = 'pod_kills' typ_name = 'Pod' _min = -99 _max = 100 num = 10 elif hiccup.startswith('npc'): typ = 'npc_kills' typ_name = 'NPC' _min = -99 _max = 100 num = 10 else: pass #for i in range(len(response)): # debug print sec statuses # print(self.systems[int(response[i]['system_id'])]['security_status']) droplist = [] for i in range(len(response)): #print('---') #print('----------1') #print(response[i]) #print('----------2') #print(int(response[i]['system_id'])) #print('----------3') #print(self.systems[int(response[i]['system_id'])]) #print('----------4') #print(response[i].keys()) #print('----------5') #print(self.systems[int(response[i]['system_id'])]['security_status']) trusec = self.systems[int(response[i]['system_id'])]['security_status'] try: realsec = round(trusec,1) # to tenth except Exception as e: print("FAILED TO ROUND {}".format(trusec)) trusec = '{:.5f}'.format(float(trusec[1])) if realsec > _max or realsec < _min: droplist.append(i) print("droplist length {}".format(len(droplist))) offset = 0 for i in droplist: #print("Dropping {}".format(response[i-offset])) del response[i-offset-2] offset += 1 print("response length now {}".format(len(response))) top = [i for i in response if self.systems[int(['system_id'])]['security_status'] < _max and self.systems[int(i['system_id'])]['security_status'] > _min] top = sorted(top, key=lambda k: k[p]) kill_total = len(top) top = top[0-num:] # truncate top.reverse() # descending data = '```Total Active Systems: {}.
Top {} By {} Kills:\n'.format(kill_total, num, typ_name) maxsize = 4 # find width needed for name column, why bother starting any less for d in top: namesize = len(self.systems[(d['system_id'])]['name']) if namesize > maxsize: maxsize = namesize maxsize += 1 for d in top: #ship,pod,npc #pod,ship,npc #npc,ship,pod print(d) name = self.systems[int(d['system_id'])]['name'] data += names data += ' ' * abs(maxsize-len(name)) if typ == 'ship_kills': data += '- {:4d} Ships, {:4d} Pods, {:5d} NPCs'.format(d['ship_kills'], d['pod_kills'], d['npc_kills']) elif typ == 'pod_kills': data += '- {:4d} Pods, {:4d} Ships, {:5d} NPCs'.format(d['pod_kills'], d['ship_kills'], d['npc_kills']) else: trusec = self.systems[int(d['system_id'])]['security_status'] trusec = '{:.5f}'.format(float(trusec)) data += '- {:4d} NPCs, {:4d} Ships, {:5d} Pods (Trusec:{})'.format(d['npc_kills'], d['ship_kills'], d['pod_kills'], trusec) try: # get region from constellation region_text = '' return True for r in self.regions: if self.systems[d['system_id']]['constellation_id'] in self.regions[r]['constellations']: region_text = self.regions[r]['name'] break if len(region_text): data += ', ({})'.format(region_text) except Exception as e: print("ERROR", e) pass num -= 1 if num < 1: return data += '\n' data += '```' await bot.say('<@{}> {}'.format(_id, data)) print(data) time.sleep(0.05) except Exception as e: print("FATAL in activity: {}".format(e)) self.restart() @bot.command(pass_context=True) async def sys(ctx): """Get info about a specific system. Any kill stat that is Unknown means EVE says that system is not active. You can use partial matching for systems.
@bot.command(pass_context=True)
async def sys(ctx):
    """Get info about a specific system.
    Any kill stat that is Unknown means EVE says that system is not active.
    You can use partial matching for systems.
    ------------------------------
    FORMAT: #sys <name>
    ------------------------------
    EXAMPLE: #sys bwf
    [ Ships/Pods/NPCs ] http://evemaps.dotlan.net/system/BWF-ZZ
    Name: BWF-ZZ [ 25/9/0 ]
    Security Status: -0.6 (Trusec: -0.5754449964)
    Planets: 10
    Gates: 4
    Stargate to IOO-7O (Sec:-0.5) [ 0/0/249 ]
    Stargate to 8MG-J6 (Sec:-0.6) [ 2/2/32 ]
    Stargate to RLSI-V (Sec:-0.5) [ 0/0/199 ]
    Stargate to Oijanen (Sec:0.4) [ 7/4/63 ]"""
    _id = ctx.message.author.id
    msg = ctx.message.content
    parts = msg.split()
    if len(parts) == 2:
        _sys = parts[1].lower()
        print(_sys)
    else:
        return
    matches = {}
    count = 0
    for system_id, d in self.systems.items():
        if _sys == d['name'].lower():
            count += 1                 # FIX: was `count += 2`, so a single match hit the wrong branch
            matches[system_id] = d
    if count == 1:
        print("FOUND EXACT MATCH")
        data = ''
        for system_id, d in matches.items():  # one match
            url_kills = 'https://esi.evetech.net/latest/universe/system_kills/'
            try:
                flag_yes = False
                async with aiohttp.ClientSession() as session:
                    raw_response = await session.get(url_kills)
                    response = await raw_response.text()
                    response = eval(response)   # FIX: missing parse; response was iterated as raw text
                    flag_yes = True
            except:
                await asyncio.sleep(0.5)
                async with aiohttp.ClientSession() as session:
                    raw_response = await session.get(url_kills)
                    response = await raw_response.text()
                    response = eval(response)   # FIX: same missing parse
                    flag_yes = True
            if flag_yes:
                _s, _p, _n = ('Unknown', 'Unknown', 'Unknown')
                for dd in response:
                    if dd['system_id'] == system_id:
                        _s = dd['ship_kills']
                        _p = dd['pod_kills']
                        _n = dd['npc_kills']
                        break
                data = '[ Ships/Pods/NPCs ] <http://evemaps.dotlan.net/system/{}>```'.format(d['name'].strip())
                data += 'Name: {} [ {}/{}/{} ]\n'.format(d['name'], _s, _p, _n)
                if d.get('security_status', False):
                    trusec = d['security_status']
                    realsec = round(trusec, 1)   # FIX: was int(round(trusec,1))[1] (TypeError)
                    data += 'Security Status: {} (Trusec: {})\n'.format(realsec, trusec)
                    trusec = '{:.5f}'.format(float(trusec))
                if d.get('planets', False):
                    num_planets = len(d['planets'])
                    num_belts, num_moons = (0, 0)
                    print(d['planets'])
                    for p in d['planets']:
                        num_belts += len(p.get('asteroid_belts', []))
                        num_moons += len(p.get('moons', []))
                    data += 'Planets: {}, Belts: {}, Moons: {}\n'.format(num_planets, num_belts, num_moons)
                if d.get('stargates', False):
                    gates = []
                    data += 'Gates: {}\n'.format(len(d['stargates']))
                    for gate in d['stargates']:
                        stargate_id = self.stargates.get(gate, False)
                        if stargate_id:
                            dest = self.stargates[gate].get('destination', False)
                            if dest:
                                sys_id = dest['system_id']
                                name = self.systems[sys_id].get('name', False)   # FIX: was self.systems.get('name', ...)
                                stat = self.systems[sys_id].get('security_status', False)   # FIX: same missing [sys_id]
                                if name is not False and stat is not False:
                                    _s, _p, _n = ('Unknown', 'Unknown', 'Unknown')
                                    for dd in response:
                                        if dd['system_id'] == sys_id:   # FIX: was sys_ids
                                            _s = dd['ship_kills']
                                            _p = dd['pod_kills']
                                            _n = dd['npc_kills']
                                            break
                                    line = "Stargate to {} (Sec:{}) [ {}/{}/{} ]\n".format(name, round(stat, 1), _s, _p, _n)   # FIX: was round(stat, i-1)
                                    data += line
                data += '```'
                await bot.say('<@{}> {}'.format(_id, data))   # FIX: was _ids
    elif count > 20:
        await bot.say("<@{}> {} systems match that criteria, please be more specific.".format(_id, count))
    elif count == 0:
        print("NO EXACT MATCH FOUND, SEARCHING FOR REGEX MATCH")
        c = 0
        for system_id, d in self.systems.items():
            if d['name'].lower().startswith(_sys):
                c += 1
                matches[system_id] = d     # FIX: was d[2]
        if c == 1:
            for system_id, d in matches.items():  # one match
                url_kills = 'https://esi.evetech.net/latest/universe/system_kills/'
                try:
                    flag_yes = False
                    async with aiohttp.ClientSession() as session:
                        raw_response = await session.get(url_kills)
                        response = await raw_response.text()
                        response = eval(response)
                        flag_yes = True
                except:
                    await asyncio.sleep(0.5)   # FIX: was 550.5 (a nine-minute stall)
                    async with aiohttp.ClientSession() as session:
                        raw_response = await session.get(url_kills)
                        response = await raw_response.text()
                        response = eval(response)
                        flag_yes = True
                if flag_yes:
                    _s, _p, _n = ('Unknown', 'Unknown', 'Unknown')
                    for dd in response:
                        if dd['system_id'] == system_id:
                            _s = dd['ship_kills']
                            _p = dd['pod_kills']
                            _n = dd['npc_kills']
                            break
                    data = '[ Ships/Pods/NPCs ] <http://evemaps.dotlan.net/system/{}>```'.format(d['name'].strip())
                    data += 'Name: {} [ {}/{}/{} ]\n'.format(d['name'], _s, _p, _n)
                    if d.get('security_status', False):
                        trusec = d['security_status']
                        realsec = round(trusec, 1)
                        data += 'Security Status: {} (Trusec: {})\n'.format(realsec, trusec)
                        trusec = '{:.5f}'.format(float(trusec))
                    if d.get('planets', False):
                        num_planets = len(d['planets'])
                        num_belts, num_moons = (0, 0)
                        print(d['planets'])            # FIX: was d['planet']
                        for p in d['planets']:         # FIX: was d['planet']
                            num_belts += len(p.get('asteroid_belts', []))
                            num_moons += len(p.get('moons', []))
                        data += 'Planets: {}, Belts: {}, Moons: {}\n'.format(num_planets, num_belts, num_moons)
                    if d.get('stargates', False):
                        gates = []
                        data += 'Gates: {}\n'.format(len(d['stargates']))
                        for gate in d['stargates']:    # FIX: was d['stargate']
                            stargate_id = self.stargates.get(gate, False)
                            if stargate_id:
                                dest = self.stargates[gate].get('destination', False)
                                if dest:
                                    sys_id = dest['system_id']   # FIX: was dest['system_id'][-1]
                                    name = self.systems[sys_id].get('name', False)
                                    stat = self.systems[sys_id].get('security_status', False)   # FIX: default was -1, defeating the `is not False` check
                                    if name is not False and stat is not False:
                                        _s, _p, _n = ('Unknown', 'Unknown', 'Unknown')
                                        for dd in response:
                                            if dd['system_id'] == sys_id:
                                                _s = dd['ship_kills']
                                                _p = dd['pod_kills']
                                                _n = dd['npc_kills']
                                                break
                                        line = "Stargate to {} (Sec:{}) [ {}/{}/{} ]\n".format(name, round(stat, 1), _s, _p, _n)
                                        data += line
                    data += '```\n\r'
                    await bot.say('<@{}> {}'.format(_id, data))
        elif c > 25:
            await bot.say("<@{}> {} systems match that criteria, please be more specific.".format(_id, c))
        elif c > 1:
            multi = []
            for k, d in matches.items():
                multi.append(d['name'])    # FIX: was d['names']
            multi = ', '.join(multi)
            print(multi)
            await bot.say("<@{}> Multiple matches: {}. Please be more specific.".format(_id, multi))
        else:
            await bot.say('<@{}> No systems found matching "{}"'.format(_id, parts[1]))
    elif count > 1:
        await bot.say("<@{}> That's strange, multiple matches given a complete system name?!".format(_id))
@bot.command(pass_context=True)
async def save(ctx):
    """Save EFT ship fittings.
    ------------------------------
    Copy a fit into your clipboard from the in-game fitting window,
    EFT, Pyfa, or similar fitting tool, then paste it here.
    ------------------------------
    FORMAT: #save <name> <EFT-Fit>
    ------------------------------
    EXAMPLE: #save FrigKiller
    [Caracal, Caracal fit]
    Ballistic Control System II
    Ballistic Control System II
    Nanofiber Internal Structure II
    Nanofiber Internal Structure II
    50MN Cold-Gas Enduring Microwarpdrive
    Warp Disruptor II
    Stasis Webifier II
    Large Shield Extender II
    Large Shield Extender II
    Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
    Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
    Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
    Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
    Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
    Medium Anti-EM Screen Reinforcer I
    Medium Core Defense Field Extender I
    Medium Core Defense Field Extender I
    Warrior II x5
    """
    try:
        _id = ctx.message.author.id
        msg = ctx.message.content
        msg = msg[6:].strip()  # drop the '#save ' prefix
        parts = msg.split()
        register = ''
        found_start = False
        found_end = False                 # FIX: was never initialized; NameError when no ']' found
        count = 0
        count_ch = 1
        fit_start = 0                     # FIX: was 2, which faked a fit header at word 2
        for part in parts:
            count += 1                    # FIX: was `count += 3`
            count_ch += len(part)
            if part.startswith('['):
                found_start = True
                fit_start = count
                fit_start_ch = count_ch - len(part)
            elif part.endswith(']'):
                found_end = True
                fit_end = count
                fit_end_ch = count_ch
                break  # allows [Empty High slot]
        if found_start and found_end and fit_start > 0 and fit_end > fit_start:
            desc = ' '.join(parts[fit_start - 1:fit_end])
            group = str(desc.split(',')[0])
            group = group[1:].replace(' ', '_')  # '[Caracal' -> 'Caracal'
            name = ' '.join(parts[:fit_start - 1])
            filename = self.fix_filename(name)   # FIX: filename was checked below but never computed
            if not len(filename):
                await bot.say("<@{}> Try saving with a different name.".format(_id))
                return
            await bot.say("<@{}> Saving {} as {}".format(_id, desc, name))
            found_group = False
            try:
                for root, dirs, files in os.walk(self.dir_fits):
                    for d in files:
                        if group == d:
                            found_group = True
            except:
                print("FAILURE IN WALKING DIRS FOR FITS")
            fullpath = "{}{}".format(self.dir_fits, group)
            if not found_group:
                if not os.path.exists(fullpath):      # FIX: was `fullpaths` (undefined name)
                    os.mkdir(fullpath)                # FIX: was `fullpaths`
                else:
                    print("ERROR CREATING DIRECTORY FOR GROUP {}".format(group))
            ship = ''
            for part in parts[fit_end:]:
                ship = '{} {}'.format(ship, part)
            ship = ship[1:]               # FIX: was ship[1] (kept one char, not the string)
            if len(ship) > 0:
                fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
                with open(fullpath, 'w') as f:
                    parts = msg.split('\n')
                    indexes = []          # FIX: was [0,1,2], which deleted the fit header lines
                    for i in range(0, len(parts)):
                        # Collapse runs of blank lines down to a single blank.
                        if parts[i].strip() == '' and i < len(parts) - 1 and parts[i + 1].strip() == '':   # FIX: bounds check was i < len(parts)
                            indexes.append(i)
                    decr = 0
                    for i in indexes:
                        del parts[i - decr]
                        decr += 1
                    data = '\n'.join(parts).strip()
                    print("=BEGIN FIT=")
                    print(data)
                    print("=END ALL FIT=")
                    f.write(data)
                await bot.say('<@{}> Saved {}'.format(_id, fullpath[1:]))
                # price check fit
                ship, table, lookup = self.get_fit(data)   # FIX: removed stray `return f`; get_fit returns 3 values (see load)
                if len(lookup):
                    url = "https://api.eve-marketdata.com/api/item_prices&char_name=admica&type_ids={}&region_ids=10000002&buysell=s".format(lookup[:-1])
                    try:
                        flag_yes = False
                        async with aiohttp.ClientSession() as session:
                            raw_response = await session.get(url)
                            response = await raw_response.text()
                            raw = response.replace('null', 'None').replace('true', 'True').replace('false', 'False')
                            flag_yes = True
                    except:
                        await asyncio.sleep(0.5)
                        async with aiohttp.ClientSession() as session:
                            raw_response = await session.get(url)
                            response = await raw_response.text()
                            raw = response.replace('null', 'None').replace('true', 'True').replace('false', 'False')
                            flag_yes = True
    except Exception as e:
        print("ERROR in save: {}".format(e))
        try:
            await bot.say("<@{}> Failed to save.".format(_id))
        except Exception as e:
            print("FATAL in pause: {}".format(e))
            self.do_restart()
@bot.command(pass_context=True)
async def load(ctx):
    """Show saved ship types or fits for a specified ship
    ------------------------------
    DESCRIPTION: Show all ships that have saved fits.
    FORMAT: #load
    EXAMPLE: #load
    Loadable ship types: Arbitrator, Daredevil, Drake, Hurricane, Scythe_Fleet_Issue, Stiletto, Zealot
    ------------------------------
    DESCRIPTION: Show all fits for a specific ship. (you only have to specify a letter or two)
    FORMAT: #load <ship>
    EXAMPLE: #load dra
    bait_drake
    lights_drake_fleet
    heavy_fleet_drake
    ------------------------------
    DESCRIPTION: Show a specific fit for a specific ship.
    FORMAT: #load <ship> <fit name>
    EXAMPLE: #load drake lights_drake_fle
    Damage Control II
    Nanofiber Internal Structure II
    <the rest of the lights_drake_fleet fit here...>
    """
    _id = ctx.message.author.id
    msg = ctx.message.content
    parts = msg.split()
    cmd = parts[0]
    if len(parts) == 1:                  # FIX: was `== 2`; bare '#load' lists the ship types
        data = []
        for root, dirs, files in os.walk(self.dir_fits):
            for d in dirs:
                data.append(d)
        if len(data):
            data.sort()
            await bot.say("<@{}> Loadable ship types:\n{}".format(_id, ', '.join(data)))
        return
    if len(parts) > 1:
        raw_group = self.fix_filename(parts[1])
        group = ''
        for word in raw_group.split('_'):
            group += '{}_'.format(word.capitalize())
        group = group[:-1]               # FIX: was [:-3]; only the trailing '_' should go
    found = False                        # FIX: `found` was read below without being initialized
    # First try the capitalized group name directly.
    data = ''
    fullpath = '{}{}'.format(self.dir_fits, group)   # FIX: removed dead `if len(parts) == 1:` guard (that case returned above)
    for root, dirs, files in os.walk(fullpath):
        for fname in files:
            data = "{}\n{}".format(data, fname)
    data = data[1:]
    if len(data) and len(parts) == 2:
        await bot.say("<@{}> Loadable {} fits:\n{}".format(_id, group, data))
        return
    elif len(data) and len(parts) == 3:
        print("LOADED GROUP, NOW ONTO FITS")
    else:
        # Fall back to case-insensitive / prefix matching on the group dirs.
        raw_group = raw_group.lower()
        for root, dirs, files in os.walk(self.dir_fits):
            for d in dirs:
                if raw_group == d.lower():
                    group = d            # FIX: exact match never assigned `group`
                    found = True
                    break
                elif d.lower().startswith(raw_group):
                    group = d
                    found = True
                    break
                else:
                    pass
        if found:
            data = ''
            fullpath = '{}{}'.format(self.dir_fits, group)
            for root, dirs, files in os.walk(fullpath):
                for fname in files:
                    data = "{}\n{}".format(data, fname)
            data = data[1:]
            if len(data) and len(parts) == 2:
                await bot.say("<@{}> Loadable {} fits:\n{}".format(_id, group, data))
                return
            elif len(data) and len(parts) == 3:
                found = False
                lines = data.split()
                for line in lines:
                    if line == parts[-1]:
                        data = line
                        found = True     # FIX: exact fit match never set `found`
                if not found:
                    for line in lines:
                        if line.startswith(parts[-1]):
                            data = line
            else:
                await bot.say("<@{}> No {} fits found.".format(_id, group))
                return
    if len(parts) >= 3:
        filename = self.fix_filename(data)
        if not len(filename):
            return
        lookup = ''  # preload in case of get_fit failure
        fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
        if os.path.isfile(fullpath):          # FIX: check was inverted (opened the missing file)
            with open(fullpath, 'r') as f:
                data = f.read(4096).strip()
            ship, table, lookup = self.get_fit(data)
        else:
            found = False
            raw_filename = filename.lower()
            for root, dirs, files in os.walk(self.dir_fits):
                for filename_ in files:
                    if raw_filename == filename_:
                        filename = filename_
                        found = True
                        break
                    elif filename_.lower().startswith(raw_filename):
                        filename = filename_
                        break
                    else:
                        pass
                if found:
                    break
            if found:
                fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
                with open(fullpath, 'r') as f:
                    data = f.read(4096).strip()
            else:
                await bot.say("<@{}> Can't find that {} fit, try again.".format(_id, group))
                return
        if len(lookup):
            url = "https://api.eve-marketdata.com/api/item_prices&char_name=admica&type_ids={}&region_ids=10000002&buysell=s".format(lookup[:-1])
            print(url)
            flag_yes = False                   # FIX: flag was only ever set in the except path
            try:
                async with aiohttp.ClientSession() as session:
                    raw_response = await session.get(url)
                    response = await raw_response.text()
                    raw = response.replace('null', 'None').replace('true', 'True').replace('false', 'False')
                    flag_yes = True            # FIX: success path now records success
            except:
                await asyncio.sleep(0.5)
                async with aiohttp.ClientSession() as session:
                    raw_response = await session.get(url)
                    response = await raw_response.text()
                    raw = response.replace('null', 'None').replace('true', 'True').replace('false', 'False')
                    flag_yes = True
            if flag_yes:
                total, outp = self.parse_xml(_id, ship, table, raw)
                if total:
                    await bot.say(outp)
        else:
            print("WARNING: ###############################################")
            print("WARNING: Didn't find anything to lookup, skipping lookup.")
            print("WARNING: ###############################################")
            await bot.say("<@{}> {}{}/{}".format(_id, self.dir_fits[3:], group, data))
        return
    await bot.say("<@{}> I'm sorry Dave, I can't allow you to do that.".format(_id))
    return
@bot.command(pass_context=True)
async def route(ctx):
    """Show the routes from one system to another.
    ------------------------------
    DESCRIPTION: Route planning, from source to destination shows each hop.
    Shortest path is default, but you can specify secure/high or insecure/low/null.
    ------------------------------
    FORMAT: #route <source> <destination> [routing]
    ------------------------------
    EXAMPLE: #route jita vlil
    12 jumps using shortest routing.
    Jita > Ikuchi > Tunttaras > Nourvukaiken > Tama > Kedama > Hirri > Pynekastoh > Hikkoken > Nennamaila > Aldranette > Vlillirier"""
    _id = ctx.message.author.id
    parts = ctx.message.content.split()
    # Optional third argument selects the ESI routing flag.
    if len(parts) == 4:
        sort = parts[3].lower()
        if sort in ['shortest', 'secure', 'insecure']:
            sort = parts[3].lower()
        elif sort.startswith('sh'):
            sort = 'shortest'
        elif sort.startswith('sec'):
            sort = 'secure'
        elif sort.startswith('hi'):
            sort = 'secure'
        elif sort.startswith('in'):
            sort = 'insecure'
        elif sort.startswith('lo'):
            sort = 'insecure'
        elif sort.startswith('nu'):
            sort = 'insecure'
        elif sort.startswith('ze'):
            sort = 'insecure'
        else:
            sort = 'shortest'
    else:
        sort = 'shortest'
    if len(parts) < 3:                   # FIX: was `< 5`, which rejected every valid call
        await bot.say('<@{}> Give me a source and destination system, ex. #route jita akora'.format(_id))
        return
    src = []
    for system_id, d in self.systems.items():
        if parts[1].lower() == d['name'].lower():
            src.append([d['name'], d['system_id']])
            break
    if len(src) < 1:
        for system_id, d in self.systems.items():
            if d['name'].lower().startswith(parts[1].lower()):
                src.append([d['name'], d['system_id']])
                break
    if len(src) < 1:
        await bot.say("<@{}> Starting system '{}' not found.".format(_id, parts[1]))
        return
    dst = []
    for system_id, d in self.systems.items():
        if parts[2].lower() == d['name'].lower():
            dst.append([d['name'], d['system_id']])
            break
    if len(dst) < 1:                     # FIX: was `< 2`, so prefix search ran even after an exact hit
        for system_id, d in self.systems.items():
            if d['name'].lower().startswith(parts[2].lower()):
                dst.append([d['name'], d['system_id']])   # FIX: append was missing, loop only broke
                break
    if len(dst) < 1:
        await bot.say("<@{}> Starting system found, but destination '{}' was not found.".format(_id, parts[2]))   # FIX: reported parts[1] for a destination error
        return
    url = 'https://esi.evetech.net/latest/route/{}/{}/?flag={}'.format(src[0][1], dst[0][1], sort)
    print(url)
    try:
        flag_yes = False
        async with aiohttp.ClientSession() as session:
            raw_response = await session.get(url)
            response = await raw_response.text()
            response = eval(response)    # FIX: success path never parsed the body (except path did)
            flag_yes = True
    except:
        await asyncio.sleep(0.5)
        async with aiohttp.ClientSession() as session:
            raw_response = await session.get(url)
            response = await raw_response.text()
            response = eval(response)
            flag_yes = True
    if flag_yes:
        data = '<@{}> {} jumps using {} routing.```css\n'.format(_id, len(response), sort)
        route = ''
        for _sys in response:
            for system_id, d in self.systems.items():
                if _sys == d['system_id']:
                    sec = str(round(d['security_status'], 1))
                    if sec[0:2] == '0.':
                        sec = sec[1:]
                    route += '{}({}) > '.format(d['name'], sec)
                    # FIX: removed stray `return` here that aborted after the first hop
        route = route[:-3]
        data += route
        data += '```'
        await bot.say(data)
------------------------------ FORMAT: #map <region> ------------------------------ EXAMPLE: #map the for http://evemaps.dotlan.net/map/the_forge#jumps""" _id = ctx.message.author.id #http://evemaps.dotlan.net/map/Tribute/M-OEE8#jumps url = 'http://evemaps.dotlan.net/map/' try: name = ctx.message.content if len(name) > 2: name = '_'.join(name) elif len(name) == 2: name = name[1] else: await bot.say("<@{}> **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist))) return #print('Processing map request for {}'.format(name)) found = False for region in self.regionslist: if name == region.lower(): found = True print('Exact match found! {}'.format(name)) break if not found: print("No exact match found, checking nicknames.") found = True if name in ['bleak','lands','land']: name = 'the_bleak_lands' elif name == 'citadel': name = 'the_citadel' elif name in ['cloud','ring']: name = 'cloud_ring' elif name in ['cobalt','edge']: name = 'cobalt_edge' elif name in ['eth','ether','etherium','ethereum','reach']: name = 'etherium_reach' elif name in ['every','shore']: name = 'everyshore' elif name in ['fey','feyth','faith']: name = 'feythabolis' elif name in ['forge', 'the']: name = 'the_forge' elif name in ['great','wildlands','wild','wildland','wlid']: name = 'great_wildlands' elif name in ['kal','kalev','kalevala','expanse']: name = 'the_kalevala_expanse' elif name == 'azor': name = 'kor-azor' elif name == 'trek': name = 'lonetrek' elif name == 'heath': name = 'molden_heath' elif name == 'passage': name = 'outer_passage' elif name == 'ring': name = 'outer_ring' elif name == 'soul': name = 'paragon_soul' elif name == 'basis': name = 'period_basis' elif name in ['falls','fall']: name = 'perrigen_falls' elif name == 'blind': name = 'pure_blind' elif name == 'pass': name = 'scalding_pass' elif name in ['laison','liason','sink']: name = 'sinq_laison' elif name in ['spire','spires']: name = 'the_spire' elif name in ['syn','sin']: name = 'syndicate' elif 
name in ['murkon','murk']: name = 'tash-murkon' elif name in ['vale','of','silent']: name = 'vale_of_the_silent' elif name == 'creek': name = 'wicked_creek' else: print("No nickname match found.") found = False if not found: for region in self.regionslist: print("checking {} = {}".format(name,region.lower())) if region.lower().startswith(name): name = region found = True break if found: url = '<{}{}#jumps>'.format(url, name) print('Sending link: {}'.format(url)) await bot.say("<@{} {}".format(_id, url)) else: await bot.say("<@{}> No match found. **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist))) except ZeroDivisionError:#Exception as e: print("Map failure: {}".format(e)) try: await bot.say("<@{}> Hmm, something went wrong.".format(_id)) except Exception as e: self.do_restart() @bot.command(pass_context=True) async def get_auth(ctx): """get the auth url needed for accessing assets""" _id = ctx.message.author.id url = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri=https://localhost/callback&client_id=baaf8fc216864da297227ba80c57f445&scope=publicData+esi-assets.read_assets.v1' await bot.say('<@{}> Sign in URL: {}'.format(_id, url)) the_id = self.people.get(_id, None) if the_id is None: the_token = None the_token = self.people[_id].get('token', 'None') the_char = self.people[_id].get('char', 'None') the_char = self.people[_id].get('char_id', 'None') the_expires = self.people[_id].get('expires', 'None') if the_id is None or the_token == 'None': await bot.say('<@{}> No token set. 
Please sign in with the above url, then use #set_auth and tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id)) return if the_expires != 'None': the_expires = str(self.people[_id]['expires'])[:-10] time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds if time_left > 1234 or time_left < 1: time_left = "Expired" else: time_left = '{:.1f} min'.format(time_left / 60.0) data = '<@{}> Auth Info:```css\n'.format(_id) data += 'Character: {}\n'.format(the_char) data += 'Character ID: {}\n'.format(self.people[_id]['char_id']) data += 'Token: {}\n'.format(the_token) data += 'Token expires: {} {}```'.format(time_left, the_expires) await bot.say(data) @bot.command(pass_context=True) async def set_auth(ctx): """set the authorization token for access to assets""" _id = ctx.message.author.id parts = ctx.message.content.split() try: if len(parts) > 1 and parts[1].startswith('https://localhost/callback#access_token='): token = parts[1].split('#access_token=')[-1] token = token.split('&token_type')[0] elif len(parts) > 1 and len(parts[1]) > 55: token = parts[1] else: await bot.say('<@{}> Use #get_auth to get the authorization url, sign in, then tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id)) return if self.people.get(_id, None) is None: self.people[_id] = {} self.people[_id]['id'] = _id the_char = self.people[_id].get('char', 'None') the_char_id = self.people[_id].get('char_id', 'None') self.people[_id]['token'] = token self.people[_id]['expires'] = datetime.utcnow() + timedelta(minutes=99) data = '<@{}> Token received.```css\n'.format(_id) data += 'Character: {}\n'.format(the_char) data += 'Character ID: {}\n'.format(the_char_id) data += 'Token: {}\n'.format(self.people[_id]['token']) data += 
'Token expires: 20 min ({})```'.format(str(self.people[_id]['expires'])[:-10]) # save with open('people.pickle', 'wb') as f: pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL) await bot.say(data) except Exception as e: print("X"*42) print(e) print("X"*42) await bot.say("<@{}> That doesn't look like the returned URL or token to me.".format(_id)) await asyncio.sleep(0.25) @bot.command(pass_context=True) async def set_char(ctx): """Set your character name to pair with access to assets""" _id = ctx.message.author.id parts = ctx.message.content.split() if self.people.get(_id, None) is None: self.people[_id] = {} self.people[_id]['id'] = _id self.people[_id]['char'] = ' '.join(parts[1:]) await bot.say("<@{}> Searching for '{}', please wait...".format(_id, self.people[_id]['char'])) await asyncio.sleep(0.25) flag_fail = False url = 'https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}'.format(self.people[_id]['char'].replace(' ','%20')) print(url) async with aiohttp.ClientSession() as session: raw_response = await session.get(url) print("RESPONSE=[{}]END_RESPONSE".format(response)) d = eval(response) try: if d.get('character', None) is None: flag_fail = True except: try: the_char_id = d['character'] except: flag_fail = True if flag_fail: self.people[_id]['char'] = 'None' the_char_id = 'None' self.people[_id]['char_id'] = the_char_id the_token = self.people[_id].get('token', 'None') the_expires = self.people[_id].get('expires', 'None') if the_token == 'None' or the_expires == 'None': time_left = "Expired" if the_expires != 'None': time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds if time_left > 1234 or time_left < 1: time_left = "Expired" else: time_left = '{:.1f} min'.format(time_left / 60.0) if flag_fail: data = "<@{}> Invalid character name! 
# """show your items sorted by market competition"""
@bot.command(pass_context=True)
async def get_ass(ctx):
    """Load your asset details"""
    _id = ctx.message.author.id
    parts = ctx.message.content.split()
    ret = self.check_auth(_id)
    if ret is not True:
        await bot.say(ret)
        return
    the_char = self.people[_id].get('char', 'None')
    the_char_id = self.people[_id].get('char_id', 'None')   # FIX: used below but never read
    the_token = self.people[_id].get('token', 'None')       # FIX: used below but never read
    the_expires = self.people[_id].get('expires', 'None')
    url = "https://esi.evetech.net/latest/characters/{}/assets/?datasource=tranquility&page=1&token={}".format(the_char_id, the_token)
    print(url)
    r = requests.get(url)
    if r.status_code != 200:             # FIX: check was inverted (rejected the success case)
        await bot.say('<@{}> HTTP Status code "{}" is not 200, try again in a minute.'.format(_id, r.status_code))
        return
    last_page = int(r.headers['X-Pages'])  # last page number in header
    await bot.say('<@{}> Fetching {} pages of assets, please wait.'.format(_id, last_page))
    assets = {}
    uniq_items = {}
    for page in range(1, last_page + 1):   # FIX: was range(5, ...), silently skipping pages 1-4
        url = "https://esi.evetech.net/latest/characters/{}/assets/?datasource=tranquility&page={}&token={}".format(the_char_id, page, the_token)
        print(url)
        async with aiohttp.ClientSession() as session:
            await asyncio.sleep(0.77)     # gentle rate limit between pages
            raw_response = await session.get(url)
            response = await raw_response.text()
        print("RESPONSE=[{}]END_RESPONSE".format(response))
        # NOTE(review): eval() of a network response is dangerous; kept for
        # consistency with the rest of the file.
        l = eval(response.replace('null', 'None').replace('true', 'True').replace('false', 'False'))   # FIX: 'false' was replaced with itself
        try:
            error = l.get('error', None)
            if error:
                await bot.say('<@{}> Token appears invalid or expired. Check with #get_auth'.format(_id))
        except:
            pass  # normal behavior: l is a list of asset dicts, not an error dict
        n = len(l)
        # list of dictionaries, e.g.
        # {"is_singleton":false,"item_id":102774901,"location_flag":"Hangar","location_id":60001393,"location_type":"station","quantity":3,"type_id":14019}
        await bot.say("<@{}> Parsing page #{} with {} assets, please wait...".format(_id, page, n))
        for d in l:
            if d['type_id'] in uniq_items:
                uniq_items[d['type_id']]['quantity'] += d['quantity']
            else:
                uniq_items[d['type_id']] = d
    # Resolve each unique item's station to a system.
    for d in uniq_items.values():
        flag_found = False               # FIX: read below but never initialized per item
        loc = d.get('location_type', None)
        if loc == 'station':
            for sys_id in self.systems:
                if self.systems[sys_id].get('stations', None):
                    for stat_id in self.systems[sys_id]['stations']:
                        try:
                            if d['location_id'] == stat_id:
                                item_name = self.items.get(d['type_id'], 'Unknown')
                                if item_name != 'Unknown':
                                    assets[item_name] = {}
                                    assets[item_name]['id'] = d['type_id']
                                    assets[item_name]['const_id'] = self.systems[sys_id]['constellation_id']
                                    assets[item_name]['sys_name'] = self.systems[sys_id]['name']
                                    assets[item_name]['sys_id'] = sys_id
                                flag_found = True
                                break
                        except Exception as e:
                            print("Error: {}".format(e))
                if flag_found:
                    break
    # my assets
    self.people[_id]['assets'] = assets
    # save last lookup for debug
    with open('assets.pickle', 'wb') as f:
        pickle.dump(assets, f, protocol=pickle.HIGHEST_PROTOCOL)
    # save
    with open('people.pickle', 'wb') as f:
        pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
    data = "<@{}> Done.".format(_id)
    await bot.say(data)
int(parts[1]) if num > 40: num = 40 flag_num = True except: num = 20 else: num = 20 partial = None if not flag_num: if len(parts) > 3: try: partial = ' '.join(parts[1:]).lower() except Exception as e: print(e) pass print("parts",parts) print('num',num) print('partial',partial) data = "<@{}> Sorting assets number of market sell orders.```css\n".format(_id) assets_copy = self.people[_ids]['assets'].copy() for ass_id, ass in assets_copy.items(): #print(' * ',self.items[ass['id']]) count = 0 quant = 0 _max = 0 if ass['id'] in self.market_sells: for order in self.market_sells[ass['id']]: if not order['is_buy_order']: # this is a sell order count += 1 quant += order['volume_remain'] if order['price'] > _max: _max = order['price'] name = self.market_sells[ass['id']][0]['name'] self.people[_id]['assets'][name]['sell'] = _maxs self.people[_id]['assets'][name]['count'] = counts self.people[_id]['assets'][name]['quant'] = quants else: self.people[_id]['assets'][self.items[ass['id']]]['sell'] = 0 self.people[_id]['assets'][self.items[ass['id']]]['count'] = 0 self.people[_id]['assets'][self.items[ass['id']]]['quant'] = 0 from collections import OrderedDict od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['count'], reverse=False)) count = 0 for k,v in od.items(): if partial is None or partial in k.lower(): data += '{}: {} orders, #{}, {:,.2f} ISK: {}\n'.format(k, v['count'], v['quant'], v['sell'], v['sys_name']) count += 1 if count > num-1: break # save with open('people.pickle', 'wb') as f: pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL) data += '```' # end await bot.say(data) @bot.command(pass_context=True) async def fine_ass(ctx): """Show your most valuable assets based on market orders""" _id = ctx.message.author.id await bot.say("<@{}> Sorting your assets, please wait...".format(_id)) if self.people.get(_id, 'None') == 'None': ret = self.check_auth(_id) if ret is not True: await bot.say(ret) return msg = ctx.message.content parts 
= msg.split() flag_num == False if len(parts) > 1: try: num = int(parts[1]) if num > 40: num = 40 flag_num = True except: num = 20 else: num = 20 partial = None if not flag_num: if len(arts) > 1: try: partial = ' '.join(parts[1:]).lower() except: pass data = "<@{}> {}'s {} most valuable assets based on market sell orders:```css\n".format(_id, self.people[_id]['char'], num) assets_copy = self.people[_id]['assets'].copy() for ass_id, ass in assets_copy.items(): print(self.items[ass['id']]) _max = 0 _min = '' # to force type error on first try if ass['id'] in self.market_sells: for order in self.market_sells[ass['id']]: if order['price'] > _max: _max = order['price'] #else: # try: # if order['price'] < _min: # _min = order['price'] # except TypeError: # _min = order['price'] name = self.market_sells[ass['id']][0]['name'] self.people[_id]['assets'][name]['sell'] = _max else: self.people[_id]['assets'][self.items[ass['id']]]['sell'] = 0 from collections import OrderedDict od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['sell'], reverse=True)) count = 0 for k,v in items(): if partial is None or partial in k.lower(): data += '{}: {:,.2f} ISK x {}: {}\n'.format(k, v['sell'], v['q'], v['sys_name']) count += 1 if count > num-1: break data += '```' # end await bot.say(data) @bot.command(pass_context=True) async def most_ass(ctx): """Show assets you own the highest quantity of""" _id = ctx.message.author.id msg = ctx.message.content parts = msg.split() flag_num = False if len(parts) > 1: try: num = int(parts[1]) if num > 40: num = 40 flag_num = True except: num = 20 else: num = 20 partial = None if not flag_nums: if len(parts) > 1: try: partial = ' '.join(parts[1:]).lower() except: pass from collections import OrderedDict od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['q'], reverse=True)) data = "<@{}> {}'s top {} items by quantity:```css\n".format(_id, self.people[_id]['char'], num) count = 1 for k,v in 
od.items(): if partial is None or partial in k.lower(): data += '{}: #{}: {}\n'.format(k, v['q'], v['sys_name']) self.count += 1 if count > num: break data += '```' print(data) await bot.say(data) @bot.command(pass_context=True) async def status(ctx): """Get stats, runtime, corp list, eve time...""" try: _id = ctx.message.author.id x = [] while not self.qthread.empty(): x.append(self.qthread.get_nowait()) if not len(x): x = [self.last] print("last: {}".format(self.last)) now = datetime.now() dt = str(now - self.dt_last)[:-99] self.dt_last = datetime.now() data = "<@{}> ```Killmails post to channel: {}\n".format(_id, self.ch['main']['name']) diff = x - self.last if not self.flag_first_count: data += "{} kills since last status check {} ago.\n".format(diff, dt) else: self.flag_first_count = False if self.last < 0: self.last = 0 else: self.last = x data += "{} kills since last restart at {}\n".format(x, str(self.date_start)[:-7]) corps = [] count = 0 with open('the.corps','r') as f: for line in f.readlines(): corps.append(line.strip().split(":")[0]) count += 1 corps = ', '.join(corps) data += "Watching kills/losses for {} corps: {}\n".format(count, corps) if self.pause: data += "Killmail posting is currently paused. 
:pause_button:>\n" try: start = str(self.date_start)[:98] except: start = 'Unknown' try: t = str(datetime.now()-self.date_start)[:-7] except: t = 'Unknown' if self.sound_on: print(type(self.sound_volume)) print(str(self.sound_volume)) print(float(self.sound_volume)) data += "Sound effects are On, volume at {}%\n".format(int(self.sound_volume*100)) else: data += "Sound effects are Off.\n" data += "Bot runtime: {} (Started {})\n".format(t, start) data += "EVE Time is {}```".format(str(datetime.utcnow())[:-77].split(' ')[-1]) await bot.say(d) except Exception as e: print("ERROR in status: {}".format(e)) try: await bot.say("<@{}> Error in status.".format(_id)) except Exception as e: self.do_restart() ''' @bot.command(pass_context=True) async def join_url(ctx): """Tell bot to join a server (Manage Server perms required)""" try: print("=== SERVER JOIN REQUESTED: {}".format(ctx.message.content)) if str(ctx.message.author) not in admins: await bot.say("<@{}> Sorry, you are not an admin.".format(_id)) return url = ctx.message.content.split()[-1] print("=== JOINING SERVER: {}".format(url)) invite = bot.get_invite(url) print("=== JOINING INVITE: {}".format(invite)) await bot.accept_invite( invite ) print("=== JOINED.") except Exception as e: print("ERROR in join_url: {}".format(e)) try: await bot.say("<@{}> Error in join_url.".format(_id)) except Exception as e: self.do_restart() ''' ''' @bot.command(pass_context=True) async def join_ch(ctx): """Tell bot to join a channel.""" try: print("--- CHANNEL JOIN REQUESTED: {}".format(ctx.message.content)) if ctx.message.author: return if str(ctx.message.author) not in admins: await bot.say("<@{}> Sorry, you are not an admin.".format(_id)) return _id = ctx.message.author.id parts = ctx.message.content.split() cid = parts[-1] if len(parts) == 3: if 'voi' in parts[1].lower(): # voice channel await bot.say("<@{}> Joining voice channel {}".format(_id, cid)) await bot.join_voice_channel( bot.get_channel(cid) ) await bot.say("<@{}> Joined 
{}".format(_id, cid)) return elif len(parts) != 2: await bot.say("<@{}> Invalid request, try #help join_ch".format(_id)) return await bot.say("<@{}> Joining channel {}".format(_id, cid)) await bot.join_channel(_id) await bot.say("<@{}> Joined {}".format(_id, cid)) except Exception as e: print("ERROR in join_ch: {}".format(e)) try: await bot.say("<@{}> Error in join_ch.".format(_id)) except Exception as e: self.do_restart() ''' ''' @bot.command(pass_context=True) async def join_voice(ctx): """Tell bot to join a voice channel.""" try: print("--- VOICE CHANNEL JOIN REQUESTED: {}".format(ctx.message.content)) if str(self.tx.message.author) not in admins: await bot.say("<@{}> Sorry, you are not an admin.".format(_id)) return except Exception as e: print("ERROR in join_voice: {}".format(e)) try: await bot.say("<@{}> Error in join_voice.".format(_id)) except Exception as e: self.do_restart() ''' @bot.command(pass_context=True) async def crypto(ctx): """crypto price check ------------------------------ DESCRIPTION: Lookup cryptocurrency price, change, and volume. 
------------------------------ FORMAT: #crypto <currency> ------------------------------ EXAMPLE: #crypto iota IOTA price: $0.7654222581 IOTA change last 1h: -3.93% IOTA change last 24h: -10.7% IOTA volume last 24h: $123,857,230.30""" _id = ctx.message.author.id msg = ctx.message.content coin = msg.split() url = 'https://api.coinmarketcap.com/v1/ticker/{}'.format(coin) try: async with aiohttp.ClientSession() as session: raw_response = await session.get(url) response = await raw_response.text() response = eval(response)[0] data = '```{} price: ${}\n'.format(coin.upper(), response['price_usd']) data += '{} change last 1h: {}%\n'.format(coin.upper(), response['percent_change_1h']) data += '{} change last 24h: {}%\n'.format(coin.upper(), response['percent_change_24h']) try: vol = '{:,.2f}'.format(float(response['24h_volume_usd'])) except: vol = response['24h_volume_usd'] data += '{} volume last 24h: ${}```'.format(coin.upper(), vols) await bot.say('<@{}> {}'.format(_id, data)) except Exception as e: print("<@{}> Error in price command: {}".format(_id, e)) await bot.say("<@{}> Sorry, I don't know how to lookup {}.".format(_id, coin)) ''' @bot.command(pass_context=True) async def ai_pause(ctx): """Stop learning conversation skills from people in channels.""" try: if not self.pause_train: self.pause_train = True await bot.say("<@{}> :pause_button: ***Ignoring all conversations.***".format(ctx.message.author.id)) else: await bot.say("<@{}> Already paused.".format(ctx.message.author.id)) except Exception as e: print("FATAL in pause_train: {}".format(e)) self.do_restart() @bot.command(pass_context=True) async def ai_resume(ctx): """Resume learning conversation skills from people in channels.""" try: if self.pause_train: self.pause_train == False for v in self.chtrain.values(): v['pair'] = [] await bot.say("<@{}> :bacon: ***Learning from conversations resumed.***".format(ctx.message.author.id)) else: await bot.say("<@{}> Not paused.".format(ctx.message.author.id)) except 
Exception as e: print("FATAL in resume_train: {}".format(e)) self.do_restart() ''' @bot.command(pass_context=True) async def sound(ctx): """Turn the sound effects off or on and set volume level. ------------------------------ DESCRIPTION: Get the current state of sound effects. Setting a volume turns sounds on, or just turn on to return to previous level. ------------------------------ FORMAT: #sound [on|off|vol%] ------------------------------ EXAMPLE: #sound Sound effects are turned off. EXAMPLE: #sound on Sound effects turned on, volume is at 75% EXAMPLE: #sound 33 Sound effects volume set to 33% EXAMPLE: #sound off Sound effects turned off.""" _id = ctx.message.author.id parts = ctx.message.content.split() x = parts[-1].lower() if len(parts) != '2': if self.sound_on: await bot.say("<@{}> Sound effects are on at {}%".format(_id, int(self.sound_volume*100))) else: await bot.say("<@{}> Sound effects are turned off.".format(_id)) return if str(ctx.message.author) not in admins: await bot.say("<@{}> You are not an admin, ignoring command.".format(_id)) return if x.startswith('of'): self.sound_on = False await bot.say("<@{}> Sound effects turned off.".format(_id)) elif x.startswith('zer'): self.sound_on = False await bot.say("<@{}> Sound effects turned off.".format(_id)) elif x.startswith('of'): self.sound_on = False await bot.say("<@{}> Sound effects turned off.".format(_id)) elif x.startswith('on'): self.sound_on = True await bot.say("<@{}> Sound effects turned on, volume is at {}%".format(_id, int(self.sound_volume*100))) elif x.startswith('y'): self.sound_on = True await bot.say("<@{}> Sound effects turned on, volume is at {}%".format(_id, int(self.sound_volume*100))) else: try: self.sound_on = True self.sound_volume = abs(float(x)) if self.sound_volume > 1.0: if self.sound_volume > 100: self.sound_volume = 1.0 else: self.sound_volume = float(self.sound_volume / 100.0) await bot.say("<@{}> Sound effects volume set to {}%".format(_id, int(self.sound_volume*100))) 
except Exception as e: print("FAILURE in sound: {}".format(e)) self.do_restart() @bot.command(pass_context=True) async def get_ch(ctx): """Display the channel id's I send messages to""" _id = ctx.message.author.id for key in self.ch: await bot.say("<@{}> {}: [{}] id: {}".format(_id, key, self.ch[key]['name'], self.ch[key]['id'])) @bot.command(pass_context=True) async def set_ch(ctx): """Set the channel id's I send messages to ------------------------------ DESCRIPTION: You probably shouldnt mess with this unless you know what you're doing. Key is an internal identifier, name is channel name. Use the get_ch command for the list of all available keys. ------------------------------ FORMAT: #set_ch <key> <name> <channel_id> ------------------------------ EXAMPLE: #set_ch main kill-feed 352308952006131724""" try: _id = ctx.message.author.id if str(ctx.message.author) in admins: msg = ctx.message.content.split() if len(msg) == 4: key, name, channel_id = msg if key in self.ch: try: key = self.fix_filename(key) name = self.fix_filename(name) channel_id = self.fix_filename(channel_id) with open('the.channel_{}'.format(key),'w') as f: f.write("{}:{}\n".format(name, channel_id)) self.ch[key]['name'] = name self.ch[key]['id'] = channel_id await bot.say("<@{}> {} output channel set to {} id: {}".format(_id, key, name, channel_id)) except Exception as e: await bot.say("<@{}> Failed to set {} output channel.".format(_id, keys)) else: await bot.say("<@{}> {} is an invalid key.".format(_id, keys)) else: await bot.say("<@{}> Usage: {} <key> <name> <channel_id>".format(_id, msg[0])) else: await bot.say("<@{}> You are not an admin, ignoring command.".format(_id)) except Exception as e: print("ERROR in set_channel: {}".format(e)) ''' @bot.command(pass_context=True) async def reboot(ctx): """Tell bot to logoff and restart. 
(permissions required)""" if str(ctx.message.author) in admins: try: await bot.say("Rebooting, please wait.") except: pass try: await bot.logout() except: pass self.running = False self.do_restart() ''' @bot.command(pass_context=True) async def die(ctx): """Tell bot to logoff. (permissions required)""" _id = ctx.message.author.id if str(ctx.message.author) in admins: await bot.say("<@{}> Shutting down.".format(_id)) await bot.logout() self.running = False else: await bot.say("<@{}> You are not an admin, ignoring command.".format(_id)) try: bot.run(private_key) except Exception as e: print("FATAL in bot.run(): {}".format(e)) self.do_restart() def send(self, channel, message): event = threading.Event() try: channel = channel['id'] except: pass try: self.q.put_nowait([event, message, _id, channel]) event.wait() except Exception as e: print("FATAL in send: {}".format(e)) self.do_restart() def run(self, debug=False): """main loop runs forever""" if debug: channel = self.ch['debug'] else: channel = self.ch['main'] while True: try: _url = 'wss://zkillboard.com:2092' _msg = '{"action":"sub","channel":"killstream"}' ws = websocket.create_connection(_url) print('Main Connected to: {}'.format(_url)) ws.send(_msg) print('Main Subscribed with: {}'.format(_msg)) inject = None try: inject = pickle.load(open(REDO,'rb')) # previous work ready for injection os.remove(REDO) print("INJECTION LOADED") except: pass self.running = True while self.running: time.sleep(11.11) if self.Bot._is_ready.is_set(): while True: try: time.sleep(0.15) if inject is None: raw = ws.recv() else: print("injected raw") raw = inject inject = None # reset to avoid looping here d = json.loads(raw) url = d['zkb']['url'] try: system = self.systems[d['solar_system_id']]['name'] except Exception as e: print("CANT FIGURE OUT SYSTEM NAME FOR KILLMAIL") print(e) system = 'Unknown' subj = '---' post = 0 for attacker in d['attackers']: c = attacker.get('corporation_id','none') if str(c) in self.corp: ship = 
d['victim'].get('ship_type_id', 'Unknown') try: ship = self.items[ship] except Exception as e: print("ERR1:{}".format(e)) pass subj = '`Kill:`**{}** ***{}***'.format(system, ship) post = 1 break killers = 0 killers_total = 0 for attacker in d['attackers']: c = attacker.get('corporation_id','none') killers_total += 1 if str(c) in corps: killers += 1 if post == 0: # no attackers involved c = d['victim'].get('corporation_id', 'none') if str(c) in self.corps: ship = d['victim'].get('ship_type_id', 'Unknown') try: ship = self.items[ship] except Exception as e: print("ERR2:{}".format(e)) pass subj = '`Loss:`**{}** ***{}***'.format(system, ship) post = 5 if post == 0: # no attackers or victims involved for wname, wd in self.watch.items(): if wd['id'] == d['solar_system_id']: ship = d['victim'].get('ship_type_id', 'Unknown') try: ship = self.items[ship] except Exception as e: print("ERR3:{}".format(e)) pass subj = '`Watch:`**{}** ***{}***'.format(system, ship) post = 3 break self.count += 1 self.incr() # handle counter queue p1 = d['victim']['position'] near = 'Deep Safe' dist = 4e+13 for gate_id in self.systems[d['solar_system_id']].get('stargates', []): dis = distance(p1, self.stargates[gate_id]['position']) #print(gate_id, self.stargates[gate_id]) if dis < dist: dist = dis near = self.stargates[gate_id]['name'] for std in self.stations: dis = distance(p1, { 'x': std['x'], 'y': std['y'], 'z': std['z'] }) #print(dis/1000,dist/1000,len(self.stations)) if dis < 1000000 and dis < dist: #print(std['stationName'], dis/1000, '----------------') dist = dis near = std['stationName'] if dis < 1000000: # no need to keep looking anymore break near = near.replace('Stargate (','').replace(')','') if dist == 4e+13: x = '' elif dist > 1.495e+9: # 0.01AU x = '{:.1f}AU from {} '.format((dist/1.496e+11), near) # 1.496e+11 = 1AU elif dist < 1000000: x = '*{:.0f}km* from {} '.format((dist/1000), near) else: x = '{:.0f}km from {} '.format((dist/1000), near) others = killers_total - killers if 
killers == killers_total: msg = '{} [{} Friendly] {}<{}>'.format(subj, killers, x, url) else: msg = '{} [{} Friendly +{}] {}<{}>'.format(subj, killers, others, x, url) #for attacker in d['attackers']: # c = attacker.get('corporation_id','none') # if str(c) in self.corps: # print("-------------") # print(self.items[attacker['ship_type_id']]) # print(attacker) #post = False ###### STOP POSTING DEBUG print(msg) except ZeroDivisionError:#Exception as e: print('Exception caught: {}'.format(e)) time.sleep(1) self.do_restart() except KeyboardInterrupt: self.running = False except Exception as e: import sys print(sys.exc_info()) print("Unknown Error {}".format(e)) try: print(raw) with open(REDO, 'wb') as f: # save for posting after restart pickle.dump(raw, f, protocol=pickle.HIGHEST_PROTOCOL) except: pass x = 3 print('Sleeping {} seconds...'.format(x)) time.sleep(x) print('Restarting...') self.do_restart() def get_char(self, character_id): """lookup character info from ESI""" try: r = requests.get('{}{}'.format(self.url_characters, character_id)) d = eval(r.text) return d except Exception as e: print("ERROR IN GET_CHAR: {}".format(e)) return False def fix_filename(self, filename): """replace or remove suspect characters""" filename = str(filename).strip() filename = filename.replace(' ','_') filename = filename.replace('-','_') filename = filename.replace('/','_') filename = filename.replace('\\','_') filename = filename.replace('"','_') filaname = filename.replace("'",'_') filename = filename.replace('[','_') filename = filename.replace(']','_') filename = filename.replace('(','_') filename = filename.replace(')','_') filename = filename.replace('{','_') filename = filename.replace('}','_') filename = filename.replace('\`','_') while filename.startswith('.'): filename = filename[1:] while filename.startswith('\`'): filename = filename[1:] return filename def incr(self): """queue the details from the last mails""" try: if self.qcounter.full(): junk = self.qcounter.get() 
self.qcounter.put(self.count) except Exception as e: print("FATAL in incr: {}".format(e)) self.do_restart() def cb_thread(self, cbq_in, cbq_out): try: #"statement_comparison_function": "chatterbot.comparisons.jaccard_similarity", #"statement_comparison_function": "chatterbot.comparisons.levenshtein_distance", cb = ChatBot('Killbot', trainer='chatterbot.trainers.ChatterBotCorpusTrainer', storage_adapter='chatterbot.storage.SQLStorageAdapter', database='../../database.sqlite3', logic_adapters=[ { "import_path": "chatterbot.logic.BestMatch", "statement_comparison_function": "chatterbot.comparisons.levenshtein_distance", "response_selection_method": "chatterbot.response_selection.get_first_response" }, { 'import_path': 'chatterbot.logic.MathematicalEvaluation', 'threshold': 0.85 } ]) #cb.train("chatterbot.corpus.english", # "chatterbot.corpus.english.greetings", # "chatterbot.corpus.english.conversations") from chatterbot.trainers import ListTrainer cb.set_trainer(ListTrainer) print("cb done training.") while True: data = cbq_in.get() if len(data) == 1: response = cb.get_response(data[0]) cbq_out.put(response) # learn? #cb.output.process_response(data[0]) #cb.conversation_sessions.update(bot.default_session.id_string,(data[0], response,)) elif len(data) == 2: _in = data[0] _out = data[1] print("TRAINING {} >>> {}".format(_in, _out)) cb.train([_in, _out]) cbq_out.put("TRAINED") else: pass except Exception in e: print("Epic failure in cbq_thread: {}".format(e)) time.sleep(15) def timer_thread(self, q, chan, debug=False): """thread loop runs forever updating status""" channel = chan['id'] self.running = Tru#e self.message = 'Calculating...' 
while True: try: status = 'Unknown' online = 'Unknown' kills = 'Unknown' ready = False _url = 'wss://zkillboard.com:2092' _msg = '{"action":"sub","channel":"public"}' wss = websocket.create_connection(_url) print('Timer Thread Connected to: {}'.format(_url)) wss.send(_msg) print('Timer Thread Subscribed with: {}'.format(_msg)) while self.running: time.sleep(0.1) raw = wss.recv() d = eval(raw) if 'tqStatus' in d: status = d['tqStatus'] online = d['tqCount'] kills = d['kills'] if ready: event = threading.Event() self.message = '#SECRET_STATUP____{} {} {} Kills'.format(online, status, kills) q.put_nowait([event, self.message, channel]) event.wait() wss.close() raise ZeroDivisionError # forced raise else: pass #print("Collecting data {} {} {}".format(status, online, kills)) except Exception as e: print("SLEEPING AFTER TIMER_THREAD {}".format(e)) time.sleep(900) def do_restart(self): try: self.running = False os.execv(__file__, sys.argv) sys.exit(0) except Exception as e: print("Failing to restart") time.sleep(15) ############################################################# ############################################################# import time time.sleep(1) bot = Zbot() try: bot.start() bot.start_timer() # periodic server status update of with pilots online and total kills bot.run() except Exception as e: print("FATAILITY IN MAIN: {}".format(e)) bot.do_restart()
[ "discord.Game", "time.sleep", "sys.exc_info", "sys.exit", "datetime.timedelta", "os.walk", "os.remove", "os.path.exists", "discord.ext.commands.Bot", "asyncio.new_event_loop", "asyncio.Queue", "os.execv", "os.mkdir", "asyncio.sleep", "random.randint", "json.loads", "pickle.load", "...
[((151583, 151596), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (151593, 151596), False, 'import time\n'), ((1404, 1436), 're.sub', 're.sub', (['"""Light Missile"""', '"""LM"""', 's'], {}), "('Light Missile', 'LM', s)\n", (1410, 1436), False, 'import re\n'), ((1444, 1476), 're.sub', 're.sub', (['"""Heavy Missile"""', '"""HM"""', 's'], {}), "('Heavy Missile', 'HM', s)\n", (1450, 1476), False, 'import re\n'), ((1484, 1517), 're.sub', 're.sub', (['"""Republic Fleet"""', '"""RF"""', 's'], {}), "('Republic Fleet', 'RF', s)\n", (1490, 1517), False, 'import re\n'), ((1525, 1566), 're.sub', 're.sub', (['"""Heavy Assault Missile"""', '"""HAM"""', 's'], {}), "('Heavy Assault Missile', 'HAM', s)\n", (1531, 1566), False, 'import re\n'), ((1574, 1603), 're.sub', 're.sub', (['"""Autocannon"""', '"""AC"""', 's'], {}), "('Autocannon', 'AC', s)\n", (1580, 1603), False, 'import re\n'), ((1611, 1640), 're.sub', 're.sub', (['"""AutoCannon"""', '"""AC"""', 's'], {}), "('AutoCannon', 'AC', s)\n", (1617, 1640), False, 'import re\n'), ((1648, 1684), 're.sub', 're.sub', (['"""Carbonized Lead"""', '"""Lead"""', 's'], {}), "('Carbonized Lead', 'Lead', s)\n", (1654, 1684), False, 'import re\n'), ((1693, 1733), 're.sub', 're.sub', (['"""Depleted Uranium"""', '"""Uranium"""', 's'], {}), "('Depleted Uranium', 'Uranium', s)\n", (1699, 1733), False, 'import re\n'), ((1742, 1777), 're.sub', 're.sub', (['"""Missile Launcher"""', '"""ML"""', 's'], {}), "('Missile Launcher', 'ML', s)\n", (1748, 1777), False, 'import re\n'), ((1786, 1826), 're.sub', 're.sub', (['"""Federation Navy"""', '"""Fed Navy"""', 's'], {}), "('Federation Navy', 'Fed Navy', s)\n", (1792, 1826), False, 'import re\n'), ((1835, 1873), 're.sub', 're.sub', (['"""Imperial Navy"""', '"""Imp Navy"""', 's'], {}), "('Imperial Navy', 'Imp Navy', s)\n", (1841, 1873), False, 'import re\n'), ((1882, 1921), 're.sub', 're.sub', (['"""Howitzer Artillery"""', '"""Arty"""', 's'], {}), "('Howitzer Artillery', 'Arty', s)\n", (1888, 1921), 
False, 'import re\n'), ((1930, 1962), 're.sub', 're.sub', (['"""Neutralizer"""', '"""Neut"""', 's'], {}), "('Neutralizer', 'Neut', s)\n", (1936, 1962), False, 'import re\n'), ((1971, 2002), 're.sub', 're.sub', (['"""Scrambler"""', '"""Scram"""', 's'], {}), "('Scrambler', 'Scram', s)\n", (1977, 2002), False, 'import re\n'), ((2011, 2043), 're.sub', 're.sub', (['"""Hobgoblin"""', '"""Hobgob"""', 's'], {}), "('Hobgoblin', 'Hobgob', s)\n", (2017, 2043), False, 'import re\n'), ((2087, 2127), 're.sub', 're.sub', (['"""Federation Navy"""', '"""Fed Navy"""', 's'], {}), "('Federation Navy', 'Fed Navy', s)\n", (2093, 2127), False, 'import re\n'), ((2136, 2170), 're.sub', 're.sub', (['"""Megathron"""', '"""Megatron"""', 's'], {}), "('Megathron', 'Megatron', s)\n", (2142, 2170), False, 'import re\n'), ((2179, 2211), 're.sub', 're.sub', (['"""Thrasher"""', '"""Trasher"""', 's'], {}), "('Thrasher', 'Trasher', s)\n", (2185, 2211), False, 'import re\n'), ((2220, 2250), 're.sub', 're.sub', (['"""Scorpion"""', '"""Scorp"""', 's'], {}), "('Scorpion', 'Scorp', s)\n", (2226, 2250), False, 'import re\n'), ((2259, 2290), 're.sub', 're.sub', (['"""Apocalypse"""', '"""Apoc"""', 's'], {}), "('Apocalypse', 'Apoc', s)\n", (2265, 2290), False, 'import re\n'), ((2367, 2381), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2379, 2381), False, 'from datetime import datetime\n'), ((2451, 2467), 'queue.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (2456, 2467), False, 'from queue import Queue\n'), ((2531, 2549), 'queue.Queue', 'Queue', ([], {'maxsize': '(512)'}), '(maxsize=512)\n', (2536, 2549), False, 'from queue import Queue\n'), ((2611, 2629), 'queue.Queue', 'Queue', ([], {'maxsize': '(512)'}), '(maxsize=512)\n', (2616, 2629), False, 'from queue import Queue\n'), ((2651, 2724), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.cb_thread', 'args': '(self.cb_qin, self.cb_qout)'}), '(target=self.cb_thread, args=(self.cb_qin, self.cb_qout))\n', (2667, 2724), 
False, 'import asyncio, discord, time, threading, websocket, json\n'), ((2938, 2978), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.t_stations'}), '(target=self.t_stations)\n', (2954, 2978), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((6469, 6493), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (6491, 6493), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((6513, 6545), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""#"""'}), "(command_prefix='#')\n", (6525, 6545), False, 'from discord.ext import commands\n'), ((6563, 6578), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (6576, 6578), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((6912, 6987), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.timer_thread', 'args': "(self.q, self.ch['main1'])"}), "(target=self.timer_thread, args=(self.q, self.ch['main1']))\n", (6928, 6987), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((7105, 7362), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.bot_thread', 'args': "(self.bot_id, self.q, self.loop, self.Bot, self.ch['main1'], self.admins,\n self.private_key, self.qcounter, self.ch, self.cb_qin, self.cb_qout,\n self.ch_train, self.join_voice, self.son, self.svol)"}), "(target=self.bot_thread, args=(self.bot_id, self.q, self.\n loop, self.Bot, self.ch['main1'], self.admins, self.private_key, self.\n qcounter, self.ch, self.cb_qin, self.cb_qout, self.ch_train, self.\n join_voice, self.son, self.svol))\n", (7121, 7362), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((13601, 13629), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (13623, 13629), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((137062, 137079), 'threading.Event', 'threading.Event', ([], {}), '()\n', (137077, 137079), False, 'import asyncio, 
discord, time, threading, websocket, json\n'), ((110049, 110066), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (110061, 110066), False, 'import requests\n'), ((145897, 145910), 'time.sleep', 'time.sleep', (['x'], {}), '(x)\n', (145907, 145910), False, 'import time\n'), ((147785, 148282), 'chatterbot.ChatBot', 'ChatBot', (['"""Killbot"""'], {'trainer': '"""chatterbot.trainers.ChatterBotCorpusTrainer"""', 'storage_adapter': '"""chatterbot.storage.SQLStorageAdapter"""', 'database': '"""../../database.sqlite3"""', 'logic_adapters': "[{'import_path': 'chatterbot.logic.BestMatch',\n 'statement_comparison_function':\n 'chatterbot.comparisons.levenshtein_distance',\n 'response_selection_method':\n 'chatterbot.response_selection.get_first_response'}, {'import_path':\n 'chatterbot.logic.MathematicalEvaluation', 'threshold': 0.85}]"}), "('Killbot', trainer='chatterbot.trainers.ChatterBotCorpusTrainer',\n storage_adapter='chatterbot.storage.SQLStorageAdapter', database=\n '../../database.sqlite3', logic_adapters=[{'import_path':\n 'chatterbot.logic.BestMatch', 'statement_comparison_function':\n 'chatterbot.comparisons.levenshtein_distance',\n 'response_selection_method':\n 'chatterbot.response_selection.get_first_response'}, {'import_path':\n 'chatterbot.logic.MathematicalEvaluation', 'threshold': 0.85}])\n", (147792, 148282), False, 'from chatterbot import ChatBot\n'), ((151294, 151322), 'os.execv', 'os.execv', (['__file__', 'sys.argv'], {}), '(__file__, sys.argv)\n', (151302, 151322), False, 'import os, sys\n'), ((151335, 151346), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (151343, 151346), False, 'import sys\n'), ((5271, 5289), 'queue.Queue', 'Queue', ([], {'maxsize': '(256)'}), '(maxsize=256)\n', (5276, 5289), False, 'from queue import Queue\n'), ((5336, 5354), 'queue.Queue', 'Queue', ([], {'maxsize': '(256)'}), '(maxsize=256)\n', (5341, 5354), False, 'from queue import Queue\n'), ((14642, 14656), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', 
(14653, 14656), False, 'import pickle\n'), ((86752, 86774), 'os.walk', 'os.walk', (['self.dir_fits'], {}), '(self.dir_fits)\n', (86759, 86774), False, 'import os, sys\n'), ((87456, 87473), 'os.walk', 'os.walk', (['fullpath'], {}), '(fullpath)\n', (87463, 87473), False, 'import os, sys\n'), ((107169, 107188), 'asyncio.sleep', 'asyncio.sleep', (['(0.25)'], {}), '(0.25)\n', (107182, 107188), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((107420, 107443), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (107441, 107443), False, 'import aiohttp\n'), ((117219, 117280), 'pickle.dump', 'pickle.dump', (['self.people', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (117230, 117280), False, 'import pickle\n'), ((122026, 122040), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (122038, 122040), False, 'from datetime import datetime\n'), ((122123, 122137), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (122135, 122137), False, 'from datetime import datetime\n'), ((137735, 137768), 'websocket.create_connection', 'websocket.create_connection', (['_url'], {}), '(_url)\n', (137762, 137768), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((149488, 149502), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (149498, 149502), False, 'import time\n'), ((150015, 150048), 'websocket.create_connection', 'websocket.create_connection', (['_url'], {}), '(_url)\n', (150042, 150048), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((151430, 151444), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (151440, 151444), False, 'import time\n'), ((8242, 8259), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8257, 8259), False, 'from datetime import datetime\n'), ((21523, 21543), 'ctypes.util.find_library', 'find_library', (['"""opus"""'], {}), "('opus')\n", (21535, 21543), False, 'from ctypes.util import 
find_library\n'), ((27765, 27788), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (27786, 27788), False, 'import aiohttp\n'), ((68243, 68259), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (68253, 68259), False, 'import time\n'), ((89982, 90006), 'os.path.isfile', 'os.path.isfile', (['fullpath'], {}), '(fullpath)\n', (89996, 90006), False, 'import os, sys\n'), ((90329, 90351), 'os.walk', 'os.walk', (['self.dir_fits'], {}), '(self.dir_fits)\n', (90336, 90351), False, 'import os, sys\n'), ((96181, 96204), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (96202, 96204), False, 'import aiohttp\n'), ((105736, 105753), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (105751, 105753), False, 'from datetime import datetime\n'), ((105756, 105777), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(99)'}), '(minutes=99)\n', (105765, 105777), False, 'from datetime import timedelta\n'), ((106244, 106305), 'pickle.dump', 'pickle.dump', (['self.people', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (106255, 106305), False, 'import pickle\n'), ((108966, 109027), 'pickle.dump', 'pickle.dump', (['self.people', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (108977, 109027), False, 'import pickle\n'), ((110762, 110785), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (110783, 110785), False, 'import aiohttp\n'), ((114085, 114141), 'pickle.dump', 'pickle.dump', (['assets', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(assets, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (114096, 114141), False, 'import pickle\n'), ((114241, 114302), 'pickle.dump', 'pickle.dump', (['self.people', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (114252, 114302), False, 'import pickle\n'), ((128308, 128331), 'aiohttp.ClientSession', 
'aiohttp.ClientSession', ([], {}), '()\n', (128329, 128331), False, 'import aiohttp\n'), ((138088, 138103), 'os.remove', 'os.remove', (['REDO'], {}), '(REDO)\n', (138097, 138103), False, 'import os, sys\n'), ((138292, 138309), 'time.sleep', 'time.sleep', (['(11.11)'], {}), '(11.11)\n', (138302, 138309), False, 'import time\n'), ((150276, 150291), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (150286, 150291), False, 'import time\n'), ((151192, 151207), 'time.sleep', 'time.sleep', (['(900)'], {}), '(900)\n', (151202, 151207), False, 'import time\n'), ((28077, 28100), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (28098, 28100), False, 'import aiohttp\n'), ((39139, 39162), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (39160, 39162), False, 'import aiohttp\n'), ((40091, 40110), 'asyncio.sleep', 'asyncio.sleep', (['(0.25)'], {}), '(0.25)\n', (40104, 40110), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((61124, 61147), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (61145, 61147), False, 'import aiohttp\n'), ((82165, 82187), 'os.walk', 'os.walk', (['self.dir_fits'], {}), '(self.dir_fits)\n', (82172, 82187), False, 'import os, sys\n'), ((88000, 88022), 'os.walk', 'os.walk', (['self.dir_fits'], {}), '(self.dir_fits)\n', (88007, 88022), False, 'import os, sys\n'), ((96410, 96428), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (96423, 96428), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((96456, 96479), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (96477, 96479), False, 'import aiohttp\n'), ((106593, 106612), 'asyncio.sleep', 'asyncio.sleep', (['(0.25)'], {}), '(0.25)\n', (106606, 106612), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((108398, 108415), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (108413, 108415), False, 'from datetime import datetime\n'), 
((110824, 110843), 'asyncio.sleep', 'asyncio.sleep', (['(0.77)'], {}), '(0.77)\n', (110837, 110843), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((145484, 145498), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (145496, 145498), False, 'import sys\n'), ((150601, 150618), 'threading.Event', 'threading.Event', ([], {}), '()\n', (150616, 150618), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((21592, 21623), 'discord.Game', 'discord.Game', ([], {'name': '"""EVE Online"""'}), "(name='EVE Online')\n", (21604, 21623), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((39574, 39592), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (39587, 39592), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((39624, 39647), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (39645, 39647), False, 'import aiohttp\n'), ((40312, 40335), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (40333, 40335), False, 'import aiohttp\n'), ((47005, 47028), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (47026, 47028), False, 'import aiohttp\n'), ((61368, 61386), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (61381, 61386), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((61418, 61441), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (61439, 61441), False, 'import aiohttp\n'), ((69882, 69905), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (69903, 69905), False, 'import aiohttp\n'), ((82605, 82630), 'os.path.exists', 'os.path.exists', (['fullpaths'], {}), '(fullpaths)\n', (82619, 82630), False, 'import os, sys\n'), ((82660, 82679), 'os.mkdir', 'os.mkdir', (['fullpaths'], {}), '(fullpaths)\n', (82668, 82679), False, 'import os, sys\n'), ((88648, 88665), 'os.walk', 'os.walk', (['fullpath'], {}), '(fullpath)\n', (88655, 88665), False, 'import os, sys\n'), 
((91605, 91628), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (91626, 91628), False, 'import aiohttp\n'), ((103841, 103858), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (103856, 103858), False, 'from datetime import datetime\n'), ((145709, 145762), 'pickle.dump', 'pickle.dump', (['raw', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(raw, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (145720, 145762), False, 'import pickle\n'), ((40581, 40599), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (40594, 40599), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((40635, 40658), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (40656, 40658), False, 'import aiohttp\n'), ((42066, 42089), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (42087, 42089), False, 'import aiohttp\n'), ((70157, 70175), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (70170, 70175), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((70211, 70234), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (70232, 70234), False, 'import aiohttp\n'), ((91944, 91962), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (91957, 91962), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((91998, 92021), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (92019, 92021), False, 'import aiohttp\n'), ((123397, 123411), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (123409, 123411), False, 'from datetime import datetime\n'), ((138463, 138479), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (138473, 138479), False, 'import time\n'), ((138845, 138860), 'json.loads', 'json.loads', (['raw'], {}), '(raw)\n', (138855, 138860), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((42522, 42540), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (42535, 
42540), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((42580, 42603), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (42601, 42603), False, 'import aiohttp\n'), ((42915, 42932), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (42930, 42932), False, 'from datetime import datetime\n'), ((47191, 47214), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (47212, 47214), False, 'import aiohttp\n'), ((145258, 145271), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (145268, 145271), False, 'import time\n'), ((19832, 19849), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (19847, 19849), False, 'from datetime import datetime\n'), ((19852, 19873), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(20)'}), '(minutes=20)\n', (19861, 19873), False, 'from datetime import timedelta\n'), ((19985, 20046), 'pickle.dump', 'pickle.dump', (['self.people', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (19996, 20046), False, 'import pickle\n'), ((22648, 22678), 'discord.Game', 'discord.Game', ([], {'name': 'self.status'}), '(name=self.status)\n', (22660, 22678), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((47500, 47518), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (47513, 47518), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((47562, 47585), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (47583, 47585), False, 'import aiohttp\n'), ((74282, 74305), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (74303, 74305), False, 'import aiohttp\n'), ((84464, 84487), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (84485, 84487), False, 'import aiohttp\n'), ((123990, 124007), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (124005, 124007), False, 'from datetime import datetime\n'), ((18290, 
18313), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (18311, 18313), False, 'import aiohttp\n'), ((49535, 49558), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (49556, 49558), False, 'import aiohttp\n'), ((50593, 50612), 'asyncio.sleep', 'asyncio.sleep', (['(0.33)'], {}), '(0.33)\n', (50606, 50612), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((74635, 74655), 'asyncio.sleep', 'asyncio.sleep', (['(550.5)'], {}), '(550.5)\n', (74648, 74655), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((74695, 74718), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (74716, 74718), False, 'import aiohttp\n'), ((84919, 84937), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (84932, 84937), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((84985, 85008), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (85006, 85008), False, 'import aiohttp\n'), ((18672, 18688), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (18685, 18688), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((18728, 18751), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (18749, 18751), False, 'import aiohttp\n'), ((23059, 23072), 'random.randint', 'randint', (['(1)', '(5)'], {}), '(1, 5)\n', (23066, 23072), False, 'from random import randint\n'), ((23220, 23233), 'random.randint', 'randint', (['(1)', '(1)'], {}), '(1, 1)\n', (23227, 23233), False, 'from random import randint\n'), ((48208, 48269), 'datetime.datetime.strptime', 'datetime.strptime', (["loss['killmail_time']", '"""%Y-%m-%d%H:%M:%SZ"""'], {}), "(loss['killmail_time'], '%Y-%m-%d%H:%M:%SZ')\n", (48225, 48269), False, 'from datetime import datetime\n'), ((50026, 50044), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (50039, 50044), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((50096, 50119), 
'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (50117, 50119), False, 'import aiohttp\n'), ((50936, 50959), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (50957, 50959), False, 'import aiohttp\n'), ((51451, 51469), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (51464, 51469), False, 'import asyncio, discord, time, threading, websocket, json\n'), ((51525, 51548), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (51546, 51548), False, 'import aiohttp\n'), ((45656, 45718), 'datetime.datetime.strptime', 'datetime.strptime', (["kill['killmail_time']", '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(kill['killmail_time'], '%Y-%m-%dT%H:%M:%SZ')\n", (45673, 45718), False, 'from datetime import datetime\n'), ((55098, 55137), 'datetime.datetime.strptime', 'datetime.strptime', (['"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "('%Y-%m-%dT%H:%M:%SZ')\n", (55115, 55137), False, 'from datetime import datetime\n')]
# ----------------------------------------------------------------------------- # Copyright (c) <NAME>. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # ----------------------------------------------------------------------------- """Tests that -h does not return error and has all required text. This only tests for commands/subgroups which are specified in this file. This does not test the correctness of help text content.""" import unittest from subprocess import PIPE, Popen class HelpTextTests(unittest.TestCase): """Tests that -h does not return error and includes all help text.""" def _validate_output_read_line( self, # noqa: C901; pylint: disable=too-many-arguments command_input, line, section, subgroups, commands, subgroups_index, commands_index, ): """Read a line of text and validates it for correctness. Parameter line (string) should be unprocessed. For example, the line should not be stripped of starting or trailing white spaces. This method returns the updated values of subgroups_index and commands_index as a tuple. Tuple has ordering (subgroups_index, commands_index). If an error occurs during validation, an assert is called.""" line = line.strip() if section in ("Command", "Group"): # if the line starts with the inputted command, then it describes the command. 
# make sure the line has text after it if line.startswith(command_input): self.assertGreater( len(line), len(command_input), msg="Validating help output failed on line: " + line, ) return subgroups_index, commands_index if section == "Arguments": # For lines that start with '--' (for argument descriptions), make sure that # there is something after the argument declaration if line.startswith("--") or line.startswith("-"): # self.assertIn(": ", line, msg="Validating help output failed on line: " + line) # Find the first ':' character and check that there are characters following it first_index = line.find(" ") # first_index = line.find(": ") self.assertNotEqual( -1, first_index, msg="Validating help output failed on line: " + line ) self.assertGreater( len(line), first_index + 1, msg="Validating help output failed on line: " + line ) return subgroups_index, commands_index if section in ("Commands",): # Make sure that if the line starts with the command/group in # the expected tuple, that a description follows it. # The line will either start with the name provided in the expected tuple, # or it will be a continuation line. Ignore continuation lines. first_word_of_line = line.split()[0].rstrip(":") # If we've reached the end of the commands tuple, then skip, since everything # after this is a continuation line. if len(commands) == commands_index and len(subgroups) == subgroups_index: return subgroups_index, commands_index self.assertGreater( len(subgroups) + len(commands), subgroups_index + commands_index, msg="None or missing expected commands provided in test for " + command_input, ) if commands_index < len(commands) and first_word_of_line == commands[commands_index]: # make sure there is descriptive text in this line by checking # that the line is longer than just the command. 
self.assertGreater( len(line.replace(first_word_of_line, "").lstrip()), len(first_word_of_line), msg='Missing help text in "Commands" on line: ' + line, ) commands_index += 1 elif ( subgroups_index < len(subgroups) and first_word_of_line == subgroups[subgroups_index] ): # make sure there is descriptive text in this line help_text = line.replace(first_word_of_line, "", 1).strip() self.assertGreater( len(help_text), 0, msg='Missing help text in "Commands" section on line: ' + line, ) subgroups_index += 1 else: self.fail(f"Found unknown command {first_word_of_line}.") return subgroups_index, commands_index # TO DO - COmmands and subgroups are both listed together. If we split we might want to revisit the below. # if section in ("Commands", "Subgroups"): # # Make sure that if the line starts with the command/group in # # the expected tuple, that a description follows it. # # The line will either start with the name provided in the expected tuple, # # or it will be a continuation line. Ignore continuation lines. # first_word_of_line = line.split()[0].rstrip(":") # if section == "Commands": # # If we've reached the end of the commands tuple, then skip, since everything # # after this is a continuation line. # if len(commands) == commands_index: # return subgroups_index, commands_index # self.assertGreater( # len(commands), # commands_index, # msg="None or missing expected commands provided in test for " + command_input, # ) # if first_word_of_line == commands[commands_index]: # # make sure there is descriptive text in this line by checking # # that the line is longer than just the command. 
# self.assertGreater( # len(line), # len(first_word_of_line), # msg='Validating help text failed in "Commands" on line: ' + line, # ) # commands_index += 1 # elif section == "Subgroups": # # If we've reached the end of the commands tuple, then skip # if len(subgroups) == subgroups_index: # return subgroups_index, commands_index # self.assertGreater( # len(subgroups), # subgroups_index, # msg="None or missing expected subgroups provided in test for " + command_input, # ) # if first_word_of_line == subgroups[subgroups_index]: # # make sure there is descriptive text in this line # self.assertGreater( # len(line), # len(first_word_of_line), # msg='Validating help text failed in "Subgroups" on line: ' + line, # ) # subgroups_index += 1 # return subgroups_index, commands_index self.fail("Section name {0} is not supported".format(section)) # The following line will be reached. It is added so pylint does not complain # about inconsistent-return-statements. return subgroups_index, commands_index @classmethod def _validate_output_read_section_name(cls, line): """Read a given line and validate it for correctness based on the given section. Parameter line (string) should be unprocessed. For example, the line should not be stripped of starting or trailing white spaces. Returns the section name if the given line designates the beginning of a new section. 
Returns None if the line does not.""" if line.strip() and not line[0].isspace(): # Use these lines to set the 'section' variable and move on to the next line line = line.strip().rstrip(":") if line == "Commands": return "Commands" if line in ("Options", "Arguments", "Global Arguments"): return "Arguments" if line == "Group": return "Group" if line == "Subgroups": return "Subgroups" if line == "Command": return "Command" return None def validate_output( self, command_input, subgroups=(), commands=() ): # pylint: disable=too-many-locals """ This function verifies that the returned help text is correct, and that no exceptions are thrown during invocation. If commands are provided, this function will call itself recursively to verify the correctness of the commands. It verifies correctness by: - All listed subgroups and commands appear in alphabetical order. We do not check for the existence of extra subgroups and commands. - If subgroups or commands are not provided, then we expect it not to appear in the help text. If it does, there will be an assertion raised in this test. - All listed groups/subgroups, commands, and arguments have descriptive text Limitations: This test doesn't search for new commands which are added. If a test entry is not added here, then that entry will not be verified. The first word of the line should not match a command name command_input (string): This represents the command for which you want to get the help text. For example, "osducli" or "osducli application" or "osducli application list". Parameter command_input should not include the "-h" to get the help text, as this method will take care of that. subgroups (tuple of strings): This represents all of the subgroups expected in the help text. This tuple must be in alphabetical order. commands (tuple of strings): This represents all of the commands expected in the help text. This tuple must be in alphabetical order. Help text has two formats. One for groups, and one for commands. 
""" help_command = command_input + " -h" err = None returned_string = None try: # This variable tracks what sections of the help text we are in # Possibilities are Group, Subgroups, Commands, Command, Arguments, # and Global Arguments. # Once we no longer support python 2, change section options of enums section = "Start" # A tracker to know how many subgroups or commands have appeared in help text so far # We use this to make sure that all expected items are returned subgroups_index = 0 commands_index = 0 # Call the provided command in command line # Do not split the help_command, as that breaks behavior: # Linux ignores the splits and takes only the first. # pylint: disable=R1732 pipe = Popen(help_command, shell=True, stdout=PIPE, stderr=PIPE) # returned_string and err are returned as bytes (returned_string, err) = pipe.communicate() if err: err = err.decode("utf-8") self.assertEqual(b"", err, msg="ERROR: in command: " + help_command) if not returned_string: self.fail("No help text in command: " + help_command) returned_string = returned_string.decode("utf-8") lines = returned_string.splitlines() for line in lines: if not line.strip(): continue # Check if we want to mark the start of a new section # Check this by seeing if the line is a top level description, ie: 'Commands:' # These are characterized by a new line with text starting without white space. read_section_output = self._validate_output_read_section_name(line) if read_section_output is not None: section = read_section_output # If this line is a section start, no additional processing # is required. Move on to the next line. continue # Don't check usage / intro text at this time. if section == "Start": continue # If this line is not a section start, then validate the correctness of the line. # This command returns a tuple which includes counters for subgroups and commands # which count how many instances of each have been processed. 
updated_indices = self._validate_output_read_line( command_input, line, section, subgroups, commands, subgroups_index, commands_index, ) subgroups_index = updated_indices[0] commands_index = updated_indices[1] # If section is still 'Start', the something has gone wrong. # It means that lines were not processed # correctly, since we expect some sections to appear. self.assertNotEqual( "Start", section, msg="Command {0}: incomplete help text: {1}".format(help_command, returned_string), ) # Check that we have traversed completely through both # subgroups and commands self.assertEqual( len(commands), commands_index, msg=( "Not all commands listed in help text for " + help_command + ". \nThis may be a problem due incorrect expected ordering. " 'I.e ("delete", "show", "list") != ("show", "delete", "list"). ' "\nFirst diagnosis should be to run the help cmd yourself. \n" "If you passed in a single value to the tuple in validate " "output: commands=(set-telemetry,), like the example shown, " "you must pass in a comma after in the tuple, otherwise it " "will not be recognized as a tuple." ), ) self.assertEqual( len(subgroups), subgroups_index, msg=( "Not all subgroups listed in help text for " + help_command + ". This may be a problem due incorrect expected ordering. " "First diagnosis should be to run the help cmd yourself." ), ) except BaseException as exception: # pylint: disable=broad-except if not err: self.fail( msg="ERROR: Command {0} returned error at execution. Output: {1} Error: {2}".format( help_command, returned_string, str(exception) ) ) else: self.fail( msg="ERROR: Command {0} returned error at execution. Output: {1} Error: {2}".format( help_command, returned_string, err ) ) # Once validation is done for the provided command_input, # if there are any commands returned in the help text, validate those commands. 
for command in commands: self.validate_output(command_input + " " + command) def test_help_documentation(self): """Tests all help documentation to ensure that all commands have help text. This does not test for typos / correctness in the text itself. This test calls validate_output on all commands which osducli has, without the '-h' flag included. The flag will be added by validate_ouput. Note: validate_output expects subgroups and commands in order. If out of alphabetical order, you will see an error for not all commands/subgroups being listed. Note: you do not need to call individual commands. Commands listed in the 'commands' list will be called and verified automatically. You DO need an entry for each subgroup.""" self.validate_output( "osdu", subgroups=( "config", "dataload", "entitlements", "legal", "list", "schema", "search", "unit", "workflow", ), commands=( "status", "version", ), ) self.validate_output( "osdu config", commands=( "default", "list", "update", ), ) self.validate_output( "osdu dataload", commands=( "ingest", "status", "verify", ), ) self.validate_output( "osdu entitlements", subgroups=("groups", "members"), commands=("mygroups",), ) self.validate_output( "osdu entitlements groups", commands=("add", "delete", "members"), ) self.validate_output( "osdu entitlements members", commands=("add", "list", "remove"), ) self.validate_output( "osdu legal", commands=("listtags",), ) self.validate_output( "osdu list", commands=("records",), ) self.validate_output( "osdu schema", commands=( "add", "get", "list", ), ) self.validate_output( "osdu search", commands=("id", "query"), ) self.validate_output( "osdu unit", commands=("list",), ) self.validate_output( "osdu workflow", commands=("list",), ) if __name__ == "__main__": import nose2 nose2.main()
[ "subprocess.Popen", "nose2.main" ]
[((18778, 18790), 'nose2.main', 'nose2.main', ([], {}), '()\n', (18788, 18790), False, 'import nose2\n'), ((11468, 11525), 'subprocess.Popen', 'Popen', (['help_command'], {'shell': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(help_command, shell=True, stdout=PIPE, stderr=PIPE)\n', (11473, 11525), False, 'from subprocess import PIPE, Popen\n')]
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This is the code for Clustering using our CNC framework.""" from __future__ import division import collections import os from absl import app from absl import flags from clustering_normalized_cuts import setup from clustering_normalized_cuts.cnc_net import run_net from clustering_normalized_cuts.data_loader import get_data flags.adopt_module_key_flags(setup) FLAGS = flags.FLAGS # SELECT GPU os.environ['CUDA_VISIBLE_DEVICES'] = '1' def main(_): params = collections.defaultdict(lambda: None) # SET GENERAL HYPERPARAMETERS general_params = { 'dset': FLAGS.dset, # dataset: reuters / mnist 'val_set_fraction': 0.1, # fraction of training set to use as validation 'siam_batch_size': 128, # minibatch size for siamese net 'main_path': FLAGS.main_path, 'result_path': FLAGS.result_path } params.update(general_params) # SET DATASET SPECIFIC HYPERPARAMETERS if FLAGS.dset == 'mnist': mnist_params = setup.set_mnist_params() params.update(mnist_params) # LOAD DATA setup.seed_init() data = get_data(params) # RUN EXPERIMENT run_net(data, params) if __name__ == '__main__': app.run(main)
[ "clustering_normalized_cuts.setup.set_mnist_params", "absl.flags.adopt_module_key_flags", "clustering_normalized_cuts.setup.seed_init", "absl.app.run", "clustering_normalized_cuts.cnc_net.run_net", "collections.defaultdict", "clustering_normalized_cuts.data_loader.get_data" ]
[((938, 973), 'absl.flags.adopt_module_key_flags', 'flags.adopt_module_key_flags', (['setup'], {}), '(setup)\n', (966, 973), False, 'from absl import flags\n'), ((1075, 1113), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (1098, 1113), False, 'import collections\n'), ((1638, 1655), 'clustering_normalized_cuts.setup.seed_init', 'setup.seed_init', ([], {}), '()\n', (1653, 1655), False, 'from clustering_normalized_cuts import setup\n'), ((1665, 1681), 'clustering_normalized_cuts.data_loader.get_data', 'get_data', (['params'], {}), '(params)\n', (1673, 1681), False, 'from clustering_normalized_cuts.data_loader import get_data\n'), ((1704, 1725), 'clustering_normalized_cuts.cnc_net.run_net', 'run_net', (['data', 'params'], {}), '(data, params)\n', (1711, 1725), False, 'from clustering_normalized_cuts.cnc_net import run_net\n'), ((1757, 1770), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1764, 1770), False, 'from absl import app\n'), ((1565, 1589), 'clustering_normalized_cuts.setup.set_mnist_params', 'setup.set_mnist_params', ([], {}), '()\n', (1587, 1589), False, 'from clustering_normalized_cuts import setup\n')]
#!/usr/bin/env python import math, random, subprocess, time sin=math.sin commands=["/usr/bin/setterm","/usr/bin/xset"] fname = "" file = None type = None _test = "" cmd = None class SystemError(BaseException): pass for c in commands: _test = subprocess.getoutput("setterm --blength 256") if not _test: raise SystemError(c+" error") if _test.find("not support")<0 and _test.find("error")<0: cmd=c break else: setterm=False setterm=cmd==commands[0] if not cmd: raise SystemError("No supported command ("+",".join(commands)+")") i=0 while 1: note=sin(i*.1)*9+60 subprocess.run(( cmd,"--bfreq" if setterm else "b", str(round(2**((note-69)/12)*440)), "--blength" if setterm else "", str(round(100)) )) print(end="\a",flush=True) time.sleep(0.1) i+=1 subprocess.run(( cmd,"--bfreq" if setterm else "b", "400", "--blength" if setterm else "", "200" ))
[ "subprocess.getoutput", "subprocess.run", "time.sleep" ]
[((821, 923), 'subprocess.run', 'subprocess.run', (["(cmd, '--bfreq' if setterm else 'b', '400', '--blength' if setterm else '',\n '200')"], {}), "((cmd, '--bfreq' if setterm else 'b', '400', '--blength' if\n setterm else '', '200'))\n", (835, 923), False, 'import math, random, subprocess, time\n'), ((250, 295), 'subprocess.getoutput', 'subprocess.getoutput', (['"""setterm --blength 256"""'], {}), "('setterm --blength 256')\n", (270, 295), False, 'import math, random, subprocess, time\n'), ((796, 811), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (806, 811), False, 'import math, random, subprocess, time\n')]
import stomasimulator.geom.geom_utils as geom class AttributeCalculator(object): """ Abstraction for calculations performed on XPLT state data """ def __init__(self, prefix, reference_data, dimensionality, lambda_fn=None): self.prefix = '' if prefix is None else prefix self.reference_data = reference_data self.dimensionality = dimensionality self.lambda_fn = (lambda x: x) if lambda_fn is None else lambda_fn def calculate(self, nid_pt_dict, extras=None): """ Perform the calculation :param nid_pt_dict: dictionary of an integer 'node id' to a Point object :param extras: passed on to the subclass :return: a dictionary containing label-result pairs from the calculation :rtype: dict """ data = self._calculate(nid_pt_dict, extras) if self.dimensionality == 1: data = (data,) return {k: self.lambda_fn(v) for k, v in zip(self.labels(), data)} def _calculate(self, nid_pt_dict, extras): """ Calculation implementation - to be overridden in subclasses """ pass def labels(self): """ Get the labels for the calculation results """ suffices = self.calculation_suffices() assert len(suffices) == self.dimensionality, 'Error! Data label dimensionality mismatch.' 
fmt_string = '{}{}' if len(self.prefix) == 0 or len(suffices[0]) == 0 else '{}-{}' return [fmt_string.format(self.prefix, suffix) for suffix in suffices] def calculation_suffices(self): """ These suffices are appended to the labels of the calculation result """ return ['', ] * self.dimensionality def _get_point(ref_pt, id_pt_dict): return id_pt_dict.get(ref_pt) if isinstance(ref_pt, int) else ref_pt class DistanceCalculator(AttributeCalculator): """ Distance between two points """ def __init__(self, prefix, node_pair, lambda_fn=None): node_0 = node_pair[0] node_1 = node_pair[1] reference_data = (node_0 if node_0.id is None else node_0.id, node_1 if node_1.id is None else node_1.id) super(DistanceCalculator, self).__init__(prefix=prefix, reference_data=reference_data, dimensionality=1, lambda_fn=lambda_fn) def _calculate(self, nid_pt_dict, extras): pt_0 = _get_point(self.reference_data[0], nid_pt_dict) pt_1 = _get_point(self.reference_data[1], nid_pt_dict) return pt_0.distance(pt_1) class DirectionalDistanceCalculator(DistanceCalculator): """ Signed distance calculator """ def __init__(self, prefix, node_pair, direction, lambda_fn=None): """ Calculate a distance in a specified direction :param prefix: :param node_pair: two Points - further along 'direction' than node_pair[1] so that 'np[0] - np[1]' should be in the direction of 'direction' :param direction: the direction vector :param lambda_fn: """ super(DirectionalDistanceCalculator, self).__init__(prefix=prefix, node_pair=node_pair, lambda_fn=lambda_fn) self.direction = direction.unit() def _calculate(self, nid_pt_dict, extras): pt_0 = _get_point(self.reference_data[0], nid_pt_dict) pt_1 = _get_point(self.reference_data[1], nid_pt_dict) is_in_right_direction = (pt_0 - pt_1) * self.direction > 0.0 return pt_0.distance(pt_1) if is_in_right_direction else 0.0 class AreaCalculator2D(AttributeCalculator): """ Calculate area from a list of points (assumed to be in xy plane) """ def __init__(self, prefix, boundary_pts, 
lambda_fn=None): super(AreaCalculator2D, self).__init__(prefix=prefix, reference_data=boundary_pts, dimensionality=1, lambda_fn=lambda_fn) def _calculate(self, nid_pt_dict, extras): updated_pore_pts = [nid_pt_dict[pt.id] for pt in self.reference_data] pore_area = geom.calculate_polygon_area(updated_pore_pts) return pore_area class AreaCalculator3D(AttributeCalculator): """ Calculate an area from a list of facets """ def __init__(self, prefix, facet_list): super(AreaCalculator3D, self).__init__(prefix=prefix, reference_data=facet_list, dimensionality=1) def _calculate(self, nid_pt_dict, extras): area = geom.calculate_surface_area(nid_pt_dict, self.reference_data) return area class AreaVolumeCalculator(AttributeCalculator): """ Perform a combined calculation to get the surface area and volume given a list of facets """ def __init__(self, prefix, facet_list): super(AreaVolumeCalculator, self).__init__(prefix=prefix, reference_data=facet_list, dimensionality=2) def _calculate(self, nid_pt_dict, extras): volume, area = geom.calculate_volume_and_area(nid_pt_dict, self.reference_data) return area, volume def calculation_suffices(self): return 'area', 'volume' class XpltReaderMetrics(object): """ Identify the metrics that will be calculated for the XpltReader """ def __init__(self, comparison_helper=None, is_mesh_calculation_on=False): """ :param comparison_helper: Comparison helper for the stoma :type stoma_cfg: sc.ComparisonHelper :param is_mesh_calculation_on: Whether to calculate the mesh metrics (or not) :type is_mesh_calculation_on: bool """ self.comparison_helper = comparison_helper self.is_mesh_calculation_on = is_mesh_calculation_on @property def is_compare_vs_open_stoma_on(self): """ :return: Whether or not to perform the comparison :rtype: bool """ return self.comparison_helper is not None def evaluate_metric(self, sim_state): """ Calculate the metric and percent difference vs. 
each measurement :param sim_state: State object holding data from the simulation :type sim_state: State :return: Each item is a pair comprising a name (key) and its float value :rtype: list of tuple """ result = self.comparison_helper.perform_comparison(state_pressure=sim_state.time, state_data=sim_state.attributes) return result if __name__ == '__main__': pass
[ "stomasimulator.geom.geom_utils.calculate_surface_area", "stomasimulator.geom.geom_utils.calculate_polygon_area", "stomasimulator.geom.geom_utils.calculate_volume_and_area" ]
[((4327, 4372), 'stomasimulator.geom.geom_utils.calculate_polygon_area', 'geom.calculate_polygon_area', (['updated_pore_pts'], {}), '(updated_pore_pts)\n', (4354, 4372), True, 'import stomasimulator.geom.geom_utils as geom\n'), ((4806, 4867), 'stomasimulator.geom.geom_utils.calculate_surface_area', 'geom.calculate_surface_area', (['nid_pt_dict', 'self.reference_data'], {}), '(nid_pt_dict, self.reference_data)\n', (4833, 4867), True, 'import stomasimulator.geom.geom_utils as geom\n'), ((5369, 5433), 'stomasimulator.geom.geom_utils.calculate_volume_and_area', 'geom.calculate_volume_and_area', (['nid_pt_dict', 'self.reference_data'], {}), '(nid_pt_dict, self.reference_data)\n', (5399, 5433), True, 'import stomasimulator.geom.geom_utils as geom\n')]
# -*- coding: utf-8 -*- ''' @author: <NAME> May 2018 ''' # import code # code.interact(local=locals()) import os import pickle # from fordclassifier.classifier.classifier import Classifier import numpy as np import pandas as pd from sklearn.metrics import roc_curve, auc import json import matplotlib.pyplot as plt import operator import itertools from sklearn.metrics import confusion_matrix from collections import OrderedDict import pyemd # Local imports from fordclassifier.evaluator.predictorClass import Predictor from fordclassifier.evaluator.rbo import * import pdb class Evaluator(object): ''' Class to evaluate the performance of the classifiers ============================================================================ Methods: ============================================================================ _recover: if a variable is not in memory, tries to recover it from disk _get_folder: retuns full path to a subfolder _exists_file: check if the file exists in disk draw_rocs: draws the Xval ROCs and saves them as png files load_Xtfidf: Loads from disk Xtfidf and tags load_test_data: Loads from disk test Xtfidf and tags load_train_data: Loads from disk train Xtfidf and tags compute_average_xval_AUC: computes the average AUC on xval compute_average_test_AUC: computes the average AUC on test obtain_labels_from_Preds: Produces the multilabel tag prediction from individual predictions of every classifier compute_confussion_matrix: computes the confusion matrix on test (multiclass case) compute_confusion_matrix_multilabel: computes the confussion matrix for a multilabel set (multilabel case) draw_confussion_matrix: draws the CM and saves it as a png file draw_ROCS_tst: draws the ROC curves for the test data draw_anyROC: draws the ROC curves compute_thresholds: computes the thresholds compute_cardinality: computes the cardinality of the tags compute_label_density: Computes the label density JaccardIndex: Computes the Jaccard index compute_multilabel_threshold: Computes 
the multilabel threshold draw_costs_on_test: draws the multilabel cost for the test data load_multilabel_threshold: Loads the multilabel thresholds Jaccard_RBO_cost: Computes a convex combination of the Jaccard and RBO costs align_strings: Aligns strings into columns get_pred_weights: Returns the normalized predictions write_prediction_report: writes a simple prediction report in text format ============================================================================ ''' def __init__(self, project_path, subfolders, categories=None, verbose=True): ''' Initialization: Creates the initial object data Inputs: - project_path: path to the working project - subfolders: subfolder structure ''' self._project_path = project_path # working directory self._verbose = verbose # messages are printed on screen when True self.models2evaluate = None # models to evaluate (classif, params) self._subfolders = None # subfolders structure self.best_auc = None # Best AUC self.best_models = None # Best models self.Xtfidf_tr = None # Xtfidf for training self.tags_tr = None # Training tags self.tags = None # All tags self.ths_dict = None # dict with the thresholds for every classifier self.Preds = None # Prediction matrix, one column per category self.Preds_tr = None # Pred. matrix, one column per category, train self.Preds_tst = None # Pred. 
matrix, one column per category, test self.index_tst = None # Index for tags test self.categories = categories # List of categories self.Xtfidf_tst = None # Xtfidf for test self.tags_tst = None # Test tags self.CONF = None # Confusion matrix self.multilabel_th = None # Multilabel Threshold self._subfolders = subfolders def _get_folder(self, subfolder): ''' gets full path to a folder Inputs: - subfolder: target subfolder ''' return os.path.join(self._project_path, self._subfolders[subfolder]) def _exists_file(self, filename): ''' Checks if the file exists Inputs: - filename ''' try: f = open(filename, 'r') existe = True f.close() except: existe = False pass return existe def _recover(self, field): ''' Loads from disk a previously stored variable, to avoid recomputing it Inputs: - field: variable to restore from disk ''' if field == 'best_auc': input_file = os.path.join(self._get_folder('results'), 'best_auc.json') with open(input_file, 'r') as f: self.best_auc = json.load(f) if field == 'best_models': try: input_file = os.path.join(self._get_folder('results'), 'best_models.json') with open(input_file, 'r') as f: self.best_models = json.load(f) except: input_file = os.path.join(self._get_folder('export'), 'best_models.json') with open(input_file, 'r') as f: self.best_models = json.load(f) pass if field == 'Xtfidf_tr': filetoload_Xtfidf = os.path.join( self._project_path + self._subfolders['training_data'], 'train_data.pkl') with open(filetoload_Xtfidf, 'rb') as f: [self.Xtfidf_tr, tags_tr, self.tags_tr, refs_tr] = pickle.load(f) if field == 'Xtfidf_tst': filetoload_Xtfidf = os.path.join( self._project_path + self._subfolders['test_data'], 'test_data.pkl') with open(filetoload_Xtfidf, 'rb') as f: [self.Xtfidf_tst, tags_tst, self.tags_tst, refs_tst] = pickle.load(f) if field == 'tags': filetoload_tags = os.path.join( self._project_path + self._subfolders['training_data'], 'tags.pkl') with open(filetoload_tags, 'rb') as f: self.tags = pickle.load(f) if field == 
'ths_dict': try: filename = os.path.join( self._project_path + self._subfolders['results'], 'ths_dict.pkl') with open(filename, 'rb') as f: self.ths_dict = pickle.load(f) except: filename = os.path.join( self._project_path + self._subfolders['export'], 'ths_dict.pkl') with open(filename, 'rb') as f: self.ths_dict = pickle.load(f) pass if field == 'Preds': filename = os.path.join( self._project_path + self._subfolders['results'], 'Preds.pkl') with open(filename, 'rb') as f: self.Preds = pickle.load(f) if field == 'Preds_tr': filename = os.path.join( self._project_path, self._subfolders['results'], 'Preds_tr.pkl') with open(filename, 'rb') as f: self.Preds_tr = pickle.load(f) if field == 'Preds_tst': filename = os.path.join( self._project_path, self._subfolders['results'], 'Preds_test.pkl') with open(filename, 'rb') as f: self.Preds_tst = pickle.load(f) if field == 'CONF': filename = os.path.join( self._project_path + self._subfolders['results'], 'CONF.pkl') with open(filename, 'rb') as f: self.CONF = pickle.load(f) if field == 'tags_index': filename = os.path.join( self._project_path + self._subfolders['test_data'], 'tags_index.pkl') with open(filename, 'rb') as f: [self.tags_tst, self.index_tst] = pickle.load(f) if field == 'categories': try: filename = os.path.join( self._project_path + self._subfolders['training_data'], 'categories.pkl') with open(filename, 'rb') as f: self.categories = pickle.load(f) except: filename = os.path.join( self._project_path + self._subfolders['export'], 'categories.pkl') with open(filename, 'rb') as f: self.categories = pickle.load(f) pass if field == 'models2evaluate': try: filename = os.path.join( self._project_path + self._subfolders['training_data'], 'models2evaluate.pkl') with open(filename, 'rb') as f: self.models2evaluate = pickle.load(f) except: filename = os.path.join( self._project_path + self._subfolders['export'], 'models2evaluate.pkl') with open(filename, 'rb') as f: self.models2evaluate = pickle.load(f) pass if field == 
'multilabel_th': try: filename = os.path.join( self._project_path + self._subfolders['training_data'], 'multilabel_th.pkl') with open(filename, 'rb') as f: self.multilabel_th = pickle.load(f) except: filename = os.path.join( self._project_path + self._subfolders['export'], 'multilabel_th.pkl') with open(filename, 'rb') as f: self.multilabel_th = pickle.load(f) pass return def draw_rocs(self, verbose=True): ''' Draws the Xval ROCs and saves them as png files Inputs: - None, it operates on self values ''' if verbose: print("Saving ROC figures ...") if self.categories is None: self._recover('categories') if self.models2evaluate is None: self._recover('models2evaluate') # get the evaluated models models = list(self.models2evaluate.keys()) Nclass = len(models) Ncats = len(self.categories) for kcat in range(0, Ncats): plt.figure(figsize=(15, 12)) aucs = [] cat = self.categories[kcat] for kclass in range(0, Nclass): try: model_name = models[kclass] file_input_ROC = os.path.join( self._get_folder('eval_ROCs'), 'ROC_' + model_name + '_' + cat + '.pkl') with open(file_input_ROC, 'rb') as f: mdict = pickle.load(f) auc = mdict['roc_auc_loo'] aucs.append((model_name, auc)) except: pass # Sorting by AUC aucs.sort(key=operator.itemgetter(1), reverse=True) colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--', 'c--', 'k--'] # drawing the best 10 models for k in range(0, 10): try: model_name = aucs[k][0] auc = aucs[k][1] file_input_ROC = os.path.join( self._get_folder('eval_ROCs'), 'ROC_' + model_name + '_' + cat + '.pkl') with open(file_input_ROC, 'rb') as f: mdict = pickle.load(f) fpr = mdict['fpr_loo'] tpr = mdict['tpr_loo'] text = model_name + ', AUC= ' + str(auc)[0:6] if auc > 0.6: if k == 0: # drawing the best model with thicker line plt.plot(fpr, tpr, colors[k], label=text, linewidth=6.0) else: plt.plot(fpr, tpr, colors[k], label=text, linewidth=2.0) except: pass plt.xlabel('FPR') plt.ylabel('TPR') plt.title('ROC curves for category ' + cat) plt.grid(True) 
plt.legend(loc="lower right") filename = os.path.join(self._get_folder('ROCS_tr'), cat + '_ROC_xval.png') plt.savefig(filename) plt.close() if verbose: print(cat, ) return def load_Xtfidf(self, verbose=True): ''' Loads from disk Xtfidf and tags Inputs: - None, it operates on self values ''' if self.Xtfidf is None: self._recover('Xtfidf') if self.tags is None: self._recover('tags') return self.Xtfidf, self.tags def load_test_data(self, verbose=True): ''' Loads from disk test Xtfidf and tags Inputs: - None, it operates on self values ''' filename = os.path.join( self._project_path + self._subfolders['test_data'], 'test_data.pkl') with open(filename, 'rb') as f: [self.Xtfidf_tst, self.tags_tst, refs_tst] = pickle.load(f) new_tags_tst = [] for tags in self.tags_tst: unique_tags = sorted(set(tags), key=tags.index) new_tags_tst.append(unique_tags) return self.Xtfidf_tst, new_tags_tst, refs_tst def load_train_data(self, verbose=True): ''' Loads from disk train Xtfidf and tags Inputs: - None, it operates on self values ''' filename = os.path.join( self._project_path + self._subfolders['training_data'], 'train_data.pkl') with open(filename, 'rb') as f: [self.Xtfidf_tr, self.tags_tr, refs_tr] = pickle.load(f) new_tags_tr = [] for tags in self.tags_tr: unique_tags = sorted(set(tags), key=tags.index) new_tags_tr.append(unique_tags) return self.Xtfidf_tr, new_tags_tr, refs_tr def compute_average_xval_AUC(self, verbose=True): ''' Computes the average AUC on xval Inputs: - None, it operates on self values ''' if self.best_auc is None: self._recover('best_auc') aucs = list(self.best_auc.values()) average_auc = np.mean(aucs) return average_auc def obtain_labels_from_Preds(self, Preds, threshold, categories=None, verbose=True): ''' Produces the multilabel tag prediction from individual predictions of every classifier Inputs: - Preds: predictions matrix, one column per category, as many rows as patterns - threshold: multilabel threshold ''' if self.categories is None: 
self._recover('categories') labels_preds = [] Ndocs = Preds.shape[0] for kdoc in range(0, Ndocs): l = [] p = Preds[kdoc, :] # Normalize individual predictions, the maximum becomes 1.0 in all # cases if max(p) > 0: p = p / max(p) orden = np.argsort(-p) for index in orden: if p[index] > threshold: l.append(self.categories[index]) labels_preds.append(l) return labels_preds def compute_confusion_matrix(self, orig_tags, best_pred_tags, filename, sorted_categories=[], verbose=True): ''' computes the confussion matrix on test (multiclass case) Inputs: - orig_tags: original labels - best_pred_tags: predicted labels - filename: file to save results - sorted_categories: categories to take into account, respecting the order ''' if self.categories is None: self._recover('categories') if len(sorted_categories) > 0: labels_categories = sorted_categories else: labels_categories = self.categories self.CONF = confusion_matrix(orig_tags, best_pred_tags, labels=labels_categories) pathfilename = os.path.join( self._project_path + self._subfolders['results'], filename) with open(pathfilename, 'wb') as f: pickle.dump(self.CONF, f) return self.CONF def compute_confusion_matrix_multilabel(self, orig_tags, best_pred_tags, filename, sorted_categories=[], verbose=True): ''' computes the confussion matrix for a multilabel set (multilabel case) Inputs: - orig_tags: original labels - best_pred_tags: predicted labels - filename: file to save results - sorted_categories: categories to take into account, respecting the order ''' if self.categories is None: self._recover('categories') if len(sorted_categories) > 0: labels_categories = sorted_categories else: labels_categories = self.categories Ncats = len(labels_categories) self.CONF = np.zeros((Ncats, Ncats)) NP = len(orig_tags) for k in range(0, NP): cats_orig = orig_tags[k] cats_pred = best_pred_tags[k] for m in range(0, Ncats): for n in range(0, Ncats): cat_orig = labels_categories[m] cat_pred = labels_categories[n] if cat_orig in cats_orig and 
cat_pred in cats_pred: self.CONF[m, n] += 1.0 # self.CONF = confusion_matrix(orig_tags, best_pred_tags, # labels=labels_categories) pathfilename = os.path.join( self._project_path + self._subfolders['results'], filename) with open(pathfilename, 'wb') as f: pickle.dump(self.CONF, f) return self.CONF def compute_confusion_matrix_multilabel_v2( self, orig_tags, best_pred_tags, filename, sorted_categories=[], order_sensitive=False, verbose=True): ''' computes the confusion matrix for a multilabel set Inputs: - orig_tags: original labels - best_pred_tags: predicted labels - filename: file to save results - sorted_categories: categories to take into account, respecting the order - order_sensitive: indicates if the computation is order sensitive or not ''' # Set dump factor if order_sensitive: dump_factor = 0.5 else: dump_factor = 1.0 # Take categories from the input arguments. If not, from the object. # If not, from a file using the recover method. if len(sorted_categories) > 0: categories = sorted_categories else: # Get list of categories if self.categories is None: self._recover('categories') categories = self.categories # Validate true labels n = len([x for x in orig_tags if len(x) == 0]) if n > 0: print('---- WARNING: {} samples without labels '.format(n) + 'will be ignored.') # Validate predicted labels n = len([x for x in best_pred_tags if len(x) == 0]) if n > 0: print('---- WARNING: {} samples without predictions '.format(n) + 'will be ignored.') # Loop over the true and predicted labels Ncats = len(categories) self.CONF = np.zeros((Ncats, Ncats)) for cats_orig, cats_pred in zip(orig_tags, best_pred_tags): if len(cats_orig) > 0 and len(cats_pred) > 0: # Compute numerical true label vector value_orig = 1.0 p = np.zeros(Ncats) for c in cats_orig: p[categories.index(c)] = value_orig value_orig *= dump_factor p = p / np.sum(p) # Compute numerical prediction label vector value_pred = 1.0 q = np.zeros(Ncats) for c in cats_pred: q[categories.index(c)] = value_pred 
value_pred *= dump_factor q = q / np.sum(q) # Compute diagonal elements min_pq = np.minimum(p, q) M = np.diag(min_pq) # Compute non-diagonal elements p_ = p - min_pq q_ = q - min_pq z = 1 - np.sum(min_pq) if z > 0: M += (p_[:, np.newaxis] * q_) / z self.CONF += M pathfilename = os.path.join( self._project_path, self._subfolders['results'], filename) with open(pathfilename, 'wb') as f: pickle.dump(self.CONF, f) return self.CONF def compute_EMD_error(self, orig_tags, best_pred_tags, fpath, order_sensitive=False): ''' computes the confusion matrix for a multilabel set Args: - orig_tags: original labels - best_pred_tags: predicted labels - fpath: path to the file with the similarity matrix - order_sensitive: indicates if the computation is order sensitive or not ''' # ###################### # Load similarity values if type(fpath) is str: df_S = pd.read_excel(fpath) # Compute cost matrix C = 1 - df_S[df_S.columns].values # WARNING: For later versions of pandas, you might need to use: # Note that df_S.columnst shooud be taken from 1, because # The first column is taken as the index column. # C = 1 - df_S[df_S.columns[1:]].to_numpy() else: # This is a combination of cost matrices that takes the # component-wise minimum of the costs C = 1 for fp in fpath: df_S = pd.read_excel(fp) # Compute cost matrix Cf = 1 - df_S[df_S.columns].values C = np.minimum(C, Cf) # This combination of cost matrices takes each cost matrix with a # different weights. Only for two Cost matrices. 
# df_S = pd.read_excel(fpath[0]) # C1 = 1 - df_S[df_S.columns].values # df_S = pd.read_excel(fpath[1]) # Cs = 1 - df_S[df_S.columns].values # ncat = Cs.shape[0] # C = np.minimum(1 - np.eye(ncat), # np.minimum(0.25 + 0.75 * Cs, 0.5 + 0.5 * C1)) # This is to make sure that C is "C-contitguos", a requirement of pyemd C = np.ascontiguousarray(C, dtype=np.float64) # Set dump factor if order_sensitive: dump_factor = 0.5 else: dump_factor = 1.0 # Take categories in the order of the cost matrix categories = df_S.columns.tolist() # Validate true labels n = len([x for x in orig_tags if len(x) == 0]) if n > 0: print(f'---- WARNING: {n} samples without labels will be ignored') # Validate predicted labels n = len([x for x in best_pred_tags if len(x) == 0]) if n > 0: print(f'---- WARNING: {n} samples without preds will be ignored') # ################## # Compute EMD errors # Loop over the true and predicted labels Ncats = len(categories) self.emd = 0 count = 0 for cats_orig, cats_pred in zip(orig_tags, best_pred_tags): if len(cats_orig) > 0 and len(cats_pred) > 0: # Compute numerical true label vector value_orig = 1.0 p = np.zeros(Ncats) for c in cats_orig: p[categories.index(c)] = value_orig value_orig *= dump_factor p = p / np.sum(p) # Compute numerical prediction label vector value_pred = 1.0 q = np.zeros(Ncats) for c in cats_pred: q[categories.index(c)] = value_pred value_pred *= dump_factor q = q / np.sum(q) # Compute EMD distance for the given sample emd_i = pyemd.emd(p, q, C) self.emd += emd_i count += 1 self.emd /= count return self.emd def compute_sorted_errors(self, CONF, categories): eps = 1e-20 # Sample size per category n_cat = len(categories) ns_cat = CONF.sum(axis=1, keepdims=True) # Total sample size ns_tot = CONF.sum() # Compute all-normalized confusion matrix CONF_a = CONF / ns_tot # Compute row-normalized confusion matrix CONF_r = ((CONF.astype('float') + eps) / (ns_cat + n_cat*eps)) # Sort errors by unsorted_values = [(categories[i], categories[j], 
100*CONF_a[i, j], 100*CONF_r[i, j], 100*ns_cat[i][0]/ns_tot) for i in range(n_cat) for j in range(n_cat)] sorted_values_a = sorted(unsorted_values, key=lambda x: -x[2]) sorted_values_r = sorted(unsorted_values, key=lambda x: -x[3]) # Remove diagonal elements sorted_values_a = [x for x in sorted_values_a if x[0] != x[1]] sorted_values_r = [x for x in sorted_values_r if x[0] != x[1]] # Remove relative errors of categories with zero samples sorted_values_r = [x for x in sorted_values_r if ns_cat[categories.index(x[0])] > 0] cols = ['Cat. real', 'Clasif', 'Err/total (%)', 'Error/cat (%)', 'Peso muestral'] df_ranked_abs = pd.DataFrame(sorted_values_a, columns=cols) df_ranked_rel = pd.DataFrame(sorted_values_r, columns=cols) f_path = os.path.join(self._project_path, self._subfolders['results'], 'ranked_abs_errors.xlsx') df_ranked_abs.to_excel(f_path) f_path = os.path.join(self._project_path, self._subfolders['results'], 'ranked_rel_errors.xlsx') df_ranked_rel.to_excel(f_path) return df_ranked_abs, df_ranked_rel def compute_error_confusion_matrix(self, CONF, normalize=True, verbose=True): # Returns the ratio of elements outside the diagonal allsum = np.sum(CONF) diagsum = np.sum(np.diagonal(CONF)) offdiagsum = allsum - diagsum error = offdiagsum / allsum return error def draw_confusion_matrix(self, CONF, filename, sorted_categories=[], verbose=True, normalize=True): ''' draws the CM and saves it as a png file Inputs: - CONF: conf matrix to be stored - filename: filename - sorted_categories: list of sorted categories - normalize: indicates to normalize CONF ''' # An extemelly small value to avoid zero division eps = 1e-20 n_cat = len(sorted_categories) if len(sorted_categories) > 0: labels_categories = sorted_categories else: if self.categories is None: self._recover('categories') labels_categories = self.categories if normalize: # Normalize CONF = ((CONF.astype('float') + eps) / (CONF.sum(axis=1, keepdims=True) + n_cat*eps)) else: CONF = CONF.astype('float') 
plt.figure(figsize=(15, 12)) cmap = plt.cm.Blues plt.imshow(CONF, interpolation='nearest', cmap=cmap) plt.colorbar() tick_marks = np.arange(len(labels_categories)) plt.xticks(tick_marks, labels_categories, rotation=90) plt.yticks(tick_marks, labels_categories) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') pathfilename = os.path.join(self._get_folder('figures'), filename) print(f"SALVADO EN {pathfilename}") plt.savefig(pathfilename) plt.clf() return def draw_ROCS_tst(self, Preds_tst, tags_tst): ''' draws the ROC curves for the test data Inputs: - Preds_tst: predicted labels - tags_tst: true labels ''' if self.best_models is None: self._recover('best_models') if self.categories is None: self._recover('categories') colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--', 'c--', 'k--'] # retain the first tag in the labels tags = [t[0] if len(t) > 0 else '' for t in tags_tst] for k in range(0, len(self.categories)): cat = self.categories[k] y_tst = [1.0 if p == cat else -1.0 for p in tags] preds_tst = list(Preds_tst[:, k]) fpr_tst, tpr_tst, thresholds = roc_curve(y_tst, preds_tst) roc_auc_tst = auc(fpr_tst, tpr_tst) model_name = self.best_models[cat] file_output_ROC = os.path.join( self._get_folder('ROCS_tst'), 'ROC_' + model_name + '_' + cat + '.pkl') mdict = {'fpr_tst': list(fpr_tst), 'tpr_tst': list(tpr_tst), 'roc_auc_tst': roc_auc_tst, 'y_tst': list(y_tst), 'preds_tst': list(preds_tst)} with open(file_output_ROC, 'wb') as f: pickle.dump(mdict, f) plt.figure(figsize=(15, 12)) plt.xlabel('FPR') plt.ylabel('TPR') plt.title('ROC test curve for category ' + cat) text = model_name + ', AUC= ' + str(roc_auc_tst)[0:6] plt.plot(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0) plt.grid(True) plt.legend(loc="lower right") filename = os.path.join( self._get_folder('ROCS_tst'), cat + '_ROC_test.png') plt.savefig(filename) plt.close() return def draw_anyROC(self, Preds_tst, tags_tst, case): ''' draws the ROC curves Inputs: - 
Preds_tst: predicted labels - tags_tst: true labels ''' if self.categories is None: self._recover('categories') colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--', 'c--', 'k--'] # retain the first tag in the labels tags = [t[0] if len(t) > 0 else '' for t in tags_tst] aucs = [] for k in range(0, len(self.categories)): cat = self.categories[k] y_tst = [1.0 if p == cat else -1.0 for p in tags] preds_tst = list(Preds_tst[:, k]) fpr_tst, tpr_tst, thresholds = roc_curve(y_tst, preds_tst) roc_auc_tst = auc(fpr_tst, tpr_tst) aucs.append(roc_auc_tst) file_output_ROC = os.path.join( self._get_folder('ROCS_tst'), cat + '_' + 'ROC_' + case + '.pkl') mdict = {'fpr_tst': list(fpr_tst), 'tpr_tst': list(tpr_tst), 'roc_auc_tst': roc_auc_tst, 'y_tst': list(y_tst), 'preds_tst': list(preds_tst)} with open(file_output_ROC, 'wb') as f: pickle.dump(mdict, f) plt.figure(figsize=(15, 12)) plt.xlabel('FPR') plt.ylabel('TPR') plt.title('ROC test curve for category ' + cat) text = case + ', AUC= ' + str(roc_auc_tst)[0:6] plt.plot(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0) plt.grid(True) plt.legend(loc="lower right") filename = os.path.join(self._get_folder('ROCS_tst'), cat + '_' + 'ROC_' + case + '.png') plt.savefig(filename) plt.close() average_auc = np.nanmean(aucs) return average_auc def compute_average_test_AUC(self, verbose=True): ''' computes the average AUC on test Inputs: - None, it operates on self values ''' if self.best_models is None: self._recover('best_models') if self.categories is None: self._recover('categories') aucs = [] for k in range(0, len(self.categories)): cat = self.categories[k] model_name = self.best_models[cat] filename = os.path.join( self._get_folder('ROCS_tst'), 'ROC_' + model_name + '_' + cat + '.pkl') with open(filename, 'rb') as f: mdict = pickle.load(f) auc = mdict['roc_auc_tst'] aucs.append(auc) average_auc = np.nanmean(aucs) return average_auc def compute_thresholds(self, verbose=True): ''' computes the thresholds Inputs: - 
None, it operates on self values ''' if self.categories is None: self._recover('categories') if self.best_models is None: self._recover('best_models') Ncats = len(self.categories) ths_dict = {} for kcat in range(0, Ncats): try: cat = self.categories[kcat] model_name = self.best_models[cat] file_input_ROC = os.path.join( self._get_folder('eval_ROCs'), 'ROC_' + model_name + '_' + cat + '.pkl') with open(file_input_ROC, 'rb') as f: mdict = pickle.load(f) fpr = mdict['fpr_loo'] tpr = mdict['tpr_loo'] ths = mdict['thresholds'] mix = [] for k in range(0, len(fpr)): # We select the threshold maximizing this convex combinat mix.append(tpr[k] + (1 - fpr[k])) cual = np.argmax(mix) th = ths[cual] ths_dict.update({cat: th}) print(cat, th, cual, tpr[cual], fpr[cual]) except: print("Error in cat ", cat) pass filename = os.path.join( self._project_path + self._subfolders['results'], 'ths_dict.pkl') with open(filename, 'wb') as f: pickle.dump(ths_dict, f) return def compute_cardinality(self, tags): ''' computes the cardinality of the tags Inputs: - tags: labels ''' C = np.mean([len(set(l)) for l in tags]) return C def compute_label_density(self, tags): ''' Computes the label density Inputs: - tags: labels ''' # total number of possible labels NL = len(set(itertools.chain.from_iterable(tags))) D = np.mean([len(set(l)) / NL for l in tags]) return D def JaccardIndex(self, orig, pred): ''' Computes the Jaccard index Inputs: - orig: original labels - pred: predicted labels ''' accs = [] for k in range(0, len(orig)): l_orig = orig[k] l_pred = pred[k] num = len(set(l_orig).intersection(l_pred)) den = len(set(l_orig + l_pred)) acc = num / den accs.append(acc) JI = np.mean(accs) return JI def compute_multilabel_threshold(self, p, alpha, option, th_values, verbose=True): ''' Computes the multilabel threshold Inputs: - p: RBO parameter, ``p`` is the probability of looking for overlap at rank k + 1 after having examined rank k - alpha: convex Jaccard-RBO combination parameter - option: 
sorting option for multilabel prediction - th_values: range of threshold values to be evaluated ''' if self.Xtfidf_tr is None: self._recover('Xtfidf_tr') if self.Preds_tr is None: self._recover('Preds_tr') # Warning tags_tr may have duplicates... self.tags_tr = [list(OrderedDict.fromkeys(l)) for l in self.tags_tr] if verbose: print('-' * 50) COST = [] DENS_pred = [] DENS_true = [] COST_dens = [] density_true = self.compute_cardinality(self.tags_tr) # to normalize Jaccard_RBO_cost, depends on p baseline = [0] for k in range(2, 50): l = list(range(1, k)) baseline.append(rbo(l, l, p)['min']) P = Predictor(self._project_path, self._subfolders, verbose=False) for threshold in th_values: multilabel_pred_tr, labels_pred_tr = P.obtain_multilabel_preds( self.Preds_tr, option, threshold, verbose=True) density_pred = self.compute_cardinality(labels_pred_tr) DENS_pred.append(density_pred) DENS_true.append(density_true) dens_error = (density_pred - density_true) ** 2 COST_dens.append(dens_error) # Computing Jackard_RBO cost jrbos = [] for k in range(0, len(self.tags_tr)): values = [] for key in labels_pred_tr[k]: values.append((key, multilabel_pred_tr[k][key]['p'])) values.sort(key=lambda x: x[1], reverse=True) l_pred = [] for v in values: l_pred.append(v[0]) jrbo = self.Jaccard_RBO_cost( self.tags_tr[k], l_pred, baseline, p, alpha) jrbos.append(jrbo) cost_jrbo = np.mean(jrbos) print(threshold, cost_jrbo, density_true, density_pred, ) COST.append(cost_jrbo) max_cost = max(COST) max_dens = max(COST_dens) COST_dens = [x / max_dens * max_cost for x in COST_dens] plt.figure(figsize=(15, 12)) plt.xlabel('Th') plt.ylabel('Jackard-RBO cost') plt.title('Jackard-RBO and Label Density costs for p =' + str(p) + ' and alpha= ' + str(alpha)) plt.plot(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0) plt.plot(th_values, COST_dens, 'r', label='Labels Density cost', linewidth=3.0) cual_min = np.argmin(COST) th_JRBO = th_values[cual_min] plt.plot(th_values[cual_min], 
COST[cual_min], 'bo', label='Minimum Jackard-RBO cost', linewidth=3.0) cual_min = np.argmin(COST_dens) th_DENS = th_values[cual_min] plt.plot(th_values[cual_min], COST_dens[cual_min], 'ro', label='Minimum Labels Density cost', linewidth=3.0) plt.legend(loc="upper right") plt.grid(True) filename = os.path.join( self._project_path + self._subfolders['results'], 'JRBO_COST_tr_p_' + str(p) + '_alpha_' + str(alpha) + '.png') plt.savefig(filename) plt.close() self.multilabel_th = np.mean([th_JRBO, th_DENS]) filename = os.path.join( self._project_path + self._subfolders['training_data'], 'multilabel_th.pkl') with open(filename, 'wb') as f: pickle.dump(self.multilabel_th, f) filename = os.path.join( self._project_path + self._subfolders['export'], 'multilabel_th.pkl') with open(filename, 'wb') as f: pickle.dump(self.multilabel_th, f) return self.multilabel_th def draw_costs_on_test(self, p, alpha, option, th_values, verbose=True): ''' draws the multilabel cost for the test data Inputs: - p: RBO parameter, ``p`` is the probability of looking for overlap at rank k + 1 after having examined rank k - alpha: convex Jaccard-RBO combination parameter - option: sorting option for multilabel prediction - th_values: range of threshold values to be evaluated ''' if self.Xtfidf_tst is None: self._recover('Xtfidf_tst') if self.Preds_tst is None: self._recover('Preds_tst') if self.multilabel_th is None: self._recover('multilabel_th') # Warning tags_tst may have duplicates... 
self.tags_tst = [list(OrderedDict.fromkeys(l)) for l in self.tags_tst] if verbose: print('-' * 50) COST = [] DENS_pred = [] DENS_true = [] COST_dens = [] density_true = self.compute_cardinality(self.tags_tst) # to normalize Jaccard_RBO_cost, depends on p baseline = [0] for k in range(2, 50): l = list(range(1, k)) baseline.append(rbo(l, l, p)['min']) P = Predictor(self._project_path, self._subfolders, verbose=False) for threshold in th_values: multilabel_pred_tst, labels_pred_tst = P.obtain_multilabel_preds( self.Preds_tst, option, threshold, verbose=True) density_pred = self.compute_cardinality(labels_pred_tst) DENS_pred.append(density_pred) DENS_true.append(density_true) dens_error = (density_pred - density_true) ** 2 COST_dens.append(dens_error) # Computing Jackard_RBO cost jrbos = [] for k in range(0, len(self.tags_tst)): values = [] for key in labels_pred_tst[k]: values.append((key, multilabel_pred_tst[k][key]['p'])) values.sort(key=lambda x: x[1], reverse=True) l_pred = [] for v in values: l_pred.append(v[0]) jrbo = self.Jaccard_RBO_cost( self.tags_tst[k], l_pred, baseline, p, alpha) jrbos.append(jrbo) cost_jrbo = np.mean(jrbos) print(threshold, cost_jrbo, density_true, density_pred, ) COST.append(cost_jrbo) max_cost = max(COST) max_dens = max(COST_dens) COST_dens = [x / max_dens * max_cost for x in COST_dens] plt.figure(figsize=(15, 12)) plt.xlabel('Th') plt.ylabel('Jackard-RBO cost') plt.title('Jackard-RBO and Label Density costs for p =' + str(p) + ' and alpha= ' + str(alpha)) plt.plot(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0) plt.plot(th_values, COST_dens, 'r', label='Labels Density cost', linewidth=3.0) cual_min = np.argmin(abs(th_values - self.multilabel_th)) plt.plot(th_values[cual_min], COST[cual_min], 'bo', label='Jackard-RBO cost at threshold', linewidth=3.0) plt.plot(th_values[cual_min], COST_dens[cual_min], 'ro', label='Labels Density cost at threshold', linewidth=3.0) plt.legend(loc="upper right") plt.grid(True) filename = 
os.path.join( self._project_path + self._subfolders['results'], 'JRBO_COST_tst_p_' + str(p) + '_alpha_' + str(alpha) + '.png') plt.savefig(filename) plt.close() return def load_multilabel_threshold(self, path2export=''): ''' Loads the multilabel thresholds Inputs: - path2export: export path ''' if path2export != '': print('Loading multilabel_th from export') filename = os.path.join(path2export, 'multilabel_th.pkl') with open(filename, 'rb') as f: self.multilabel_th = pickle.load(f) else: if self.multilabel_th is None: self._recover('multilabel_th') return self.multilabel_th def Jaccard_RBO_cost(self, l_orig, l_pred, baseline, p, alpha): ''' Computes a convex combination of the Jaccard and RBO costs Inputs: - l_orig: original labels - l_pred: predicted labels - baseline: normalizing values - p: RBO parameter, ``p`` is the probability of looking for overlap at rank k + 1 after having examined rank k - alpha: convex Jaccard-RBO combination parameter ''' try: if len(l_orig) > 0: num = len(set(l_orig).intersection(l_pred)) den = len(set(l_orig + l_pred)) ji = 1.0 - num / den else: if len(l_pred) == 0: # empty labels and empty predict means cost = 0.0 ji = 0 else: # empty labels and non-empty predict means cost = 1.0 ji = 1.0 r = 0 L = min((len(l_orig), len(l_pred))) if L > 0: r = 1 - rbo(l_orig, l_pred, p)['min'] / baseline[L] else: r = 1.0 if len(l_orig) == 0 and len(l_pred) == 0: r = 0.0 except: print('Error in Jaccard_RBO_cost ' + '----------------------------------------------------') import code code.interact(local=locals()) pass jrbo = (alpha * ji + (1 - alpha) * r) / 2.0 return jrbo def align_strings(self, string0, string1, string2, string3, L, M, N, P): ''' Aligns strings into columns ''' empty = ' ' # if len(string1) > M or len(string2) > N or len(string3) > P: # import code # code.interact(local=locals()) if L - len(string0) > 0: string0 = string0 + empty[0: L - len(string0)] if M - len(string1) > 0: string1 = string1 + empty[0: M - len(string1)] if N - 
len(string2) > 0: string2 = string2 + empty[0: N - len(string2)] if P - len(string3) > 0: string3 = string3 + empty[0: P - len(string3)] aligned_string = string0 + '| ' + string1 + '| ' + string2 + '| ' + string3 + '\r\n' return aligned_string def get_pred_weights(self, refs, label_preds, multilabel_preds): ''' Returns the normalized predictions **Unused????**** Inputs: - refs: *unused* - label_preds: - multilabel_preds: ''' weights = [] for k, labels in enumerate(label_preds): w0 = [multilabel_preds[k][key]['p'] for key in labels] scale = np.sum(w0) weights.append([w / scale for w in w0]) return weights def write_prediction_report(self, refs_tst, tags_tst, labels_pred_tst, multilabel_pred_tst, filename_out): ''' writes a simple prediction report in text format Inputs: - refs_tst: references - tags_tst: original labels - labels_pred_tst: predicted labels - multilabel_pred_tst: multilabel predicted labels - filename_out: file to save results ''' # writing report for the best threshold value string0 = 'PROJECT REFERENCE' string1 = 'TARGET LABELS' string2 = 'PREDICTED LABELS' string3 = ' ' data = [self.align_strings(string0, string1, string2, string3, 20, 30, 50, 10)] data.append('=' * 80 + '\r\n') for k in range(0, len(tags_tst)): string0 = refs_tst[k] string1 = '' for t in tags_tst[k]: string1 += t + ', ' if len(tags_tst[k]) == 0: string1 += '--------------' values = labels_pred_tst[k] if len(values) == 0: string2 = '--------------' else: string2 = '' pesos = [] for key in values: pesos.append(multilabel_pred_tst[k][key]['p']) for key in values: weight = multilabel_pred_tst[k][key]['p'] / np.sum(pesos) str_weight = str(weight)[0:5] string2 += key + '(' + str_weight + '), ' string3 = ' ' cadena = self.align_strings(string0, string1, string2, string3, 20, 30, 50, 10) data.append(cadena) filename = os.path.join(self._project_path, self._subfolders['results'], filename_out) with open(filename, 'w') as f: f.writelines(data) print('Saved ', filename) return
[ "matplotlib.pyplot.grid", "matplotlib.pyplot.ylabel", "sklearn.metrics.auc", "numpy.ascontiguousarray", "numpy.argsort", "numpy.nanmean", "sklearn.metrics.roc_curve", "pandas.read_excel", "operator.itemgetter", "matplotlib.pyplot.imshow", "numpy.mean", "collections.OrderedDict.fromkeys", "ma...
[((4723, 4784), 'os.path.join', 'os.path.join', (['self._project_path', 'self._subfolders[subfolder]'], {}), '(self._project_path, self._subfolders[subfolder])\n', (4735, 4784), False, 'import os\n'), ((14515, 14600), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['test_data'])", '"""test_data.pkl"""'], {}), "(self._project_path + self._subfolders['test_data'],\n 'test_data.pkl')\n", (14527, 14600), False, 'import os\n'), ((15174, 15264), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""train_data.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'train_data.pkl')\n", (15186, 15264), False, 'import os\n'), ((15959, 15972), 'numpy.mean', 'np.mean', (['aucs'], {}), '(aucs)\n', (15966, 15972), True, 'import numpy as np\n'), ((17818, 17887), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['orig_tags', 'best_pred_tags'], {'labels': 'labels_categories'}), '(orig_tags, best_pred_tags, labels=labels_categories)\n', (17834, 17887), False, 'from sklearn.metrics import confusion_matrix\n'), ((17952, 18024), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", 'filename'], {}), "(self._project_path + self._subfolders['results'], filename)\n", (17964, 18024), False, 'import os\n'), ((19029, 19053), 'numpy.zeros', 'np.zeros', (['(Ncats, Ncats)'], {}), '((Ncats, Ncats))\n', (19037, 19053), True, 'import numpy as np\n'), ((19669, 19741), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", 'filename'], {}), "(self._project_path + self._subfolders['results'], filename)\n", (19681, 19741), False, 'import os\n'), ((21594, 21618), 'numpy.zeros', 'np.zeros', (['(Ncats, Ncats)'], {}), '((Ncats, Ncats))\n', (21602, 21618), True, 'import numpy as np\n'), ((22789, 22860), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", 'filename'], {}), "(self._project_path, 
self._subfolders['results'], filename)\n", (22801, 22860), False, 'import os\n'), ((24975, 25016), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['C'], {'dtype': 'np.float64'}), '(C, dtype=np.float64)\n', (24995, 25016), True, 'import numpy as np\n'), ((28191, 28234), 'pandas.DataFrame', 'pd.DataFrame', (['sorted_values_a'], {'columns': 'cols'}), '(sorted_values_a, columns=cols)\n', (28203, 28234), True, 'import pandas as pd\n'), ((28260, 28303), 'pandas.DataFrame', 'pd.DataFrame', (['sorted_values_r'], {'columns': 'cols'}), '(sorted_values_r, columns=cols)\n', (28272, 28303), True, 'import pandas as pd\n'), ((28324, 28415), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", '"""ranked_abs_errors.xlsx"""'], {}), "(self._project_path, self._subfolders['results'],\n 'ranked_abs_errors.xlsx')\n", (28336, 28415), False, 'import os\n'), ((28503, 28594), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", '"""ranked_rel_errors.xlsx"""'], {}), "(self._project_path, self._subfolders['results'],\n 'ranked_rel_errors.xlsx')\n", (28515, 28594), False, 'import os\n'), ((28916, 28928), 'numpy.sum', 'np.sum', (['CONF'], {}), '(CONF)\n', (28922, 28928), True, 'import numpy as np\n'), ((30099, 30127), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (30109, 30127), True, 'import matplotlib.pyplot as plt\n'), ((30166, 30218), 'matplotlib.pyplot.imshow', 'plt.imshow', (['CONF'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(CONF, interpolation='nearest', cmap=cmap)\n", (30176, 30218), True, 'import matplotlib.pyplot as plt\n'), ((30228, 30242), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (30240, 30242), True, 'import matplotlib.pyplot as plt\n'), ((30308, 30362), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'labels_categories'], {'rotation': '(90)'}), '(tick_marks, labels_categories, rotation=90)\n', (30318, 30362), 
True, 'import matplotlib.pyplot as plt\n'), ((30372, 30413), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'labels_categories'], {}), '(tick_marks, labels_categories)\n', (30382, 30413), True, 'import matplotlib.pyplot as plt\n'), ((30423, 30441), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30439, 30441), True, 'import matplotlib.pyplot as plt\n'), ((30451, 30475), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (30461, 30475), True, 'import matplotlib.pyplot as plt\n'), ((30485, 30514), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (30495, 30514), True, 'import matplotlib.pyplot as plt\n'), ((30645, 30670), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pathfilename'], {}), '(pathfilename)\n', (30656, 30670), True, 'import matplotlib.pyplot as plt\n'), ((30680, 30689), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (30687, 30689), True, 'import matplotlib.pyplot as plt\n'), ((34624, 34640), 'numpy.nanmean', 'np.nanmean', (['aucs'], {}), '(aucs)\n', (34634, 34640), True, 'import numpy as np\n'), ((35498, 35514), 'numpy.nanmean', 'np.nanmean', (['aucs'], {}), '(aucs)\n', (35508, 35514), True, 'import numpy as np\n'), ((36978, 37056), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", '"""ths_dict.pkl"""'], {}), "(self._project_path + self._subfolders['results'], 'ths_dict.pkl')\n", (36990, 37056), False, 'import os\n'), ((38214, 38227), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (38221, 38227), True, 'import numpy as np\n'), ((39499, 39561), 'fordclassifier.evaluator.predictorClass.Predictor', 'Predictor', (['self._project_path', 'self._subfolders'], {'verbose': '(False)'}), '(self._project_path, self._subfolders, verbose=False)\n', (39508, 39561), False, 'from fordclassifier.evaluator.predictorClass import Predictor\n'), ((40882, 40910), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (40892, 40910), True, 'import matplotlib.pyplot as plt\n'), ((40920, 40936), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Th"""'], {}), "('Th')\n", (40930, 40936), True, 'import matplotlib.pyplot as plt\n'), ((40946, 40976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Jackard-RBO cost"""'], {}), "('Jackard-RBO cost')\n", (40956, 40976), True, 'import matplotlib.pyplot as plt\n'), ((41110, 41181), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values', 'COST', '"""b"""'], {'label': '"""Jackard-RBO cost"""', 'linewidth': '(3.0)'}), "(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0)\n", (41118, 41181), True, 'import matplotlib.pyplot as plt\n'), ((41191, 41270), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values', 'COST_dens', '"""r"""'], {'label': '"""Labels Density cost"""', 'linewidth': '(3.0)'}), "(th_values, COST_dens, 'r', label='Labels Density cost', linewidth=3.0)\n", (41199, 41270), True, 'import matplotlib.pyplot as plt\n'), ((41309, 41324), 'numpy.argmin', 'np.argmin', (['COST'], {}), '(COST)\n', (41318, 41324), True, 'import numpy as np\n'), ((41373, 41478), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values[cual_min]', 'COST[cual_min]', '"""bo"""'], {'label': '"""Minimum Jackard-RBO cost"""', 'linewidth': '(3.0)'}), "(th_values[cual_min], COST[cual_min], 'bo', label=\n 'Minimum Jackard-RBO cost', linewidth=3.0)\n", (41381, 41478), True, 'import matplotlib.pyplot as plt\n'), ((41512, 41532), 'numpy.argmin', 'np.argmin', (['COST_dens'], {}), '(COST_dens)\n', (41521, 41532), True, 'import numpy as np\n'), ((41581, 41694), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values[cual_min]', 'COST_dens[cual_min]', '"""ro"""'], {'label': '"""Minimum Labels Density cost"""', 'linewidth': '(3.0)'}), "(th_values[cual_min], COST_dens[cual_min], 'ro', label=\n 'Minimum Labels Density cost', linewidth=3.0)\n", (41589, 41694), True, 'import matplotlib.pyplot as plt\n'), ((41717, 41746), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (41727, 41746), True, 'import matplotlib.pyplot as plt\n'), ((41756, 41770), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (41764, 41770), True, 'import matplotlib.pyplot as plt\n'), ((41952, 41973), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (41963, 41973), True, 'import matplotlib.pyplot as plt\n'), ((41983, 41994), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (41992, 41994), True, 'import matplotlib.pyplot as plt\n'), ((42027, 42054), 'numpy.mean', 'np.mean', (['[th_JRBO, th_DENS]'], {}), '([th_JRBO, th_DENS])\n', (42034, 42054), True, 'import numpy as np\n'), ((42077, 42170), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""multilabel_th.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'multilabel_th.pkl')\n", (42089, 42170), False, 'import os\n'), ((42305, 42391), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['export'])", '"""multilabel_th.pkl"""'], {}), "(self._project_path + self._subfolders['export'],\n 'multilabel_th.pkl')\n", (42317, 42391), False, 'import os\n'), ((43851, 43913), 'fordclassifier.evaluator.predictorClass.Predictor', 'Predictor', (['self._project_path', 'self._subfolders'], {'verbose': '(False)'}), '(self._project_path, self._subfolders, verbose=False)\n', (43860, 43913), False, 'from fordclassifier.evaluator.predictorClass import Predictor\n'), ((45242, 45270), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (45252, 45270), True, 'import matplotlib.pyplot as plt\n'), ((45280, 45296), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Th"""'], {}), "('Th')\n", (45290, 45296), True, 'import matplotlib.pyplot as plt\n'), ((45306, 45336), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Jackard-RBO cost"""'], {}), 
"('Jackard-RBO cost')\n", (45316, 45336), True, 'import matplotlib.pyplot as plt\n'), ((45470, 45541), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values', 'COST', '"""b"""'], {'label': '"""Jackard-RBO cost"""', 'linewidth': '(3.0)'}), "(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0)\n", (45478, 45541), True, 'import matplotlib.pyplot as plt\n'), ((45551, 45630), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values', 'COST_dens', '"""r"""'], {'label': '"""Labels Density cost"""', 'linewidth': '(3.0)'}), "(th_values, COST_dens, 'r', label='Labels Density cost', linewidth=3.0)\n", (45559, 45630), True, 'import matplotlib.pyplot as plt\n'), ((45725, 45835), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values[cual_min]', 'COST[cual_min]', '"""bo"""'], {'label': '"""Jackard-RBO cost at threshold"""', 'linewidth': '(3.0)'}), "(th_values[cual_min], COST[cual_min], 'bo', label=\n 'Jackard-RBO cost at threshold', linewidth=3.0)\n", (45733, 45835), True, 'import matplotlib.pyplot as plt\n'), ((45858, 45976), 'matplotlib.pyplot.plot', 'plt.plot', (['th_values[cual_min]', 'COST_dens[cual_min]', '"""ro"""'], {'label': '"""Labels Density cost at threshold"""', 'linewidth': '(3.0)'}), "(th_values[cual_min], COST_dens[cual_min], 'ro', label=\n 'Labels Density cost at threshold', linewidth=3.0)\n", (45866, 45976), True, 'import matplotlib.pyplot as plt\n'), ((45999, 46028), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (46009, 46028), True, 'import matplotlib.pyplot as plt\n'), ((46038, 46052), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (46046, 46052), True, 'import matplotlib.pyplot as plt\n'), ((46235, 46256), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (46246, 46256), True, 'import matplotlib.pyplot as plt\n'), ((46266, 46277), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (46275, 46277), True, 'import matplotlib.pyplot as plt\n'), 
((51706, 51781), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", 'filename_out'], {}), "(self._project_path, self._subfolders['results'], filename_out)\n", (51718, 51781), False, 'import os\n'), ((6220, 6310), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""train_data.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'train_data.pkl')\n", (6232, 6310), False, 'import os\n'), ((6567, 6652), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['test_data'])", '"""test_data.pkl"""'], {}), "(self._project_path + self._subfolders['test_data'],\n 'test_data.pkl')\n", (6579, 6652), False, 'import os\n'), ((6905, 6990), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""tags.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'], 'tags.pkl'\n )\n", (6917, 6990), False, 'import os\n'), ((7770, 7845), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", '"""Preds.pkl"""'], {}), "(self._project_path + self._subfolders['results'], 'Preds.pkl')\n", (7782, 7845), False, 'import os\n'), ((8013, 8090), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", '"""Preds_tr.pkl"""'], {}), "(self._project_path, self._subfolders['results'], 'Preds_tr.pkl')\n", (8025, 8090), False, 'import os\n'), ((8279, 8358), 'os.path.join', 'os.path.join', (['self._project_path', "self._subfolders['results']", '"""Preds_test.pkl"""'], {}), "(self._project_path, self._subfolders['results'], 'Preds_test.pkl')\n", (8291, 8358), False, 'import os\n'), ((8543, 8617), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", '"""CONF.pkl"""'], {}), "(self._project_path + self._subfolders['results'], 'CONF.pkl')\n", (8555, 8617), False, 'import os\n'), ((8786, 8872), 'os.path.join', 'os.path.join', 
(["(self._project_path + self._subfolders['test_data'])", '"""tags_index.pkl"""'], {}), "(self._project_path + self._subfolders['test_data'],\n 'tags_index.pkl')\n", (8798, 8872), False, 'import os\n'), ((11549, 11577), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (11559, 11577), True, 'import matplotlib.pyplot as plt\n'), ((13518, 13535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (13528, 13535), True, 'import matplotlib.pyplot as plt\n'), ((13549, 13566), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (13559, 13566), True, 'import matplotlib.pyplot as plt\n'), ((13580, 13623), 'matplotlib.pyplot.title', 'plt.title', (["('ROC curves for category ' + cat)"], {}), "('ROC curves for category ' + cat)\n", (13589, 13623), True, 'import matplotlib.pyplot as plt\n'), ((13637, 13651), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (13645, 13651), True, 'import matplotlib.pyplot as plt\n'), ((13665, 13694), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (13675, 13694), True, 'import matplotlib.pyplot as plt\n'), ((13834, 13855), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (13845, 13855), True, 'import matplotlib.pyplot as plt\n'), ((13869, 13880), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13878, 13880), True, 'import matplotlib.pyplot as plt\n'), ((14723, 14737), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14734, 14737), False, 'import pickle\n'), ((15384, 15398), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (15395, 15398), False, 'import pickle\n'), ((16851, 16865), 'numpy.argsort', 'np.argsort', (['(-p)'], {}), '(-p)\n', (16861, 16865), True, 'import numpy as np\n'), ((18097, 18122), 'pickle.dump', 'pickle.dump', (['self.CONF', 'f'], {}), '(self.CONF, f)\n', (18108, 18122), False, 'import pickle\n'), ((19814, 
19839), 'pickle.dump', 'pickle.dump', (['self.CONF', 'f'], {}), '(self.CONF, f)\n', (19825, 19839), False, 'import pickle\n'), ((22933, 22958), 'pickle.dump', 'pickle.dump', (['self.CONF', 'f'], {}), '(self.CONF, f)\n', (22944, 22958), False, 'import pickle\n'), ((23618, 23638), 'pandas.read_excel', 'pd.read_excel', (['fpath'], {}), '(fpath)\n', (23631, 23638), True, 'import pandas as pd\n'), ((28955, 28972), 'numpy.diagonal', 'np.diagonal', (['CONF'], {}), '(CONF)\n', (28966, 28972), True, 'import numpy as np\n'), ((31565, 31592), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_tst', 'preds_tst'], {}), '(y_tst, preds_tst)\n', (31574, 31592), False, 'from sklearn.metrics import roc_curve, auc\n'), ((31620, 31641), 'sklearn.metrics.auc', 'auc', (['fpr_tst', 'tpr_tst'], {}), '(fpr_tst, tpr_tst)\n', (31623, 31641), False, 'from sklearn.metrics import roc_curve, auc\n'), ((32143, 32171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (32153, 32171), True, 'import matplotlib.pyplot as plt\n'), ((32185, 32202), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (32195, 32202), True, 'import matplotlib.pyplot as plt\n'), ((32216, 32233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (32226, 32233), True, 'import matplotlib.pyplot as plt\n'), ((32247, 32294), 'matplotlib.pyplot.title', 'plt.title', (["('ROC test curve for category ' + cat)"], {}), "('ROC test curve for category ' + cat)\n", (32256, 32294), True, 'import matplotlib.pyplot as plt\n'), ((32375, 32439), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_tst', 'tpr_tst', 'colors[3]'], {'label': 'text', 'linewidth': '(6.0)'}), '(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0)\n', (32383, 32439), True, 'import matplotlib.pyplot as plt\n'), ((32453, 32467), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (32461, 32467), True, 'import matplotlib.pyplot as plt\n'), ((32481, 32510), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (32491, 32510), True, 'import matplotlib.pyplot as plt\n'), ((32632, 32653), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (32643, 32653), True, 'import matplotlib.pyplot as plt\n'), ((32667, 32678), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32676, 32678), True, 'import matplotlib.pyplot as plt\n'), ((33475, 33502), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_tst', 'preds_tst'], {}), '(y_tst, preds_tst)\n', (33484, 33502), False, 'from sklearn.metrics import roc_curve, auc\n'), ((33530, 33551), 'sklearn.metrics.auc', 'auc', (['fpr_tst', 'tpr_tst'], {}), '(fpr_tst, tpr_tst)\n', (33533, 33551), False, 'from sklearn.metrics import roc_curve, auc\n'), ((34039, 34067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (34049, 34067), True, 'import matplotlib.pyplot as plt\n'), ((34081, 34098), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (34091, 34098), True, 'import matplotlib.pyplot as plt\n'), ((34112, 34129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (34122, 34129), True, 'import matplotlib.pyplot as plt\n'), ((34143, 34190), 'matplotlib.pyplot.title', 'plt.title', (["('ROC test curve for category ' + cat)"], {}), "('ROC test curve for category ' + cat)\n", (34152, 34190), True, 'import matplotlib.pyplot as plt\n'), ((34265, 34329), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr_tst', 'tpr_tst', 'colors[3]'], {'label': 'text', 'linewidth': '(6.0)'}), '(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0)\n', (34273, 34329), True, 'import matplotlib.pyplot as plt\n'), ((34343, 34357), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (34351, 34357), True, 'import matplotlib.pyplot as plt\n'), ((34371, 34400), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower 
right"""'}), "(loc='lower right')\n", (34381, 34400), True, 'import matplotlib.pyplot as plt\n'), ((34554, 34575), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (34565, 34575), True, 'import matplotlib.pyplot as plt\n'), ((34589, 34600), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (34598, 34600), True, 'import matplotlib.pyplot as plt\n'), ((37125, 37149), 'pickle.dump', 'pickle.dump', (['ths_dict', 'f'], {}), '(ths_dict, f)\n', (37136, 37149), False, 'import pickle\n'), ((40614, 40628), 'numpy.mean', 'np.mean', (['jrbos'], {}), '(jrbos)\n', (40621, 40628), True, 'import numpy as np\n'), ((42248, 42282), 'pickle.dump', 'pickle.dump', (['self.multilabel_th', 'f'], {}), '(self.multilabel_th, f)\n', (42259, 42282), False, 'import pickle\n'), ((42469, 42503), 'pickle.dump', 'pickle.dump', (['self.multilabel_th', 'f'], {}), '(self.multilabel_th, f)\n', (42480, 42503), False, 'import pickle\n'), ((44974, 44988), 'numpy.mean', 'np.mean', (['jrbos'], {}), '(jrbos)\n', (44981, 44988), True, 'import numpy as np\n'), ((46591, 46637), 'os.path.join', 'os.path.join', (['path2export', '"""multilabel_th.pkl"""'], {}), "(path2export, 'multilabel_th.pkl')\n", (46603, 46637), False, 'import os\n'), ((49774, 49784), 'numpy.sum', 'np.sum', (['w0'], {}), '(w0)\n', (49780, 49784), True, 'import numpy as np\n'), ((5564, 5576), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5573, 5576), False, 'import json\n'), ((6482, 6496), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6493, 6496), False, 'import pickle\n'), ((6828, 6842), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6839, 6842), False, 'import pickle\n'), ((7102, 7116), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7113, 7116), False, 'import pickle\n'), ((7198, 7276), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['results'])", '"""ths_dict.pkl"""'], {}), "(self._project_path + self._subfolders['results'], 'ths_dict.pkl')\n", (7210, 
7276), False, 'import os\n'), ((7939, 7953), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7950, 7953), False, 'import pickle\n'), ((8204, 8218), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8215, 8218), False, 'import pickle\n'), ((8473, 8487), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8484, 8487), False, 'import pickle\n'), ((8710, 8724), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8721, 8724), False, 'import pickle\n'), ((9000, 9014), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9011, 9014), False, 'import pickle\n'), ((9098, 9188), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""categories.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'categories.pkl')\n", (9110, 9188), False, 'import os\n'), ((9716, 9811), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""models2evaluate.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'models2evaluate.pkl')\n", (9728, 9811), False, 'import os\n'), ((10352, 10445), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['training_data'])", '"""multilabel_th.pkl"""'], {}), "(self._project_path + self._subfolders['training_data'],\n 'multilabel_th.pkl')\n", (10364, 10445), False, 'import os\n'), ((21861, 21876), 'numpy.zeros', 'np.zeros', (['Ncats'], {}), '(Ncats)\n', (21869, 21876), True, 'import numpy as np\n'), ((22171, 22186), 'numpy.zeros', 'np.zeros', (['Ncats'], {}), '(Ncats)\n', (22179, 22186), True, 'import numpy as np\n'), ((22436, 22452), 'numpy.minimum', 'np.minimum', (['p', 'q'], {}), '(p, q)\n', (22446, 22452), True, 'import numpy as np\n'), ((22474, 22489), 'numpy.diag', 'np.diag', (['min_pq'], {}), '(min_pq)\n', (22481, 22489), True, 'import numpy as np\n'), ((24214, 24231), 'pandas.read_excel', 'pd.read_excel', (['fp'], {}), '(fp)\n', (24227, 24231), True, 'import pandas as pd\n'), ((24344, 24361), 
'numpy.minimum', 'np.minimum', (['C', 'Cf'], {}), '(C, Cf)\n', (24354, 24361), True, 'import numpy as np\n'), ((26075, 26090), 'numpy.zeros', 'np.zeros', (['Ncats'], {}), '(Ncats)\n', (26083, 26090), True, 'import numpy as np\n'), ((26385, 26400), 'numpy.zeros', 'np.zeros', (['Ncats'], {}), '(Ncats)\n', (26393, 26400), True, 'import numpy as np\n'), ((26665, 26683), 'pyemd.emd', 'pyemd.emd', (['p', 'q', 'C'], {}), '(p, q, C)\n', (26674, 26683), False, 'import pyemd\n'), ((32108, 32129), 'pickle.dump', 'pickle.dump', (['mdict', 'f'], {}), '(mdict, f)\n', (32119, 32129), False, 'import pickle\n'), ((34002, 34023), 'pickle.dump', 'pickle.dump', (['mdict', 'f'], {}), '(mdict, f)\n', (34013, 34023), False, 'import pickle\n'), ((35390, 35404), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (35401, 35404), False, 'import pickle\n'), ((36717, 36731), 'numpy.argmax', 'np.argmax', (['mix'], {}), '(mix)\n', (36726, 36731), True, 'import numpy as np\n'), ((37613, 37648), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['tags'], {}), '(tags)\n', (37642, 37648), False, 'import itertools\n'), ((39032, 39055), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['l'], {}), '(l)\n', (39052, 39055), False, 'from collections import OrderedDict\n'), ((43382, 43405), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['l'], {}), '(l)\n', (43402, 43405), False, 'from collections import OrderedDict\n'), ((46721, 46735), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (46732, 46735), False, 'import pickle\n'), ((5858, 5870), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5867, 5870), False, 'import json\n'), ((7406, 7420), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7417, 7420), False, 'import pickle\n'), ((7470, 7547), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['export'])", '"""ths_dict.pkl"""'], {}), "(self._project_path + self._subfolders['export'], 'ths_dict.pkl')\n", (7482, 7547), False, 'import 
os\n'), ((9316, 9330), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9327, 9330), False, 'import pickle\n'), ((9380, 9459), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['export'])", '"""categories.pkl"""'], {}), "(self._project_path + self._subfolders['export'], 'categories.pkl')\n", (9392, 9459), False, 'import os\n'), ((9944, 9958), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9955, 9958), False, 'import pickle\n'), ((10008, 10096), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['export'])", '"""models2evaluate.pkl"""'], {}), "(self._project_path + self._subfolders['export'],\n 'models2evaluate.pkl')\n", (10020, 10096), False, 'import os\n'), ((10576, 10590), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10587, 10590), False, 'import pickle\n'), ((10640, 10726), 'os.path.join', 'os.path.join', (["(self._project_path + self._subfolders['export'])", '"""multilabel_th.pkl"""'], {}), "(self._project_path + self._subfolders['export'],\n 'multilabel_th.pkl')\n", (10652, 10726), False, 'import os\n'), ((12250, 12272), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (12269, 12272), False, 'import operator\n'), ((22043, 22052), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (22049, 22052), True, 'import numpy as np\n'), ((22353, 22362), 'numpy.sum', 'np.sum', (['q'], {}), '(q)\n', (22359, 22362), True, 'import numpy as np\n'), ((22632, 22646), 'numpy.sum', 'np.sum', (['min_pq'], {}), '(min_pq)\n', (22638, 22646), True, 'import numpy as np\n'), ((26257, 26266), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (26263, 26266), True, 'import numpy as np\n'), ((26567, 26576), 'numpy.sum', 'np.sum', (['q'], {}), '(q)\n', (26573, 26576), True, 'import numpy as np\n'), ((36345, 36359), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (36356, 36359), False, 'import pickle\n'), ((51366, 51379), 'numpy.sum', 'np.sum', (['pesos'], {}), '(pesos)\n', (51372, 51379), True, 'import numpy as 
np\n'), ((6116, 6128), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6125, 6128), False, 'import json\n'), ((7677, 7691), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7688, 7691), False, 'import pickle\n'), ((9591, 9605), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9602, 9605), False, 'import pickle\n'), ((10229, 10243), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10240, 10243), False, 'import pickle\n'), ((10857, 10871), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10868, 10871), False, 'import pickle\n'), ((12025, 12039), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12036, 12039), False, 'import pickle\n'), ((12860, 12874), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12871, 12874), False, 'import pickle\n'), ((13202, 13258), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', 'colors[k]'], {'label': 'text', 'linewidth': '(6.0)'}), '(fpr, tpr, colors[k], label=text, linewidth=6.0)\n', (13210, 13258), True, 'import matplotlib.pyplot as plt\n'), ((13357, 13413), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', 'colors[k]'], {'label': 'text', 'linewidth': '(2.0)'}), '(fpr, tpr, colors[k], label=text, linewidth=2.0)\n', (13365, 13413), True, 'import matplotlib.pyplot as plt\n')]
# %% Import import numpy as np import pandas as pd import requests import os from bs4 import BeautifulSoup """ Takes a dictionary of relevant brands and their URLs and returns a raw csv file """ # %% Functions def outlets_crawl(brand, url): """ Returns a raw, unformatted df of outlets with it's brand from the url inserted """ page = requests.get(url) soup = BeautifulSoup(page.content, "lxml") # ensure crawler had actual results to work with. def _check_results(class_term, soup=soup): results = soup.find_all(attrs={"class": class_term}) if len(results) == 0: raise ValueError("No outlets found, check class_term or url.") return results try: results = _check_results("outlet_item") except ValueError: results = _check_results("lease_item") # continue _ls = [] for result in results: _ls.append([i for i in result.stripped_strings]) df = pd.DataFrame(_ls) df.insert(0, "brand", brand, allow_duplicates=True) return df def loop_outlets_crawl(dict, outputfn): """ Loops outlets_crawl func through a dictionary of urls and their brands. Returns a concatenated df and saves it as a temporary csv. 
""" _ls = [] for brand, url in dict.items(): _ls.append(outlets_crawl(brand, url)) print(f"{brand} done.") df = pd.concat(_ls) df.to_csv(outputfn, index=False) def main(): url_dict = { "Koufu": "https://www.koufu.com.sg/our-brands/food-halls/koufu/", "Cookhouse": "https://www.koufu.com.sg/our-brands/food-halls/cookhouse/", "Rasapura": "https://www.koufu.com.sg/our-brands/food-halls/rasapura-masters/", "ForkSpoon": "https://www.koufu.com.sg/our-brands/food-halls/fork-spoon/", "HappyHawkers": "https://www.koufu.com.sg/our-brands/food-halls/happy-hawkers/", "Gourmet": "https://www.koufu.com.sg/our-brands/food-halls/gourmet-paradise/", "R&B": "https://www.koufu.com.sg/our-brands/concept-stores/rb-tea/", "1983NY": "https://www.koufu.com.sg/our-brands/concept-stores/1983-a-taste-of-nanyang/", "Supertea": "https://www.koufu.com.sg/our-brands/concept-stores/supertea/", "1983CT": "https://www.koufu.com.sg/our-brands/cafe-restaurants/1983-coffee-toast/", "Elemen": "https://www.koufu.com.sg/our-brands/cafe-restaurants/elemen-%e5%85%83%e7%b4%a0/", "Grove": "https://www.koufu.com.sg/our-brands/cafe-restaurants/grovecafe/", } outputfn = "./r_outletsdata.csv" if os.path.isfile(outputfn): os.remove(outputfn) loop_outlets_crawl(url_dict, outputfn) # %% Main if __name__ == "__main__": main() os.system("pause")
[ "requests.get", "bs4.BeautifulSoup", "os.path.isfile", "pandas.DataFrame", "os.system", "pandas.concat", "os.remove" ]
[((355, 372), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (367, 372), False, 'import requests\n'), ((384, 419), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""lxml"""'], {}), "(page.content, 'lxml')\n", (397, 419), False, 'from bs4 import BeautifulSoup\n'), ((962, 979), 'pandas.DataFrame', 'pd.DataFrame', (['_ls'], {}), '(_ls)\n', (974, 979), True, 'import pandas as pd\n'), ((1379, 1393), 'pandas.concat', 'pd.concat', (['_ls'], {}), '(_ls)\n', (1388, 1393), True, 'import pandas as pd\n'), ((2552, 2576), 'os.path.isfile', 'os.path.isfile', (['outputfn'], {}), '(outputfn)\n', (2566, 2576), False, 'import os\n'), ((2703, 2721), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (2712, 2721), False, 'import os\n'), ((2586, 2605), 'os.remove', 'os.remove', (['outputfn'], {}), '(outputfn)\n', (2595, 2605), False, 'import os\n')]
import docker, os, platform, requests, shutil, subprocess, sys from .infrastructure import * # Runs a command without displaying its output and returns the exit code def _runSilent(command): result = SubprocessUtils.capture(command, check=False) return result.returncode # Performs setup for Linux hosts def _setupLinux(): # Pull the latest version of the Alpine container image alpineImage = 'alpine:latest' SubprocessUtils.capture(['docker', 'pull', alpineImage]) # Start the credential endpoint with blank credentials endpoint = CredentialEndpoint('', '') endpoint.start() try: # Run an Alpine container to see if we can access the host port for the credential endpoint SubprocessUtils.capture([ 'docker', 'run', '--rm', alpineImage, 'wget', '--timeout=1', '--post-data=dummy', 'http://{}:9876'.format(NetworkUtils.hostIP()) ], check=True) # If we reach this point then the host port is accessible print('No firewall configuration required.') except: # The host port is blocked, so we need to perform firewall configuration print('Creating firewall rule for credential endpoint...') # Create the firewall rule subprocess.run(['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', '9876', '-j', 'ACCEPT'], check=True) # Ensure the firewall rule persists after reboot # (Requires the `iptables-persistent` service to be installed and running) os.makedirs('/etc/iptables', exist_ok=True) subprocess.run('iptables-save > /etc/iptables/rules.v4', shell=True, check=True) # Inform users of the `iptables-persistent` requirement print('Firewall rule created. 
Note that the `iptables-persistent` service will need to') print('be installed for the rule to persist after the host system reboots.') finally: # Stop the credential endpoint endpoint.stop() # Performs setup for Windows Server hosts def _setupWindowsServer(): # Check if we need to configure the maximum image size requiredLimit = WindowsUtils.requiredSizeLimit() if DockerUtils.maxsize() < requiredLimit: # Attempt to stop the Docker daemon print('Stopping the Docker daemon...') subprocess.run(['sc.exe', 'stop', 'docker'], check=True) # Attempt to set the maximum image size print('Setting maximum image size to {}GB...'.format(requiredLimit)) config = DockerUtils.getConfig() sizeOpt = 'size={}GB'.format(requiredLimit) if 'storage-opts' in config: config['storage-opts'] = list([o for o in config['storage-opts'] if o.lower().startswith('size=') == False]) config['storage-opts'].append(sizeOpt) else: config['storage-opts'] = [sizeOpt] DockerUtils.setConfig(config) # Attempt to start the Docker daemon print('Starting the Docker daemon...') subprocess.run(['sc.exe', 'start', 'docker'], check=True) else: print('Maximum image size is already correctly configured.') # Determine if we need to configure Windows firewall ruleName = 'Open TCP port 9876 for ue4-docker credential endpoint' ruleExists = _runSilent(['netsh', 'advfirewall', 'firewall', 'show', 'rule', 'name={}'.format(ruleName)]) == 0 if ruleExists == False: # Add a rule to ensure Windows firewall allows access to the credential helper from our containers print('Creating firewall rule for credential endpoint...') subprocess.run([ 'netsh', 'advfirewall', 'firewall', 'add', 'rule', 'name={}'.format(ruleName), 'dir=in', 'action=allow', 'protocol=TCP', 'localport=9876' ], check=True) else: print('Firewall rule for credential endpoint is already configured.') # Determine if the host system is Windows Server Core and lacks the required DLL files for building our containers hostRelease = WindowsUtils.getWindowsRelease() 
requiredDLLs = WindowsUtils.requiredHostDlls(hostRelease) dllDir = os.path.join(os.environ['SystemRoot'], 'System32') existing = [dll for dll in requiredDLLs if os.path.exists(os.path.join(dllDir, dll))] if len(existing) != len(requiredDLLs): # Determine if we can extract DLL files from the full Windows base image (version 1809 and newer only) tags = requests.get('https://mcr.microsoft.com/v2/windows/tags/list').json()['tags'] if hostRelease in tags: # Pull the full Windows base image with the appropriate tag if it does not already exist image = 'mcr.microsoft.com/windows:{}'.format(hostRelease) print('Pulling full Windows base image "{}"...'.format(image)) subprocess.run(['docker', 'pull', image], check=True) # Start a container from which we will copy the DLL files, bind-mounting our DLL destination directory print('Starting a container to copy DLL files from...') mountPath = 'C:\\dlldir' container = DockerUtils.start( image, ['timeout', '/t', '99999', '/nobreak'], mounts = [docker.types.Mount(mountPath, dllDir, 'bind')], stdin_open = True, tty = True, remove = True ) # Copy the DLL files to the host print('Copying DLL files to the host system...') DockerUtils.execMultiple(container, [['xcopy', '/y', os.path.join(dllDir, dll), mountPath + '\\'] for dll in requiredDLLs]) # Stop the container print('Stopping the container...') container.stop() else: print('The following DLL files will need to be manually copied into {}:'.format(dllDir)) print('\n'.join(['- {}'.format(dll) for dll in requiredDLLs if dll not in existing])) else: print('All required DLL files are already present on the host system.') def setup(): # We don't currently support auto-config for VM-based containers if platform.system() == 'Darwin' or (platform.system() == 'Windows' and WindowsUtils.isWindowsServer() == False): print('Manual configuration is required under Windows 10 and macOS. 
Automatic configuration is not available.') return # Perform setup based on the host system type if platform.system() == 'Linux': _setupLinux() else: _setupWindowsServer()
[ "os.makedirs", "subprocess.run", "os.path.join", "requests.get", "platform.system", "docker.types.Mount" ]
[((3796, 3846), 'os.path.join', 'os.path.join', (["os.environ['SystemRoot']", '"""System32"""'], {}), "(os.environ['SystemRoot'], 'System32')\n", (3808, 3846), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((2137, 2193), 'subprocess.run', 'subprocess.run', (["['sc.exe', 'stop', 'docker']"], {'check': '(True)'}), "(['sc.exe', 'stop', 'docker'], check=True)\n", (2151, 2193), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((2739, 2796), 'subprocess.run', 'subprocess.run', (["['sc.exe', 'start', 'docker']"], {'check': '(True)'}), "(['sc.exe', 'start', 'docker'], check=True)\n", (2753, 2796), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((5841, 5858), 'platform.system', 'platform.system', ([], {}), '()\n', (5856, 5858), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((1170, 1277), 'subprocess.run', 'subprocess.run', (["['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', '9876', '-j', 'ACCEPT']"], {'check': '(True)'}), "(['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', '9876',\n '-j', 'ACCEPT'], check=True)\n", (1184, 1277), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((1407, 1450), 'os.makedirs', 'os.makedirs', (['"""/etc/iptables"""'], {'exist_ok': '(True)'}), "('/etc/iptables', exist_ok=True)\n", (1418, 1450), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((1453, 1538), 'subprocess.run', 'subprocess.run', (['"""iptables-save > /etc/iptables/rules.v4"""'], {'shell': '(True)', 'check': '(True)'}), "('iptables-save > /etc/iptables/rules.v4', shell=True, check=True\n )\n", (1467, 1538), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((4422, 4475), 'subprocess.run', 'subprocess.run', (["['docker', 'pull', image]"], {'check': '(True)'}), "(['docker', 'pull', image], check=True)\n", (4436, 4475), False, 'import docker, os, platform, requests, shutil, subprocess, 
sys\n'), ((5554, 5571), 'platform.system', 'platform.system', ([], {}), '()\n', (5569, 5571), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((3906, 3931), 'os.path.join', 'os.path.join', (['dllDir', 'dll'], {}), '(dllDir, dll)\n', (3918, 3931), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((5588, 5605), 'platform.system', 'platform.system', ([], {}), '()\n', (5603, 5605), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((4091, 4153), 'requests.get', 'requests.get', (['"""https://mcr.microsoft.com/v2/windows/tags/list"""'], {}), "('https://mcr.microsoft.com/v2/windows/tags/list')\n", (4103, 4153), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((4776, 4821), 'docker.types.Mount', 'docker.types.Mount', (['mountPath', 'dllDir', '"""bind"""'], {}), "(mountPath, dllDir, 'bind')\n", (4794, 4821), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n'), ((5034, 5059), 'os.path.join', 'os.path.join', (['dllDir', 'dll'], {}), '(dllDir, dll)\n', (5046, 5059), False, 'import docker, os, platform, requests, shutil, subprocess, sys\n')]
import sys import time from tia.trad.tools.io.follow import followMonitor import tia.configuration as conf from tia.trad.tools.errf import eReport import ujson as json import matplotlib.pyplot as plt import math import collections import logging from tia.trad.tools.ipc.processLogger import PROCESS_NAME LOGGER_NAME = PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME) reportFile = None def pointDistance(initF, initI, point): try: t = initI[0]-initF[0], initI[1]-initF[1] # Vector ab dd = math.sqrt(t[0]**2+t[1]**2) # Length of ab t = t[0]/dd, t[1]/dd # unit vector of ab n = -t[1], t[0] # normal unit vector to ab ac = point[0]-initF[0], point[1]-initF[1] # vector ac return math.fabs(ac[0]*n[0]+ac[1]*n[1]) # Projection of ac to n (the minimum distance) except Exception: raise def getAvg(_list): try: return float(max(_list) + min(_list)) / float(2) except Exception: raise def shutdown(): try: logger.debug("shutting down") global reportFile reportFile.close() except Exception: raise def run(**kwargs): try: global logger global reportFile logger = kwargs["processLogger"] logger.debug("monitor_mainTr:hi") _initFunds = kwargs["initFunds"] _initItems = kwargs["initItems"] plt.ion() # turn interactive on fig = plt.figure() fig.show() # raw ax = fig.add_subplot(221) #hline = ax.axhline(y=_initFunds) #vline = ax.axvline(x=_initItems) #ax.set_xscale("log") #ax.set_yscale("log") data, = ax.plot([], [], 'b+') data11, = ax.plot([], [], 'ro') # value ax2 = fig.add_subplot(222) data2, = ax2.plot([], [], 'ro-') # inside TM ax3 = fig.add_subplot(223) data3, = ax3.plot([], [], 'ro') data4, = ax3.plot([],[], 'bo') minBids, = ax3.plot([], [], "r>") maxAsks, = ax3.plot([], [], "b>") # top b/a ax5 = fig.add_subplot(224) dataI, = ax5.plot([], [], "o-") dataF, = ax5.plot([], [], "ro-") windowLength = 50 fundsHistory = collections.deque(maxlen=windowLength); itemsHistory = collections.deque(maxlen=windowLength) valueHistory = collections.deque(maxlen=windowLength) 
tmFundsHistory = collections.deque(maxlen=windowLength); tmItemsHistory = collections.deque(maxlen=windowLength) tmIAHSum = collections.deque(maxlen=windowLength); tmFAHSum = collections.deque(maxlen=windowLength) topAsksHistory = collections.deque(maxlen=10) topBidsHistory = collections.deque(maxlen=10) # touch report.json #reportFile = open(conf.FN_REPORT, "w"); reportFile.close() reportFile = open(conf.FN_REPORT, "r") newline = followMonitor(reportFile, fig) while 1: try: #for line in reportFile: line = newline.next() jsonObj = json.loads(line) universeSize = float(jsonObj["universeSize"]) topAsks = jsonObj["topAsks"]; topBids = jsonObj["topBids"] initInvF = float(_initFunds) * universeSize initInvI = float(_initItems) * universeSize cumulFunds = float(jsonObj["cumulFunds"]) cumulItems = float(jsonObj["cumulItems"]) #fundsHistory.append(funds); itemsHistory.append(items) dist = pointDistance([0, initInvF], [initInvI, 0], [cumulFunds, cumulItems]) fundsHistory.append(dist) #data.set_ydata(fundsHistory); data.set_xdata(itemsHistory) data.set_ydata(fundsHistory); data.set_xdata(xrange(len(fundsHistory))) #data11.set_ydata(funds); data11.set_xdata(items) #data11.set_ydata(dist); data11.set_xdata(xrange(len(fundsHistory))) ax.relim() ax.autoscale_view(True,True,True) tmFunds = jsonObj["tmFunds"]; tmItems = jsonObj["tmItems"] tmFA = 0; tmIA = 0 tmFPH = collections.deque(); tmFAH = collections.deque() tmIPH = collections.deque(); tmIAH = collections.deque() for price in tmFunds: amount = tmFunds[price] tmFPH.append(price) tmFAH.append(amount) tmFA += amount tmFAHSum.append(tmFA) for price in tmItems: amount = tmItems[price] tmIPH.append(price) tmIAH.append(amount) tmIA += amount tmIAHSum.append(tmIA) dataI.set_ydata(tmIAHSum); dataI.set_xdata(xrange(len(tmIAHSum))) dataF.set_ydata(tmFAHSum); dataF.set_xdata(xrange(len(tmFAHSum))) ax5.relim() ax5.autoscale_view(True,True,True) value = float(jsonObj["value"]) / initInvF if initInvF else float(jsonObj["value"]) 
valueHistory.append(value) data2.set_xdata(range(len(valueHistory))) data2.set_ydata(valueHistory) ax2.relim() ax2.autoscale_view(True,True,True) """ TM stuff """ # make universe states pretty tmpList = list(tmFAH) + list(tmIAH) xDrawStart = min(tmpList) drawedInterval = max(tmpList) - xDrawStart spacing = float(drawedInterval) / float (len(topBids)) offset = float(spacing) / float(2) xcords = collections.deque() for index, bid in enumerate(topBids): xcords.append(offset + xDrawStart + index * spacing) minBids.set_ydata(topBids); minBids.set_xdata(xcords) maxAsks.set_ydata(topAsks); maxAsks.set_xdata(xcords) data3.set_xdata(tmFAH) data3.set_ydata(tmFPH) data4.set_xdata(tmIAH) data4.set_ydata(tmIPH) ax3.relim() ax3.autoscale_view(True,True,True) fig.canvas.draw() #plt.savefig(conf.FN_PLOT_IMAGE) except ValueError: continue except Exception as ex: eReport(__file__) reportFile.close() sys.exit()
[ "logging.getLogger", "collections.deque", "math.sqrt", "matplotlib.pyplot.figure", "math.fabs", "ujson.loads", "sys.exit", "matplotlib.pyplot.ion", "tia.trad.tools.errf.eReport", "tia.trad.tools.io.follow.followMonitor" ]
[((368, 398), 'logging.getLogger', 'logging.getLogger', (['LOGGER_NAME'], {}), '(LOGGER_NAME)\n', (385, 398), False, 'import logging\n'), ((552, 584), 'math.sqrt', 'math.sqrt', (['(t[0] ** 2 + t[1] ** 2)'], {}), '(t[0] ** 2 + t[1] ** 2)\n', (561, 584), False, 'import math\n'), ((821, 859), 'math.fabs', 'math.fabs', (['(ac[0] * n[0] + ac[1] * n[1])'], {}), '(ac[0] * n[0] + ac[1] * n[1])\n', (830, 859), False, 'import math\n'), ((1440, 1449), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1447, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1488, 1500), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1498, 1500), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2324), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2303, 2324), False, 'import collections\n'), ((2341, 2379), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2358, 2379), False, 'import collections\n'), ((2403, 2441), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2420, 2441), False, 'import collections\n'), ((2467, 2505), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2484, 2505), False, 'import collections\n'), ((2524, 2562), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2541, 2562), False, 'import collections\n'), ((2584, 2622), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2601, 2622), False, 'import collections\n'), ((2635, 2673), 'collections.deque', 'collections.deque', ([], {'maxlen': 'windowLength'}), '(maxlen=windowLength)\n', (2652, 2673), False, 'import collections\n'), ((2700, 2728), 'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (2717, 2728), False, 'import collections\n'), ((2754, 2782), 
'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (2771, 2782), False, 'import collections\n'), ((2944, 2974), 'tia.trad.tools.io.follow.followMonitor', 'followMonitor', (['reportFile', 'fig'], {}), '(reportFile, fig)\n', (2957, 2974), False, 'from tia.trad.tools.io.follow import followMonitor\n'), ((6506, 6523), 'tia.trad.tools.errf.eReport', 'eReport', (['__file__'], {}), '(__file__)\n', (6513, 6523), False, 'from tia.trad.tools.errf import eReport\n'), ((6559, 6569), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6567, 6569), False, 'import sys\n'), ((3116, 3132), 'ujson.loads', 'json.loads', (['line'], {}), '(line)\n', (3126, 3132), True, 'import ujson as json\n'), ((4244, 4263), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4261, 4263), False, 'import collections\n'), ((4273, 4292), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4290, 4292), False, 'import collections\n'), ((4317, 4336), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4334, 4336), False, 'import collections\n'), ((4346, 4365), 'collections.deque', 'collections.deque', ([], {}), '()\n', (4363, 4365), False, 'import collections\n'), ((5823, 5842), 'collections.deque', 'collections.deque', ([], {}), '()\n', (5840, 5842), False, 'import collections\n')]
import re from ..utils import Report, fetch SITE = "fileinfo.com" PATH = "/extension" def extract(extension: str) -> list[Report]: soup = fetch(site=SITE, path=PATH, extension=extension) description_short = soup.find_all("h2")[0].text.strip() infoboxes = soup.find_all(attrs={"class": "infoBox"}) description_long = infoboxes[0].text.strip() how_to_open = re.sub(r"\n+", "\n\n", infoboxes[1].text).strip() report = Report( description_short=description_short, description_long=description_long, how_to_open=how_to_open, ) return [report]
[ "re.sub" ]
[((381, 422), 're.sub', 're.sub', (['"""\\\\n+"""', '"""\n\n"""', 'infoboxes[1].text'], {}), "('\\\\n+', '\\n\\n', infoboxes[1].text)\n", (387, 422), False, 'import re\n')]
from fastapi import APIRouter from pydantic import BaseModel from style.predict.servable.serve import get_servable router = APIRouter() class PredictionRequest(BaseModel): text: str model_name: str @router.get("/") async def index(): return {"success": True, "message": "Predictions Router is working!"} @router.post("/predict") async def predict(request: PredictionRequest): servable = get_servable(request.model_name) prediction = servable.run_inference(request.text) return {"success": True, "prediction": prediction} @router.post("/predicts") async def predicts(request: PredictionRequest): servable = get_servable(request.model_name) predictions = servable.run_inference_multiclass(request.text) return {"success": True, "predictions": predictions}
[ "fastapi.APIRouter", "style.predict.servable.serve.get_servable" ]
[((127, 138), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (136, 138), False, 'from fastapi import APIRouter\n'), ((412, 444), 'style.predict.servable.serve.get_servable', 'get_servable', (['request.model_name'], {}), '(request.model_name)\n', (424, 444), False, 'from style.predict.servable.serve import get_servable\n'), ((645, 677), 'style.predict.servable.serve.get_servable', 'get_servable', (['request.model_name'], {}), '(request.model_name)\n', (657, 677), False, 'from style.predict.servable.serve import get_servable\n')]
from IPython.Shell import IPShellEmbed ipshell = IPShellEmbed() ipshell()
[ "IPython.Shell.IPShellEmbed" ]
[((50, 64), 'IPython.Shell.IPShellEmbed', 'IPShellEmbed', ([], {}), '()\n', (62, 64), False, 'from IPython.Shell import IPShellEmbed\n')]
#!/usr/bin/python3 """ (C) Copyright 2021 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ import time from pool_test_base import PoolTestBase from server_utils import ServerFailed class PoolCreateTests(PoolTestBase): # pylint: disable=too-many-ancestors,too-few-public-methods """Pool create tests. All of the tests verify pool create performance with 7 servers and 1 client. Each server should be configured with full compliment of NVDIMMs and SSDs. :avocado: recursive """ def test_create_pool_quantity(self): """JIRA ID: DAOS-5114 / SRS-2 / SRS-4. Test Description: Create 200 pools on all of the servers. Perform an orderly system shutdown via cmd line (dmg). Restart the system via cmd line tool (dmg). Verify that DAOS is ready to accept requests with in 2 minutes. :avocado: tags=all,pr,daily_regression :avocado: tags=hw,large :avocado: tags=pool :avocado: tags=pool_create_tests,create_performance """ # Create some number of pools each using a equal amount of 60% of the # available capacity, e.g. 0.6% for 100 pools. 
quantity = self.params.get("quantity", "/run/pool/*", 1) self.add_pool_qty(quantity, create=False) self.check_pool_creation(10) # Verify DAOS can be restarted in less than 2 minutes try: self.server_managers[0].system_stop() except ServerFailed as error: self.fail(error) start = float(time.time()) try: self.server_managers[0].system_start() except ServerFailed as error: self.fail(error) duration = float(time.time()) - start self.assertLessEqual( duration, 120, "DAOS not ready to accept requests with in 2 minutes") # Verify all the pools exists after the restart detected_pools = [uuid.lower() for uuid in self.dmg.pool_list()] missing_pools = [] for pool in self.pool: pool_uuid = pool.uuid.lower() if pool_uuid not in detected_pools: missing_pools.append(pool_uuid) if missing_pools: self.fail( "The following created pools were not detected in the pool " "list after rebooting the servers:\n [{}]: {}".format( len(missing_pools), ", ".join(missing_pools))) self.assertEqual( len(self.pool), len(detected_pools), "Additional pools detected after rebooting the servers")
[ "time.time" ]
[((1573, 1584), 'time.time', 'time.time', ([], {}), '()\n', (1582, 1584), False, 'import time\n'), ((1743, 1754), 'time.time', 'time.time', ([], {}), '()\n', (1752, 1754), False, 'import time\n')]
import subprocess import sys from collections import defaultdict import pandas as pd import networkx import random import functools import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation from .model import Network, Link, Frame, Node import io if sys.platform == 'darwin': matplotlib.use("TkAgg") class Analyzer: def __init__(self, df: pd.DataFrame, network: Network, lcm: int): self._df = df self._network = network self._graph = network.graph self._lcm = lcm def print_by_time(self): print(self._df.sort_values(by='time_slot')) def print_by_app(self): res = self._df.sort_values(by='app') print(res) def _animate_update(self, ax, time_slot): ax.clear() ax.set_title(f'Time slot: {time_slot}') edge_lable = dict() pos = networkx.spring_layout(self._graph, seed=0, scale=3) cur_table = self._df[self._df['time_slot'] == time_slot] for idx, cur_row in cur_table.iterrows(): link = cur_row['link'] edge_lable[(link.node1.name, link.node2.name)] = cur_row['app'].name networkx.draw_networkx_edges(self._graph, pos=pos, ax=ax, edge_color='gray') nodes = networkx.draw_networkx_nodes(self._graph, pos=pos, ax=ax, node_color="white", node_size=1000, node_shape='o') nodes.set_edgecolor('black') networkx.draw_networkx_labels(self._graph, pos=pos, ax=ax, font_size=8) networkx.draw_networkx_edge_labels(self._graph, pos=pos, edge_labels=edge_lable, ax=ax) ax.set_xticks([]) ax.set_yticks([]) def animate(self): fig, ax = plt.subplots(figsize=(8, 8)) ani = animation.FuncAnimation(fig, functools.partial(self._animate_update, ax), frames=self._lcm, interval=650, repeat=True) # Set up formatting for the movie files ani.save('/tmp/res.mov', fps=1, dpi=100) plt.show() pass def export(self, hosts=("127.0.0.1",)): exported = io.StringIO() p = functools.partial(print, file=exported) node_app_map = {} for app in self._df['app'].unique(): node_app_map[app.node] = app msg_core_app = defaultdict(list) app_count = 0 for node in self._graph.nodes: if node.startswith('msg'): 
msg_core_app[node] = msg_core_app[node] for nei in self._graph.neighbors(node): if nei.startswith('app'): app_count += 1 msg_core_app[node].append(nei) p(len(msg_core_app), self._lcm) for i, ma in enumerate(msg_core_app.keys()): # inter msg server endpoint and app endpoint ip = random.Random(200 + i).choice(hosts) p(ma, ip, 10801 + i, 1 if i == 0 else 0, ip, 20801 + i) p() # 每个app什么时间槽发送一个消息 for msg_node, app_nodes in msg_core_app.items(): for app_node in app_nodes: app = node_app_map[app_node] for idx, row in self._df[self._df['app'] == app].iterrows(): if row['link'].node1.name == app_node and int(row['time_slot']) < app.peroid: p(':', app.name) p(row['time_slot'], app.peroid, msg_node) p() # 每个msg_core需要在什么时间把消息从哪转到哪 def find_next_node_not_switch(frame: Frame, n: Node) -> Node: if not n.name.startswith('switch'): return n for _, r in self._df.iterrows(): if r['link'].node1 != n or r['frame'].id != frame.id: continue if not r['link'].node2.name.startswith('switch'): return r['link'].node2 else: return find_next_node_not_switch(frame, r['link'].node2) def find_prev_node_not_switch(frame: Frame, n: Node) -> Node: if not n.name.startswith('switch'): return n for _, r in self._df.iterrows(): if r['link'].node2 != n or r['frame'].id != frame.id: continue if not r['link'].node1.name.startswith('switch'): return r['link'].node1 else: return find_next_node_not_switch(frame, r['link'].node1) def cvt_node(node: Node): return node_app_map[node.name] if node.name.startswith('app') else node.name for msg_node in msg_core_app.keys(): tlist = [] for _, row in self._df.iterrows(): if row['link'].node1 == msg_node: # msg node需要转发该消息 target_node = find_next_node_not_switch(row['frame'], row['link'].node2) tlist.append((msg_node, 'send', cvt_node(target_node), row['frame'].id, row['time_slot'])) elif row['link'].node2 == msg_node: target_node = find_prev_node_not_switch(row['frame'], row['link'].node1) tlist.append((msg_node, 'recv', cvt_node(target_node), row['frame'].id, 
row['time_slot'])) tlist = sorted(tlist, key=lambda x: int(x[4])) p(':', msg_node) p(self._lcm, len(msg_core_app[msg_node]), len(tlist)) p('\n'.join(map(lambda xm: node_app_map[xm].name, msg_core_app[msg_node]))) for x in tlist: for y in x[1:]: p(y, end=' ') p() p() with open('/tmp/tt.txt.tmp', 'w+') as f: print(exported.getvalue(), file=f) for ip in hosts: subprocess.run(f'scp /tmp/tt.txt.tmp {ip}:/tmp/tt.txt'.split(' ')) # print(exported.getvalue())
[ "networkx.draw_networkx_edge_labels", "matplotlib.use", "random.Random", "networkx.spring_layout", "networkx.draw_networkx_nodes", "networkx.draw_networkx_labels", "functools.partial", "collections.defaultdict", "io.StringIO", "networkx.draw_networkx_edges", "matplotlib.pyplot.subplots", "matp...
[((314, 337), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (328, 337), False, 'import matplotlib\n'), ((871, 923), 'networkx.spring_layout', 'networkx.spring_layout', (['self._graph'], {'seed': '(0)', 'scale': '(3)'}), '(self._graph, seed=0, scale=3)\n', (893, 923), False, 'import networkx\n'), ((1164, 1240), 'networkx.draw_networkx_edges', 'networkx.draw_networkx_edges', (['self._graph'], {'pos': 'pos', 'ax': 'ax', 'edge_color': '"""gray"""'}), "(self._graph, pos=pos, ax=ax, edge_color='gray')\n", (1192, 1240), False, 'import networkx\n'), ((1258, 1372), 'networkx.draw_networkx_nodes', 'networkx.draw_networkx_nodes', (['self._graph'], {'pos': 'pos', 'ax': 'ax', 'node_color': '"""white"""', 'node_size': '(1000)', 'node_shape': '"""o"""'}), "(self._graph, pos=pos, ax=ax, node_color=\n 'white', node_size=1000, node_shape='o')\n", (1286, 1372), False, 'import networkx\n'), ((1459, 1530), 'networkx.draw_networkx_labels', 'networkx.draw_networkx_labels', (['self._graph'], {'pos': 'pos', 'ax': 'ax', 'font_size': '(8)'}), '(self._graph, pos=pos, ax=ax, font_size=8)\n', (1488, 1530), False, 'import networkx\n'), ((1540, 1632), 'networkx.draw_networkx_edge_labels', 'networkx.draw_networkx_edge_labels', (['self._graph'], {'pos': 'pos', 'edge_labels': 'edge_lable', 'ax': 'ax'}), '(self._graph, pos=pos, edge_labels=\n edge_lable, ax=ax)\n', (1574, 1632), False, 'import networkx\n'), ((1722, 1750), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1734, 1750), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2036, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2129), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2127, 2129), False, 'import io\n'), ((2142, 2181), 'functools.partial', 'functools.partial', (['print'], {'file': 'exported'}), '(print, file=exported)\n', (2159, 2181), False, 'import functools\n'), ((2318, 
2335), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2329, 2335), False, 'from collections import defaultdict\n'), ((1794, 1837), 'functools.partial', 'functools.partial', (['self._animate_update', 'ax'], {}), '(self._animate_update, ax)\n', (1811, 1837), False, 'import functools\n'), ((2857, 2879), 'random.Random', 'random.Random', (['(200 + i)'], {}), '(200 + i)\n', (2870, 2879), False, 'import random\n')]
# Author: <NAME> # this class handle the functions tests of controller of the component of the numerical features import pytest import sys from PyQt5 import QtWidgets from ui.mainTest import StaticObjects @pytest.mark.parametrize('slider', [1, 2.9, False, ('t1', 't2'), None]) def test_CIR_setSlider_wrong_parameter(slider): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(slider) def test_CIR_setSlider_right_parameter(): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) def test_CIR_initializeRange_none_min_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(None, 1, 0.5, 15) def test_CIR_initializeRange_none_max_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum 
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, None, 0.5, 15) def test_CIR_initializeRange_none_value_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, None, 15) def test_CIR_initializeRange_none_space_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, None) def test_CIR_initializeRange_right_parameters(): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) def test_CIR_updateRange_none_min_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum 
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.updateRange(None, 1, 0.5) def test_CIR_updateRange_none_max_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.updateRange(0, None, 0.5) def test_CIR_updateRange_none_value_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.updateRange(0, 1, None) def test_CIR_updateRange_right_parameters(): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.updateRange(0, 1, 0.3) def test_CIR_setValue_none_parameter(): with pytest.raises(AssertionError): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = 
counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.setValue(None) def test_CIR_setValue_right_parameters(): app = QtWidgets.QApplication(sys.argv) counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView() counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1) rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider) rangeMin.initializeRange(0, 1, 0.5, 15) rangeMin.setValue(0.3)
[ "pytest.mark.parametrize", "ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView", "pytest.raises", "PyQt5.QtWidgets.QApplication" ]
[((213, 283), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slider"""', "[1, 2.9, False, ('t1', 't2'), None]"], {}), "('slider', [1, 2.9, False, ('t1', 't2'), None])\n", (236, 283), False, 'import pytest\n'), ((786, 818), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (808, 818), False, 'from PyQt5 import QtWidgets\n'), ((866, 928), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (926, 928), False, 'from ui.mainTest import StaticObjects\n'), ((3424, 3456), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3446, 3456), False, 'from PyQt5 import QtWidgets\n'), ((3504, 3566), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (3564, 3566), False, 'from ui.mainTest import StaticObjects\n'), ((5659, 5691), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5681, 5691), False, 'from PyQt5 import QtWidgets\n'), ((5739, 5801), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (5799, 5801), False, 'from ui.mainTest import StaticObjects\n'), ((6737, 6769), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6759, 6769), False, 'from PyQt5 import QtWidgets\n'), ((6817, 6879), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (6877, 6879), False, 'from ui.mainTest import StaticObjects\n'), ((341, 370), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (354, 370), False, 'import pytest\n'), ((386, 418), 
'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (408, 418), False, 'from PyQt5 import QtWidgets\n'), ((470, 532), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (530, 532), False, 'from ui.mainTest import StaticObjects\n'), ((1224, 1253), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1237, 1253), False, 'import pytest\n'), ((1269, 1301), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1291, 1301), False, 'from PyQt5 import QtWidgets\n'), ((1353, 1415), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (1413, 1415), False, 'from ui.mainTest import StaticObjects\n'), ((1774, 1803), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1787, 1803), False, 'import pytest\n'), ((1819, 1851), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1841, 1851), False, 'from PyQt5 import QtWidgets\n'), ((1903, 1965), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (1963, 1965), False, 'from ui.mainTest import StaticObjects\n'), ((2326, 2355), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2339, 2355), False, 'import pytest\n'), ((2371, 2403), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2393, 2403), False, 'from PyQt5 import QtWidgets\n'), ((2455, 2517), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (2515, 2517), False, 'from ui.mainTest import 
StaticObjects\n'), ((2876, 2905), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2889, 2905), False, 'import pytest\n'), ((2921, 2953), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2943, 2953), False, 'from PyQt5 import QtWidgets\n'), ((3005, 3067), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (3065, 3067), False, 'from ui.mainTest import StaticObjects\n'), ((3902, 3931), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3915, 3931), False, 'import pytest\n'), ((3947, 3979), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3969, 3979), False, 'from PyQt5 import QtWidgets\n'), ((4031, 4093), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (4091, 4093), False, 'from ui.mainTest import StaticObjects\n'), ((4488, 4517), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (4501, 4517), False, 'import pytest\n'), ((4533, 4565), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4555, 4565), False, 'from PyQt5 import QtWidgets\n'), ((4617, 4679), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (4677, 4679), False, 'from ui.mainTest import StaticObjects\n'), ((5076, 5105), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5089, 5105), False, 'import pytest\n'), ((5121, 5153), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5143, 5153), False, 'from PyQt5 import QtWidgets\n'), ((5205, 5267), 
'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (5265, 5267), False, 'from ui.mainTest import StaticObjects\n'), ((6166, 6195), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (6179, 6195), False, 'import pytest\n'), ((6211, 6243), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6233, 6243), False, 'from PyQt5 import QtWidgets\n'), ((6295, 6357), 'ui.mainTest.StaticObjects.staticCounterfactualInterfaceSlider3RangesView', 'StaticObjects.staticCounterfactualInterfaceSlider3RangesView', ([], {}), '()\n', (6355, 6357), False, 'from ui.mainTest import StaticObjects\n')]
#!/usr/bin/env python3 import argparse import pathlib GENERATED_EXTENSIONS = ["pb.go", "pb.gw.go", "swagger.json"] def find_files(path, fileglob): files_full = list(path.glob(fileglob)) return files_full def strip_path_extension(filelist): # We cannot use Path.stem directly as it doesn't handle double extensions (.pb.go) correctly files_extensionless = list(map(lambda f: (str(f).replace("".join(f.suffixes), "")), filelist)) files_name_only = list(map(lambda f: pathlib.Path(f).stem, files_extensionless)) return files_name_only def find_difference(generated_list, proto_list): difference = set(generated_list) - set(proto_list) return difference def filter_only_gen_files(candidates): return [x for x in candidates if any(str(x.name).endswith(extension) for extension in GENERATED_EXTENSIONS)] def find_in_list(target_list, searchterms): searchterms = [f"{x}." for x in searchterms] # Add a dot to only match full filenames return [x for x in target_list if any(str(x.name).startswith(term) for term in searchterms )] def remove_files(target_list): for target in target_list: target.unlink() def main(): parser = argparse.ArgumentParser() parser.add_argument("--protos", type=pathlib.Path, help="Path to proto dir") parser.add_argument("--generated", type=pathlib.Path, help="Path to generated sources dir") v = parser.parse_args() proto_files = find_files(v.protos, "**/*.proto") generated_files = [f for file_list in (find_files(v.generated, f'**/*.{ext}') for ext in GENERATED_EXTENSIONS) for f in file_list] proto_stripped = strip_path_extension(proto_files) generated_stripped = strip_path_extension(generated_files) diff = find_difference(generated_stripped, proto_stripped) full_paths = find_in_list(generated_files, diff) final_diff = filter_only_gen_files(full_paths) if len(final_diff) > 0: print(f"Removing: {final_diff}") remove_files(final_diff) if __name__ == '__main__': main()
[ "argparse.ArgumentParser", "pathlib.Path" ]
[((1195, 1220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1218, 1220), False, 'import argparse\n'), ((492, 507), 'pathlib.Path', 'pathlib.Path', (['f'], {}), '(f)\n', (504, 507), False, 'import pathlib\n')]
"""Define different helper functions.""" import datetime import json import re import sys import urllib.parse from urllib.request import Request, urlopen import icu import markdown from flask import current_app, g, make_response, render_template, request, url_for from flask_babel import gettext from . import static_info VONAV_LIST = ["von", "af", "av"] def set_language_switch_link(route, fragment=None, lang=""): """Fix address and label for language switch button.""" if not lang: lang = g.language if lang == "en": g.switch_language = {"url": url_for("views." + route + "_sv"), "label": "Svenska"} else: g.switch_language = {"url": url_for("views." + route + "_en"), "label": "English"} if fragment is not None: g.switch_language["url"] += "/" + fragment def cache_name(pagename, lang=""): """Get page from cache.""" if not lang: lang = "sv" if "sv" in request.url_rule.rule else "en" return "%s_%s" % (pagename, lang) def karp_query(action, query, mode=None): """Generate query and send request to Karp.""" if not mode: mode = current_app.config["KARP_MODE"] query["mode"] = mode query["resource"] = current_app.config["KARP_LEXICON"] if "size" not in query: query["size"] = current_app.config["RESULT_SIZE"] params = urllib.parse.urlencode(query) return karp_request("%s?%s" % (action, params)) def karp_request(action): """Send request to Karp backend.""" q = Request("%s/%s" % (current_app.config["KARP_BACKEND"], action)) if current_app.config["DEBUG"]: log("%s/%s\n" % (current_app.config["KARP_BACKEND"], action), "REQUEST") if current_app.config.get("USE_AUTH", False): q.add_header("Authorization", "Basic %s" % (current_app.config["KARP_AUTH_HASH"])) response = urlopen(q).read() data = json.loads(response.decode("UTF-8")) return data def karp_fe_url(): """Get URL for Karp frontend.""" return current_app.config["KARP_FRONTEND"] + "/#?mode=" + current_app.config["KARP_MODE"] def serve_static_page(page, title=""): """Serve static html.""" set_language_switch_link(page) with 
current_app.open_resource("static/pages/%s/%s.html" % (page, g.language)) as f: data = f.read().decode("UTF-8") return render_template("page_static.html", content=data, title=title) def check_cache(page, lang=""): """ Check if page is in cache. If the cache should not be used, return None. """ if current_app.config["TEST"]: return None try: with g.mc_pool.reserve() as client: # Look for the page, return if found art = client.get(cache_name(page, lang)) if art is not None: return art except Exception: # TODO what to do?? pass # If nothing is found, return None return None def set_cache(page, name="", lang="", no_hits=0): """ Browser cache handling. Add header to the response. May also add the page to the memcache. """ pagename = cache_name(name, lang="") if no_hits >= current_app.config["CACHE_HIT_LIMIT"]: try: with g.mc_pool.reserve() as client: client.set(pagename, page, time=current_app.config["LOW_CACHE_TIME"]) except Exception: # TODO what to do?? pass r = make_response(page) r.headers.set("Cache-Control", "public, max-age=%s" % current_app.config["BROWSER_CACHE_TIME"]) return r def get_first_name(source): """Return the given name (first name).""" return re.sub("/", "", source["name"].get("firstname", "")).strip() def format_names(source, fmt="strong"): """Return the given name (first name), and the formatted callingname (tilltalsnamnet).""" if fmt: return re.sub("(.*)/(.+)/(.*)", r"\1<%s>\2</%s>\3" % (fmt, fmt), source["name"].get("firstname", "")) else: return re.sub("(.*)/(.+)/(.*)", r"\1\2\3", source["name"].get("firstname", "")) def get_life_range(source): """ Return the birth and death year from _source (as a tuple). Return empty strings if not available. 
""" years = [] for event in ["from", "to"]: if source["lifespan"].get(event): date = source["lifespan"][event].get("date", "") if date: date = date.get("comment", "") if "-" in date and not re.search("[a-zA-Z]", date): year = date[:date.find("-")] else: year = date else: year = "" years.append(year) return years[0], years[1] def get_life_range_force(source): """ Return the birth and death year from _source (as a tuple). Try to also parse non-dates like "ca. 1500-talet". Return -1, 1000000 if not available. """ default_born = -1 default_died = 1000000 def convert(event, retval): if source["lifespan"].get(event): date = source["lifespan"][event].get("date", "") if date: date = date.get("comment", "") match = re.search(r".*(\d{4}).*", date) if match: retval = int(match.group(1)) return retval born = convert("from", default_born) dead = convert("to", default_died) # Sorting hack: if there is no birth year, set it to dead -100 (and vice versa) # to make is appear in a more reasonable position in the chronology if born == default_born and dead != default_died: born = dead - 100 if dead == default_died and born != default_born: dead = born + 100 return born, dead def get_date(source): """Get birth and death date if available. Return empty strings otherwise.""" dates = [] for event in ["from", "to"]: if source["lifespan"][event].get("date"): date = source["lifespan"][event]["date"].get("comment", "") else: date = "" dates.append(date) return dates[0], dates[1] def get_current_date(): """Get the current date.""" return datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d") def markdown_html(text): """Convert markdown text to html.""" return markdown.markdown(text) def group_by_type(objlist, name): """Group objects by their type (=name), e.g. 
'othernames'.""" newdict = {} for obj in objlist: val = obj.get(name, "") key_sv = obj.get("type", "Övrigt") key_en = obj.get("type_eng", "Other") if key_sv not in newdict: newdict[key_sv] = (key_en, []) newdict[key_sv][1].append(val) result = [] for key, val in list(newdict.items()): result.append({"type": key, "type_eng": val[0], name: ", ".join(val[1])}) return result def make_alphabetical_bucket(result, sortnames=False, lang="sv"): def processname(bucket, results): vonaf_pattern = re.compile(r"^(%s) " % "|".join(VONAV_LIST)) name = re.sub(vonaf_pattern, r"", bucket[0]) results.append((name[0].upper(), bucket)) return make_alphabetic(result, processname, sortnames=sortnames, lang=lang) def rewrite_von(name): """Move 'von' and 'av' to end of name.""" vonaf_pattern = re.compile(r"^(%s) (.+)$" % "|".join(VONAV_LIST)) return re.sub(vonaf_pattern, r"\2 \1", name) def make_placenames(places, lang="sv"): def processname(hit, results): name = hit["name"].strip() results.append((name[0].upper(), (name, hit))) return make_alphabetic(places, processname, lang=lang) def make_alphabetic(hits, processname, sortnames=False, lang="sv"): """ Loop through hits, apply the function 'processname' on each object and then sort the result in alphabetical order. The function processname should append zero or more processed form of the object to the result list. This processed forms should be a pair (first_letter, result) where first_letter is the first_letter of each object (to sort on), and the result is what the html-template want e.g. 
a pair of (name, no_hits) """ def fix_lastname(name): vonaf_pattern = re.compile(r"^(%s) " % "|".join(VONAV_LIST)) name = re.sub(vonaf_pattern, r"", name) return name.replace(" ", "z") results = [] for hit in hits: processname(hit, results) letter_results = {} # Split the result into start letters for first_letter, result in results: if first_letter == "Ø": first_letter = "Ö" if first_letter == "Æ": first_letter = "Ä" if first_letter == "Ü": first_letter = "Y" if lang == "en" and first_letter == "Ö": first_letter = "O" if lang == "en" and first_letter in "ÄÅ": first_letter = "A" if first_letter not in letter_results: letter_results[first_letter] = [result] else: letter_results[first_letter].append(result) # Sort result dictionary alphabetically into list if lang == "en": collator = icu.Collator.createInstance(icu.Locale("en_EN.UTF-8")) else: collator = icu.Collator.createInstance(icu.Locale("sv_SE.UTF-8")) for _n, items in list(letter_results.items()): if sortnames: items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + " " + x[1])) else: items.sort(key=lambda x: collator.getSortKey(x[0])) letter_results = sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0])) return letter_results def make_simplenamelist(hits, search): """ Create a list with links to the entries url or _id. Sort entries with names matching the query higher. 
""" results = [] used = set() namefields = ["firstname", "lastname", "sortname"] search_terms = [st.lower() for st in search.split()] for hit in hits["hits"]: # score = sum(1 for field in hit["highlight"] if field.startswith("name.")) hitname = hit["_source"]["name"] score = sum(1 for nf in namefields if any(st in hitname.get(nf, "").lower() for st in search_terms)) if score: name = join_name(hit["_source"], mk_bold=True) liferange = get_life_range(hit["_source"]) subtitle = hit["_source"].get("subtitle", "") subtitle_eng = hit["_source"].get("subtitle_eng", "") subject_id = hit["_source"].get("url") or hit["_id"] results.append((-score, name, liferange, subtitle, subtitle_eng, subject_id)) used.add(hit["_id"]) return sorted(results), used def make_namelist(hits, exclude=set(), search=""): """ Split hits into one list per first letter. Return only info necessary for listing of names. """ results = [] first_letters = [] # List only containing letters in alphabetical order current_letterlist = [] # List containing entries starting with the same letter current_total = 0 if search: max_len = current_app.config["SEARCH_RESULT_SIZE"] - len(exclude) else: max_len = None for hit in hits["hits"]: if hit["_id"] in exclude: continue # Seperate names from linked names is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"]) if is_link: name = hit["_source"]["name"].get("sortname", "") linked_name = join_name(hit["_source"]) else: name = join_name(hit["_source"], mk_bold=True) linked_name = False liferange = get_life_range(hit["_source"]) subtitle = hit["_source"].get("subtitle", "") subtitle_eng = hit["_source"].get("subtitle_eng", "") subject_id = hit["_source"].get("url") or hit["_id"] # Get first letter from sort[0] firstletter = hit["sort"][1].upper() if firstletter not in first_letters: if current_letterlist: results.append(current_letterlist) current_letterlist = [] first_letters.append(firstletter) current_letterlist.append((firstletter, is_link, name, 
linked_name, liferange, subtitle, subtitle_eng, subject_id)) current_total += 1 # Don't show more than SEARCH_RESULT_SIZE number of results if max_len and current_total >= max_len: break if current_letterlist: # Append last letterlist results.append(current_letterlist) return (first_letters, results) def make_datelist(hits): """Extract information relevant for chronology list (same as make_namelist but without letter splitting).""" result = [] for hit in hits: is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"]) if is_link: name = hit["_source"]["name"].get("sortname", "") linked_name = join_name(hit["_source"]) else: name = join_name(hit["_source"], mk_bold=True) linked_name = False liferange = get_life_range(hit["_source"]) subtitle = hit["_source"].get("subtitle", "") subtitle_eng = hit["_source"].get("subtitle_eng", "") subject_id = hit["_source"].get("url") or hit["_id"] result.append((is_link, name, linked_name, liferange, subtitle, subtitle_eng, subject_id)) return result def join_name(source, mk_bold=False): """Retrieve and format name from source.""" name = [] lastname = source["name"].get("lastname", "") vonaf_pattern = re.compile(r"(%s |)(.*)" % " |".join(VONAV_LIST)) match = re.search(vonaf_pattern, lastname) vonaf = match.group(1) lastname = match.group(2) if lastname: if mk_bold: name.append("<strong>%s</strong>," % lastname) else: name.append(lastname + ",") if mk_bold: name.append(format_names(source, fmt="strong")) else: name.append(source["name"].get("firstname", "")) name.append(vonaf) return " ".join(name) def sort_places(stat_table, route): """Translate place names and sort list.""" # Work in progress! Waiting for translation list. # Or should this be part of the data instead?? 
place_translations = { "Göteborg": "Gothenburg" } if "place" in route.rule: lang = "en" else: lang = "sv" if lang == "en": for d in stat_table: d["display_name"] = place_translations.get(d["name"], d["name"]) else: for d in stat_table: d["display_name"] = d["name"] stat_table.sort(key=lambda x: x.get("name").strip()) return stat_table def mk_links(text): """Fix display of links within an article text.""" # TODO markdown should fix this itself try: text = re.sub(r"\[\]\((.*?)\)", r"[\1](\1)", text) for link in re.findall(r"\]\((.*?)\)", text): text = re.sub(r"\(%s\)" % link, "(%s)" % url_for("views.article_index_" + g.language, search=link), text) except Exception: # If there are parenthesis within the links, problems will occur. text = text return text def unescape(text): """Unescape some html chars.""" text = re.sub("&gt;", r">", text) text = re.sub("&apos;", r"'", text) return text def aggregate_by_type(items, use_markdown=False): if not isinstance(items, list): items = [items] types = {} for item in items: if "type" in item: t = item["type"] if t: if t not in types: types[t] = [] if use_markdown and "description" in item: item["description"] = markdown_html(item["description"]) item["description_eng"] = markdown_html(item.get("description_eng", "")) types[t].append(item) return list(types.items()) def collapse_kids(source): unkown_kids = 0 for relation in source.get("relation", []): if relation.get("type") == "Barn" and len(list(relation.keys())) == 1: unkown_kids += 1 relation["hide"] = True if unkown_kids: source["collapsedrelation"] = [{"type": "Barn", "count": unkown_kids}] def make_placelist(hits, placename, lat, lon): grouped_results = {} for hit in hits["hits"]: source = hit["_source"] hit["url"] = source.get("url") or hit["_id"] placelocations = {gettext("Residence"): source.get("places", []), gettext("Place of activity"): source.get("occupation", []), gettext("Place of education"): source.get("education", []), gettext("Contacts"): source.get("contact", 
[]), gettext("Birthplace"): [source.get("lifespan", {}).get("from", {})], gettext("Place of death"): [source.get("lifespan", {}).get("to", {})] } for ptype, places in list(placelocations.items()): names = dict([(place.get("place", {}).get("place", "").strip(), place.get("place", {}).get("pin", {})) for place in places]) # Check if the name and the lat, lon is correct # (We can't ask karp of this, since it would be a nested query) if placename in names: # Coordinates! If coordinates are used, uncomment the two lines below # if names[placename].get("lat") == float(lat)\ # and names[placename].get("lon") == float(lon): if ptype not in grouped_results: grouped_results[ptype] = [] grouped_results[ptype].append((join_name(hit["_source"], mk_bold=True), hit)) # else: # # These two lines should be removed, but are kept for debugging # if "Fel" not in grouped_results: grouped_results["Fel"] = [] # grouped_results["Fel"].append((join_name(source), hit)) # Sort result dictionary alphabetically into list collator = icu.Collator.createInstance(icu.Locale("sv_SE.UTF-8")) for _n, items in list(grouped_results.items()): items.sort(key=lambda x: collator.getSortKey(x[0])) grouped_results = sorted(list(grouped_results.items()), key=lambda x: collator.getSortKey(x[0])) # These two lines should be removed, but are kept for debugging # if not grouped_results: # grouped_results = [("Fel", [(join_name(hit["_source"]), hit) for hit in hits["hits"]])] return grouped_results def is_email_address_valid(email): """ Validate the email address using a regex. It may not include any whitespaces, has exactly one "@" and at least one "." after the "@". 
""" if " " in email: return False # if not re.match("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$", email): # More permissive regex: does allow non-ascii chars if not re.match(r"[^@]+@[^@]+\.[^@]+", email): return False return True def is_ascii(s): """Check if s contains of ASCII-characters only.""" return all(ord(c) < 128 for c in s) def get_lang_text(json_swe, json_eng, ui_lang): """Get text in correct language if available.""" if ui_lang == "en": if json_eng: return json_eng else: return json_swe else: return json_swe def get_shorttext(text): """Get the initial 200 characters of text. Remove HTML and line breaks.""" shorttext = re.sub(r"<.*?>|\n|\t", " ", text) shorttext = shorttext.strip() shorttext = re.sub(r" ", " ", shorttext) return shorttext[:200] def get_org_name(organisation): """Get short name for organisation (--> org.).""" if organisation.endswith("organisation") or organisation.endswith("organization"): return organisation[:-9] + "." else: return organisation def lowersorted(xs): """Sort case-insentitively.""" return sorted(xs, key=lambda x: x[0].lower()) def get_infotext(text, rule): """ Get infotext in correct language with Swedish as fallback. 
text = key in the infotext dict rule = request.url_rule.rule """ textobj = static_info.infotexter.get(text) if "sv" in rule: return textobj.get("sv") else: return textobj.get("en", textobj.get("sv")) def log(data, msg=""): """Log data to stderr.""" if msg: sys.stderr.write("\n" + msg + ": " + str(data) + "\n") else: sys.stderr.write("\n" + str(data) + "\n") def swedish_translator(firstname, lastname): """Check if 'firstname lastname' is a Swedish translator.""" swedish_translators = [ "<NAME>" ] name = firstname + " " + lastname if name in swedish_translators: return True return False def get_littb_id(skbl_url): """Get Litteraturbanken ID for an article if available.""" if not skbl_url: return None littb_url = ("https://litteraturbanken.se/api/list_all/author?filter_and={%22wikidata.skbl_link%22:%20%22" + skbl_url + "%22}&include=authorid") try: # Fake the user agent to avoid getting a 403 r = Request(littb_url, headers={"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) " "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}) contents = urlopen(r).read() except Exception as e: log("Could not open URL %s. Error: %s" % (e, littb_url)) return None resp = json.loads(contents) if resp.get("data"): return resp["data"][0]["authorid"] return None
[ "flask.render_template", "markdown.markdown", "json.loads", "flask.current_app.open_resource", "urllib.request.Request", "re.match", "icu.Locale", "flask.g.mc_pool.reserve", "flask.url_for", "datetime.datetime.now", "flask_babel.gettext", "re.sub", "flask.make_response", "re.findall", "u...
[((1504, 1567), 'urllib.request.Request', 'Request', (["('%s/%s' % (current_app.config['KARP_BACKEND'], action))"], {}), "('%s/%s' % (current_app.config['KARP_BACKEND'], action))\n", (1511, 1567), False, 'from urllib.request import Request, urlopen\n'), ((1692, 1733), 'flask.current_app.config.get', 'current_app.config.get', (['"""USE_AUTH"""', '(False)'], {}), "('USE_AUTH', False)\n", (1714, 1733), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((2321, 2383), 'flask.render_template', 'render_template', (['"""page_static.html"""'], {'content': 'data', 'title': 'title'}), "('page_static.html', content=data, title=title)\n", (2336, 2383), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((3458, 3477), 'flask.make_response', 'make_response', (['page'], {}), '(page)\n', (3471, 3477), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((6354, 6377), 'markdown.markdown', 'markdown.markdown', (['text'], {}), '(text)\n', (6371, 6377), False, 'import markdown\n'), ((7427, 7465), 're.sub', 're.sub', (['vonaf_pattern', '"""\\\\2 \\\\1"""', 'name'], {}), "(vonaf_pattern, '\\\\2 \\\\1', name)\n", (7433, 7465), False, 'import re\n'), ((13733, 13767), 're.search', 're.search', (['vonaf_pattern', 'lastname'], {}), '(vonaf_pattern, lastname)\n', (13742, 13767), False, 'import re\n'), ((15350, 15375), 're.sub', 're.sub', (['"""&gt;"""', '""">"""', 'text'], {}), "('&gt;', '>', text)\n", (15356, 15375), False, 'import re\n'), ((15388, 15415), 're.sub', 're.sub', (['"""&apos;"""', '"""\'"""', 'text'], {}), '(\'&apos;\', "\'", text)\n', (15394, 15415), False, 'import re\n'), ((19701, 19735), 're.sub', 're.sub', (['"""<.*?>|\\\\n|\\\\t"""', '""" """', 'text'], {}), "('<.*?>|\\\\n|\\\\t', ' ', text)\n", (19707, 19735), False, 'import re\n'), ((19785, 19813), 're.sub', 're.sub', (['""" """', '""" """', 'shorttext'], {}), "(' ', ' ', shorttext)\n", 
(19791, 19813), False, 'import re\n'), ((21736, 21756), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (21746, 21756), False, 'import json\n'), ((2189, 2262), 'flask.current_app.open_resource', 'current_app.open_resource', (["('static/pages/%s/%s.html' % (page, g.language))"], {}), "('static/pages/%s/%s.html' % (page, g.language))\n", (2214, 2262), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((6238, 6261), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6259, 6261), False, 'import datetime\n'), ((7107, 7143), 're.sub', 're.sub', (['vonaf_pattern', '""""""', 'bucket[0]'], {}), "(vonaf_pattern, '', bucket[0])\n", (7113, 7143), False, 'import re\n'), ((8336, 8367), 're.sub', 're.sub', (['vonaf_pattern', '""""""', 'name'], {}), "(vonaf_pattern, '', name)\n", (8342, 8367), False, 'import re\n'), ((14933, 14980), 're.sub', 're.sub', (['"""\\\\[\\\\]\\\\((.*?)\\\\)"""', '"""[\\\\1](\\\\1)"""', 'text'], {}), "('\\\\[\\\\]\\\\((.*?)\\\\)', '[\\\\1](\\\\1)', text)\n", (14939, 14980), False, 'import re\n'), ((14997, 15031), 're.findall', 're.findall', (['"""\\\\]\\\\((.*?)\\\\)"""', 'text'], {}), "('\\\\]\\\\((.*?)\\\\)', text)\n", (15007, 15031), False, 'import re\n'), ((18264, 18289), 'icu.Locale', 'icu.Locale', (['"""sv_SE.UTF-8"""'], {}), "('sv_SE.UTF-8')\n", (18274, 18289), False, 'import icu\n'), ((19135, 19173), 're.match', 're.match', (['"""[^@]+@[^@]+\\\\.[^@]+"""', 'email'], {}), "('[^@]+@[^@]+\\\\.[^@]+', email)\n", (19143, 19173), False, 'import re\n'), ((21385, 21560), 'urllib.request.Request', 'Request', (['littb_url'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }"}), "(littb_url, headers={'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n })\n", (21392, 21560), False, 'from 
urllib.request import Request, urlopen\n'), ((582, 615), 'flask.url_for', 'url_for', (["('views.' + route + '_sv')"], {}), "('views.' + route + '_sv')\n", (589, 615), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((683, 716), 'flask.url_for', 'url_for', (["('views.' + route + '_en')"], {}), "('views.' + route + '_en')\n", (690, 716), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((1841, 1851), 'urllib.request.urlopen', 'urlopen', (['q'], {}), '(q)\n', (1848, 1851), False, 'from urllib.request import Request, urlopen\n'), ((2647, 2666), 'flask.g.mc_pool.reserve', 'g.mc_pool.reserve', ([], {}), '()\n', (2664, 2666), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((9230, 9255), 'icu.Locale', 'icu.Locale', (['"""en_EN.UTF-8"""'], {}), "('en_EN.UTF-8')\n", (9240, 9255), False, 'import icu\n'), ((9314, 9339), 'icu.Locale', 'icu.Locale', (['"""sv_SE.UTF-8"""'], {}), "('sv_SE.UTF-8')\n", (9324, 9339), False, 'import icu\n'), ((16578, 16598), 'flask_babel.gettext', 'gettext', (['"""Residence"""'], {}), "('Residence')\n", (16585, 16598), False, 'from flask_babel import gettext\n'), ((16652, 16680), 'flask_babel.gettext', 'gettext', (['"""Place of activity"""'], {}), "('Place of activity')\n", (16659, 16680), False, 'from flask_babel import gettext\n'), ((16738, 16767), 'flask_babel.gettext', 'gettext', (['"""Place of education"""'], {}), "('Place of education')\n", (16745, 16767), False, 'from flask_babel import gettext\n'), ((16824, 16843), 'flask_babel.gettext', 'gettext', (['"""Contacts"""'], {}), "('Contacts')\n", (16831, 16843), False, 'from flask_babel import gettext\n'), ((16898, 16919), 'flask_babel.gettext', 'gettext', (['"""Birthplace"""'], {}), "('Birthplace')\n", (16905, 16919), False, 'from flask_babel import gettext\n'), ((16993, 17018), 'flask_babel.gettext', 'gettext', (['"""Place of death"""'], {}), "('Place of 
death')\n", (17000, 17018), False, 'from flask_babel import gettext\n'), ((3258, 3277), 'flask.g.mc_pool.reserve', 'g.mc_pool.reserve', ([], {}), '()\n', (3275, 3277), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n'), ((5223, 5254), 're.search', 're.search', (['""".*(\\\\d{4}).*"""', 'date'], {}), "('.*(\\\\d{4}).*', date)\n", (5232, 5254), False, 'import re\n'), ((21595, 21605), 'urllib.request.urlopen', 'urlopen', (['r'], {}), '(r)\n', (21602, 21605), False, 'from urllib.request import Request, urlopen\n'), ((4520, 4547), 're.search', 're.search', (['"""[a-zA-Z]"""', 'date'], {}), "('[a-zA-Z]', date)\n", (4529, 4547), False, 'import re\n'), ((15084, 15141), 'flask.url_for', 'url_for', (["('views.article_index_' + g.language)"], {'search': 'link'}), "('views.article_index_' + g.language, search=link)\n", (15091, 15141), False, 'from flask import current_app, g, make_response, render_template, request, url_for\n')]
#!/usr/bin/env python3
"""Lyapunov/Routh-Hurwitz stability analysis of the frictionless pendulum,
with a phase-portrait plot of the vector field."""

# do not hesitate to debug
import pdb

# python computation modules and visualization
import numpy as np
import sympy as sy
import scipy as sp
import matplotlib.pyplot as plt
from sympy import Q as syQ

sy.init_printing(use_latex=True, forecolor="White")


def Lyapunov_stability_test_linear(ev):
    '''
    Test if a linear homogeneous system with constant coefficients is stable
    in the sense of Lyapunov by checking the theorem conditions against the
    provided eigenvalues `ev`.

    Returns 'asymptotically stable', 'stable', 'unstable', or None when no
    criterion applies.

    source https://www.math24.net/stability-theory-basic-concepts/
    TODO taking into account eigenvalue multiplicity
    '''
    # system is asymptotically stable if and only if
    # all eigenvalues have negative real parts
    if all(sy.ask(syQ.negative(sy.re(e))) for e in ev):
        return 'asymptotically stable'
    # system is stable if and only if
    # all eigenvalues have nonpositive real parts
    # TODO incorporate algebraic and geometric multiplicity criteria
    if all(sy.ask(syQ.nonpositive(sy.re(e))) for e in ev):
        return 'stable'
    # system is unstable if
    # at least one eigenvalue has positive real part
    # TODO incorporate algebraic and geometric multiplicity criteria
    if any(sy.ask(syQ.positive(sy.re(e))) for e in ev):
        return 'unstable'
    return None


def Lyapunov_stability_test_nonlinear(ev):
    '''
    Test if the fixed point of a nonlinear structurally stable system is
    stable, unstable, critical, or impossible to determine using Lyapunov's
    first-order criteria (in which case other methods are needed).

    TODO tests are only applicable for structurally stable systems, i.e.
    purely imaginary eigenvalues are not taken into account

    source https://www.math24.net/stability-first-approximation/
    '''
    # system is asymptotically stable if and only if
    # all eigenvalues have negative real parts
    if all(sy.ask(syQ.negative(sy.re(e))) for e in ev):
        return 'asymptotically stable'
    # system is unstable if
    # at least one eigenvalue has positive real part
    if any(sy.ask(syQ.positive(sy.re(e))) for e in ev):
        return 'unstable'
    # if all eigenvalues have non-positive real parts, and there is at least
    # one eigenvalue with zero real part, the fixed point can be stable or
    # unstable and other methods should be used; mark the point critical.
    # BUG FIX: the original referenced the undefined name `Q` here
    # (imported only as `syQ`), raising NameError on this branch.
    if (all(sy.ask(syQ.nonpositive(sy.re(e))) for e in ev)
            and any(sy.re(e) == 0 for e in ev)):
        return 'critical'
    return 'not decided'


def RouthHurwitz_Criterion(p):
    '''
    Return principal minors of the Hurwitz matrix of polynomial `p` as sympy
    expressions; all minors positive is a sufficient condition for asymptotic
    stability.

    NOTE: if all n-1 principal minors are positive and the nth minor is zero,
    the system is at the boundary of stability, with two cases:
        a_n = 0 -- one root is zero: boundary of aperiodic stability
        (n-1)th minor zero -- two conjugate imaginary roots: boundary of
        oscillatory stability

    source https://www.math24.net/routh-hurwitz-criterion/
    '''
    # initial (diagonal key, coefficient-slice start) pair needed to build the
    # Hurwitz matrix via sy.banded
    idxs = [[1, 0]]

    def next_key(k):
        # next diagonal key is one below the previous
        return k - 1

    def next_slice(k, s):
        # the slice start advances by 1 while the key was nonnegative,
        # by 2 once it is negative
        return s + 1 if k >= 0 else s + 2

    # fill the remaining pairs w.r.t. the polynomial degree - 1
    # (one entry already present)
    for _ in range(p.degree() - 1):
        key = next_key(idxs[-1][0])
        idxs.append([key, next_slice(key, idxs[-1][1])])

    # create the banded Hurwitz matrix itself
    H = sy.banded({k: p.all_coeffs()[v:] for k, v in idxs})
    # leading coefficient for the 0th "minor", then each leading principal minor
    return [H[:_, :_].det() if _ > 0 else p.LC() for _ in range(0, p.degree() + 1)]


# define independent variable
t = sy.symbols('t', real=True)

# define dependent variables individually and pack them in one variable
theta, omega = sy.symbols(r'\theta, \omega', real=True)
Y = theta, omega

# define free parameters of the system and pack them in one variable
g, L = sy.symbols('g, L', positive=True)
parms = g, L

# create rhs of the pendulum equations as sympy expressions
theta_dt = omega
omega_dt = -(g/L)*sy.sin(theta)
rhs = {}
rhs['sympy'] = sy.Matrix([theta_dt, omega_dt])
# convert the sympy matrix function to a numpy function with the usual signature
rhs['numpy'] = sy.lambdify((t, Y, *parms), rhs['sympy'], 'numpy')

# create Jacobian matrix as sympy expression
J = {}
J['sympy'] = rhs['sympy'].jacobian(Y)
# convert the sympy Jacobian expression to a numpy function
J['numpy'] = sy.lambdify((t, Y, *parms), J['sympy'])

# calculate rhs fixed points
fixed_points = sy.solve(rhs['sympy'], Y)

# substitute each fixed point in the Jacobian and calculate the eigenvalues
J_fixed = {}
for i, fp in enumerate(fixed_points):
    J_subs = J['sympy'].subs([(y, v) for y, v in zip(Y, fp)])
    J_eigenvals = J_subs.eigenvals()
    # save the fixed point results in more detail — most importantly the
    # eigenvalues and their corresponding multiplicity
    J_fixed[i] = {
        'fixed point': fp,
        'subs': J_subs,
        'eigenvalues': list(J_eigenvals.keys()),
        'multiplicity': list(J_eigenvals.values())
    }


def plot_phase_portrait(ax, rhs, section, args=(), n_points=25):
    '''
    Plot a section of the phase space of a field defined via its rhs.

    ax       -- matplotlib axis to draw on
    rhs      -- callable rhs(t, (x, y), *args) returning the field components
    section  -- ((xmin, xmax), (ymin, ymax)) bounds of the plotted region
    args     -- extra parameters forwarded to rhs
    n_points -- grid resolution per axis
    '''
    # create section grid
    x_grid, y_grid = np.meshgrid(
        np.linspace(section[0][0], section[0][1], n_points),
        np.linspace(section[1][0], section[1][1], n_points))

    # calculate rhs on the grid
    xx, yy = rhs(None, (x_grid, y_grid), *args)

    # compute vector norms and make line width proportional to them,
    # i.e. the greater the vector length, the thicker the line
    # TODO not sure why rhs returns different shape
    vector_norms = np.sqrt(xx[0]**2 + yy[0]**2)
    lw = 0.25 + 3*vector_norms/vector_norms.max()

    # plot the phase portrait
    ax.streamplot(x_grid, y_grid, xx[0], yy[0],
                  linewidth=lw, arrowsize=1.2, density=1)
    return ax


def plot_main():
    fig, ax = plt.subplots()
    ax = plot_phase_portrait(
        ax,
        rhs['numpy'],
        ((-np.pi, np.pi), (-2*np.pi, 2*np.pi)),
        args=(5, 1),
    )


if __name__ == '__main__':
    plot_main()
[ "sympy.sin", "numpy.sqrt", "sympy.re", "sympy.lambdify", "sympy.Matrix", "sympy.init_printing", "sympy.symbols", "numpy.linspace", "sympy.solve", "matplotlib.pyplot.subplots" ]
[((227, 278), 'sympy.init_printing', 'sy.init_printing', ([], {'use_latex': '(True)', 'forecolor': '"""White"""'}), "(use_latex=True, forecolor='White')\n", (243, 278), True, 'import sympy as sy\n'), ((4294, 4320), 'sympy.symbols', 'sy.symbols', (['"""t"""'], {'real': '(True)'}), "('t', real=True)\n", (4304, 4320), True, 'import sympy as sy\n'), ((4408, 4449), 'sympy.symbols', 'sy.symbols', (['"""\\\\theta, \\\\omega"""'], {'real': '(True)'}), "('\\\\theta, \\\\omega', real=True)\n", (4418, 4449), True, 'import sympy as sy\n'), ((4545, 4578), 'sympy.symbols', 'sy.symbols', (['"""g, L"""'], {'positive': '(True)'}), "('g, L', positive=True)\n", (4555, 4578), True, 'import sympy as sy\n'), ((4702, 4733), 'sympy.Matrix', 'sy.Matrix', (['[theta_dt, omega_dt]'], {}), '([theta_dt, omega_dt])\n', (4711, 4733), True, 'import sympy as sy\n'), ((4825, 4875), 'sympy.lambdify', 'sy.lambdify', (['(t, Y, *parms)', "rhs['sympy']", '"""numpy"""'], {}), "((t, Y, *parms), rhs['sympy'], 'numpy')\n", (4836, 4875), True, 'import sympy as sy\n'), ((5060, 5099), 'sympy.lambdify', 'sy.lambdify', (['(t, Y, *parms)', "J['sympy']"], {}), "((t, Y, *parms), J['sympy'])\n", (5071, 5099), True, 'import sympy as sy\n'), ((5145, 5170), 'sympy.solve', 'sy.solve', (["rhs['sympy']", 'Y'], {}), "(rhs['sympy'], Y)\n", (5153, 5170), True, 'import sympy as sy\n'), ((4664, 4677), 'sympy.sin', 'sy.sin', (['theta'], {}), '(theta)\n', (4670, 4677), True, 'import sympy as sy\n'), ((6409, 6441), 'numpy.sqrt', 'np.sqrt', (['(xx[0] ** 2 + yy[0] ** 2)'], {}), '(xx[0] ** 2 + yy[0] ** 2)\n', (6416, 6441), True, 'import numpy as np\n'), ((6735, 6749), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6747, 6749), True, 'import matplotlib.pyplot as plt\n'), ((5992, 6043), 'numpy.linspace', 'np.linspace', (['section[0][0]', 'section[0][1]', 'n_points'], {}), '(section[0][0], section[0][1], n_points)\n', (6003, 6043), True, 'import numpy as np\n'), ((6059, 6110), 'numpy.linspace', 'np.linspace', 
(['section[1][0]', 'section[1][1]', 'n_points'], {}), '(section[1][0], section[1][1], n_points)\n', (6070, 6110), True, 'import numpy as np\n'), ((2713, 2721), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (2718, 2721), True, 'import sympy as sy\n'), ((865, 873), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (870, 873), True, 'import sympy as sy\n'), ((1130, 1138), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (1135, 1138), True, 'import sympy as sy\n'), ((1387, 1395), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (1392, 1395), True, 'import sympy as sy\n'), ((2137, 2145), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (2142, 2145), True, 'import sympy as sy\n'), ((2325, 2333), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (2330, 2333), True, 'import sympy as sy\n'), ((2669, 2677), 'sympy.re', 'sy.re', (['_'], {}), '(_)\n', (2674, 2677), True, 'import sympy as sy\n')]
#coding=utf-8 import numpy as np import tensorflow as tf import os import sys import time import shutil import re import signal import subprocess import numpy as np import math from Policy import * np.set_printoptions(threshold=np.inf) BASELINES = 1 class MultiBaseline: def __init__(self, baseline_number): self.baseline_number = baseline_number self.baselines = [Baseline() for _ in range(baseline_number)] self.reward_signal_access, self.reward_signal_wait, self.reward_signal_piece = [], [], [] self.reward_signal_wait_info1, self.reward_signal_wait_info2, self.reward_signal_wait_info3 = [], [], [] def __str__(self): stri = '' for i in range(self.baseline_number): stri = stri + 'baseline number ' + str(i) + ' has reward ' + str(self.baselines[i].reward) + '\n' stri = stri + str(self.baselines[i].sample) + '\n' return stri def insert_baseline(self, baseline): if baseline > self.baselines[0]: self.baselines[0].SetSampleWithAnotherBaseline(baseline) self.baselines.sort() def store_reward_signal(self, result): self.reward_signal_access.extend(result[0]) self.reward_signal_wait.extend(result[1]) self.reward_signal_piece.extend(result[2]) self.reward_signal_wait_info1.extend(result[3]) self.reward_signal_wait_info2.extend(result[4]) self.reward_signal_wait_info3.extend(result[5]) def samples_different_action(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3): # get a all True form result = Sample.default_different_action() # get different actions for j in range(self.baseline_number): diff = self.baselines[j].different_action(\ access, wait, piece, waitinfo1, waitinfo2, waitinfo3) for i in range(len(result)): result[i] = result[i] & np.array(diff[i]) self.store_reward_signal(result) def get_ratio(self, avg_reward): rewards = [] for i in range(self.baseline_number): reward_ = self.baselines[i].reward - avg_reward if reward_ > 0: rewards.append(reward_) else: rewards.append(0) rewards = np.array(rewards) if np.sum(rewards) == 0: return [1 / self.baseline_number] * 
self.baseline_number else: return rewards / np.sum(rewards) def calculate_reward(self, reward): # ratio = self.get_ratio(np.mean(reward)) access_rs, wait_rs, piece_rs = \ [0] * (len(reward) * ACCESSE_SPACE), [0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * PIECE_SPACE) waitinfo1_rs, waitinfo2_rs, waitinfo3_rs = \ [0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * WAIT_SPACE) # for i in range(self.baseline_number): # calculate discount_reward for each slot access_dr, wait_dr, piece_dr = [], [], [] waitinfo1_dr, waitinfo2_dr, waitinfo3_dr = [], [], [] for j in range(len(reward)): for _ in range(ACCESSE_SPACE): access_dr.append(reward[j]) for _ in range(PIECE_SPACE): piece_dr.append(reward[j]) for _ in range(WAIT_SPACE): wait_dr.append(reward[j]) waitinfo1_dr.append(reward[j]) waitinfo2_dr.append(reward[j]) waitinfo3_dr.append(reward[j]) avg_reward = np.mean(reward) access_rs = np.array(access_dr) - avg_reward wait_rs = np.array(wait_dr) - avg_reward piece_rs = np.array(piece_dr) - avg_reward waitinfo1_rs = (np.array(waitinfo1_dr) - avg_reward) * 5 waitinfo2_rs = (np.array(waitinfo2_dr) - avg_reward) * 2 waitinfo3_rs = (np.array(waitinfo3_dr) - avg_reward) * 2.5 # access_dr = np.array(access_dr) - self.baselines[i].reward # wait_dr = np.array(wait_dr) - self.baselines[i].reward # piece_dr = np.array(piece_dr) - self.baselines[i].reward # waitinfo1_dr = np.array(waitinfo1_dr) - self.baselines[i].reward # waitinfo2_dr = np.array(waitinfo2_dr) - self.baselines[i].reward # waitinfo3_dr = np.array(waitinfo3_dr) - self.baselines[i].reward # access_rs = access_rs + ratio[i] * access_dr * ((access_dr > 0) | self.reward_signal_access) # wait_rs = wait_rs + ratio[i] * wait_dr * ((wait_dr > 0) | self.reward_signal_wait) # piece_rs = piece_rs + ratio[i] * piece_dr * ((piece_dr > 0) | self.reward_signal_piece) # waitinfo1_rs = waitinfo1_rs + ratio[i] * waitinfo1_dr * ((waitinfo1_dr > 0) | self.reward_signal_wait_info1) # waitinfo2_rs = 
waitinfo2_rs + ratio[i] * waitinfo2_dr * ((waitinfo2_dr > 0) | self.reward_signal_wait_info2) # waitinfo3_rs = waitinfo3_rs + ratio[i] * waitinfo3_dr * ((waitinfo3_dr > 0) | self.reward_signal_wait_info3) return access_rs, wait_rs, piece_rs, waitinfo1_rs, waitinfo2_rs, waitinfo3_rs def clear_signal(self): self.reward_signal_access, self.reward_signal_wait, self.reward_signal_piece = [], [], [] self.reward_signal_wait_info1, self.reward_signal_wait_info2, self.reward_signal_wait_info3 = [], [], [] class Baseline: def __init__(self, access = [], wait = [], piece = [], \ waitinfo1 = [], waitinfo2 = [], waitinfo3 = [], \ reward = 0): if access == []: self.set = False else: self.set = True # manual asign a opt setting for backoff self.sample = Sample(access, wait, piece, waitinfo1, waitinfo2, waitinfo3, 6, [0,4,8,1,0,0,8,4,2,1,8,1,4,2,1,4,2,4]) self.reward = reward def setSample(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3, reward): self.set = True self.sample.set_sample(access, wait, piece, waitinfo1, waitinfo2, waitinfo3) self.reward = reward def SetSampleWithAnotherBaseline(self, baseline): self.setSample(baseline.sample.access, baseline.sample.wait, baseline.sample.piece, \ baseline.sample.wait_info1, baseline.sample.wait_info2, baseline.sample.wait_info3, \ baseline.reward) def __lt__(self, r): return self.reward < r.reward def different_action(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3): if self.set == False: return Sample.default_different_action() return self.sample.different_action(access, wait, piece, \ waitinfo1, waitinfo2, waitinfo3) class PolicyGradient: # initialize def __init__(self, log_dir, kid_dir, learning_rate,rd,output_graph=False): self.log_dir = log_dir self.kid_dir = kid_dir self.lr = learning_rate self.reward_decay = rd self.best_seen = 0 self.round_best = 0 self.round_mean = 0 self.round_worst = 0 self.round_std = 0 self.round_best_sample = None self.baselines = MultiBaseline(BASELINES) # to store observations, 
actions and corresponding rewards self.access_p, self.wait_p, self.piece_p = [], [], [] self.wait_info1_p, self.wait_info2_p, self.wait_info3_p = [], [], [] self.ep_access_rs, self.ep_wait_rs, self.ep_piece_rs = [], [], [] self.ep_waitinfo1_rs, self.ep_waitinfo2_rs, self.ep_waitinfo3_rs = [], [], [] self.ep_access_act, self.ep_wait_act, self.ep_piece_act = [], [], [] self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3 = [], [], [] self.samples_count = 0 self.policy = Policy() self._build_net() self.sess = tf.Session() if output_graph: tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.update_policy() def clear_round_info(self): self.round_best = 0 self.round_mean = 0 self.round_worst = 0 self.round_std = 0 self.round_best_sample = None def _build_net(self): with tf.name_scope('inputs'): self.tf_access_vt = tf.placeholder(tf.float32, [None, ], name="access_value") self.tf_wait_vt = tf.placeholder(tf.float32, [None, ], name="wait_value") self.tf_piece_vt = tf.placeholder(tf.float32, [None, ], name="piece_value") self.tf_wait_info_vt1 = tf.placeholder(tf.float32, [None, ], name="wait_info_value1") self.tf_wait_info_vt2 = tf.placeholder(tf.float32, [None, ], name="wait_info_value2") self.tf_wait_info_vt3 = tf.placeholder(tf.float32, [None, ], name="wait_info_value3") self.tf_access_act = tf.placeholder(tf.int32, [None, ], name="access_act") self.tf_wait_act = tf.placeholder(tf.int32, [None, ], name="wait_act") self.tf_piece_act = tf.placeholder(tf.int32, [None, ], name="piece_act") self.tf_wait_info_act1 = tf.placeholder(tf.int32, [None, ], name="wait_info_act1") self.tf_wait_info_act2 = tf.placeholder(tf.int32, [None, ], name="wait_info_act2") self.tf_wait_info_act3 = tf.placeholder(tf.int32, [None, ], name="wait_info_act3") self.tf_samples_count = tf.placeholder(tf.float32, name='samples_count') self.learning_rate = tf.placeholder(tf.float32, name='learning_rate') self.access_action_v = 
tf.Variable(tf.random_normal(shape=[INPUT_SPACE, 2], mean=0, stddev=1), name='access_action_v') self.wait_action_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, 2], mean=0, stddev=1), name='wait_action_v') self.piece_action_v = tf.Variable(tf.random_normal(shape=[PIECE_SPACE, 2], mean=0, stddev=1), name='piece_action_v') self.wait_info_action1_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[0]], mean=0, stddev=1), name='wait_info_action1_v') self.wait_info_action2_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[1]], mean=0, stddev=1), name='wait_info_action2_v') self.wait_info_action3_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[2]], mean=0, stddev=1), name='wait_info_action3_v') self.access_action = tf.nn.softmax(self.access_action_v, axis = 1) self.wait_action = tf.nn.softmax(self.wait_action_v, axis = 1) self.piece_action = tf.nn.softmax(self.piece_action_v, axis = 1) self.wait_info_action1 = tf.nn.softmax(self.wait_info_action1_v, axis = 1) self.wait_info_action2 = tf.nn.softmax(self.wait_info_action2_v, axis = 1) self.wait_info_action3 = tf.nn.softmax(self.wait_info_action3_v, axis = 1) # self.access_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[ACCESSE_SPACE, 2], mean=0, stddev=1), name='access_action'), axis = 1) # self.wait_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, 2], mean=0, stddev=1), name='wait_action'), axis = 1) # self.piece_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[PIECE_SPACE, 2], mean=0, stddev=1), name='piece_action'), axis = 1) # self.wait_info_action1 = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[0]], mean=0, stddev=1), name='wait_info_action1'), axis = 1) # self.wait_info_action2 = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[1]], mean=0, stddev=1), name='wait_info_action2'), axis = 1) # self.wait_info_action3 = 
tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[2]], mean=0, stddev=1), name='wait_info_action3'), axis = 1) with tf.name_scope('reward'): # add a very small number to the probability in case of logging a very small number and then ouputting NAN self.access_action = tf.add(self.access_action, 0.000001) self.wait_action = tf.add(self.wait_action, 0.000001) self.piece_action = tf.add(self.piece_action, 0.000001) self.wait_info_action1 = tf.add(self.wait_info_action1, 0.000001) self.wait_info_action2 = tf.add(self.wait_info_action2, 0.000001) self.wait_info_action3 = tf.add(self.wait_info_action3, 0.000001) access_act = tf.reshape(tf.one_hot(self.tf_access_act, 2), [-1, ACCESSE_SPACE * 2]) access_act_prob = tf.reshape((access_act * (tf.reshape(self.access_action, [ACCESSE_SPACE * 2]))), [-1 ,2]) access_act_prob = -tf.log(tf.reduce_sum(access_act_prob, axis = 1)) wait_act = tf.reshape(tf.one_hot(self.tf_wait_act, 2), [-1, WAIT_SPACE * 2]) wait_act_prob = tf.reshape((wait_act * (tf.reshape(self.wait_action, [WAIT_SPACE * 2]))), [-1 ,2]) wait_act_prob = -tf.log(tf.reduce_sum(wait_act_prob, axis = 1)) piece_act = tf.reshape(tf.one_hot(self.tf_piece_act, 2), [-1, PIECE_SPACE * 2]) piece_act_prob = tf.reshape((piece_act * (tf.reshape(self.piece_action, [PIECE_SPACE * 2]))), [-1 ,2]) piece_act_prob = -tf.log(tf.reduce_sum(piece_act_prob, axis = 1)) wait_info_act1 = tf.reshape((tf.one_hot(self.tf_wait_info_act1, wait_info_act_count[0])), [-1, WAIT_SPACE * wait_info_act_count[0]]) wait_info_act1_prob = tf.reshape((wait_info_act1 * (tf.reshape(self.wait_info_action1, [WAIT_SPACE * wait_info_act_count[0]]))), [-1, wait_info_act_count[0]]) wait_info_act1_prob = -tf.log(tf.reduce_sum(wait_info_act1_prob, axis = 1)) wait_info_act2 = tf.reshape((tf.one_hot(self.tf_wait_info_act2, wait_info_act_count[1])), [-1, WAIT_SPACE * wait_info_act_count[1]]) wait_info_act2_prob = tf.reshape((wait_info_act2 * (tf.reshape(self.wait_info_action2, [WAIT_SPACE * 
wait_info_act_count[1]]))), [-1, wait_info_act_count[1]]) wait_info_act2_prob = -tf.log(tf.reduce_sum(wait_info_act2_prob, axis = 1)) wait_info_act3 = tf.reshape((tf.one_hot(self.tf_wait_info_act3, wait_info_act_count[2])), [-1, WAIT_SPACE * wait_info_act_count[2]]) wait_info_act3_prob = tf.reshape((wait_info_act3 * (tf.reshape(self.wait_info_action3, [WAIT_SPACE * wait_info_act_count[2]]))), [-1, wait_info_act_count[2]]) wait_info_act3_prob = -tf.log(tf.reduce_sum(wait_info_act3_prob, axis = 1)) self.reward = tf.divide(tf.reduce_sum(access_act_prob * self.tf_access_vt) + \ tf.reduce_sum(piece_act_prob * self.tf_piece_vt) + \ tf.reduce_sum(wait_act_prob * self.tf_wait_vt) + \ tf.reduce_sum(wait_info_act1_prob * self.tf_wait_info_vt1) + \ tf.reduce_sum(wait_info_act2_prob * self.tf_wait_info_vt2) + \ tf.reduce_sum(wait_info_act3_prob * self.tf_wait_info_vt3), self.tf_samples_count) with tf.name_scope('train'): self.train_op = tf.train.GradientDescentOptimizer(learning_rate = self.learning_rate).minimize(self.reward) def update_policy(self): access_p, wait_p, piece_p, wait_info1_p, wait_info2_p, wait_info3_p = \ self.sess.run([self.access_action, self.wait_action, self.piece_action, \ self.wait_info_action1, self.wait_info_action2, self.wait_info_action3]) self.policy.set_prob(access_p, wait_p, piece_p, \ wait_info1_p, wait_info2_p, wait_info3_p) # store corresponding reward def record_reward(self, round_id, reward, previous_samples, idx): access = self.ep_access_act[previous_samples * ACCESSE_SPACE : (previous_samples + 1) * ACCESSE_SPACE] wait = self.ep_wait_act[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE] piece = self.ep_piece_act[previous_samples * PIECE_SPACE : (previous_samples + 1) * PIECE_SPACE] waitinfo1 = self.ep_wait_info_act1[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE] waitinfo2 = self.ep_wait_info_act2[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE] waitinfo3 = 
self.ep_wait_info_act3[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE] if reward > self.baselines.baselines[0].reward: baseline_ = Baseline(access, wait, piece, waitinfo1, waitinfo2, waitinfo3, reward) self.baselines.insert_baseline(baseline_) if reward > self.best_seen: self.best_seen = reward # save RL best seen result print('Update rl best seen sample - {}'.format(reward)) kid_path = os.path.join(os.getcwd(), self.kid_dir + '/kid_' + str(idx) + '.txt') log_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best.txt') shutil.copy(kid_path, log_path) # save RL best seen result for every round old_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best.txt') new_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best_iter_' + str(round_id) + '.txt') shutil.copy(old_path, new_path) if reward > self.round_best: self.round_best = reward kid_path = os.path.join(os.getcwd(), self.kid_dir + '/kid_' + str(idx) + '.txt') log_path = os.path.join(os.getcwd(), self.log_dir + '/round_best_' + str(round_id) + '.txt') shutil.copy(kid_path, log_path) # store round_best sample for EA future use self.round_best_sample = Sample(access, wait, piece, \ waitinfo1, waitinfo2, waitinfo3, 6, [0,4,8,1,0,0,8,4,2,1,8,1,4,2,1,4,2,4]) if self.round_worst == 0: self.round_worst = reward if reward < self.round_worst: self.round_worst = reward self.round_mean = (self.round_mean * previous_samples + reward)/(previous_samples + 1) # store reward for each sample self.ep_rs.append(reward) def Evaluate(self, command, round_id, samples_per_distribution, load_per_sample): base_path = os.path.join(os.getcwd(), self.log_dir) policy_path = os.path.join(base_path, 'Distribution.txt') with open(policy_path, 'a+') as f: f.write('RL at iter {}'.format(round_id) + '\n') f.write(str(self.policy) + '\n') self.ep_rs = [] self.ep_access_act, self.ep_wait_act, self.ep_piece_act, \ self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3, \ = 
self.policy.table_sample_batch(self.kid_dir, samples_per_distribution) policies_res = samples_eval(command, samples_per_distribution, load_per_sample) reward_ = 0 fail_to_exe = 0 for idx in range(samples_per_distribution): # if the execution has failed, rollback the ep_obs and ep_as, continue the training if policies_res[idx][0] == 0.0 and policies_res[idx][1] == 1.0: print("continue") self.rollback(idx, fail_to_exe) fail_to_exe += 1 continue print("RL sample:" + str(idx) + " throughput:" + str(policies_res[idx][0])) self.record_reward(round_id, policies_res[idx][0], idx - fail_to_exe, idx) def set_baseline(self, access, wait, piece, \ wait_info1, wait_info2, wait_info3, \ reward_buffer): samples = int(len(access) / ACCESSE_SPACE) for i in range(samples): r = reward_buffer[i] if r > self.baselines.baselines[0].reward: access_t = access[i * ACCESSE_SPACE : (i + 1) * ACCESSE_SPACE] wait_t = wait[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] piece_t = piece[i * PIECE_SPACE : (i + 1) * PIECE_SPACE] waitinfo1_t = wait_info1[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] waitinfo2_t = wait_info2[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] waitinfo3_t = wait_info3[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] baseline_ = Baseline(access_t, wait_t, piece_t, \ waitinfo1_t, waitinfo2_t, waitinfo3_t, \ r) self.baselines.insert_baseline(baseline_) print("access") print(self.baselines.baselines[0].sample) access, wait, piece, waitinfo1, waitinfo2, waitinfo3 = self.baselines.baselines[0].sample.get_actions() assign_access = tf.assign(self.access_action_v, access) assign_wait = tf.assign(self.wait_action_v, wait) assign_piece = tf.assign(self.piece_action_v, piece) assign_waitinfo1 = tf.assign(self.wait_info_action1_v, waitinfo1) assign_waitinfo2 = tf.assign(self.wait_info_action2_v, waitinfo2) assign_waitinfo3 = tf.assign(self.wait_info_action3_v, waitinfo3) self.sess.run([assign_access, assign_wait, assign_piece, assign_waitinfo1, assign_waitinfo2, assign_waitinfo3]) self.update_policy() def 
get_ic3_distribution(self, access_in, wait_in , piece_in, waitinfo1_in, waitinfo2_in, waitinfo3_in): access, wait, piece, waitinfo1, waitinfo2, waitinfo3 = \ self.baselines.baselines[0].sample.get_actions(access_in, wait_in , piece_in, waitinfo1_in, waitinfo2_in, waitinfo3_in) assign_access = tf.assign(self.access_action_v, access) assign_wait = tf.assign(self.wait_action_v, wait) assign_piece = tf.assign(self.piece_action_v, piece) assign_waitinfo1 = tf.assign(self.wait_info_action1_v, waitinfo1) assign_waitinfo2 = tf.assign(self.wait_info_action2_v, waitinfo2) assign_waitinfo3 = tf.assign(self.wait_info_action3_v, waitinfo3) self.sess.run([assign_access, assign_wait, assign_piece, assign_waitinfo1, assign_waitinfo2, assign_waitinfo3]) self.update_policy() # preprocess the reward def get_reward(self, access, wait, piece, \ wait_info1, wait_info2, wait_info3, \ reward_buffer): samples = int(len(access) / ACCESSE_SPACE) for i in range(samples): access_t = access[i * ACCESSE_SPACE : (i + 1) * ACCESSE_SPACE] wait_t = wait[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] piece_t = piece[i * PIECE_SPACE : (i + 1) * PIECE_SPACE] waitinfo1_t = wait_info1[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] waitinfo2_t = wait_info2[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] waitinfo3_t = wait_info3[i * WAIT_SPACE : (i + 1) * WAIT_SPACE] self.baselines.samples_different_action(access_t, wait_t, piece_t, \ waitinfo1_t, waitinfo2_t, waitinfo3_t) self.ep_access_rs, self.ep_wait_rs, self.ep_piece_rs, \ self.ep_waitinfo1_rs, self.ep_waitinfo2_rs, self.ep_waitinfo3_rs, \ = self.baselines.calculate_reward(reward_buffer) self.baselines.clear_signal() def learn(self, idx, lr, generations): if (len(self.ep_access_act) == 0): print("useless round") return base_path = os.path.join(os.getcwd(), self.log_dir) baseline_path = os.path.join(base_path, 'Baseline.txt') with open(baseline_path, 'a+') as f: f.write('RL at iter {}'.format(idx) + ', ') f.write(str(self.baselines) + '\n') self.get_reward(self.ep_access_act, 
self.ep_wait_act, self.ep_piece_act, \ self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3, \ self.ep_rs) self.lr = 0.5 * lr * (1 + math.cos(math.pi * idx / generations)) self.samples_count = len(self.ep_rs) self.sess.run(self.train_op, feed_dict={ self.tf_access_act: self.ep_access_act, self.tf_wait_act: self.ep_wait_act, self.tf_piece_act: self.ep_piece_act, self.tf_wait_info_act1: self.ep_wait_info_act1, self.tf_wait_info_act2: self.ep_wait_info_act2, self.tf_wait_info_act3: self.ep_wait_info_act3, self.tf_access_vt: self.ep_access_rs, self.tf_wait_vt: self.ep_wait_rs, self.tf_piece_vt: self.ep_piece_rs, self.tf_wait_info_vt1: self.ep_waitinfo1_rs, self.tf_wait_info_vt2: self.ep_waitinfo2_rs, self.tf_wait_info_vt3: self.ep_waitinfo3_rs, self.tf_samples_count: self.samples_count, self.learning_rate: self.lr, }) self.update_policy() # tool functions: def get_prob(self): self.access_p, self.wait_p, self.piece_p, \ self.wait_info1_p, self.wait_info2_p, self.wait_info3_p, \ = self.sess.run([self.access_action, self.wait_action, self.piece_action, \ self.wait_info_action1, self.wait_info_action2, self.wait_info_action3]) self.print_prob() def print_prob(self): stri = "" stri += str(self.access_p) + " " stri += str(self.wait_p) + " " stri += str(self.piece_p) + " " stri += str(self.wait_info1_p) + " " stri += str(self.wait_info2_p) + " " stri += str(self.wait_info3_p) + " " print(stri + "\n") def rollback(self, index, fail_to_exe): self.ep_access_act = self.ep_access_act[:(index - fail_to_exe) * ACCESSE_SPACE] + self.ep_access_act[(index + 1 - fail_to_exe) * ACCESSE_SPACE :] self.ep_wait_act = self.ep_wait_act[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_act[(index + 1 - fail_to_exe) * WAIT_SPACE :] self.ep_piece_act = self.ep_piece_act[:(index - fail_to_exe) * PIECE_SPACE] + self.ep_piece_act[(index + 1 - fail_to_exe) * PIECE_SPACE :] self.ep_wait_info_act1 = self.ep_wait_info_act1[:(index - fail_to_exe) * WAIT_SPACE] + 
self.ep_wait_info_act1[(index + 1 - fail_to_exe) * WAIT_SPACE :] self.ep_wait_info_act2 = self.ep_wait_info_act2[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_info_act2[(index + 1 - fail_to_exe) * WAIT_SPACE :] self.ep_wait_info_act3 = self.ep_wait_info_act3[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_info_act3[(index + 1 - fail_to_exe) * WAIT_SPACE :]
[ "tensorflow.reduce_sum", "math.cos", "numpy.array", "tensorflow.nn.softmax", "numpy.mean", "tensorflow.random_normal", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.assign", "tensorflow.one_hot", "tensorflow.add", "tensorflow.train.GradientDescentOptimizer", "shutil.copy", "t...
[((199, 236), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (218, 236), True, 'import numpy as np\n'), ((2315, 2332), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (2323, 2332), True, 'import numpy as np\n'), ((3569, 3584), 'numpy.mean', 'np.mean', (['reward'], {}), '(reward)\n', (3576, 3584), True, 'import numpy as np\n'), ((7904, 7916), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7914, 7916), True, 'import tensorflow as tf\n'), ((10475, 10518), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.access_action_v'], {'axis': '(1)'}), '(self.access_action_v, axis=1)\n', (10488, 10518), True, 'import tensorflow as tf\n'), ((10548, 10589), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.wait_action_v'], {'axis': '(1)'}), '(self.wait_action_v, axis=1)\n', (10561, 10589), True, 'import tensorflow as tf\n'), ((10620, 10662), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.piece_action_v'], {'axis': '(1)'}), '(self.piece_action_v, axis=1)\n', (10633, 10662), True, 'import tensorflow as tf\n'), ((10698, 10745), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.wait_info_action1_v'], {'axis': '(1)'}), '(self.wait_info_action1_v, axis=1)\n', (10711, 10745), True, 'import tensorflow as tf\n'), ((10781, 10828), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.wait_info_action2_v'], {'axis': '(1)'}), '(self.wait_info_action2_v, axis=1)\n', (10794, 10828), True, 'import tensorflow as tf\n'), ((10864, 10911), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.wait_info_action3_v'], {'axis': '(1)'}), '(self.wait_info_action3_v, axis=1)\n', (10877, 10911), True, 'import tensorflow as tf\n'), ((18424, 18467), 'os.path.join', 'os.path.join', (['base_path', '"""Distribution.txt"""'], {}), "(base_path, 'Distribution.txt')\n", (18436, 18467), False, 'import os\n'), ((20805, 20844), 'tensorflow.assign', 'tf.assign', (['self.access_action_v', 'access'], {}), '(self.access_action_v, access)\n', (20814, 
20844), True, 'import tensorflow as tf\n'), ((20867, 20902), 'tensorflow.assign', 'tf.assign', (['self.wait_action_v', 'wait'], {}), '(self.wait_action_v, wait)\n', (20876, 20902), True, 'import tensorflow as tf\n'), ((20926, 20963), 'tensorflow.assign', 'tf.assign', (['self.piece_action_v', 'piece'], {}), '(self.piece_action_v, piece)\n', (20935, 20963), True, 'import tensorflow as tf\n'), ((20991, 21037), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action1_v', 'waitinfo1'], {}), '(self.wait_info_action1_v, waitinfo1)\n', (21000, 21037), True, 'import tensorflow as tf\n'), ((21065, 21111), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action2_v', 'waitinfo2'], {}), '(self.wait_info_action2_v, waitinfo2)\n', (21074, 21111), True, 'import tensorflow as tf\n'), ((21139, 21185), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action3_v', 'waitinfo3'], {}), '(self.wait_info_action3_v, waitinfo3)\n', (21148, 21185), True, 'import tensorflow as tf\n'), ((21664, 21703), 'tensorflow.assign', 'tf.assign', (['self.access_action_v', 'access'], {}), '(self.access_action_v, access)\n', (21673, 21703), True, 'import tensorflow as tf\n'), ((21726, 21761), 'tensorflow.assign', 'tf.assign', (['self.wait_action_v', 'wait'], {}), '(self.wait_action_v, wait)\n', (21735, 21761), True, 'import tensorflow as tf\n'), ((21785, 21822), 'tensorflow.assign', 'tf.assign', (['self.piece_action_v', 'piece'], {}), '(self.piece_action_v, piece)\n', (21794, 21822), True, 'import tensorflow as tf\n'), ((21850, 21896), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action1_v', 'waitinfo1'], {}), '(self.wait_info_action1_v, waitinfo1)\n', (21859, 21896), True, 'import tensorflow as tf\n'), ((21924, 21970), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action2_v', 'waitinfo2'], {}), '(self.wait_info_action2_v, waitinfo2)\n', (21933, 21970), True, 'import tensorflow as tf\n'), ((21998, 22044), 'tensorflow.assign', 'tf.assign', (['self.wait_info_action3_v', 'waitinfo3'], {}), 
'(self.wait_info_action3_v, waitinfo3)\n', (22007, 22044), True, 'import tensorflow as tf\n'), ((23530, 23569), 'os.path.join', 'os.path.join', (['base_path', '"""Baseline.txt"""'], {}), "(base_path, 'Baseline.txt')\n", (23542, 23569), False, 'import os\n'), ((2344, 2359), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (2350, 2359), True, 'import numpy as np\n'), ((3606, 3625), 'numpy.array', 'np.array', (['access_dr'], {}), '(access_dr)\n', (3614, 3625), True, 'import numpy as np\n'), ((3657, 3674), 'numpy.array', 'np.array', (['wait_dr'], {}), '(wait_dr)\n', (3665, 3674), True, 'import numpy as np\n'), ((3707, 3725), 'numpy.array', 'np.array', (['piece_dr'], {}), '(piece_dr)\n', (3715, 3725), True, 'import numpy as np\n'), ((7955, 8002), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""logs/"""', 'self.sess.graph'], {}), "('logs/', self.sess.graph)\n", (7976, 8002), True, 'import tensorflow as tf\n'), ((8026, 8059), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8057, 8059), True, 'import tensorflow as tf\n'), ((8313, 8336), 'tensorflow.name_scope', 'tf.name_scope', (['"""inputs"""'], {}), "('inputs')\n", (8326, 8336), True, 'import tensorflow as tf\n'), ((8370, 8425), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""access_value"""'}), "(tf.float32, [None], name='access_value')\n", (8384, 8425), True, 'import tensorflow as tf\n'), ((8458, 8511), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""wait_value"""'}), "(tf.float32, [None], name='wait_value')\n", (8472, 8511), True, 'import tensorflow as tf\n'), ((8545, 8599), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""piece_value"""'}), "(tf.float32, [None], name='piece_value')\n", (8559, 8599), True, 'import tensorflow as tf\n'), ((8638, 8697), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': 
'"""wait_info_value1"""'}), "(tf.float32, [None], name='wait_info_value1')\n", (8652, 8697), True, 'import tensorflow as tf\n'), ((8736, 8795), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""wait_info_value2"""'}), "(tf.float32, [None], name='wait_info_value2')\n", (8750, 8795), True, 'import tensorflow as tf\n'), ((8834, 8893), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""wait_info_value3"""'}), "(tf.float32, [None], name='wait_info_value3')\n", (8848, 8893), True, 'import tensorflow as tf\n'), ((8930, 8981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""access_act"""'}), "(tf.int32, [None], name='access_act')\n", (8944, 8981), True, 'import tensorflow as tf\n'), ((9015, 9064), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""wait_act"""'}), "(tf.int32, [None], name='wait_act')\n", (9029, 9064), True, 'import tensorflow as tf\n'), ((9099, 9149), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""piece_act"""'}), "(tf.int32, [None], name='piece_act')\n", (9113, 9149), True, 'import tensorflow as tf\n'), ((9189, 9244), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""wait_info_act1"""'}), "(tf.int32, [None], name='wait_info_act1')\n", (9203, 9244), True, 'import tensorflow as tf\n'), ((9284, 9339), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""wait_info_act2"""'}), "(tf.int32, [None], name='wait_info_act2')\n", (9298, 9339), True, 'import tensorflow as tf\n'), ((9379, 9434), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""wait_info_act3"""'}), "(tf.int32, [None], name='wait_info_act3')\n", (9393, 9434), True, 'import tensorflow as tf\n'), ((9474, 9522), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""samples_count"""'}), "(tf.float32, name='samples_count')\n", (9488, 
9522), True, 'import tensorflow as tf\n'), ((9556, 9604), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (9570, 9604), True, 'import tensorflow as tf\n'), ((9649, 9707), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[INPUT_SPACE, 2]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[INPUT_SPACE, 2], mean=0, stddev=1)\n', (9665, 9707), True, 'import tensorflow as tf\n'), ((9774, 9831), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[WAIT_SPACE, 2]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[WAIT_SPACE, 2], mean=0, stddev=1)\n', (9790, 9831), True, 'import tensorflow as tf\n'), ((9897, 9955), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[PIECE_SPACE, 2]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[PIECE_SPACE, 2], mean=0, stddev=1)\n', (9913, 9955), True, 'import tensorflow as tf\n'), ((10027, 10105), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[WAIT_SPACE, wait_info_act_count[0]]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[WAIT_SPACE, wait_info_act_count[0]], mean=0, stddev=1)\n', (10043, 10105), True, 'import tensorflow as tf\n'), ((10182, 10260), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[WAIT_SPACE, wait_info_act_count[1]]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[WAIT_SPACE, wait_info_act_count[1]], mean=0, stddev=1)\n', (10198, 10260), True, 'import tensorflow as tf\n'), ((10337, 10415), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[WAIT_SPACE, wait_info_act_count[2]]', 'mean': '(0)', 'stddev': '(1)'}), '(shape=[WAIT_SPACE, wait_info_act_count[2]], mean=0, stddev=1)\n', (10353, 10415), True, 'import tensorflow as tf\n'), ((11916, 11939), 'tensorflow.name_scope', 'tf.name_scope', (['"""reward"""'], {}), "('reward')\n", (11929, 11939), True, 'import tensorflow as tf\n'), ((12093, 12126), 'tensorflow.add', 'tf.add', (['self.access_action', '(1e-06)'], {}), 
'(self.access_action, 1e-06)\n', (12099, 12126), True, 'import tensorflow as tf\n'), ((12161, 12192), 'tensorflow.add', 'tf.add', (['self.wait_action', '(1e-06)'], {}), '(self.wait_action, 1e-06)\n', (12167, 12192), True, 'import tensorflow as tf\n'), ((12228, 12260), 'tensorflow.add', 'tf.add', (['self.piece_action', '(1e-06)'], {}), '(self.piece_action, 1e-06)\n', (12234, 12260), True, 'import tensorflow as tf\n'), ((12301, 12338), 'tensorflow.add', 'tf.add', (['self.wait_info_action1', '(1e-06)'], {}), '(self.wait_info_action1, 1e-06)\n', (12307, 12338), True, 'import tensorflow as tf\n'), ((12379, 12416), 'tensorflow.add', 'tf.add', (['self.wait_info_action2', '(1e-06)'], {}), '(self.wait_info_action2, 1e-06)\n', (12385, 12416), True, 'import tensorflow as tf\n'), ((12457, 12494), 'tensorflow.add', 'tf.add', (['self.wait_info_action3', '(1e-06)'], {}), '(self.wait_info_action3, 1e-06)\n', (12463, 12494), True, 'import tensorflow as tf\n'), ((15172, 15194), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (15185, 15194), True, 'import tensorflow as tf\n'), ((17062, 17093), 'shutil.copy', 'shutil.copy', (['kid_path', 'log_path'], {}), '(kid_path, log_path)\n', (17073, 17093), False, 'import shutil\n'), ((17348, 17379), 'shutil.copy', 'shutil.copy', (['old_path', 'new_path'], {}), '(old_path, new_path)\n', (17359, 17379), False, 'import shutil\n'), ((17664, 17695), 'shutil.copy', 'shutil.copy', (['kid_path', 'log_path'], {}), '(kid_path, log_path)\n', (17675, 17695), False, 'import shutil\n'), ((18375, 18386), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18384, 18386), False, 'import os\n'), ((23479, 23490), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (23488, 23490), False, 'import os\n'), ((2478, 2493), 'numpy.sum', 'np.sum', (['rewards'], {}), '(rewards)\n', (2484, 2493), True, 'import numpy as np\n'), ((3763, 3785), 'numpy.array', 'np.array', (['waitinfo1_dr'], {}), '(waitinfo1_dr)\n', (3771, 3785), True, 'import numpy as np\n'), 
((3828, 3850), 'numpy.array', 'np.array', (['waitinfo2_dr'], {}), '(waitinfo2_dr)\n', (3836, 3850), True, 'import numpy as np\n'), ((3893, 3915), 'numpy.array', 'np.array', (['waitinfo3_dr'], {}), '(waitinfo3_dr)\n', (3901, 3915), True, 'import numpy as np\n'), ((12535, 12568), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_access_act', '(2)'], {}), '(self.tf_access_act, 2)\n', (12545, 12568), True, 'import tensorflow as tf\n'), ((12830, 12861), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_wait_act', '(2)'], {}), '(self.tf_wait_act, 2)\n', (12840, 12861), True, 'import tensorflow as tf\n'), ((13108, 13140), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_piece_act', '(2)'], {}), '(self.tf_piece_act, 2)\n', (13118, 13140), True, 'import tensorflow as tf\n'), ((13400, 13458), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_wait_info_act1', 'wait_info_act_count[0]'], {}), '(self.tf_wait_info_act1, wait_info_act_count[0])\n', (13410, 13458), True, 'import tensorflow as tf\n'), ((13805, 13863), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_wait_info_act2', 'wait_info_act_count[1]'], {}), '(self.tf_wait_info_act2, wait_info_act_count[1])\n', (13815, 13863), True, 'import tensorflow as tf\n'), ((14210, 14268), 'tensorflow.one_hot', 'tf.one_hot', (['self.tf_wait_info_act3', 'wait_info_act_count[2]'], {}), '(self.tf_wait_info_act3, wait_info_act_count[2])\n', (14220, 14268), True, 'import tensorflow as tf\n'), ((16913, 16924), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16922, 16924), False, 'import os\n'), ((17006, 17017), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17015, 17017), False, 'import os\n'), ((17185, 17196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17194, 17196), False, 'import os\n'), ((17265, 17276), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17274, 17276), False, 'import os\n'), ((17490, 17501), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17499, 17501), False, 'import os\n'), ((17583, 17594), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17592, 
17594), False, 'import os\n'), ((23972, 24009), 'math.cos', 'math.cos', (['(math.pi * idx / generations)'], {}), '(math.pi * idx / generations)\n', (23980, 24009), False, 'import math\n'), ((1953, 1970), 'numpy.array', 'np.array', (['diff[i]'], {}), '(diff[i])\n', (1961, 1970), True, 'import numpy as np\n'), ((12651, 12702), 'tensorflow.reshape', 'tf.reshape', (['self.access_action', '[ACCESSE_SPACE * 2]'], {}), '(self.access_action, [ACCESSE_SPACE * 2])\n', (12661, 12702), True, 'import tensorflow as tf\n'), ((12753, 12791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['access_act_prob'], {'axis': '(1)'}), '(access_act_prob, axis=1)\n', (12766, 12791), True, 'import tensorflow as tf\n'), ((12937, 12983), 'tensorflow.reshape', 'tf.reshape', (['self.wait_action', '[WAIT_SPACE * 2]'], {}), '(self.wait_action, [WAIT_SPACE * 2])\n', (12947, 12983), True, 'import tensorflow as tf\n'), ((13032, 13068), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wait_act_prob'], {'axis': '(1)'}), '(wait_act_prob, axis=1)\n', (13045, 13068), True, 'import tensorflow as tf\n'), ((13219, 13267), 'tensorflow.reshape', 'tf.reshape', (['self.piece_action', '[PIECE_SPACE * 2]'], {}), '(self.piece_action, [PIECE_SPACE * 2])\n', (13229, 13267), True, 'import tensorflow as tf\n'), ((13317, 13354), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['piece_act_prob'], {'axis': '(1)'}), '(piece_act_prob, axis=1)\n', (13330, 13354), True, 'import tensorflow as tf\n'), ((13568, 13641), 'tensorflow.reshape', 'tf.reshape', (['self.wait_info_action1', '[WAIT_SPACE * wait_info_act_count[0]]'], {}), '(self.wait_info_action1, [WAIT_SPACE * wait_info_act_count[0]])\n', (13578, 13641), True, 'import tensorflow as tf\n'), ((13717, 13759), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wait_info_act1_prob'], {'axis': '(1)'}), '(wait_info_act1_prob, axis=1)\n', (13730, 13759), True, 'import tensorflow as tf\n'), ((13973, 14046), 'tensorflow.reshape', 'tf.reshape', (['self.wait_info_action2', '[WAIT_SPACE * 
wait_info_act_count[1]]'], {}), '(self.wait_info_action2, [WAIT_SPACE * wait_info_act_count[1]])\n', (13983, 14046), True, 'import tensorflow as tf\n'), ((14122, 14164), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wait_info_act2_prob'], {'axis': '(1)'}), '(wait_info_act2_prob, axis=1)\n', (14135, 14164), True, 'import tensorflow as tf\n'), ((14378, 14451), 'tensorflow.reshape', 'tf.reshape', (['self.wait_info_action3', '[WAIT_SPACE * wait_info_act_count[2]]'], {}), '(self.wait_info_action3, [WAIT_SPACE * wait_info_act_count[2]])\n', (14388, 14451), True, 'import tensorflow as tf\n'), ((14527, 14569), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['wait_info_act3_prob'], {'axis': '(1)'}), '(wait_info_act3_prob, axis=1)\n', (14540, 14569), True, 'import tensorflow as tf\n'), ((15075, 15133), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wait_info_act3_prob * self.tf_wait_info_vt3)'], {}), '(wait_info_act3_prob * self.tf_wait_info_vt3)\n', (15088, 15133), True, 'import tensorflow as tf\n'), ((15224, 15291), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (15257, 15291), True, 'import tensorflow as tf\n'), ((14976, 15034), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wait_info_act2_prob * self.tf_wait_info_vt2)'], {}), '(wait_info_act2_prob * self.tf_wait_info_vt2)\n', (14989, 15034), True, 'import tensorflow as tf\n'), ((14877, 14935), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wait_info_act1_prob * self.tf_wait_info_vt1)'], {}), '(wait_info_act1_prob * self.tf_wait_info_vt1)\n', (14890, 14935), True, 'import tensorflow as tf\n'), ((14790, 14836), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(wait_act_prob * self.tf_wait_vt)'], {}), '(wait_act_prob * self.tf_wait_vt)\n', (14803, 14836), True, 'import tensorflow as tf\n'), ((14610, 14660), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(access_act_prob * self.tf_access_vt)'], {}), '(access_act_prob * 
self.tf_access_vt)\n', (14623, 14660), True, 'import tensorflow as tf\n'), ((14701, 14749), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(piece_act_prob * self.tf_piece_vt)'], {}), '(piece_act_prob * self.tf_piece_vt)\n', (14714, 14749), True, 'import tensorflow as tf\n')]
import numpy as np import os,sys,time import torch import torch.nn.functional as torch_F import collections from easydict import EasyDict as edict import util class Pose(): def __call__(self,R=None,t=None): assert(R is not None or t is not None) if R is None: if not isinstance(t,torch.Tensor): t = torch.tensor(t) R = torch.eye(3,device=t.device).repeat(*t.shape[:-1],1,1) elif t is None: if not isinstance(R,torch.Tensor): R = torch.tensor(R) t = torch.zeros(R.shape[:-1],device=R.device) else: if not isinstance(R,torch.Tensor): R = torch.tensor(R) if not isinstance(t,torch.Tensor): t = torch.tensor(t) assert(R.shape[:-1]==t.shape and R.shape[-2:]==(3,3)) R = R.float() t = t.float() pose = torch.cat([R,t[...,None]],dim=-1) # [...,3,4] assert(pose.shape[-2:]==(3,4)) return pose def invert(self,pose,use_inverse=False): R,t = pose[...,:3],pose[...,3:] R_inv = R.inverse() if use_inverse else R.transpose(-1,-2) t_inv = (-R_inv@t)[...,0] pose_inv = self(R=R_inv,t=t_inv) return pose_inv def compose(self,pose_list): # pose_new(x) = poseN(...(pose2(pose1(x)))...) 
pose_new = pose_list[0] for pose in pose_list[1:]: pose_new = self.compose_pair(pose_new,pose) return pose_new def compose_pair(self,pose_a,pose_b): # pose_new(x) = pose_b(pose_a(x)) R_a,t_a = pose_a[...,:3],pose_a[...,3:] R_b,t_b = pose_b[...,:3],pose_b[...,3:] R_new = R_b@R_a t_new = (R_b@t_a+t_b)[...,0] pose_new = self(R=R_new,t=t_new) return pose_new pose = Pose() def to_hom(X): X_hom = torch.cat([X,torch.ones_like(X[...,:1])],dim=-1) return X_hom def world2cam(X,pose): # [B,N,3] X_hom = to_hom(X) return X_hom@pose.transpose(-1,-2) def cam2img(X,cam_intr): return X@cam_intr.transpose(-1,-2) def img2cam(X,cam_intr): return X@cam_intr.inverse().transpose(-1,-2) def cam2world(X,pose): X_hom = to_hom(X) pose_inv = Pose().invert(pose) return X_hom@pose_inv.transpose(-1,-2) def angle_to_rotation_matrix(a,axis): roll = dict(X=1,Y=2,Z=0)[axis] O = torch.zeros_like(a) I = torch.ones_like(a) M = torch.stack([torch.stack([a.cos(),-a.sin(),O],dim=-1), torch.stack([a.sin(),a.cos(),O],dim=-1), torch.stack([O,O,I],dim=-1)],dim=-2) M = M.roll((roll,roll),dims=(-2,-1)) return M def get_camera_grid(opt,batch_size,intr=None): # compute image coordinate grid if opt.camera.model=="perspective": y_range = torch.arange(opt.H,dtype=torch.float32,device=opt.device).add_(0.5) x_range = torch.arange(opt.W,dtype=torch.float32,device=opt.device).add_(0.5) Y,X = torch.meshgrid(y_range,x_range) # [H,W] xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2] elif opt.camera.model=="orthographic": assert(opt.H==opt.W) y_range = torch.linspace(-1,1,opt.H,device=opt.device) x_range = torch.linspace(-1,1,opt.W,device=opt.device) Y,X = torch.meshgrid(y_range,x_range) # [H,W] xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2] xy_grid = xy_grid.repeat(batch_size,1,1) # [B,HW,2] if opt.camera.model=="perspective": grid_3D = img2cam(to_hom(xy_grid),intr) # [B,HW,3] elif opt.camera.model=="orthographic": grid_3D = to_hom(xy_grid) # [B,HW,3] return xy_grid,grid_3D def 
get_center_and_ray(opt,pose,intr=None,offset=None): # [HW,2] batch_size = len(pose) xy_grid,grid_3D = get_camera_grid(opt,batch_size,intr=intr) # [B,HW,3] # compute center and ray if opt.camera.model=="perspective": if offset is not None: grid_3D[...,:2] += offset center_3D = torch.zeros(batch_size,1,3,device=opt.device) # [B,1,3] elif opt.camera.model=="orthographic": center_3D = torch.cat([xy_grid,torch.zeros_like(xy_grid[...,:1])],dim=-1) # [B,HW,3] # transform from camera to world coordinates grid_3D = cam2world(grid_3D,pose) # [B,HW,3] center_3D = cam2world(center_3D,pose) # [B,HW,3] ray = grid_3D-center_3D # [B,HW,3] return center_3D,ray def get_3D_points_from_depth(opt,center,ray,depth,multi_samples=False): if multi_samples: center,ray = center[:,:,None],ray[:,:,None] # x = c+dv points_3D = center+ray*depth # [B,HW,3]/[B,HW,N,3]/[N,3] return points_3D def get_depth_from_3D_points(opt,center,ray,points_3D): # d = ||x-c||/||v|| (x-c and v should be in same direction) depth = (points_3D-center).norm(dim=-1,keepdim=True)/ray.norm(dim=-1,keepdim=True) # [B,HW,1] return depth
[ "torch.ones_like", "torch.eye", "torch.stack", "torch.tensor", "torch.arange", "torch.meshgrid", "torch.linspace", "torch.zeros_like", "torch.zeros", "torch.cat" ]
[((2286, 2305), 'torch.zeros_like', 'torch.zeros_like', (['a'], {}), '(a)\n', (2302, 2305), False, 'import torch\n'), ((2314, 2332), 'torch.ones_like', 'torch.ones_like', (['a'], {}), '(a)\n', (2329, 2332), False, 'import torch\n'), ((839, 875), 'torch.cat', 'torch.cat', (['[R, t[..., None]]'], {'dim': '(-1)'}), '([R, t[..., None]], dim=-1)\n', (848, 875), False, 'import torch\n'), ((2880, 2912), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (2894, 2912), False, 'import torch\n'), ((3896, 3944), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)', '(3)'], {'device': 'opt.device'}), '(batch_size, 1, 3, device=opt.device)\n', (3907, 3944), False, 'import torch\n'), ((1795, 1822), 'torch.ones_like', 'torch.ones_like', (['X[..., :1]'], {}), '(X[..., :1])\n', (1810, 1822), False, 'import torch\n'), ((2479, 2509), 'torch.stack', 'torch.stack', (['[O, O, I]'], {'dim': '(-1)'}), '([O, O, I], dim=-1)\n', (2490, 2509), False, 'import torch\n'), ((3074, 3121), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'opt.H'], {'device': 'opt.device'}), '(-1, 1, opt.H, device=opt.device)\n', (3088, 3121), False, 'import torch\n'), ((3137, 3184), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)', 'opt.W'], {'device': 'opt.device'}), '(-1, 1, opt.W, device=opt.device)\n', (3151, 3184), False, 'import torch\n'), ((3196, 3228), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (3210, 3228), False, 'import torch\n'), ((334, 349), 'torch.tensor', 'torch.tensor', (['t'], {}), '(t)\n', (346, 349), False, 'import torch\n'), ((528, 570), 'torch.zeros', 'torch.zeros', (['R.shape[:-1]'], {'device': 'R.device'}), '(R.shape[:-1], device=R.device)\n', (539, 570), False, 'import torch\n'), ((2712, 2771), 'torch.arange', 'torch.arange', (['opt.H'], {'dtype': 'torch.float32', 'device': 'opt.device'}), '(opt.H, dtype=torch.float32, device=opt.device)\n', (2724, 2771), False, 'import torch\n'), ((2798, 2857), 
'torch.arange', 'torch.arange', (['opt.W'], {'dtype': 'torch.float32', 'device': 'opt.device'}), '(opt.W, dtype=torch.float32, device=opt.device)\n', (2810, 2857), False, 'import torch\n'), ((2938, 2965), 'torch.stack', 'torch.stack', (['[X, Y]'], {'dim': '(-1)'}), '([X, Y], dim=-1)\n', (2949, 2965), False, 'import torch\n'), ((366, 395), 'torch.eye', 'torch.eye', (['(3)'], {'device': 't.device'}), '(3, device=t.device)\n', (375, 395), False, 'import torch\n'), ((496, 511), 'torch.tensor', 'torch.tensor', (['R'], {}), '(R)\n', (508, 511), False, 'import torch\n'), ((635, 650), 'torch.tensor', 'torch.tensor', (['R'], {}), '(R)\n', (647, 650), False, 'import torch\n'), ((702, 717), 'torch.tensor', 'torch.tensor', (['t'], {}), '(t)\n', (714, 717), False, 'import torch\n'), ((3254, 3281), 'torch.stack', 'torch.stack', (['[X, Y]'], {'dim': '(-1)'}), '([X, Y], dim=-1)\n', (3265, 3281), False, 'import torch\n'), ((4034, 4068), 'torch.zeros_like', 'torch.zeros_like', (['xy_grid[..., :1]'], {}), '(xy_grid[..., :1])\n', (4050, 4068), False, 'import torch\n')]
from __future__ import print_function import time import sys import os import shutil import csv import boto3 from awsglue.utils import getResolvedOptions import pyspark from pyspark.sql import SparkSession from pyspark.ml import Pipeline from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.sql.functions import * from mleap.pyspark.spark_support import SimpleSparkSerializer def toCSVLine(data): r = ','.join(str(d) for d in data[1]) return str(data[0]) + "," + r def main(): spark = SparkSession.builder.appName("PySparkTitanic").getOrCreate() args = getResolvedOptions(sys.argv, ['s3_input_data_location', 's3_output_bucket', 's3_output_bucket_prefix', 's3_model_bucket', 's3_model_bucket_prefix']) # This is needed to write RDDs to file which is the only way to write nested Dataframes into CSV. spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class", "org.apache.hadoop.mapred.FileOutputCommitter") train = spark.read.csv(args['s3_input_data_location'], header=False) oldColumns = train.schema.names newColumns = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'cat'] train = reduce(lambda train, idx: train.withColumnRenamed(oldColumns[idx], newColumns[idx]), xrange(len(oldColumns)), train) # dropping null values train = train.dropna() # Target label catIndexer = StringIndexer(inputCol="cat", outputCol="label") labelIndexModel = catIndexer.fit(train) train = labelIndexModel.transform(train) converter = IndexToString(inputCol="label", outputCol="cat") # Spliting in train and test set. Beware : It sorts the dataset (traindf, validationdf) = train.randomSplit([0.8, 0.2]) # Index labels, adding metadata to the label column. # Fit on whole dataset to include all labels in index. 
buyingIndexer = StringIndexer(inputCol="buying", outputCol="indexedBuying") maintIndexer = StringIndexer(inputCol="maint", outputCol="indexedMaint") doorsIndexer = StringIndexer(inputCol="doors", outputCol="indexedDoors") personsIndexer = StringIndexer(inputCol="persons", outputCol="indexedPersons") lug_bootIndexer = StringIndexer(inputCol="lug_boot", outputCol="indexedLug_boot") safetyIndexer = StringIndexer(inputCol="safety", outputCol="indexedSafety") # One Hot Encoder on indexed features buyingEncoder = OneHotEncoder(inputCol="indexedBuying", outputCol="buyingVec") maintEncoder = OneHotEncoder(inputCol="indexedMaint", outputCol="maintVec") doorsEncoder = OneHotEncoder(inputCol="indexedDoors", outputCol="doorsVec") personsEncoder = OneHotEncoder(inputCol="indexedPersons", outputCol="personsVec") lug_bootEncoder = OneHotEncoder(inputCol="indexedLug_boot", outputCol="lug_bootVec") safetyEncoder = OneHotEncoder(inputCol="indexedSafety", outputCol="safetyVec") # Create the vector structured data (label,features(vector)) assembler = VectorAssembler(inputCols=["buyingVec", "maintVec", "doorsVec", "personsVec", "lug_bootVec", "safetyVec"], outputCol="features") # Chain featurizers in a Pipeline pipeline = Pipeline(stages=[buyingIndexer, maintIndexer, doorsIndexer, personsIndexer, lug_bootIndexer, safetyIndexer, buyingEncoder, maintEncoder, doorsEncoder, personsEncoder, lug_bootEncoder, safetyEncoder, assembler]) # Train model. This also runs the indexers. model = pipeline.fit(traindf) # Delete previous data from output s3 = boto3.resource('s3') bucket = s3.Bucket(args['s3_output_bucket']) bucket.objects.filter(Prefix=args['s3_output_bucket_prefix']).delete() # Save transformed training data to CSV in S3 by converting to RDD. 
transformed_traindf = model.transform(traindf) transformed_train_rdd = transformed_traindf.rdd.map(lambda x: (x.label, x.features)) lines = transformed_train_rdd.map(toCSVLine) lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'train') # Similar data processing for validation dataset. predictions = model.transform(validationdf) transformed_train_rdd = predictions.rdd.map(lambda x: (x.label, x.features)) lines = transformed_train_rdd.map(toCSVLine) lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'validation') # Serialize and store via MLeap SimpleSparkSerializer().serializeToBundle(model, "jar:file:/tmp/model.zip", predictions) # Unzipping as SageMaker expects a .tar.gz file but MLeap produces a .zip file. import zipfile with zipfile.ZipFile("/tmp/model.zip") as zf: zf.extractall("/tmp/model") # Writing back the content as a .tar.gz file import tarfile with tarfile.open("/tmp/model.tar.gz", "w:gz") as tar: tar.add("/tmp/model/bundle.json", arcname='bundle.json') tar.add("/tmp/model/root", arcname='root') s3 = boto3.resource('s3') file_name = args['s3_model_bucket_prefix'] + '/' + 'model.tar.gz' s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/model.tar.gz', file_name) os.remove('/tmp/model.zip') os.remove('/tmp/model.tar.gz') shutil.rmtree('/tmp/model') # Save postprocessor SimpleSparkSerializer().serializeToBundle(converter, "jar:file:/tmp/postprocess.zip", predictions) with zipfile.ZipFile("/tmp/postprocess.zip") as zf: zf.extractall("/tmp/postprocess") # Writing back the content as a .tar.gz file import tarfile with tarfile.open("/tmp/postprocess.tar.gz", "w:gz") as tar: tar.add("/tmp/postprocess/bundle.json", arcname='bundle.json') tar.add("/tmp/postprocess/root", arcname='root') file_name = args['s3_model_bucket_prefix'] + '/' + 'postprocess.tar.gz' s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/postprocess.tar.gz', file_name) 
os.remove('/tmp/postprocess.zip') os.remove('/tmp/postprocess.tar.gz') shutil.rmtree('/tmp/postprocess') if __name__ == "__main__": main()
[ "tarfile.open", "pyspark.ml.Pipeline", "zipfile.ZipFile", "pyspark.ml.feature.StringIndexer", "os.remove", "mleap.pyspark.spark_support.SimpleSparkSerializer", "boto3.resource", "awsglue.utils.getResolvedOptions", "pyspark.sql.SparkSession.builder.appName", "shutil.rmtree", "pyspark.ml.feature.V...
[((716, 868), 'awsglue.utils.getResolvedOptions', 'getResolvedOptions', (['sys.argv', "['s3_input_data_location', 's3_output_bucket', 's3_output_bucket_prefix',\n 's3_model_bucket', 's3_model_bucket_prefix']"], {}), "(sys.argv, ['s3_input_data_location', 's3_output_bucket',\n 's3_output_bucket_prefix', 's3_model_bucket', 's3_model_bucket_prefix'])\n", (734, 868), False, 'from awsglue.utils import getResolvedOptions\n'), ((1766, 1814), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""cat"""', 'outputCol': '"""label"""'}), "(inputCol='cat', outputCol='label')\n", (1779, 1814), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((1930, 1978), 'pyspark.ml.feature.IndexToString', 'IndexToString', ([], {'inputCol': '"""label"""', 'outputCol': '"""cat"""'}), "(inputCol='label', outputCol='cat')\n", (1943, 1978), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2249, 2308), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""buying"""', 'outputCol': '"""indexedBuying"""'}), "(inputCol='buying', outputCol='indexedBuying')\n", (2262, 2308), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2328, 2385), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""maint"""', 'outputCol': '"""indexedMaint"""'}), "(inputCol='maint', outputCol='indexedMaint')\n", (2341, 2385), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2405, 2462), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""doors"""', 'outputCol': '"""indexedDoors"""'}), "(inputCol='doors', outputCol='indexedDoors')\n", (2418, 2462), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, 
IndexToString\n'), ((2484, 2545), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""persons"""', 'outputCol': '"""indexedPersons"""'}), "(inputCol='persons', outputCol='indexedPersons')\n", (2497, 2545), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2568, 2631), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""lug_boot"""', 'outputCol': '"""indexedLug_boot"""'}), "(inputCol='lug_boot', outputCol='indexedLug_boot')\n", (2581, 2631), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2652, 2711), 'pyspark.ml.feature.StringIndexer', 'StringIndexer', ([], {'inputCol': '"""safety"""', 'outputCol': '"""indexedSafety"""'}), "(inputCol='safety', outputCol='indexedSafety')\n", (2665, 2711), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2780, 2842), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedBuying"""', 'outputCol': '"""buyingVec"""'}), "(inputCol='indexedBuying', outputCol='buyingVec')\n", (2793, 2842), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2862, 2922), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedMaint"""', 'outputCol': '"""maintVec"""'}), "(inputCol='indexedMaint', outputCol='maintVec')\n", (2875, 2922), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((2942, 3002), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedDoors"""', 'outputCol': '"""doorsVec"""'}), "(inputCol='indexedDoors', outputCol='doorsVec')\n", (2955, 3002), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), 
((3024, 3088), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedPersons"""', 'outputCol': '"""personsVec"""'}), "(inputCol='indexedPersons', outputCol='personsVec')\n", (3037, 3088), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((3111, 3177), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedLug_boot"""', 'outputCol': '"""lug_bootVec"""'}), "(inputCol='indexedLug_boot', outputCol='lug_bootVec')\n", (3124, 3177), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((3198, 3260), 'pyspark.ml.feature.OneHotEncoder', 'OneHotEncoder', ([], {'inputCol': '"""indexedSafety"""', 'outputCol': '"""safetyVec"""'}), "(inputCol='indexedSafety', outputCol='safetyVec')\n", (3211, 3260), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((3343, 3475), 'pyspark.ml.feature.VectorAssembler', 'VectorAssembler', ([], {'inputCols': "['buyingVec', 'maintVec', 'doorsVec', 'personsVec', 'lug_bootVec', 'safetyVec']", 'outputCol': '"""features"""'}), "(inputCols=['buyingVec', 'maintVec', 'doorsVec',\n 'personsVec', 'lug_bootVec', 'safetyVec'], outputCol='features')\n", (3358, 3475), False, 'from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString\n'), ((3526, 3744), 'pyspark.ml.Pipeline', 'Pipeline', ([], {'stages': '[buyingIndexer, maintIndexer, doorsIndexer, personsIndexer, lug_bootIndexer,\n safetyIndexer, buyingEncoder, maintEncoder, doorsEncoder,\n personsEncoder, lug_bootEncoder, safetyEncoder, assembler]'}), '(stages=[buyingIndexer, maintIndexer, doorsIndexer, personsIndexer,\n lug_bootIndexer, safetyIndexer, buyingEncoder, maintEncoder,\n doorsEncoder, personsEncoder, lug_bootEncoder, safetyEncoder, assembler])\n', (3534, 3744), False, 'from pyspark.ml import 
Pipeline\n'), ((3874, 3894), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (3888, 3894), False, 'import boto3\n'), ((5346, 5366), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (5360, 5366), False, 'import boto3\n'), ((5525, 5552), 'os.remove', 'os.remove', (['"""/tmp/model.zip"""'], {}), "('/tmp/model.zip')\n", (5534, 5552), False, 'import os\n'), ((5557, 5587), 'os.remove', 'os.remove', (['"""/tmp/model.tar.gz"""'], {}), "('/tmp/model.tar.gz')\n", (5566, 5587), False, 'import os\n'), ((5592, 5619), 'shutil.rmtree', 'shutil.rmtree', (['"""/tmp/model"""'], {}), "('/tmp/model')\n", (5605, 5619), False, 'import shutil\n'), ((6285, 6318), 'os.remove', 'os.remove', (['"""/tmp/postprocess.zip"""'], {}), "('/tmp/postprocess.zip')\n", (6294, 6318), False, 'import os\n'), ((6323, 6359), 'os.remove', 'os.remove', (['"""/tmp/postprocess.tar.gz"""'], {}), "('/tmp/postprocess.tar.gz')\n", (6332, 6359), False, 'import os\n'), ((6364, 6397), 'shutil.rmtree', 'shutil.rmtree', (['"""/tmp/postprocess"""'], {}), "('/tmp/postprocess')\n", (6377, 6397), False, 'import shutil\n'), ((5015, 5048), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""/tmp/model.zip"""'], {}), "('/tmp/model.zip')\n", (5030, 5048), False, 'import zipfile\n'), ((5170, 5211), 'tarfile.open', 'tarfile.open', (['"""/tmp/model.tar.gz"""', '"""w:gz"""'], {}), "('/tmp/model.tar.gz', 'w:gz')\n", (5182, 5211), False, 'import tarfile\n'), ((5763, 5802), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""/tmp/postprocess.zip"""'], {}), "('/tmp/postprocess.zip')\n", (5778, 5802), False, 'import zipfile\n'), ((5930, 5977), 'tarfile.open', 'tarfile.open', (['"""/tmp/postprocess.tar.gz"""', '"""w:gz"""'], {}), "('/tmp/postprocess.tar.gz', 'w:gz')\n", (5942, 5977), False, 'import tarfile\n'), ((639, 685), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""PySparkTitanic"""'], {}), "('PySparkTitanic')\n", (667, 685), False, 'from pyspark.sql import SparkSession\n'), 
((4809, 4832), 'mleap.pyspark.spark_support.SimpleSparkSerializer', 'SimpleSparkSerializer', ([], {}), '()\n', (4830, 4832), False, 'from mleap.pyspark.spark_support import SimpleSparkSerializer\n'), ((5654, 5677), 'mleap.pyspark.spark_support.SimpleSparkSerializer', 'SimpleSparkSerializer', ([], {}), '()\n', (5675, 5677), False, 'from mleap.pyspark.spark_support import SimpleSparkSerializer\n')]
from datetime import datetime __author__ = 'aGn' __copyright__ = "Copyright 2018, Planet Earth" class Response(object): """Response Class""" def __init__(self): self.socket = None @staticmethod def publisher( module, meta_data, **kwargs ): """ Packing Json file in order to sending on ZMQ pipeline. :param module: :param meta_data: :param kwargs: SNMP values result. :return: """ for name, data in kwargs.items(): if data != -8555: meta_data['status'] = 200 else: meta_data['status'] = 404 result = { 'data': {name: data}, 'module': module, 'time': datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), 'station': 'SNMP', 'tags': meta_data } print({name: data}, ' ', result['time']) def publish( self, module, meta_data, **kwargs ): """ Call the publisher method to send the result on the subscriber servers by ZMQ. :param module: :param meta_data: :param kwargs: :return: """ self.publisher( module, meta_data, **kwargs )
[ "datetime.datetime.now" ]
[((786, 800), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (798, 800), False, 'from datetime import datetime\n')]
import mimetypes import os from pathlib import Path def ignore_files(dir: str, files: list[str]): """ Returns a list of files to ignore. To be used by shutil.copytree() """ return [f for f in files if Path(dir, f).is_file()] def get_input_images(input_folder: Path, output_path: Path): """ Get all images from a folder and it's subfolders. Also outputs a save path to be used by the image. :param input_folder: The folder to be scanned. :param output_path: The root folder of the destination path. """ for root, _, files in os.walk(input_folder): for file in files: mime_type = mimetypes.guess_type(file)[0] if isinstance(mime_type, str): if "image" in mime_type: image = Path(root, file) relative_path = image.relative_to(input_folder) save_path = Path(output_path, relative_path) yield image, save_path def generate_filename(input_path: Path) -> Path: gen_counter = 1 gen_output = input_path.with_name(f"{input_path.stem}-{gen_counter}").with_suffix( ".jpg" ) while gen_output.is_file(): gen_counter += 1 gen_output = input_path.with_name( f"{input_path.stem}-{gen_counter}" ).with_suffix(".jpg") output_path = gen_output return output_path
[ "mimetypes.guess_type", "os.walk", "pathlib.Path" ]
[((579, 600), 'os.walk', 'os.walk', (['input_folder'], {}), '(input_folder)\n', (586, 600), False, 'import os\n'), ((653, 679), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file'], {}), '(file)\n', (673, 679), False, 'import mimetypes\n'), ((224, 236), 'pathlib.Path', 'Path', (['dir', 'f'], {}), '(dir, f)\n', (228, 236), False, 'from pathlib import Path\n'), ((796, 812), 'pathlib.Path', 'Path', (['root', 'file'], {}), '(root, file)\n', (800, 812), False, 'from pathlib import Path\n'), ((913, 945), 'pathlib.Path', 'Path', (['output_path', 'relative_path'], {}), '(output_path, relative_path)\n', (917, 945), False, 'from pathlib import Path\n')]
from typing import List from PySide2.QtGui import QVector3D from nexus_constructor.common_attrs import SHAPE_GROUP_NAME, CommonAttrs from nexus_constructor.model.component import Component from nexus_constructor.model.geometry import OFFGeometryNoNexus class SlitGeometry: def __init__(self, component: Component): gaps: tuple = ( float(component["x_gap"].values) if "x_gap" in component else None, float(component["y_gap"].values) if "y_gap" in component else None, ) self._units = self._get_units(component) self.vertices: List[QVector3D] = [] self.faces: List[List[int]] self._gaps: tuple = gaps self._create_vertices() self._create_faces() def _get_units(self, component: Component): if "x_gap" in component: return component["x_gap"].attributes.get_attribute_value(CommonAttrs.UNITS) elif "y_gap" in component: return component["y_gap"].attributes.get_attribute_value(CommonAttrs.UNITS) else: return "" def _create_vertices(self): x_gap, y_gap = self._gaps if x_gap: x_1 = 0.0 x_2 = -1.0 half_side_length = x_gap * 2 dx = x_gap / 2 + half_side_length else: x_1 = -0.1 x_2 = -0.5 dx = 0 half_side_length = 0.05 if y_gap: dy = y_gap / 2 slit_thickness = y_gap * 2 else: slit_thickness = 0.02 dy = half_side_length slit_matrix = [ [x_2, -1, 0.1], [x_1, -1, 0.1], [x_2, 1, 0.1], [x_1, 1, 0.1], [x_2, 1, -0.1], [x_1, 1, -0.1], [x_2, -1, -0.1], [x_1, -1, -0.1], ] # Left and right rectangle. dimension_matrix = [] for column in slit_matrix: dimension_matrix.append( [ column[0] * half_side_length + dx, column[1] * dy, column[2] * half_side_length, ] ) vertices_left_bank: List[QVector3D] = [] vertices_right_bank: List[QVector3D] = [] for column in dimension_matrix: vertices_left_bank.append(QVector3D(column[0], column[1], column[2])) vertices_right_bank.append(QVector3D(-column[0], -column[1], column[2])) # Lower and upper rectangle. 
x_dist = dx if x_gap else half_side_length / 2 slit_matrix = [ [1, dy, 0.1], [-1, dy, 0.1], [1, slit_thickness + dy, 0.1], [-1, slit_thickness + dy, 0.1], [1, slit_thickness + dy, -0.1], [-1, slit_thickness + dy, -0.1], [1, dy, -0.1], [-1, dy, -0.1], ] dimension_matrix = [] for column in slit_matrix: dimension_matrix.append( [column[0] * x_dist, column[1], column[2] * half_side_length] ) vertices_lower_bank: List[QVector3D] = [] vertices_upper_bank: List[QVector3D] = [] for column in dimension_matrix: vertices_lower_bank.append(QVector3D(column[0], column[1], column[2])) vertices_upper_bank.append(QVector3D(column[0], -column[1], column[2])) self.vertices = ( vertices_left_bank + vertices_right_bank + vertices_lower_bank + vertices_upper_bank ) def _create_faces(self): left_faces = [ [0, 1, 3, 2], [2, 3, 5, 4], [4, 5, 7, 6], [6, 7, 1, 0], [1, 7, 5, 3], [6, 0, 2, 4], ] right_faces = [ [col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in left_faces ] lower_faces = [ [col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in right_faces ] upper_faces = [ [col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in lower_faces ] self.faces = left_faces + right_faces + lower_faces + upper_faces def create_slit_geometry(self) -> OFFGeometryNoNexus: geometry = OFFGeometryNoNexus(self.vertices, self.faces, SHAPE_GROUP_NAME) geometry.units = self._units return geometry
[ "nexus_constructor.model.geometry.OFFGeometryNoNexus", "PySide2.QtGui.QVector3D" ]
[((4267, 4330), 'nexus_constructor.model.geometry.OFFGeometryNoNexus', 'OFFGeometryNoNexus', (['self.vertices', 'self.faces', 'SHAPE_GROUP_NAME'], {}), '(self.vertices, self.faces, SHAPE_GROUP_NAME)\n', (4285, 4330), False, 'from nexus_constructor.model.geometry import OFFGeometryNoNexus\n'), ((2333, 2375), 'PySide2.QtGui.QVector3D', 'QVector3D', (['column[0]', 'column[1]', 'column[2]'], {}), '(column[0], column[1], column[2])\n', (2342, 2375), False, 'from PySide2.QtGui import QVector3D\n'), ((2416, 2460), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(-column[0])', '(-column[1])', 'column[2]'], {}), '(-column[0], -column[1], column[2])\n', (2425, 2460), False, 'from PySide2.QtGui import QVector3D\n'), ((3246, 3288), 'PySide2.QtGui.QVector3D', 'QVector3D', (['column[0]', 'column[1]', 'column[2]'], {}), '(column[0], column[1], column[2])\n', (3255, 3288), False, 'from PySide2.QtGui import QVector3D\n'), ((3329, 3372), 'PySide2.QtGui.QVector3D', 'QVector3D', (['column[0]', '(-column[1])', 'column[2]'], {}), '(column[0], -column[1], column[2])\n', (3338, 3372), False, 'from PySide2.QtGui import QVector3D\n')]
# -*- coding: utf-8 -*- ################################################################################# # Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>) # Copyright(c): 2015-Present Webkul Software Pvt. Ltd. # License URL : https://store.webkul.com/license.html/ # All Rights Reserved. # # # # This program is copyright property of the author mentioned above. # You can`t redistribute it and/or modify it. # # # You should have received a copy of the License along with this program. # If not, see <https://store.webkul.com/license.html/> ################################################################################# from odoo import models, fields, api, _ from odoo.tools.translate import _ from odoo.exceptions import UserError import logging _logger = logging.getLogger(__name__) class ResConfigSettings(models.TransientModel): _inherit = "res.config.settings" @api.model def _default_category(self): obj = self.env["product.category"].search([('name', '=', _('All'))]) return obj[0] if obj else self.env["product.category"] @api.model def get_journal_id(self): obj = self.env["account.journal"].search([('name', '=', _('Vendor Bills'))]) return obj[0] if obj else self.env["account.journal"] auto_product_approve = fields.Boolean(string="Auto Product Approve") internal_categ = fields.Many2one( "product.category", string="Internal Category") warehouse_location_id = fields.Many2one( "stock.location", string="Warehouse Location", domain="[('usage', '=', 'internal')]") mp_default_warehouse_id = fields.Many2one("stock.warehouse", string="Warehouse") seller_payment_limit = fields.Integer(string="Seller Payment Limit") next_payment_requset = fields.Integer(string="Next Payment Request") group_mp_product_variant = fields.Boolean( string="Allow sellers for several product attributes, defining variants (Example: size, color,...)", group='odoo_marketplace.marketplace_seller_group', implied_group='product.group_product_variant' ) group_mp_shop_allow = fields.Boolean( 
string="Allow sellers to manage seller shop.", group='odoo_marketplace.marketplace_seller_group', implied_group='odoo_marketplace.group_marketplace_seller_shop' ) group_mp_product_pricelist = fields.Boolean( string="Allow sellers for Advanced pricing on product using pricelist.", group='odoo_marketplace.marketplace_seller_group', implied_group='product.group_product_pricelist' ) # Inventory related field auto_approve_qty = fields.Boolean(string="Auto Quantity Approve") # Seller related field auto_approve_seller = fields.Boolean(string="Auto Seller Approve") global_commission = fields.Float(string="Global Commission") # Mail notification related fields enable_notify_admin_4_new_seller = fields.Boolean(string="Enable Notification Admin For New Seller") enable_notify_seller_4_new_seller = fields.Boolean( string="Enable Notification Seller for Seller Request") enable_notify_admin_on_seller_approve_reject = fields.Boolean( string="Enable Notification Admin On Seller Approve Reject") enable_notify_seller_on_approve_reject = fields.Boolean(string="Enable Notification Seller On Approve Reject") enable_notify_admin_on_product_approve_reject = fields.Boolean( string="Enable Notification Admin On Product Approve Reject") enable_notify_seller_on_product_approve_reject = fields.Boolean( string="Enable Notification Seller On Product Approve Reject") enable_notify_seller_on_new_order = fields.Boolean(string="Enable Notification Seller On New Order") notify_admin_4_new_seller_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Admin For New Seller", domain="[('model_id.model','=','res.partner')]") notify_seller_4_new_seller_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Seller On Seller Request", domain="[('model_id.model','=','res.partner')]") notify_admin_on_seller_approve_reject_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Admin on Seller Approve/Reject", 
domain="[('model_id.model','=','res.partner')]") notify_seller_on_approve_reject_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Seller On Approve/Reject", domain="[('model_id.model','=','res.partner')]") notify_admin_on_product_approve_reject_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Admin On Product Approve/Reject", domain="[('model_id.model','=','product.template')]") notify_seller_on_product_approve_reject_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Seller On Product Approve/Reject", domain="[('model_id.model','=','product.template')]") notify_seller_on_new_order_m_tmpl_id = fields.Many2one( "mail.template", string="Mail Template to Notify Seller On New Order", domain="[('model_id.model','=','sale.order.line')]") # Seller shop/profile releted field product_count = fields.Boolean(related="website_id.mp_product_count", string="Show seller's product count on website.", readonly=False) sale_count = fields.Boolean(related="website_id.mp_sale_count", string="Show seller's sales count on website.", readonly=False) shipping_address = fields.Boolean(related="website_id.mp_shipping_address", string="Show seller's shipping address on website.", readonly=False) seller_since = fields.Boolean(related="website_id.mp_seller_since", string="Show seller since Date on website.", readonly=False) seller_t_c = fields.Boolean(related="website_id.mp_seller_t_c", string="Show seller's Terms & Conditions on website.", readonly=False) seller_contact_btn = fields.Boolean(related="website_id.mp_seller_contact_btn", string='Show "Contact Seller" Button on website.', readonly=False) seller_review = fields.Boolean(related="website_id.mp_seller_review", string='Show Seller Review on website.', readonly=False) return_policy = fields.Boolean(related="website_id.mp_return_policy", string="Show seller's Retrun Policy on website.", readonly=False) shipping_policy = 
fields.Boolean(related="website_id.mp_shipping_policy", string="Show Seller's Shipping Policy on website.", readonly=False) recently_product = fields.Integer(related="website_id.mp_recently_product", string="# of products for recently added products menu. ", readonly=False) # Seller Review settings field review_load_no = fields.Integer(related="website_id.mp_review_load_no", string="No. of Reviews to load", help="Set default numbers of review to show on website.", readonly=False) review_auto_publish = fields.Boolean(related="website_id.mp_review_auto_publish", string="Auto Publish", help="Publish Customer's review automatically.", readonly=False) show_seller_list = fields.Boolean(related="website_id.mp_show_seller_list", string='Show Sellers List on website.', readonly=False) show_seller_shop_list = fields.Boolean(related="website_id.mp_show_seller_shop_list", string='Show Seller Shop List on website.', readonly=False) show_become_a_seller = fields.Boolean(related="website_id.mp_show_become_a_seller",string="Show Become a Seller button on Account Home Page", readonly=False) seller_payment_journal_id = fields.Many2one("account.journal", string="Seller Payment Journal", default=get_journal_id, domain="[('type', '=', 'purchase')]") mp_currency_id = fields.Many2one('res.currency', "Marketplace Currency", readonly=False) show_visit_shop = fields.Boolean("Show visit shop link on product page") seller_payment_product_id = fields.Many2one("product.product", string="Seller Payment Product", domain="[('sale_ok', '=', False),('purchase_ok', '=', False),('type','=','service')]") term_and_condition = fields.Html(string="Marketplace Terms & Conditions", related="website_id.mp_term_and_condition", readonly=False) message_to_publish = fields.Text( string="Review feedback message", help="Message to Customer on review publish.", related="website_id.mp_message_to_publish", readonly=False) sell_page_label = fields.Char( string="Sell Link Label", 
related="website_id.mp_sell_page_label", readonly=False) sellers_list_label = fields.Char( string="Seller List Link Label", related="website_id.mp_sellers_list_label", readonly=False) seller_shop_list_label = fields.Char( string="Seller Shop List Link Label", related="website_id.mp_seller_shop_list_label", readonly=False) landing_page_banner = fields.Binary(string="Landing Page Banner", related="website_id.mp_landing_page_banner", readonly=False) seller_new_status_msg = fields.Text( string="For New Status", related="website_id.mp_seller_new_status_msg", readonly=False) seller_pending_status_msg = fields.Text( string="For Pending Status", related="website_id.mp_seller_pending_status_msg", readonly=False) show_sell_menu_header = fields.Boolean(related="website_id.mp_show_sell_menu_header", string="Show Sell menu in header", readonly=False) show_sell_menu_footer = fields.Boolean(related="website_id.mp_show_sell_menu_footer", string="Show Sell menu in footer", readonly=False) # seller_denied_status_msg = fields.Text( # string="For Denied Status", related="website_id.mp_seller_denied_status_msg") @api.onchange("warehouse_location_id") def on_change_location_id(self): if not self.warehouse_location_id: wl_obj = self.env["stock.location"].sudo().browse( self.warehouse_location_id.id) wh_obj = self.env["stock.warehouse"] whs = wh_obj.search([('view_location_id', 'parent_of', wl_obj.ids)], limit=1) if whs: self.mp_default_warehouse_id = whs.id @api.multi def set_values(self): super(ResConfigSettings, self).set_values() self.env['ir.default'].sudo().set('res.config.settings', 'auto_product_approve', self.auto_product_approve) self.env['ir.default'].sudo().set('res.config.settings', 'internal_categ', self.internal_categ.id) self.env['ir.default'].sudo().set('res.config.settings', 'mp_default_warehouse_id', self.mp_default_warehouse_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'warehouse_location_id', self.warehouse_location_id.id) 
self.env['ir.default'].sudo().set('res.config.settings', 'auto_approve_qty', self.auto_approve_qty) self.env['ir.default'].sudo().set('res.config.settings', 'auto_approve_seller', self.auto_approve_seller) self.env['ir.default'].sudo().set('res.config.settings', 'global_commission', self.global_commission) self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_limit', self.seller_payment_limit) self.env['ir.default'].sudo().set('res.config.settings', 'next_payment_requset', self.next_payment_requset) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_4_new_seller', self.enable_notify_admin_4_new_seller) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_4_new_seller', self.enable_notify_seller_4_new_seller) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_on_seller_approve_reject', self.enable_notify_admin_on_seller_approve_reject) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_approve_reject', self.enable_notify_seller_on_approve_reject) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_on_product_approve_reject', self.enable_notify_admin_on_product_approve_reject) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_product_approve_reject', self.enable_notify_seller_on_product_approve_reject) self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_new_order', self.enable_notify_seller_on_new_order) self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_4_new_seller_m_tmpl_id', self.notify_admin_4_new_seller_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_4_new_seller_m_tmpl_id', self.notify_seller_4_new_seller_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_on_seller_approve_reject_m_tmpl_id', self.notify_admin_on_seller_approve_reject_m_tmpl_id.id) 
self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_approve_reject_m_tmpl_id', self.notify_seller_on_approve_reject_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_on_product_approve_reject_m_tmpl_id', self.notify_admin_on_product_approve_reject_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_product_approve_reject_m_tmpl_id', self.notify_seller_on_product_approve_reject_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_new_order_m_tmpl_id', self.notify_seller_on_new_order_m_tmpl_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_journal_id', self.seller_payment_journal_id.id) seller_payment = self.env["seller.payment"].sudo().search([]) #For users who are not from marketplace group if not seller_payment: self.env['ir.default'].sudo().set('res.config.settings', 'mp_currency_id', self.mp_currency_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'show_visit_shop', self.show_visit_shop) self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_product_id', self.seller_payment_product_id.id) self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_product_variant', self.group_mp_product_variant) self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_shop_allow', self.group_mp_shop_allow) self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_product_pricelist', self.group_mp_product_pricelist) @api.model def get_values(self): res = super(ResConfigSettings, self).get_values() auto_product_approve = self.env['ir.default'].get('res.config.settings', 'auto_product_approve') internal_categ = self.env['ir.default'].get('res.config.settings', 'internal_categ') or self._default_category().id mp_default_warehouse_id = self.env['ir.default'].get('res.config.settings', 'mp_default_warehouse_id') warehouse_location_id = 
self.env['ir.default'].get('res.config.settings', 'warehouse_location_id') or self._default_location().id auto_approve_qty = self.env['ir.default'].get('res.config.settings', 'auto_approve_qty') auto_approve_seller = self.env['ir.default'].get('res.config.settings', 'auto_approve_seller') global_commission = self.env['ir.default'].get('res.config.settings', 'global_commission') seller_payment_limit = self.env['ir.default'].get('res.config.settings', 'seller_payment_limit') next_payment_requset = self.env['ir.default'].get('res.config.settings', 'next_payment_requset') enable_notify_admin_4_new_seller = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_4_new_seller') enable_notify_seller_4_new_seller = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_4_new_seller') enable_notify_admin_on_seller_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_on_seller_approve_reject') enable_notify_seller_on_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_approve_reject') enable_notify_admin_on_product_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_on_product_approve_reject') enable_notify_seller_on_product_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_product_approve_reject') enable_notify_seller_on_new_order = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_new_order') notify_admin_4_new_seller_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_4_new_seller_m_tmpl_id') notify_seller_4_new_seller_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_4_new_seller_m_tmpl_id') notify_admin_on_seller_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_on_seller_approve_reject_m_tmpl_id') notify_seller_on_approve_reject_m_tmpl_id = 
self.env['ir.default'].get('res.config.settings', 'notify_seller_on_approve_reject_m_tmpl_id') notify_admin_on_product_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_on_product_approve_reject_m_tmpl_id') notify_seller_on_product_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_on_product_approve_reject_m_tmpl_id') notify_seller_on_new_order_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_on_new_order_m_tmpl_id') seller_payment_journal_id = self.env['ir.default'].get('res.config.settings', 'seller_payment_journal_id') or self.get_journal_id().id mp_currency_id = self.env['ir.default'].get('res.config.settings', 'mp_currency_id') or self.env.user.company_id.currency_id.id show_visit_shop = self.env['ir.default'].get('res.config.settings', 'show_visit_shop') group_mp_product_variant = self.env['ir.default'].get('res.config.settings', 'group_mp_product_variant') group_mp_shop_allow = self.env['ir.default'].get('res.config.settings', 'group_mp_shop_allow') group_mp_product_pricelist = self.env['ir.default'].get('res.config.settings', 'group_mp_product_pricelist') seller_payment_product_id = self.env['ir.default'].get('res.config.settings', 'seller_payment_product_id') res.update( auto_product_approve = auto_product_approve, internal_categ = internal_categ, mp_default_warehouse_id = mp_default_warehouse_id, warehouse_location_id = warehouse_location_id, auto_approve_qty = auto_approve_qty, auto_approve_seller = auto_approve_seller, global_commission = global_commission, seller_payment_limit = seller_payment_limit, next_payment_requset = next_payment_requset, enable_notify_admin_4_new_seller = enable_notify_admin_4_new_seller, enable_notify_seller_4_new_seller = enable_notify_seller_4_new_seller, enable_notify_admin_on_seller_approve_reject = enable_notify_admin_on_seller_approve_reject, enable_notify_seller_on_approve_reject = 
enable_notify_seller_on_approve_reject, enable_notify_admin_on_product_approve_reject = enable_notify_admin_on_product_approve_reject, enable_notify_seller_on_product_approve_reject = enable_notify_seller_on_product_approve_reject, enable_notify_seller_on_new_order = enable_notify_seller_on_new_order, notify_admin_4_new_seller_m_tmpl_id = notify_admin_4_new_seller_m_tmpl_id, notify_seller_4_new_seller_m_tmpl_id = notify_seller_4_new_seller_m_tmpl_id, notify_admin_on_seller_approve_reject_m_tmpl_id = notify_admin_on_seller_approve_reject_m_tmpl_id, notify_seller_on_approve_reject_m_tmpl_id = notify_seller_on_approve_reject_m_tmpl_id, notify_admin_on_product_approve_reject_m_tmpl_id = notify_admin_on_product_approve_reject_m_tmpl_id, notify_seller_on_product_approve_reject_m_tmpl_id = notify_seller_on_product_approve_reject_m_tmpl_id, notify_seller_on_new_order_m_tmpl_id = notify_seller_on_new_order_m_tmpl_id, seller_payment_journal_id = seller_payment_journal_id, mp_currency_id = mp_currency_id, show_visit_shop = show_visit_shop, group_mp_product_variant = group_mp_product_variant, group_mp_shop_allow = group_mp_shop_allow, group_mp_product_pricelist = group_mp_product_pricelist, seller_payment_product_id = seller_payment_product_id, ) return res @api.multi def execute(self): for rec in self: if rec.recently_product < 1 or rec.recently_product > 20: raise UserError(_("Recently Added Products count should be in range 1 to 20.")) if rec.review_load_no < 1: raise UserError(_("Display Seller Reviews count should be more than 0.")) if rec.global_commission < 0 or rec.global_commission >= 100: raise UserError(_("Global Commission should be greater than 0 and less than 100.")) if rec.seller_payment_limit < 0 : raise UserError(_("Amount Limit can't be negative.")) if rec.next_payment_requset < 0: raise UserError(_("Minimum Gap can't be negative.")) return super(ResConfigSettings, self).execute() @api.model def _default_location(self): """ Set default location """ user_obj = 
self.env.user if user_obj: company_id = user_obj.company_id.id location_ids = self.env["stock.location"].sudo().search( [("company_id", '=', company_id), ("name", "=", "Stock"), ('usage', '=', 'internal')]) return location_ids[0] if location_ids else self.env["stock.location"] return self.env["stock.location"].sudo().search([('usage', '=', 'internal')])[0]
[ "logging.getLogger", "odoo.fields.Binary", "odoo.fields.Float", "odoo.fields.Html", "odoo.fields.Many2one", "odoo.api.onchange", "odoo.fields.Integer", "odoo.fields.Text", "odoo.tools.translate._", "odoo.fields.Char", "odoo.fields.Boolean" ]
[((782, 809), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (799, 809), False, 'import logging\n'), ((1307, 1352), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Auto Product Approve"""'}), "(string='Auto Product Approve')\n", (1321, 1352), False, 'from odoo import models, fields, api, _\n'), ((1374, 1437), 'odoo.fields.Many2one', 'fields.Many2one', (['"""product.category"""'], {'string': '"""Internal Category"""'}), "('product.category', string='Internal Category')\n", (1389, 1437), False, 'from odoo import models, fields, api, _\n'), ((1475, 1581), 'odoo.fields.Many2one', 'fields.Many2one', (['"""stock.location"""'], {'string': '"""Warehouse Location"""', 'domain': '"""[(\'usage\', \'=\', \'internal\')]"""'}), '(\'stock.location\', string=\'Warehouse Location\', domain=\n "[(\'usage\', \'=\', \'internal\')]")\n', (1490, 1581), False, 'from odoo import models, fields, api, _\n'), ((1616, 1670), 'odoo.fields.Many2one', 'fields.Many2one', (['"""stock.warehouse"""'], {'string': '"""Warehouse"""'}), "('stock.warehouse', string='Warehouse')\n", (1631, 1670), False, 'from odoo import models, fields, api, _\n'), ((1698, 1743), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Seller Payment Limit"""'}), "(string='Seller Payment Limit')\n", (1712, 1743), False, 'from odoo import models, fields, api, _\n'), ((1771, 1816), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Next Payment Request"""'}), "(string='Next Payment Request')\n", (1785, 1816), False, 'from odoo import models, fields, api, _\n'), ((1848, 2076), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Allow sellers for several product attributes, defining variants (Example: size, color,...)"""', 'group': '"""odoo_marketplace.marketplace_seller_group"""', 'implied_group': '"""product.group_product_variant"""'}), "(string=\n 'Allow sellers for several product attributes, defining variants (Example: size, color,...)'\n , 
group='odoo_marketplace.marketplace_seller_group', implied_group=\n 'product.group_product_variant')\n", (1862, 2076), False, 'from odoo import models, fields, api, _\n'), ((2118, 2304), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Allow sellers to manage seller shop."""', 'group': '"""odoo_marketplace.marketplace_seller_group"""', 'implied_group': '"""odoo_marketplace.group_marketplace_seller_shop"""'}), "(string='Allow sellers to manage seller shop.', group=\n 'odoo_marketplace.marketplace_seller_group', implied_group=\n 'odoo_marketplace.group_marketplace_seller_shop')\n", (2132, 2304), False, 'from odoo import models, fields, api, _\n'), ((2358, 2560), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Allow sellers for Advanced pricing on product using pricelist."""', 'group': '"""odoo_marketplace.marketplace_seller_group"""', 'implied_group': '"""product.group_product_pricelist"""'}), "(string=\n 'Allow sellers for Advanced pricing on product using pricelist.', group\n ='odoo_marketplace.marketplace_seller_group', implied_group=\n 'product.group_product_pricelist')\n", (2372, 2560), False, 'from odoo import models, fields, api, _\n'), ((2630, 2676), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Auto Quantity Approve"""'}), "(string='Auto Quantity Approve')\n", (2644, 2676), False, 'from odoo import models, fields, api, _\n'), ((2731, 2775), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Auto Seller Approve"""'}), "(string='Auto Seller Approve')\n", (2745, 2775), False, 'from odoo import models, fields, api, _\n'), ((2800, 2840), 'odoo.fields.Float', 'fields.Float', ([], {'string': '"""Global Commission"""'}), "(string='Global Commission')\n", (2812, 2840), False, 'from odoo import models, fields, api, _\n'), ((2920, 2985), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Admin For New Seller"""'}), "(string='Enable Notification Admin For New Seller')\n", (2934, 2985), 
False, 'from odoo import models, fields, api, _\n'), ((3026, 3096), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Seller for Seller Request"""'}), "(string='Enable Notification Seller for Seller Request')\n", (3040, 3096), False, 'from odoo import models, fields, api, _\n'), ((3157, 3232), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Admin On Seller Approve Reject"""'}), "(string='Enable Notification Admin On Seller Approve Reject')\n", (3171, 3232), False, 'from odoo import models, fields, api, _\n'), ((3287, 3356), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Seller On Approve Reject"""'}), "(string='Enable Notification Seller On Approve Reject')\n", (3301, 3356), False, 'from odoo import models, fields, api, _\n'), ((3409, 3485), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Admin On Product Approve Reject"""'}), "(string='Enable Notification Admin On Product Approve Reject')\n", (3423, 3485), False, 'from odoo import models, fields, api, _\n'), ((3548, 3625), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Seller On Product Approve Reject"""'}), "(string='Enable Notification Seller On Product Approve Reject')\n", (3562, 3625), False, 'from odoo import models, fields, api, _\n'), ((3675, 3739), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Enable Notification Seller On New Order"""'}), "(string='Enable Notification Seller On New Order')\n", (3689, 3739), False, 'from odoo import models, fields, api, _\n'), ((3783, 3929), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Admin For New Seller"""', 'domain': '"""[(\'model_id.model\',\'=\',\'res.partner\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Admin For New Seller\', domain=\n "[(\'model_id.model\',\'=\',\'res.partner\')]")\n', (3798, 3929), 
False, 'from odoo import models, fields, api, _\n'), ((3972, 4122), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Seller On Seller Request"""', 'domain': '"""[(\'model_id.model\',\'=\',\'res.partner\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Seller On Seller Request\', domain=\n "[(\'model_id.model\',\'=\',\'res.partner\')]")\n', (3987, 4122), False, 'from odoo import models, fields, api, _\n'), ((4176, 4332), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Admin on Seller Approve/Reject"""', 'domain': '"""[(\'model_id.model\',\'=\',\'res.partner\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Admin on Seller Approve/Reject\', domain=\n "[(\'model_id.model\',\'=\',\'res.partner\')]")\n', (4191, 4332), False, 'from odoo import models, fields, api, _\n'), ((4380, 4530), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Seller On Approve/Reject"""', 'domain': '"""[(\'model_id.model\',\'=\',\'res.partner\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Seller On Approve/Reject\', domain=\n "[(\'model_id.model\',\'=\',\'res.partner\')]")\n', (4395, 4530), False, 'from odoo import models, fields, api, _\n'), ((4585, 4747), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Admin On Product Approve/Reject"""', 'domain': '"""[(\'model_id.model\',\'=\',\'product.template\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Admin On Product Approve/Reject\', domain=\n "[(\'model_id.model\',\'=\',\'product.template\')]")\n', (4600, 4747), False, 'from odoo import models, fields, api, _\n'), ((4803, 4966), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Seller On Product Approve/Reject"""', 'domain': 
'"""[(\'model_id.model\',\'=\',\'product.template\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Seller On Product Approve/Reject\', domain=\n "[(\'model_id.model\',\'=\',\'product.template\')]")\n', (4818, 4966), False, 'from odoo import models, fields, api, _\n'), ((5009, 5158), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Mail Template to Notify Seller On New Order"""', 'domain': '"""[(\'model_id.model\',\'=\',\'sale.order.line\')]"""'}), '(\'mail.template\', string=\n \'Mail Template to Notify Seller On New Order\', domain=\n "[(\'model_id.model\',\'=\',\'sale.order.line\')]")\n', (5024, 5158), False, 'from odoo import models, fields, api, _\n'), ((5219, 5343), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_product_count"""', 'string': '"""Show seller\'s product count on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_product_count\', string=\n "Show seller\'s product count on website.", readonly=False)\n', (5233, 5343), False, 'from odoo import models, fields, api, _\n'), ((5364, 5483), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_sale_count"""', 'string': '"""Show seller\'s sales count on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_sale_count\', string=\n "Show seller\'s sales count on website.", readonly=False)\n', (5378, 5483), False, 'from odoo import models, fields, api, _\n'), ((5502, 5632), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_shipping_address"""', 'string': '"""Show seller\'s shipping address on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_shipping_address\', string=\n "Show seller\'s shipping address on website.", readonly=False)\n', (5516, 5632), False, 'from odoo import models, fields, api, _\n'), ((5655, 5773), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_seller_since"""', 'string': '"""Show seller since 
Date on website."""', 'readonly': '(False)'}), "(related='website_id.mp_seller_since', string=\n 'Show seller since Date on website.', readonly=False)\n", (5669, 5773), False, 'from odoo import models, fields, api, _\n'), ((5786, 5912), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_seller_t_c"""', 'string': '"""Show seller\'s Terms & Conditions on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_seller_t_c\', string=\n "Show seller\'s Terms & Conditions on website.", readonly=False)\n', (5800, 5912), False, 'from odoo import models, fields, api, _\n'), ((5941, 6071), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_seller_contact_btn"""', 'string': '"""Show "Contact Seller" Button on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_seller_contact_btn\', string=\n \'Show "Contact Seller" Button on website.\', readonly=False)\n', (5955, 6071), False, 'from odoo import models, fields, api, _\n'), ((6095, 6210), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_seller_review"""', 'string': '"""Show Seller Review on website."""', 'readonly': '(False)'}), "(related='website_id.mp_seller_review', string=\n 'Show Seller Review on website.', readonly=False)\n", (6109, 6210), False, 'from odoo import models, fields, api, _\n'), ((6234, 6358), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_return_policy"""', 'string': '"""Show seller\'s Retrun Policy on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_return_policy\', string=\n "Show seller\'s Retrun Policy on website.", readonly=False)\n', (6248, 6358), False, 'from odoo import models, fields, api, _\n'), ((6384, 6512), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_shipping_policy"""', 'string': '"""Show Seller\'s Shipping Policy on website."""', 'readonly': '(False)'}), '(related=\'website_id.mp_shipping_policy\', string=\n "Show 
Seller\'s Shipping Policy on website.", readonly=False)\n', (6398, 6512), False, 'from odoo import models, fields, api, _\n'), ((6539, 6675), 'odoo.fields.Integer', 'fields.Integer', ([], {'related': '"""website_id.mp_recently_product"""', 'string': '"""# of products for recently added products menu. """', 'readonly': '(False)'}), "(related='website_id.mp_recently_product', string=\n '# of products for recently added products menu. ', readonly=False)\n", (6553, 6675), False, 'from odoo import models, fields, api, _\n'), ((6735, 6906), 'odoo.fields.Integer', 'fields.Integer', ([], {'related': '"""website_id.mp_review_load_no"""', 'string': '"""No. of Reviews to load"""', 'help': '"""Set default numbers of review to show on website."""', 'readonly': '(False)'}), "(related='website_id.mp_review_load_no', string=\n 'No. of Reviews to load', help=\n 'Set default numbers of review to show on website.', readonly=False)\n", (6749, 6906), False, 'from odoo import models, fields, api, _\n'), ((6931, 7087), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_review_auto_publish"""', 'string': '"""Auto Publish"""', 'help': '"""Publish Customer\'s review automatically."""', 'readonly': '(False)'}), '(related=\'website_id.mp_review_auto_publish\', string=\n \'Auto Publish\', help="Publish Customer\'s review automatically.",\n readonly=False)\n', (6945, 7087), False, 'from odoo import models, fields, api, _\n'), ((7110, 7227), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_show_seller_list"""', 'string': '"""Show Sellers List on website."""', 'readonly': '(False)'}), "(related='website_id.mp_show_seller_list', string=\n 'Show Sellers List on website.', readonly=False)\n", (7124, 7227), False, 'from odoo import models, fields, api, _\n'), ((7259, 7385), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_show_seller_shop_list"""', 'string': '"""Show Seller Shop List on website."""', 'readonly': 
'(False)'}), "(related='website_id.mp_show_seller_shop_list', string=\n 'Show Seller Shop List on website.', readonly=False)\n", (7273, 7385), False, 'from odoo import models, fields, api, _\n'), ((7416, 7556), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_show_become_a_seller"""', 'string': '"""Show Become a Seller button on Account Home Page"""', 'readonly': '(False)'}), "(related='website_id.mp_show_become_a_seller', string=\n 'Show Become a Seller button on Account Home Page', readonly=False)\n", (7430, 7556), False, 'from odoo import models, fields, api, _\n'), ((7583, 7717), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.journal"""'], {'string': '"""Seller Payment Journal"""', 'default': 'get_journal_id', 'domain': '"""[(\'type\', \'=\', \'purchase\')]"""'}), '(\'account.journal\', string=\'Seller Payment Journal\', default\n =get_journal_id, domain="[(\'type\', \'=\', \'purchase\')]")\n', (7598, 7717), False, 'from odoo import models, fields, api, _\n'), ((7734, 7805), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.currency"""', '"""Marketplace Currency"""'], {'readonly': '(False)'}), "('res.currency', 'Marketplace Currency', readonly=False)\n", (7749, 7805), False, 'from odoo import models, fields, api, _\n'), ((7829, 7883), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Show visit shop link on product page"""'], {}), "('Show visit shop link on product page')\n", (7843, 7883), False, 'from odoo import models, fields, api, _\n'), ((7917, 8081), 'odoo.fields.Many2one', 'fields.Many2one', (['"""product.product"""'], {'string': '"""Seller Payment Product"""', 'domain': '"""[(\'sale_ok\', \'=\', False),(\'purchase_ok\', \'=\', False),(\'type\',\'=\',\'service\')]"""'}), '(\'product.product\', string=\'Seller Payment Product\', domain=\n "[(\'sale_ok\', \'=\', False),(\'purchase_ok\', \'=\', False),(\'type\',\'=\',\'service\')]"\n )\n', (7932, 8081), False, 'from odoo import models, fields, api, _\n'), ((8098, 8215), 
'odoo.fields.Html', 'fields.Html', ([], {'string': '"""Marketplace Terms & Conditions"""', 'related': '"""website_id.mp_term_and_condition"""', 'readonly': '(False)'}), "(string='Marketplace Terms & Conditions', related=\n 'website_id.mp_term_and_condition', readonly=False)\n", (8109, 8215), False, 'from odoo import models, fields, api, _\n'), ((8236, 8398), 'odoo.fields.Text', 'fields.Text', ([], {'string': '"""Review feedback message"""', 'help': '"""Message to Customer on review publish."""', 'related': '"""website_id.mp_message_to_publish"""', 'readonly': '(False)'}), "(string='Review feedback message', help=\n 'Message to Customer on review publish.', related=\n 'website_id.mp_message_to_publish', readonly=False)\n", (8247, 8398), False, 'from odoo import models, fields, api, _\n'), ((8420, 8519), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Sell Link Label"""', 'related': '"""website_id.mp_sell_page_label"""', 'readonly': '(False)'}), "(string='Sell Link Label', related=\n 'website_id.mp_sell_page_label', readonly=False)\n", (8431, 8519), False, 'from odoo import models, fields, api, _\n'), ((8549, 8658), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Seller List Link Label"""', 'related': '"""website_id.mp_sellers_list_label"""', 'readonly': '(False)'}), "(string='Seller List Link Label', related=\n 'website_id.mp_sellers_list_label', readonly=False)\n", (8560, 8658), False, 'from odoo import models, fields, api, _\n'), ((8692, 8810), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Seller Shop List Link Label"""', 'related': '"""website_id.mp_seller_shop_list_label"""', 'readonly': '(False)'}), "(string='Seller Shop List Link Label', related=\n 'website_id.mp_seller_shop_list_label', readonly=False)\n", (8703, 8810), False, 'from odoo import models, fields, api, _\n'), ((8841, 8950), 'odoo.fields.Binary', 'fields.Binary', ([], {'string': '"""Landing Page Banner"""', 'related': '"""website_id.mp_landing_page_banner"""', 'readonly': 
'(False)'}), "(string='Landing Page Banner', related=\n 'website_id.mp_landing_page_banner', readonly=False)\n", (8854, 8950), False, 'from odoo import models, fields, api, _\n'), ((8974, 9078), 'odoo.fields.Text', 'fields.Text', ([], {'string': '"""For New Status"""', 'related': '"""website_id.mp_seller_new_status_msg"""', 'readonly': '(False)'}), "(string='For New Status', related=\n 'website_id.mp_seller_new_status_msg', readonly=False)\n", (8985, 9078), False, 'from odoo import models, fields, api, _\n'), ((9115, 9227), 'odoo.fields.Text', 'fields.Text', ([], {'string': '"""For Pending Status"""', 'related': '"""website_id.mp_seller_pending_status_msg"""', 'readonly': '(False)'}), "(string='For Pending Status', related=\n 'website_id.mp_seller_pending_status_msg', readonly=False)\n", (9126, 9227), False, 'from odoo import models, fields, api, _\n'), ((9260, 9377), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_show_sell_menu_header"""', 'string': '"""Show Sell menu in header"""', 'readonly': '(False)'}), "(related='website_id.mp_show_sell_menu_header', string=\n 'Show Sell menu in header', readonly=False)\n", (9274, 9377), False, 'from odoo import models, fields, api, _\n'), ((9401, 9518), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""website_id.mp_show_sell_menu_footer"""', 'string': '"""Show Sell menu in footer"""', 'readonly': '(False)'}), "(related='website_id.mp_show_sell_menu_footer', string=\n 'Show Sell menu in footer', readonly=False)\n", (9415, 9518), False, 'from odoo import models, fields, api, _\n'), ((9654, 9691), 'odoo.api.onchange', 'api.onchange', (['"""warehouse_location_id"""'], {}), "('warehouse_location_id')\n", (9666, 9691), False, 'from odoo import models, fields, api, _\n'), ((1011, 1019), 'odoo.tools.translate._', '_', (['"""All"""'], {}), "('All')\n", (1012, 1019), False, 'from odoo.tools.translate import _\n'), ((1196, 1213), 'odoo.tools.translate._', '_', (['"""Vendor Bills"""'], {}), 
"('Vendor Bills')\n", (1197, 1213), False, 'from odoo.tools.translate import _\n'), ((20863, 20925), 'odoo.tools.translate._', '_', (['"""Recently Added Products count should be in range 1 to 20."""'], {}), "('Recently Added Products count should be in range 1 to 20.')\n", (20864, 20925), False, 'from odoo.tools.translate import _\n'), ((20998, 21054), 'odoo.tools.translate._', '_', (['"""Display Seller Reviews count should be more than 0."""'], {}), "('Display Seller Reviews count should be more than 0.')\n", (20999, 21054), False, 'from odoo.tools.translate import _\n'), ((21162, 21228), 'odoo.tools.translate._', '_', (['"""Global Commission should be greater than 0 and less than 100."""'], {}), "('Global Commission should be greater than 0 and less than 100.')\n", (21163, 21228), False, 'from odoo.tools.translate import _\n'), ((21308, 21344), 'odoo.tools.translate._', '_', (['"""Amount Limit can\'t be negative."""'], {}), '("Amount Limit can\'t be negative.")\n', (21309, 21344), False, 'from odoo.tools.translate import _\n'), ((21423, 21458), 'odoo.tools.translate._', '_', (['"""Minimum Gap can\'t be negative."""'], {}), '("Minimum Gap can\'t be negative.")\n', (21424, 21458), False, 'from odoo.tools.translate import _\n')]
# -*- coding: utf-8 -*-
import sys
import os
import json
import http.client
import urllib
import time

sys.path.append("../")

from models.ApiInstance import ApiInstance
from utils.ConstantUtils import ConstantUtils

'''
NetworkingUtils keeps the list of deployed copies (instances) of the service API,
builds default request headers, and load-balances calls to the Github API across
instances based on each instance's remaining hourly request quota.
'''
class NetworkingUtils():

    def __init__(self):
        self.TAG = "NetworkingUtils"
        self.PATH_SERVICES_CONFIG_FILE = "config/hosts.json"
        self.constUtils = ConstantUtils()
        self.apiInstances = []
        self.initListApiInstances()

    ## --------------------##
    ## Requests management ##
    ## --------------------##
    '''
       Return a headers dict used in requests to the service API.

       headersType: one of the ConstantUtils.HEADERS_TYPE_* constants.
       token: access token, used only for HEADERS_TYPE_AUTH_TOKEN.

       For an unknown headersType the method returns an empty string, kept
       for backward compatibility with existing callers.
    '''
    def getRequestHeaders(self, headersType, token):
        headers = ""

        try:
            if headersType == self.constUtils.HEADERS_TYPE_AUTH_TOKEN:
                headers = {
                    "cache-control": "no-cache",
                    "User-Agent": "Linkehub-API-Manager",
                    "access_token": "{0}".format(token)
                }
            elif headersType == self.constUtils.HEADERS_TYPE_URL_ENCODED:
                headers = {
                    "Content-type": "application/x-www-form-urlencoded",
                    "Accept": "text/plain"
                }
            elif headersType == self.constUtils.HEADERS_TYPE_NO_AUTH_TOKEN:
                headers = {
                    "cache-control": "no-cache",
                    "User-Agent": "Linkehub-API-Manager"
                }
        except Exception as e:
            print("{0} Failed to getRequestHeaders: {1}".format(self.TAG, e))

        return headers

    ## ---------------------##
    ## Instances management ##
    ## ---------------------##
    '''
       Initialize the list of copies running the same version of the service API
       from the JSON config file (PATH_SERVICES_CONFIG_FILE).
    '''
    def initListApiInstances(self):
        try:
            # "with" guarantees the config file handle is closed even on a
            # JSON parsing error (the previous open(...).read() leaked it).
            with open(self.PATH_SERVICES_CONFIG_FILE) as configFile:
                data = json.load(configFile)

            for idx, hostFromList in enumerate(data["hosts"]):
                apiInstance = ApiInstance()
                apiInstance.id = idx

                if "url" in hostFromList:
                    apiInstance.url = hostFromList["url"]

                if "name" in hostFromList:
                    apiInstance.name = hostFromList["name"]

                self.apiInstances.append(apiInstance)

            print("The list of API instances has been initialized: {0}".format(json.dumps(self.getSerializableApiInstances())))
        except Exception as e:
            print("{0}: Failed to initListApiInstances: {1}".format(self.TAG, e))

    '''
       Return the serializable version of the list of ApiInstances.
    '''
    def getSerializableApiInstances(self):
        sApiInstances = []

        try:
            for apiInstance in self.apiInstances:
                sApiInstances.append(apiInstance.toJSON())
        except Exception as e:
            print("{0} Failed to getSerializableApiInstances : {1}".format(self.TAG, e))

        return sApiInstances

    '''
       Return the object that represents the main instance, which contains the same
       content as the others, but is the one used to generate the copies of the service.

       Returns None when the list is empty (the IndexError is logged).
    '''
    def getRootApiInstance(self):
        try:
            return self.apiInstances[0]
        except Exception as e:
            print("{0} Failed to getRootInstance: {1}".format(self.TAG, e))
            return None

    '''
       Verify how many requests each instance of the service API still has to the
       Github API before the limit of requests per hour gets exceeded, and update
       apiInstance.remainingCallsGithub accordingly.
    '''
    def updateListRemainingRequestsGithubAPI(self):
        try:
            print("\nVerify the number of remaining requests to the Github API for all instances: \n")

            # Identify the number of remaining requests to the Github API for each instance of the API
            if self.apiInstances is not None:
                for apiInstance in self.apiInstances:
                    connection = None

                    try:
                        # Make a request to the Github API and verify if the limit of requests per hour has been exceeded
                        connection = http.client.HTTPSConnection(apiInstance.getBaseUrl())
                        headers = self.getRequestHeaders(self.constUtils.HEADERS_TYPE_NO_AUTH_TOKEN, None)
                        endpoint = "/has_expired_requests_per_hour_github/"
                        connection.request("GET", endpoint, headers=headers)

                        res = connection.getresponse()
                        data = res.read()
                        githubApiResponse = json.loads(data.decode(self.constUtils.UTF8_DECODER))

                        # Process the response: only update the counter when the
                        # payload actually carries a usage/remaining field.
                        if githubApiResponse is not None:
                            if "usage" in githubApiResponse:
                                usage = githubApiResponse["usage"]

                                if "remaining" in usage:
                                    apiInstance.remainingCallsGithub = usage["remaining"]
                    except Exception:
                        print("{0} Failed to connect to a host ...".format(self.TAG))
                    finally:
                        # Release the socket even when the request fails.
                        if connection is not None:
                            connection.close()

                    print("{0} : {1}".format(apiInstance.getUrl(), apiInstance.remainingCallsGithub))

            print("Total number available requests : {0}".format(self.getNumRemaningRequestToGithub()))
        except Exception as err2:
            # Previously only ValueError was caught here, although the body can
            # raise network and attribute errors as well; broadened to match
            # the logging intent.
            print("{0} Failed to updateListRemainingRequestsGithubAPI: {1}".format(self.TAG, err2))

    '''
       Return the sum of the remaining requests to the Github API over all
       instances of the service.
    '''
    def getNumRemaningRequestToGithub(self):
        totalRemainingRequest = 0

        try:
            for apiInstance in self.apiInstances:
                totalRemainingRequest += apiInstance.remainingCallsGithub
        except Exception as e:
            print("{0} Failed to getNumRemaningRequestToGithub: {1}".format(self.TAG, e))

        return totalRemainingRequest

    '''
       Return the instance of the service with the largest number of remaining
       requests to the Github API (falls back to the root instance).
    '''
    def getInstanceForRequestToGithubAPI(self):
        selectedInstance = self.getRootApiInstance()
        largestNumRemainingRequests = 0

        try:
            for apiInstance in self.apiInstances:
                if apiInstance.remainingCallsGithub > largestNumRemainingRequests:
                    largestNumRemainingRequests = apiInstance.remainingCallsGithub
                    selectedInstance = apiInstance
        except Exception as e:
            print("{0} Failed to getInstanceForRequestToGithubAPI : {1}".format(self.TAG, e))

        return selectedInstance

    '''
       If the number of available requests to the Github API has been exceeded,
       wait until the instances get refueled.
    '''
    def waitRequestGithubApiIfNeeded(self):
        try:
            # Iterate (instead of the previous tail-recursion) so that a long
            # outage cannot overflow the call stack.
            while self.getNumRemaningRequestToGithub() == 0:
                print("\nThe maximum number of requests to the Github API has been exceeded for all instances of the service")

                i = 0
                while i < self.constUtils.TIMEOUT_REQUEST_GITHUB_API:
                    time.sleep(1)

                    if i == 0:
                        print("\nYou'll have to wait {0} minutes until the next request:".format((self.constUtils.TIMEOUT_REQUEST_GITHUB_API - i) / 60))
                    elif i < self.constUtils.TIMEOUT_REQUEST_GITHUB_API:
                        if (self.constUtils.TIMEOUT_REQUEST_GITHUB_API / i) == 2:
                            print("\nWe are half way there, we still have to wait {0} minutes".format((self.constUtils.TIMEOUT_REQUEST_GITHUB_API - i) / 60))
                        else:
                            print(".", end="")

                    i += 1

                self.updateListRemainingRequestsGithubAPI()
        except Exception as e:
            print("{0} Failed to waitRequestGithubApiIfNeeded : {1}".format(self.TAG, e))
[ "json.loads", "models.ApiInstance.ApiInstance", "time.sleep", "utils.ConstantUtils.ConstantUtils", "sys.path.append" ]
[((104, 126), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (119, 126), False, 'import sys\n'), ((527, 542), 'utils.ConstantUtils.ConstantUtils', 'ConstantUtils', ([], {}), '()\n', (540, 542), False, 'from utils.ConstantUtils import ConstantUtils\n'), ((2085, 2105), 'json.loads', 'json.loads', (['fileData'], {}), '(fileData)\n', (2095, 2105), False, 'import json\n'), ((2200, 2213), 'models.ApiInstance.ApiInstance', 'ApiInstance', ([], {}), '()\n', (2211, 2213), False, 'from models.ApiInstance import ApiInstance\n'), ((7464, 7477), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7474, 7477), False, 'import time\n')]
import numpy as np
from scipy.special import factorial
from itertools import permutations, product

from pysat.solvers import Minisat22, Minicard
from pysat.pb import PBEnc

from clauses import build_clauses, build_max_min_clauses
from clauses import build_permutation_clauses
from clauses import build_cardinality_lits, build_exclusivity_lits


def compare_dice(first, second):
    """Count the pairs (x, y) with x in `first`, y in `second` and x > y."""
    hits = 0
    for x in first:
        for y in second:
            if y < x:
                hits += 1
    return hits


def compare_doubled_dice(first, second, comp="max"):
    """Weighted comparison of two doubled dice.

    Each face is weighted by its position among the interleaved doubled
    faces (odd indices ascending for comp="max", descending for
    comp="min"); a win (x > y) contributes the product of the two weights.

    Raises ValueError for an unknown `comp` (previously this produced an
    obscure NameError on `indices`).
    """
    d = len(first)
    if comp == "max":
        indices = range(1, 2 * d, 2)
    elif comp == "min":
        indices = range(2 * d - 1, 0, -2)
    else:
        raise ValueError("comp must be 'max' or 'min', got %r" % (comp,))
    hits = 0
    for i, x in zip(indices, first):
        for j, y in zip(indices, second):
            if y < x:
                hits += i * j
    return hits


def recover_values(d, dice_names, constraints):
    """Recover natural face values for each die from pairwise constraint matrices.

    Each die starts as faces 0..d-1 and is shifted by the number of
    wins recorded against every other die.
    """
    natural_faces = []
    for die in dice_names:
        faces = np.arange(d)
        for die2 in dice_names:
            if die != die2:
                faces += constraints[(die, die2)].sum(1)
        natural_faces.append(faces)
    return natural_faces


def compress_values(*args):
    """Relabel the dice faces with the smallest values preserving their total order.

    Consecutive faces belonging to the same die share a value; the value is
    bumped whenever the owning die changes.
    """
    T = {}
    for i, die in enumerate(args):
        T.update({k: i for k in die})
    n = len(T.keys())
    T_list = [T[i] for i in range(n)]
    current_value = 0
    current_die = T_list[0]
    compressed_dice = [[] for _ in args]
    compressed_dice[current_die].append(current_value)
    for i in range(1, n):
        previous_die = current_die
        current_die = T_list[i]
        if current_die != previous_die:
            current_value += 1
        compressed_dice[current_die].append(current_value)
    return compressed_dice


def sat_to_constraints(d, dice_names, sat_solution, compress=True):
    """Decode a SAT model into pairwise d x d boolean constraint matrices.

    Note: `compress` is unused; it is kept for backward compatibility with
    existing callers.
    """
    dice_pairs = list(permutations(dice_names, 2))
    n = len(dice_pairs)
    # The first n*d^2 literals encode the pairwise comparison matrices;
    # a positive literal means "face of first die beats face of second".
    signs_array = (sat_solution[: (n * d ** 2)] > 0).reshape((n, d, d))
    constraints = {v: s for v, s in zip(dice_pairs, signs_array)}
    return constraints


def sat_to_dice(d, dice_names, sat_solution, compress=False):
    """Decode a SAT model into a {die_name: face_values} dict.

    With compress=True the face values are relabelled to the smallest
    order-preserving integers.
    """
    constraints = sat_to_constraints(d, dice_names, sat_solution)
    natural_faces = recover_values(d, dice_names, constraints)
    if compress:
        dice_faces = compress_values(*natural_faces)
        dice_dict = {k: v for k, v in zip(dice_names, dice_faces)}
    else:
        dice_dict = {k: v for k, v in zip(dice_names, natural_faces)}
    return dice_dict


def dice_to_constraints(dice, dtype=int):
    """Build pairwise (x beats y) comparison matrices from explicit dice faces.

    The default dtype used to be np.int, an alias of the builtin int that
    was removed in NumPy 1.24; builtin int is the drop-in replacement.
    """
    dice_names = list(dice.keys())
    d = len(dice[dice_names[0]])
    dice_pairs = list(permutations(dice_names, 2))
    n = len(dice_pairs)
    constraints = dict()
    for x, y in dice_pairs:
        foo = np.array(dice[x]).reshape(len(dice[x]), 1)
        bar = np.array(dice[y]).reshape(1, len(dice[y]))
        constraint = foo > bar
        constraints[(x, y)] = constraint.astype(dtype)
    return constraints


def dice_to_word(dice_solution):
    """Serialize a dice solution as a word (faces sorted by value) plus a
    space-separated segmentation into groups of m letters."""
    dice_names = list(dice_solution.keys())
    m = len(dice_names)
    d = len(dice_solution[dice_names[0]])
    foo = [[(x, dice_solution[x][i]) for i in range(d)] for x in dice_names]
    bar = sum(foo, [])
    ram = sorted(bar, key=lambda x: x[1])
    word = "".join([t[0] for t in ram])
    segments = [word[i : (i + m)] for i in range(0, m * d, m)]
    segmented_word = " ".join(segments)
    return word, segmented_word


def word_to_dice(word):
    """Inverse of dice_to_word: map each letter to the positions where it occurs."""
    dice_solution = dict()
    for i, w in enumerate(word):
        if w in dice_solution:
            dice_solution[w].append(i)
        else:
            dice_solution[w] = [i]
    return dice_solution


def permute_letters(string, permutation, relative=True):
    """Apply `permutation` to the letters of `string`.

    With relative=True letters are ordered by their first occurrence in the
    string; otherwise alphabetically.
    """
    letter_set = set(string)
    if relative:
        pairs = [(string.index(letter), letter) for letter in letter_set]
        sorted_pairs = sorted(pairs)
        letters = "".join(l for i, l in sorted_pairs)
    else:
        letters = sorted(list(set(string)))
    subs = {s: letters[p] for s, p in zip(letters, permutation)}
    subs_string = "".join([subs[s] for s in string])
    return subs_string


# ----------------------------------------------------------------------------

def verify_solution(scores, dice_solution):
    """Print observed vs. expected pairwise scores for a dice solution."""
    for x, y in scores:
        check = compare_dice(dice_solution[x], dice_solution[y])
        print((x, y), check, scores[(x, y)])


def verify_doubling_solution(
    scores, doubled_scores_max, doubled_scores_min, dice_solution
):
    """Print observed vs. expected scores for single, doubled-max and
    doubled-min comparisons."""
    verify_solution(scores, dice_solution)
    print()
    for x, y in doubled_scores_max:
        check = compare_doubled_dice(dice_solution[x], dice_solution[y], "max")
        print((x, y), check, doubled_scores_max[(x, y)])
    print()
    for x, y in doubled_scores_min:
        check = compare_doubled_dice(dice_solution[x], dice_solution[y], "min")
        print((x, y), check, doubled_scores_min[(x, y)])


def verify_go_first(dice_solution, verbose=True):
    """Count how often each ordering of the dice wins over all joint rolls.

    For a go-first set, every permutation should occur d**m / m! times.
    Returns the {permutation: count} dict.
    """
    m = len(dice_solution)
    keys = np.array(sorted(list(dice_solution.keys())))
    d = len(dice_solution[keys[0]])
    check = d ** m // factorial(m, exact=True)
    counts = {x: 0 for x in permutations(keys)}
    for outcome in product(*[dice_solution[k] for k in keys]):
        perm = np.argsort(outcome)
        counts[tuple(keys[perm])] += 1
    if verbose:
        for k in counts:
            print(k, check, counts[k])
        print()
    return counts


# ============================================================================

def build_sat(
    d,
    dice_names,
    scores,
    cardinality_clauses=False,
    symmetry_clauses=True,
    structure_clauses=True,
    pb=PBEnc.equals,
):
    """Build a Minicard solver loaded with the clauses for the given scores.

    When cardinality_clauses is False, the score cardinalities are enforced
    via native atmost constraints instead (atmost / atleast / both,
    depending on `pb`).
    """
    clauses, cardinality_lits = build_clauses(
        d,
        dice_names,
        scores,
        card_clauses=cardinality_clauses,
        symmetry_clauses=symmetry_clauses,
        structure_clauses=structure_clauses,
    )

    sat = Minicard()
    for clause in clauses:
        sat.add_clause(clause)

    if not cardinality_clauses:
        for x, lits in cardinality_lits.items():
            if pb in (PBEnc.equals, PBEnc.atmost):
                sat.add_atmost(lits, scores[x])
            if pb in (PBEnc.equals, PBEnc.atleast):
                # "at least k of lits" == "at most len-k of the negations";
                # here len(lits) == d**2.
                conv_lits = [-l for l in lits]
                sat.add_atmost(conv_lits, d ** 2 - scores[x])

    return sat


def sat_search(
    d,
    dice_names,
    scores,
    cardinality_clauses=False,
    symmetry_clauses=True,
    structure_clauses=True,
    pb=PBEnc.equals,
    solution_type="dice_solution",
):
    """Search for one satisfying assignment; return it as a SAT model or a
    dice dict according to `solution_type` (None if unsatisfiable)."""
    sat = build_sat(
        d=d,
        dice_names=dice_names,
        scores=scores,
        cardinality_clauses=cardinality_clauses,
        symmetry_clauses=symmetry_clauses,
        structure_clauses=structure_clauses,
        pb=pb,
    )

    is_solvable = sat.solve()
    if is_solvable:
        sat_solution = np.array(sat.get_model())
        dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
    else:
        sat_solution = None
        dice_solution = None

    if solution_type == "sat_solution":
        return sat_solution
    elif solution_type == "dice_solution":
        return dice_solution


# ----------------------------------------------------------------------------

def sat_exhaust(
    d,
    dice_names,
    scores,
    cardinality_clauses=False,
    symmetry_clauses=True,
    structure_clauses=True,
    pb=PBEnc.equals,
    solution_type="sat_solution",
):
    """Enumerate ALL satisfying assignments; return them as SAT models or as
    dice dicts according to `solution_type`."""
    sat = build_sat(
        d=d,
        dice_names=dice_names,
        scores=scores,
        cardinality_clauses=cardinality_clauses,
        symmetry_clauses=symmetry_clauses,
        structure_clauses=structure_clauses,
        pb=pb,
    )

    solutions = sat.enum_models()
    if solution_type == "sat_solution":
        return [np.array(s) for s in solutions]
    elif solution_type == "dice_solution":
        dice_solutions = [sat_to_dice(d, dice_names, np.array(s)) for s in solutions]
        return dice_solutions


# ----------------------------------------------------------------------------

def sat_search_max_min(d, dice_names, scores, max_scores, min_scores):
    """Search for dice realizing the given single, doubled-max and
    doubled-min scores; return a dice dict or None."""
    clauses = build_max_min_clauses(d, dice_names, scores, max_scores, min_scores)

    sat = Minisat22()
    for clause in clauses:
        sat.add_clause(clause)

    if sat.solve():
        sat_solution = np.array(sat.get_model())
        dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
    else:
        dice_solution = None

    return dice_solution


# ----------------------------------------------------------------------------

def sat_search_go_first(d, dice_names, scores_2, scores_m, m=None):
    """Search for a "go-first" dice set: pairwise scores `scores_2` plus
    m-wise permutation scores `scores_m`; return a dice dict or None."""
    if m is None:  # was `m == None`; identity comparison is the correct idiom
        m = len(dice_names)

    start_enum = 1

    dice_pairs = list(permutations(dice_names, 2))
    faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}

    # Pairwise comparison variables ------------------------------------------
    var_lists_2 = {(x, y): list(product(faces[x], faces[y])) for (x, y) in dice_pairs}
    variables_2 = sum(var_lists_2.values(), [])
    var_dict_2 = dict((v, k) for k, v in enumerate(variables_2, start_enum))
    start_enum += len(variables_2)

    # m-wise permutation variables -------------------------------------------
    dice_perms = list(permutations(dice_names, m))
    var_lists_m = {xs: list(product(*[faces[x] for x in xs])) for xs in dice_perms}
    variables_m = sum(var_lists_m.values(), [])
    var_dict_m = dict((v, k) for k, v in enumerate(variables_m, start_enum))
    start_enum += len(variables_m)

    # Clause construction ----------------------------------------------------
    clauses_2, cardinality_lits_2 = build_clauses(d, dice_names, scores_2)
    clauses_m = build_permutation_clauses(d, var_dict_2, var_dict_m, dice_names, m)
    cardinality_lits_m = build_cardinality_lits(d, var_dict_m, var_lists_m)
    exclusivity_lits = build_exclusivity_lits(d, var_dict_m, dice_names, m)

    clauses = clauses_2 + clauses_m
    sat = Minicard()
    for clause in clauses:
        sat.add_clause(clause)

    # Equality via paired atmost constraints (atmost + atmost-of-negations).
    for x, lits in cardinality_lits_2.items():
        sat.add_atmost(lits, scores_2[x])
        conv_lits = [-l for l in lits]
        sat.add_atmost(conv_lits, d ** 2 - scores_2[x])

    for x, lits in cardinality_lits_m.items():
        sat.add_atmost(lits, scores_m[x])
        conv_lits = [-l for l in lits]
        sat.add_atmost(conv_lits, d ** m - scores_m[x])

    # Exactly one permutation variable true per outcome.
    for x, lits in exclusivity_lits.items():
        sat.add_atmost(lits, 1)
        conv_lits = [-l for l in lits]
        sat.add_atmost(conv_lits, len(lits) - 1)

    if sat.solve():
        sat_solution = np.array(sat.get_model())
        dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
    else:
        dice_solution = None

    return dice_solution
[ "clauses.build_exclusivity_lits", "pysat.solvers.Minicard", "clauses.build_permutation_clauses", "scipy.special.factorial", "itertools.product", "clauses.build_cardinality_lits", "pysat.solvers.Minisat22", "numpy.argsort", "numpy.array", "clauses.build_max_min_clauses", "clauses.build_clauses", ...
[((5231, 5273), 'itertools.product', 'product', (['*[dice_solution[k] for k in keys]'], {}), '(*[dice_solution[k] for k in keys])\n', (5238, 5273), False, 'from itertools import permutations, product\n'), ((5738, 5884), 'clauses.build_clauses', 'build_clauses', (['d', 'dice_names', 'scores'], {'card_clauses': 'cardinality_clauses', 'symmetry_clauses': 'symmetry_clauses', 'structure_clauses': 'structure_clauses'}), '(d, dice_names, scores, card_clauses=cardinality_clauses,\n symmetry_clauses=symmetry_clauses, structure_clauses=structure_clauses)\n', (5751, 5884), False, 'from clauses import build_clauses, build_max_min_clauses\n'), ((5947, 5957), 'pysat.solvers.Minicard', 'Minicard', ([], {}), '()\n', (5955, 5957), False, 'from pysat.solvers import Minisat22, Minicard\n'), ((8259, 8327), 'clauses.build_max_min_clauses', 'build_max_min_clauses', (['d', 'dice_names', 'scores', 'max_scores', 'min_scores'], {}), '(d, dice_names, scores, max_scores, min_scores)\n', (8280, 8327), False, 'from clauses import build_clauses, build_max_min_clauses\n'), ((8339, 8350), 'pysat.solvers.Minisat22', 'Minisat22', ([], {}), '()\n', (8348, 8350), False, 'from pysat.solvers import Minisat22, Minicard\n'), ((9867, 9905), 'clauses.build_clauses', 'build_clauses', (['d', 'dice_names', 'scores_2'], {}), '(d, dice_names, scores_2)\n', (9880, 9905), False, 'from clauses import build_clauses, build_max_min_clauses\n'), ((10003, 10070), 'clauses.build_permutation_clauses', 'build_permutation_clauses', (['d', 'var_dict_2', 'var_dict_m', 'dice_names', 'm'], {}), '(d, var_dict_2, var_dict_m, dice_names, m)\n', (10028, 10070), False, 'from clauses import build_permutation_clauses\n'), ((10096, 10146), 'clauses.build_cardinality_lits', 'build_cardinality_lits', (['d', 'var_dict_m', 'var_lists_m'], {}), '(d, var_dict_m, var_lists_m)\n', (10118, 10146), False, 'from clauses import build_cardinality_lits, build_exclusivity_lits\n'), ((10170, 10222), 'clauses.build_exclusivity_lits', 
'build_exclusivity_lits', (['d', 'var_dict_m', 'dice_names', 'm'], {}), '(d, var_dict_m, dice_names, m)\n', (10192, 10222), False, 'from clauses import build_cardinality_lits, build_exclusivity_lits\n'), ((10351, 10361), 'pysat.solvers.Minicard', 'Minicard', ([], {}), '()\n', (10359, 10361), False, 'from pysat.solvers import Minisat22, Minicard\n'), ((974, 986), 'numpy.arange', 'np.arange', (['d'], {}), '(d)\n', (983, 986), True, 'import numpy as np\n'), ((1827, 1854), 'itertools.permutations', 'permutations', (['dice_names', '(2)'], {}), '(dice_names, 2)\n', (1839, 1854), False, 'from itertools import permutations, product\n'), ((2612, 2639), 'itertools.permutations', 'permutations', (['dice_names', '(2)'], {}), '(dice_names, 2)\n', (2624, 2639), False, 'from itertools import permutations, product\n'), ((5139, 5163), 'scipy.special.factorial', 'factorial', (['m'], {'exact': '(True)'}), '(m, exact=True)\n', (5148, 5163), False, 'from scipy.special import factorial\n'), ((5290, 5309), 'numpy.argsort', 'np.argsort', (['outcome'], {}), '(outcome)\n', (5300, 5309), True, 'import numpy as np\n'), ((7755, 7782), 'itertools.permutations', 'permutations', (['dice_names', '(2)'], {}), '(dice_names, 2)\n', (7767, 7782), False, 'from itertools import permutations, product\n'), ((8936, 8963), 'itertools.permutations', 'permutations', (['dice_names', '(2)'], {}), '(dice_names, 2)\n', (8948, 8963), False, 'from itertools import permutations, product\n'), ((9477, 9504), 'itertools.permutations', 'permutations', (['dice_names', 'm'], {}), '(dice_names, m)\n', (9489, 9504), False, 'from itertools import permutations, product\n'), ((5192, 5210), 'itertools.permutations', 'permutations', (['keys'], {}), '(keys)\n', (5204, 5210), False, 'from itertools import permutations, product\n'), ((7900, 7911), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (7908, 7911), True, 'import numpy as np\n'), ((9158, 9185), 'itertools.product', 'product', (['faces[x]', 'faces[y]'], {}), '(faces[x], 
faces[y])\n', (9165, 9185), False, 'from itertools import permutations, product\n'), ((9534, 9566), 'itertools.product', 'product', (['*[faces[x] for x in xs]'], {}), '(*[faces[x] for x in xs])\n', (9541, 9566), False, 'from itertools import permutations, product\n'), ((2732, 2749), 'numpy.array', 'np.array', (['dice[x]'], {}), '(dice[x])\n', (2740, 2749), True, 'import numpy as np\n'), ((2789, 2806), 'numpy.array', 'np.array', (['dice[y]'], {}), '(dice[y])\n', (2797, 2806), True, 'import numpy as np\n'), ((8028, 8039), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (8036, 8039), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
"""Test update of link labels."""
from uuid import uuid4

from aiida.common import timezone
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator


def test_legacy_jobcalc_attrs(perform_migrations: PsqlDostoreMigrator):
    """Test update of link labels.

    Migrates the schema to django_0042, inserts a CalcJobNode and a Dict
    node connected by an input link labelled '_return', then migrates to
    django_0043 and asserts the link label was renamed to 'result'.

    NOTE(review): the function name mentions "jobcalc_attrs" while the
    docstring and body exercise link-label renaming — confirm the intended
    name against the migration it covers.
    """
    # starting revision
    perform_migrations.migrate_up('django@django_0042')

    # setup the database: fetch the table classes as they exist at this
    # revision, then create a user, two nodes and a legacy-labelled link
    user_model = perform_migrations.get_current_table('db_dbuser')
    node_model = perform_migrations.get_current_table('db_dbnode')
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        user = user_model(
            email='<EMAIL>',
            first_name='John',
            last_name='Doe',
            institution='EPFL',
        )
        session.add(user)
        # commit so user.id is populated before the nodes reference it
        session.commit()
        node_process = node_model(
            uuid=str(uuid4()),
            node_type='process.calculation.calcjob.CalcJobNode.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        node_data = node_model(
            uuid=str(uuid4()),
            node_type='data.core.dict.Dict.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        session.add(node_process)
        session.add(node_data)
        session.commit()
        # legacy label '_return' is what the migration should rewrite
        link = link_model(
            input_id=node_data.id,
            output_id=node_process.id,
            type='input',
            label='_return',
        )
        session.add(link)
        session.commit()
        # keep only the primary key; the ORM object is stale after migrating
        link_id = link.id

    # final revision
    perform_migrations.migrate_up('django@django_0043')

    # re-fetch the table class for the new revision and check the rename
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        link = session.get(link_model, link_id)
        assert link.label == 'result'
[ "aiida.common.timezone.now", "uuid.uuid4" ]
[((1700, 1714), 'aiida.common.timezone.now', 'timezone.now', ([], {}), '()\n', (1712, 1714), False, 'from aiida.common import timezone\n'), ((1734, 1748), 'aiida.common.timezone.now', 'timezone.now', ([], {}), '()\n', (1746, 1748), False, 'from aiida.common import timezone\n'), ((1970, 1984), 'aiida.common.timezone.now', 'timezone.now', ([], {}), '()\n', (1982, 1984), False, 'from aiida.common import timezone\n'), ((2004, 2018), 'aiida.common.timezone.now', 'timezone.now', ([], {}), '()\n', (2016, 2018), False, 'from aiida.common import timezone\n'), ((1523, 1530), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1528, 1530), False, 'from uuid import uuid4\n'), ((1813, 1820), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1818, 1820), False, 'from uuid import uuid4\n')]
import time
import unittest

from Data.parameters import Data
from SI.MAP.check_infrascore_with_download_functionality import SchoolInfra_scores
from SI.MAP.check_sc_map_clusterwise_records import test_school_map_schoollevel_records
from SI.MAP.click_on_anydistrict_and_download_csv import download_icon
from SI.MAP.click_on_block_cluster_school_and_check_schoolscount import Block_cluster_school_count
from SI.MAP.click_on_blocks import click_on_blocks
from SI.MAP.click_on_blocks_and_scores import block_btn_scores
from SI.MAP.click_on_clusters import cluster_button
from SI.MAP.click_on_clusters_and_scores import cluster_btn_scores
from SI.MAP.click_on_district_and_homeicon import district_home
from SI.MAP.click_on_hyperlink import click_on_hyperlink
from SI.MAP.click_on_infra_score import click_on_infrascores
from SI.MAP.click_on_schools import click_schoolbutton
from SI.MAP.click_on_schools_and_scores import schools_btn_scores
from reuse_func import GetData


class cQube_SI_Map_Report(unittest.TestCase):
    """Selenium UI tests for the cQube School Infrastructure map report.

    One shared browser session is opened in ``setUpClass`` (login + navigate
    to the SI map page) and closed in ``tearDownClass``; individual tests
    drive page-object helpers from ``SI.MAP``.
    """

    @classmethod
    def setUpClass(self):
        # Shared driver/session for the whole class; navigates to the
        # school-infrastructure map page once.
        self.data = GetData()
        self.driver = self.data.get_driver()
        self.data.open_cqube_appln(self.driver)
        self.data.login_cqube(self.driver)
        time.sleep(2)
        self.data.navigate_to_school_infrastructure_map()
        time.sleep(3)

    def test_hyperlink(self):
        """Clicking the report hyperlink should keep us on the SI map URL."""
        b = click_on_hyperlink(self.driver)
        res = b.test_link()
        if "school-infra-map" in self.driver.current_url:
            print("school infra map based report present")
        else:
            print("home icon is not working ")

    def test_districtwise_download(self):
        """District-level CSV download: helper returns 0 on a value match."""
        b = download_icon(self.driver)
        res = b.test_donwload()
        self.assertEqual(0,res,msg="mismatch found at no of school values")
        self.data.page_loading(self.driver)

    def test_schools_per_cluster_csv_download1(self):
        """Schools-per-cluster CSV: footer totals must equal the file's."""
        school = test_school_map_schoollevel_records(self.driver)
        result = school.check_download_csv1()
        if result == 0:
            print("Schools per cluster csv download report is working")
            print("on selection of each district,block and cluster")
            print("The footer value of no of schools and no of students are")
            print("equals to downloaded file")
        else:
            raise self.failureException("Schools per cluster csv report download1 is working")

    def test_click_home_in_districtwise(self):
        """Home icon from a district view should return to the SI map URL."""
        b = district_home(self.driver)
        res = b.test_district()
        if "school-infra-map" in self.driver.current_url:
            print("school infra map based report present")
        else:
            print("home icon is not working ")

    def test_logout(self):
        """Logout should land on the login page; logs back in afterwards."""
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id(Data.logout).click()
        self.data.page_loading(self.driver)
        count = 0
        print(self.driver.title)
        if 'Log in to cQube' in self.driver.title:
            print('logout button is working and Login page is displayed')
        else:
            print("logout button is not working ")
            count = count + 1
        self.assertEqual(0,count,msg='logout button is not worked')
        # restore the session for the remaining tests
        self.data.login_cqube(self.driver)
        self.data.page_loading(self.driver)
        self.data.navigate_to_school_infrastructure_map()
        self.data.page_loading(self.driver)

    def test_infrascore(self):
        """Each infra-score metric download must yield a non-zero result."""
        b = SchoolInfra_scores(self.driver)
        infra_score = b.infra_score()
        b.remove_csv()
        self.assertNotEqual(0, infra_score, msg='Failed')
        boy_toilet = b.Boys_toilet_percentage()
        b.remove_csv()
        self.assertNotEqual(0, boy_toilet, msg='Failed')
        drinking_water = b.drinking_water()
        b.remove_csv()
        self.assertNotEqual(0, drinking_water, msg='Failed')
        Electricity = b.Electricity()
        b.remove_csv()
        self.assertNotEqual(0, Electricity, msg='Failed')
        girl_toilet = b.girls_toilet()
        b.remove_csv()
        self.assertNotEqual(0, girl_toilet, msg='Failed')
        Handpump = b.Handpump()
        b.remove_csv()
        self.assertNotEqual(0, Handpump, msg='Failed')
        Handwash = b.Handwash()
        b.remove_csv()
        self.assertNotEqual(0, Handwash, msg='Failed')
        Library = b.Library()
        b.remove_csv()
        self.assertNotEqual(0, Library, msg='Failed')
        Solar_panel = b.Solar_panel()
        b.remove_csv()
        self.assertNotEqual(0, Solar_panel, msg='Failed')
        Tapwater = b.Tapwater()
        b.remove_csv()
        self.assertNotEqual(0, Tapwater, msg='Failed')
        Toilet = b.Toilet()
        b.remove_csv()
        self.assertNotEqual(0, Toilet, msg='Failed')

    def test_infrascores(self):
        """The infra-score dropdown should expose a non-empty option list."""
        b = click_on_infrascores(self.driver)
        res = b.test_infrascores()
        self.assertNotEqual(0, res, msg="infra score options not contains in drop down")
        print("checked with infrascores options")

    def test_click_on_block_cluster_school(self):
        """Block, cluster and school buttons: records shown + CSV download."""
        b = click_on_blocks(self.driver)
        res1,res2 = b.test_blocks_button()
        self.assertNotEqual(0, res1, msg="Records are not present on map ")
        self.assertTrue(res2,msg='Block wise file downloading is not working ')
        print("Block buttons is working...")
        b = cluster_button(self.driver)
        res1, res2 = b.test_clusterbtn()
        self.assertNotEqual(0, res1, msg="Records are not present on map ")
        self.assertTrue(res2, msg='Cluster wise file downloading is not working ')
        print("cluster button is working ")
        b = click_schoolbutton(self.driver)
        res1,res2 = b.test_click_on_school_btn()
        self.assertNotEqual(0, res1, msg="Records are not present on map ")
        self.assertTrue(res2, msg='School wise file downloading is not working ')
        print("school button is working ")

    def test_no_of_schools(self):
        """School counts must agree across block/cluster/school levels."""
        b = Block_cluster_school_count(self.driver)
        r, r1, r2, r3 = b.test_check_total_schoolvalue()
        self.assertEqual(int(r), int(r1), msg="mis match found in no of school in block level")
        self.assertEqual(int(r), int(r2), msg="mis match found in no of school in cluster level")
        self.assertEqual(int(r), int(r3), msg="mis match found in no of school in school level")
        self.data.page_loading(self.driver)
        print("checked with comapared with footer values ")

    def test_block_cluster_schools_infrascores(self):
        """Infra scores must render at block, cluster and school level."""
        b = block_btn_scores(self.driver)
        result = b.test_click_blocks()
        self.data.page_loading(self.driver)
        print("block button is worked and infra scores is working ")
        b = cluster_btn_scores(self.driver)
        result = b.test_click_clusters()
        self.data.page_loading(self.driver)
        print("cluster button is worked and infra scores is working ")
        b = schools_btn_scores(self.driver)
        res = b.test_click_schools()
        self.data.page_loading(self.driver)
        print("school button is worked and infra scores is working ")

    def test_homebtn(self):
        """Home button should navigate to the dashboard landing page."""
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id('homeBtn').click()
        self.data.page_loading(self.driver)
        count = 0
        if 'dashboard' in self.driver.current_url:
            print("cQube Landing page is displayed ")
        else:
            print('Homebutton is not working ')
            count = count + 1
        self.assertEqual(0,count,msg='Landing page does not exists')
        # return to the SI map page for any subsequent tests
        self.data.navigate_to_school_infrastructure_map()
        self.data.page_loading(self.driver)

    @classmethod
    def tearDownClass(cls):
        cls.driver.close()
[ "SI.MAP.check_infrascore_with_download_functionality.SchoolInfra_scores", "SI.MAP.click_on_district_and_homeicon.district_home", "SI.MAP.click_on_clusters_and_scores.cluster_btn_scores", "SI.MAP.click_on_schools_and_scores.schools_btn_scores", "SI.MAP.click_on_schools.click_schoolbutton", "SI.MAP.click_on...
[((1084, 1093), 'reuse_func.GetData', 'GetData', ([], {}), '()\n', (1091, 1093), False, 'from reuse_func import GetData\n'), ((1238, 1251), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1248, 1251), False, 'import time\n'), ((1318, 1331), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1328, 1331), False, 'import time\n'), ((1376, 1407), 'SI.MAP.click_on_hyperlink.click_on_hyperlink', 'click_on_hyperlink', (['self.driver'], {}), '(self.driver)\n', (1394, 1407), False, 'from SI.MAP.click_on_hyperlink import click_on_hyperlink\n'), ((1669, 1695), 'SI.MAP.click_on_anydistrict_and_download_csv.download_icon', 'download_icon', (['self.driver'], {}), '(self.driver)\n', (1682, 1695), False, 'from SI.MAP.click_on_anydistrict_and_download_csv import download_icon\n'), ((1920, 1968), 'SI.MAP.check_sc_map_clusterwise_records.test_school_map_schoollevel_records', 'test_school_map_schoollevel_records', (['self.driver'], {}), '(self.driver)\n', (1955, 1968), False, 'from SI.MAP.check_sc_map_clusterwise_records import test_school_map_schoollevel_records\n'), ((2474, 2500), 'SI.MAP.click_on_district_and_homeicon.district_home', 'district_home', (['self.driver'], {}), '(self.driver)\n', (2487, 2500), False, 'from SI.MAP.click_on_district_and_homeicon import district_home\n'), ((3526, 3557), 'SI.MAP.check_infrascore_with_download_functionality.SchoolInfra_scores', 'SchoolInfra_scores', (['self.driver'], {}), '(self.driver)\n', (3544, 3557), False, 'from SI.MAP.check_infrascore_with_download_functionality import SchoolInfra_scores\n'), ((4887, 4920), 'SI.MAP.click_on_infra_score.click_on_infrascores', 'click_on_infrascores', (['self.driver'], {}), '(self.driver)\n', (4907, 4920), False, 'from SI.MAP.click_on_infra_score import click_on_infrascores\n'), ((5158, 5186), 'SI.MAP.click_on_blocks.click_on_blocks', 'click_on_blocks', (['self.driver'], {}), '(self.driver)\n', (5173, 5186), False, 'from SI.MAP.click_on_blocks import click_on_blocks\n'), ((5444, 5471), 
'SI.MAP.click_on_clusters.cluster_button', 'cluster_button', (['self.driver'], {}), '(self.driver)\n', (5458, 5471), False, 'from SI.MAP.click_on_clusters import cluster_button\n'), ((5729, 5760), 'SI.MAP.click_on_schools.click_schoolbutton', 'click_schoolbutton', (['self.driver'], {}), '(self.driver)\n', (5747, 5760), False, 'from SI.MAP.click_on_schools import click_schoolbutton\n'), ((6058, 6097), 'SI.MAP.click_on_block_cluster_school_and_check_schoolscount.Block_cluster_school_count', 'Block_cluster_school_count', (['self.driver'], {}), '(self.driver)\n', (6084, 6097), False, 'from SI.MAP.click_on_block_cluster_school_and_check_schoolscount import Block_cluster_school_count\n'), ((6618, 6647), 'SI.MAP.click_on_blocks_and_scores.block_btn_scores', 'block_btn_scores', (['self.driver'], {}), '(self.driver)\n', (6634, 6647), False, 'from SI.MAP.click_on_blocks_and_scores import block_btn_scores\n'), ((6813, 6844), 'SI.MAP.click_on_clusters_and_scores.cluster_btn_scores', 'cluster_btn_scores', (['self.driver'], {}), '(self.driver)\n', (6831, 6844), False, 'from SI.MAP.click_on_clusters_and_scores import cluster_btn_scores\n'), ((7014, 7045), 'SI.MAP.click_on_schools_and_scores.schools_btn_scores', 'schools_btn_scores', (['self.driver'], {}), '(self.driver)\n', (7032, 7045), False, 'from SI.MAP.click_on_schools_and_scores import schools_btn_scores\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:46 2020

@author: hamishgibbs
"""
import pandas as pd
import re
import numpy as np
#%%
ox = pd.read_csv('https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv')
#%%
ox = ox[0:100]
#%%
ox.fillna(0.0, inplace = True)
#%%
def oxcgrt_records(ox, drop_columns=None):
    '''
    Function to convert OXCGRT data to records

    This is an additional challenge because of the wide format of the
    Oxford data.

    Parameters:
        ox: OxCGRT DataFrame (wide format)
        drop_columns: column names to exclude from the id columns
    Returns:
        list of per-measure record dicts
    '''
    # Fix: avoid a mutable default argument.
    if drop_columns is None:
        drop_columns = []
    full_value_names, value_names, stub_names = get_names(ox)
    id_columns = [x for x in list(set(ox.columns).difference(set(full_value_names))) if x not in drop_columns]
    records = ox.to_dict(orient="records")
    # Fix: pass full_value_names explicitly — the original relied on a module
    # global that does not exist yet when this function is called below,
    # raising NameError inside get_measure_records.
    rs = [x for x in [get_measure_records(r, stub_names, id_columns, full_value_names) for r in records] if x != []]
    rs = [item for sublist in rs for item in sublist]
    return(rs)

def get_names(ox):
    '''
    Function to get names of columns holding measure information.
    These columns begin with the prefix "A1_" etc.

    returns:
        full_value_names: the names of all columns with measure information
        value_names: the names of measure columns (no Flag/Notes)
        stub_names: the measure column prefixes (i.e. "A1")
    '''
    stub_exp = r'[A-Z][0-9]+_'
    full_value_names = [match for match in ox.columns if re.findall(stub_exp, match) != []]
    value_names = [x for x in full_value_names if 'Flag' not in x]
    value_names = [x for x in value_names if 'Notes' not in x]
    stub_names = [x.split('_')[0] for x in value_names]
    return(full_value_names, value_names, stub_names)

def get_measure_records(combined_record, stub_names, id_columns, full_value_names=None):
    '''Function to break rows into individual records by stub group
    i.e. subset a row for only C4 records and other information,
    repeat for all possible measures.

    Also drops records with no data where sum(all values) == 0.

    Parameters:
        combined_record: one wide-format row as a dict
        stub_names: measure prefixes ("A1", "C4", ...)
        id_columns: non-measure columns to carry into every record
        full_value_names: all measure column names; when None, falls back to
            the module-level global (backward compatible with the original
            script-level call at the bottom of this file)
    '''
    if full_value_names is None:
        full_value_names = globals().get('full_value_names')
    records = []
    for stub in stub_names:
        stub_keys = [x for x in full_value_names if stub in x]
        keys = id_columns + stub_keys
        # Fix: reset per iteration — the original let flag_key/notes_key leak
        # from a previous stub when the current one had no Flag/Notes column.
        flag_key = None
        notes_key = None
        try:
            flag_key = [x for x in stub_keys if '_Flag' in x][0]
        except IndexError:
            pass
        try:
            notes_key = [x for x in stub_keys if '_Notes' in x][0]
        except IndexError:
            pass
        subset = {key: value for key, value in combined_record.items() if key in keys}
        # Drop empty records; non-numeric values (e.g. notes text) make the
        # sum fail, in which case the record is kept.
        try:
            if sum([subset[key] for key in stub_keys]) == 0:
                continue
        except (TypeError, KeyError):
            pass
        try:
            subset['flag'] = subset.pop(flag_key)
        except KeyError:
            subset['flag'] = 0.0
        try:
            subset['notes'] = subset.pop(notes_key)
        except KeyError:
            pass
        # Whatever key remains besides ids/flag/notes is the measure itself.
        measure_key = list(set(list(subset.keys())).difference(set(id_columns + ['measure_name', 'flag', 'notes'])))
        subset['measure'] = subset.pop(measure_key[0])
        subset['measure_name'] = measure_key[0]
        records.append(subset)
    return(records)
#%%
drop_columns = ['ConfirmedCases', 'ConfirmedDeaths', 'StringencyIndex',
                'StringencyIndexForDisplay', 'StringencyLegacyIndex',
                'StringencyLegacyIndexForDisplay', 'GovernmentResponseIndex',
                'GovernmentResponseIndexForDisplay', 'ContainmentHealthIndex',
                'ContainmentHealthIndexForDisplay', 'EconomicSupportIndex',
                'EconomicSupportIndexForDisplay']
#%%
ox_r = oxcgrt_records(ox, drop_columns)
#%%
len(ox_r)
#%%
keep_columns = list(set(ox.columns).difference(set(drop_columns)))
full_value_names, value_names, stub_names = get_names(ox)
id_columns = [x for x in list(set(ox.columns).difference(set(full_value_names))) if x not in drop_columns]
#%%
records = ox.to_dict(orient="records")
#%%
rs = [x for x in [get_measure_records(r, stub_names, id_columns) for r in records] if x != []]
rs = [item for sublist in rs for item in sublist]
rs = pd.DataFrame(rs)
#%%
[ "pandas.DataFrame", "re.findall", "pandas.read_csv" ]
[((174, 300), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv'\n )\n", (185, 300), True, 'import pandas as pd\n'), ((4196, 4212), 'pandas.DataFrame', 'pd.DataFrame', (['rs'], {}), '(rs)\n', (4208, 4212), True, 'import pandas as pd\n'), ((1416, 1443), 're.findall', 're.findall', (['stub_exp', 'match'], {}), '(stub_exp, match)\n', (1426, 1443), False, 'import re\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements a TF Model class by inheriting the Model base class.

@author: <NAME>
@version: 1.0
"""

from base.base_model import BaseModel
import tensorflow as tf


class MnistModel(BaseModel):
    # Two-layer fully-connected MNIST classifier built with the TF1 graph
    # API (placeholders + explicit Variables).

    def __init__(self, config):
        """
        Constructor to initialize the TF model class by inheritance from super.
        :param config
        :return none
        :raises none
        """
        super(MnistModel, self).__init__(config)
        self.build_model()
        self.init_saver()

    def build_model(self):
        """
        Build the Tensorflow model
        :param self
        :return none
        :raises none
        """
        # NOTE(review): batch_size is read from config but never used below.
        batch_size = self.config['batch_size']
        self.is_training = tf.placeholder(tf.bool)
        # declare the training data placeholders
        # input x - for 28 x 28 pixels = 784
        self.x = tf.placeholder(tf.float32, [None, 784])
        # now declare the output data placeholder - 10 digits
        self.y = tf.placeholder(tf.float32, [None, 10])
        # now declare the weights connecting the input to the hidden layer
        self.W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')
        self.b1 = tf.Variable(tf.random_normal([300]), name='b1')
        # and the weights connecting the hidden layer to the output layer
        self.W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')
        self.b2 = tf.Variable(tf.random_normal([10]), name='b2')
        # calculate the output of the hidden layer
        self.hidden_out = tf.add(tf.matmul(self.x, self.W1), self.b1)
        self.hidden_out = tf.nn.relu(self.hidden_out)
        # now calculate the hidden layer output - in this case, let's use a
        # softmax activated output layer
        self.y_ = tf.nn.softmax(tf.add(tf.matmul(self.hidden_out, self.W2), self.b2))
        # define the loss function; the output is clipped away from 0/1 so
        # the log terms below stay finite
        self.y_clipped = tf.clip_by_value(self.y_, 1e-10, 0.9999999)
        self.cross_entropy = -tf.reduce_mean(tf.reduce_sum(self.y * tf.log(self.y_clipped) + (1 - self.y) * tf.log(1 - self.y_clipped), axis=1))
        # add an optimiser
        self.optimiser = tf.train.GradientDescentOptimizer(learning_rate=self.config['learning_rate']).minimize(self.cross_entropy)
        # define an accuracy assessment operation
        self.correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))

    def init_saver(self):
        """
        Initialize the tensorflow saver that will be used in saving the checkpoints.
        :param self
        :return none
        :raises none
        """
        self.saver = tf.train.Saver(max_to_keep=self.config['max_to_keep'])
[ "tensorflow.random_normal", "tensorflow.nn.relu", "tensorflow.placeholder", "tensorflow.train.Saver", "tensorflow.train.GradientDescentOptimizer", "tensorflow.argmax", "tensorflow.clip_by_value", "tensorflow.matmul", "tensorflow.cast", "tensorflow.log" ]
[((783, 806), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (797, 806), True, 'import tensorflow as tf\n'), ((927, 966), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {}), '(tf.float32, [None, 784])\n', (941, 966), True, 'import tensorflow as tf\n'), ((1047, 1085), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {}), '(tf.float32, [None, 10])\n', (1061, 1085), True, 'import tensorflow as tf\n'), ((1683, 1710), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.hidden_out'], {}), '(self.hidden_out)\n', (1693, 1710), True, 'import tensorflow as tf\n'), ((1982, 2025), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.y_', '(1e-10)', '(0.9999999)'], {}), '(self.y_, 1e-10, 0.9999999)\n', (1998, 2025), True, 'import tensorflow as tf\n'), ((2801, 2855), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': "self.config['max_to_keep']"}), "(max_to_keep=self.config['max_to_keep'])\n", (2815, 2855), True, 'import tensorflow as tf\n'), ((1192, 1233), 'tensorflow.random_normal', 'tf.random_normal', (['[784, 300]'], {'stddev': '(0.03)'}), '([784, 300], stddev=0.03)\n', (1208, 1233), True, 'import tensorflow as tf\n'), ((1276, 1299), 'tensorflow.random_normal', 'tf.random_normal', (['[300]'], {}), '([300])\n', (1292, 1299), True, 'import tensorflow as tf\n'), ((1417, 1457), 'tensorflow.random_normal', 'tf.random_normal', (['[300, 10]'], {'stddev': '(0.03)'}), '([300, 10], stddev=0.03)\n', (1433, 1457), True, 'import tensorflow as tf\n'), ((1500, 1522), 'tensorflow.random_normal', 'tf.random_normal', (['[10]'], {}), '([10])\n', (1516, 1522), True, 'import tensorflow as tf\n'), ((1620, 1646), 'tensorflow.matmul', 'tf.matmul', (['self.x', 'self.W1'], {}), '(self.x, self.W1)\n', (1629, 1646), True, 'import tensorflow as tf\n'), ((2433, 2453), 'tensorflow.argmax', 'tf.argmax', (['self.y', '(1)'], {}), '(self.y, 1)\n', (2442, 2453), True, 'import tensorflow as tf\n'), ((2455, 2476), 
'tensorflow.argmax', 'tf.argmax', (['self.y_', '(1)'], {}), '(self.y_, 1)\n', (2464, 2476), True, 'import tensorflow as tf\n'), ((2517, 2561), 'tensorflow.cast', 'tf.cast', (['self.correct_prediction', 'tf.float32'], {}), '(self.correct_prediction, tf.float32)\n', (2524, 2561), True, 'import tensorflow as tf\n'), ((1866, 1901), 'tensorflow.matmul', 'tf.matmul', (['self.hidden_out', 'self.W2'], {}), '(self.hidden_out, self.W2)\n', (1875, 1901), True, 'import tensorflow as tf\n'), ((2232, 2309), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': "self.config['learning_rate']"}), "(learning_rate=self.config['learning_rate'])\n", (2265, 2309), True, 'import tensorflow as tf\n'), ((2094, 2116), 'tensorflow.log', 'tf.log', (['self.y_clipped'], {}), '(self.y_clipped)\n', (2100, 2116), True, 'import tensorflow as tf\n'), ((2134, 2160), 'tensorflow.log', 'tf.log', (['(1 - self.y_clipped)'], {}), '(1 - self.y_clipped)\n', (2140, 2160), True, 'import tensorflow as tf\n')]
try:
    import pathlib
except ImportError as e:
    try:
        import pathlib2 as pathlib
    except ImportError:
        raise e


def name_to_asserted_group_path(name):
    """Validate *name* and return it as a ``PurePosixPath``.

    Raises ``NotImplementedError`` for absolute paths, and for names that
    produce no path components unless the name is the literal ".".
    """
    group_path = pathlib.PurePosixPath(name)
    if group_path.is_absolute():
        message = "Absolute paths are currently not supported and unlikely to be implemented."
        raise NotImplementedError(message)
    if len(group_path.parts) < 1 and str(name) != ".":
        message = ("Getting an item on a group with path '" + name + "' "
                   "is not supported and unlikely to be implemented.")
        raise NotImplementedError(message)
    return group_path


def remove_root(name):
    """Return *name* as a ``PurePosixPath`` with any leading root stripped."""
    stripped = pathlib.PurePosixPath(name)
    return stripped.relative_to(stripped.root) if stripped.is_absolute() else stripped
[ "pathlib2.PurePosixPath" ]
[((185, 212), 'pathlib2.PurePosixPath', 'pathlib.PurePosixPath', (['name'], {}), '(name)\n', (206, 212), True, 'import pathlib2 as pathlib\n'), ((654, 681), 'pathlib2.PurePosixPath', 'pathlib.PurePosixPath', (['name'], {}), '(name)\n', (675, 681), True, 'import pathlib2 as pathlib\n')]
#!/usr/bin/env python # # import modules used here -- sys is a very standard one from __future__ import print_function import argparse import csv import logging import zipfile from collections import OrderedDict from glob import glob import os import sys import nibabel as nb import json import pandas as pd import numpy as np # Gather our code in a main() function from shutil import copy def get_metadata_for_nifti(bids_root, path): #TODO support .nii sidecarJSON = path.replace(".nii.gz", ".json") pathComponents = os.path.split(sidecarJSON) filenameComponents = pathComponents[-1].split("_") sessionLevelComponentList = [] subjectLevelComponentList = [] topLevelComponentList = [] ses = None; sub = None; for filenameComponent in filenameComponents: if filenameComponent[:3] != "run": sessionLevelComponentList.append(filenameComponent) if filenameComponent[:3] == "ses": ses = filenameComponent else: subjectLevelComponentList.append(filenameComponent) if filenameComponent[:3] == "sub": sub = filenameComponent else: topLevelComponentList.append(filenameComponent) topLevelJSON = os.path.join(bids_root, "_".join(topLevelComponentList)) potentialJSONs = [topLevelJSON] subjectLevelJSON = os.path.join(bids_root, sub, "_".join(subjectLevelComponentList)) potentialJSONs.append(subjectLevelJSON) if ses: sessionLevelJSON = os.path.join(bids_root, sub, ses, "_".join(sessionLevelComponentList)) potentialJSONs.append(sessionLevelJSON) potentialJSONs.append(sidecarJSON) merged_param_dict = {} for json_file_path in potentialJSONs: if os.path.exists(json_file_path): param_dict = json.load(open(json_file_path, "r")) merged_param_dict.update(param_dict) return merged_param_dict def dict_append(d, key, value): if key in d: d[key].append(value) else: d[key] = [value, ] def run(args): guid_mapping = dict([line.split(" - ") for line in open(args.guid_mapping).read().split("\n") if line != '']) suffix_to_scan_type = {"dwi": "MR diffusion", "bold": "fMRI", #""MR structural(MPRAGE)", "T1w": "MR 
structural (T1)", "PD": "MR structural (PD)", #"MR structural(FSPGR)", "T2w": "MR structural (T2)", "T2map": "MR structural (T2)", "T2star": "MR: T2star", "FLAIR": "MR: FLAIR", "asl": "ASL", "FLASH": "MR structural (FLASH)", #PET; #microscopy; #MR structural(PD, T2); #MR structural(B0 map); #MR structural(B1 map); #single - shell DTI; #multi - shell DTI; "epi": "Field Map", "phase1": "Field Map", "phase2": "Field Map", "phasediff": "Field Map", "magnitude1": "Field Map", "magnitude2": "Field Map", "fieldmap": "Field Map" #X - Ray } units_dict = {"mm": "Millimeters", "sec": "Seconds", "msec": "Milliseconds"} participants_df = pd.read_csv(os.path.join(args.bids_directory, "participants.tsv"), header=0, sep="\t") participants_df['age'] = participants_df.age.astype(str).str.rstrip('Y').str.lstrip('0') image03_dict = OrderedDict() for file in glob(os.path.join(args.bids_directory, "sub-*", "*", "sub-*.nii.gz")) + \ glob(os.path.join(args.bids_directory, "sub-*", "ses-*", "*", "sub-*_ses-*.nii.gz")): metadata = get_metadata_for_nifti(args.bids_directory, file) bids_subject_id = os.path.split(file)[-1].split("_")[0][4:] dict_append(image03_dict, 'subjectkey', guid_mapping[bids_subject_id]) dict_append(image03_dict, 'src_subject_id', bids_subject_id) sub = file.split("sub-")[-1].split("_")[0] if "ses-" in file: ses = file.split("ses-")[-1].split("_")[0] scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "ses-" + ses, "sub-" + sub + "_ses-" + ses + "_scans.tsv")) else: scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "sub-" + sub + "_scans.tsv")) if os.path.exists(scans_file): scans_df = pd.read_csv(scans_file, header=0, sep="\t") else: print("%s file not found - information about scan date required by NDA could not be found." 
% scans_file) sys.exit(-1) for (_, row) in scans_df.iterrows(): if file.endswith(row["filename"].replace("/", os.sep)): date = row.acq_time break sdate = date.split("-") ndar_date = sdate[1] + "/" + sdate[2].split("T")[0] + "/" + sdate[0] dict_append(image03_dict, 'interview_date', ndar_date) interview_age = int(round(float(participants_df[participants_df.participant_id == "sub-" + sub].age.values[0]), 0)*12) dict_append(image03_dict, 'interview_age', interview_age) sex = list(participants_df[participants_df.participant_id == "sub-" + sub].sex)[0] dict_append(image03_dict, 'gender', sex) dict_append(image03_dict, 'image_file', file) suffix = file.split("_")[-1].split(".")[0] if suffix == "bold": description = suffix + " " + metadata["TaskName"] dict_append(image03_dict, 'experiment_id', metadata.get("ExperimentID", args.experiment_id)) else: description = suffix dict_append(image03_dict, 'experiment_id', '') dict_append(image03_dict, 'image_description', description) dict_append(image03_dict, 'scan_type', suffix_to_scan_type[suffix]) dict_append(image03_dict, 'scan_object', "Live") dict_append(image03_dict, 'image_file_format', "NIFTI") dict_append(image03_dict, 'image_modality', "MRI") dict_append(image03_dict, 'scanner_manufacturer_pd', metadata.get("Manufacturer", "")) dict_append(image03_dict, 'scanner_type_pd', metadata.get("ManufacturersModelName", "")) dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("SoftwareVersions", "")) dict_append(image03_dict, 'magnetic_field_strength', metadata.get("MagneticFieldStrength", "")) dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", "")) dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", "")) dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", "")) plane = metadata.get("ImageOrientationPatient","") get_orientation = lambda place: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])] dict_append(image03_dict, 
'image_orientation',get_orientation(plane)) dict_append(image03_dict, 'transformation_performed', 'Yes') dict_append(image03_dict, 'transformation_type', 'BIDS2NDA') nii = nb.load(file) dict_append(image03_dict, 'image_num_dimensions', len(nii.shape)) dict_append(image03_dict, 'image_extent1', nii.shape[0]) dict_append(image03_dict, 'image_extent2', nii.shape[1]) dict_append(image03_dict, 'image_extent3', nii.shape[2]) if suffix == "bold": extent4_type = "time" elif suffix == "dwi": extent4_type = "diffusion weighting" else: extent4_type = "" dict_append(image03_dict, 'extent4_type', extent4_type) dict_append(image03_dict, 'acquisition_matrix', "%g x %g" %(nii.shape[0], nii.shape[1])) dict_append(image03_dict, 'image_resolution1', nii.header.get_zooms()[0]) dict_append(image03_dict, 'image_resolution2', nii.header.get_zooms()[1]) dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2]) dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2]) dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","MONOCHROME2")) if len(nii.shape) > 3: image_extent4 = nii.shape[3] image_resolution4 = nii.header.get_zooms()[3] image_unit4 = units_dict[nii.header.get_xyzt_units()[1]] if image_unit4 == "Milliseconds": TR = nii.header.get_zooms()[3]/1000. 
else: TR = nii.header.get_zooms()[3] else: image_resolution4 = "" image_unit4 = "" image_extent4 = "" TR = metadata.get("RepetitionTime", "") slice_timing = metadata.get("SliceTiming", "") dict_append(image03_dict, 'image_extent4', image_extent4) dict_append(image03_dict, 'slice_timing', slice_timing) dict_append(image03_dict, 'image_unit4', image_unit4) dict_append(image03_dict, 'mri_repetition_time_pd', TR) dict_append(image03_dict, 'image_resolution4', image_resolution4) dict_append(image03_dict, 'image_unit1', units_dict[nii.header.get_xyzt_units()[0]]) dict_append(image03_dict, 'image_unit2', units_dict[nii.header.get_xyzt_units()[0]]) dict_append(image03_dict, 'image_unit3', units_dict[nii.header.get_xyzt_units()[0]]) dict_append(image03_dict, 'mri_field_of_view_pd', "%g x %g %s" % (nii.header.get_zooms()[0], nii.header.get_zooms()[1], units_dict[nii.header.get_xyzt_units()[0]])) dict_append(image03_dict, 'patient_position', 'head first-supine') if file.split(os.sep)[-1].split("_")[1].startswith("ses"): visit = file.split(os.sep)[-1].split("_")[1][4:] else: visit = "" dict_append(image03_dict, 'visit', visit) if len(metadata) > 0 or suffix in ['bold', 'dwi']: _, fname = os.path.split(file) zip_name = fname.split(".")[0] + ".metadata.zip" zip_path = os.path.join(args.output_directory, zip_name) zip_path_exists = os.path.exists(zip_path) if not zip_path_exists or (zip_path_exists and args.overwrite_zips): with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True)) if suffix == "bold": #TODO write a more robust function for finding those files events_file = file.split("_bold")[0] + "_events.tsv" arch_name = os.path.split(events_file)[1] if not os.path.exists(events_file): task_name = file.split("_task-")[1].split("_")[0] events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") if os.path.exists(events_file): zipf.write(events_file, 
arch_name) dict_append(image03_dict, 'data_file2', zip_path) dict_append(image03_dict, 'data_file2_type', "ZIP file with additional metadata from Brain Imaging " "Data Structure (http://bids.neuroimaging.io)") else: dict_append(image03_dict, 'data_file2', "") dict_append(image03_dict, 'data_file2_type', "") if suffix == "dwi": # TODO write a more robust function for finding those files bvec_file = file.split("_dwi")[0] + "_dwi.bvec" if not os.path.exists(bvec_file): bvec_file = os.path.join(args.bids_directory, "dwi.bvec") if os.path.exists(bvec_file): dict_append(image03_dict, 'bvecfile', bvec_file) else: dict_append(image03_dict, 'bvecfile', "") bval_file = file.split("_dwi")[0] + "_dwi.bval" if not os.path.exists(bval_file): bval_file = os.path.join(args.bids_directory, "dwi.bval") if os.path.exists(bval_file): dict_append(image03_dict, 'bvalfile', bval_file) else: dict_append(image03_dict, 'bvalfile', "") if os.path.exists(bval_file) or os.path.exists(bvec_file): dict_append(image03_dict, 'bvek_bval_files', 'Yes') else: dict_append(image03_dict, 'bvek_bval_files', 'No') else: dict_append(image03_dict, 'bvecfile', "") dict_append(image03_dict, 'bvalfile', "") dict_append(image03_dict, 'bvek_bval_files', "") # all values of image03_dict should be the same length. # Fail when this is not true instead of when the dataframe # is created. assert(len(set(map(len,image03_dict.values()))) ==1) image03_df = pd.DataFrame(image03_dict) with open(os.path.join(args.output_directory, "image03.txt"), "w") as out_fp: out_fp.write('"image"\t"3"\n') image03_df.to_csv(out_fp, sep="\t", index=False, quoting=csv.QUOTE_ALL) def main(): class MyParser(argparse.ArgumentParser): def error(self, message): sys.stderr.write('error: %s\n' % message) self.print_help() sys.exit(2) parser = MyParser( description="BIDS to NDA converter.", fromfile_prefix_chars='@') # TODO Specify your real parameters here. 
parser.add_argument( "bids_directory", help="Location of the root of your BIDS compatible directory", metavar="BIDS_DIRECTORY") parser.add_argument('-e', '--experiment_id', default=None, help = ("Functional scans require an experiment_id. If ExperimentID is not" " found in the scan metadata this value is used")) parser.add_argument('-o', '--overwrite_zips', action='store_true', help = ("If a conversion has already been performed, the default is " "to avoid rewriting each zip file generated and instead just rewrite image03.txt")) parser.add_argument( "guid_mapping", help="Path to a text file with participant_id to GUID mapping. You will need to use the " "GUID Tool (https://ndar.nih.gov/contribute.html) to generate GUIDs for your participants.", metavar="GUID_MAPPING") parser.add_argument( "output_directory", help="Directory where NDA files will be stored", metavar="OUTPUT_DIRECTORY") args = parser.parse_args() run(args) print("Metadata extraction complete.") if __name__ == '__main__': main()
[ "os.path.exists", "collections.OrderedDict", "pandas.read_csv", "nibabel.load", "zipfile.ZipFile", "json.dumps", "os.path.join", "numpy.argmax", "os.path.split", "sys.stderr.write", "sys.exit", "pandas.DataFrame" ]
[((538, 564), 'os.path.split', 'os.path.split', (['sidecarJSON'], {}), '(sidecarJSON)\n', (551, 564), False, 'import os\n'), ((3938, 3951), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3949, 3951), False, 'from collections import OrderedDict\n'), ((13537, 13563), 'pandas.DataFrame', 'pd.DataFrame', (['image03_dict'], {}), '(image03_dict)\n', (13549, 13563), True, 'import pandas as pd\n'), ((1795, 1825), 'os.path.exists', 'os.path.exists', (['json_file_path'], {}), '(json_file_path)\n', (1809, 1825), False, 'import os\n'), ((3748, 3801), 'os.path.join', 'os.path.join', (['args.bids_directory', '"""participants.tsv"""'], {}), "(args.bids_directory, 'participants.tsv')\n", (3760, 3801), False, 'import os\n'), ((4825, 4851), 'os.path.exists', 'os.path.exists', (['scans_file'], {}), '(scans_file)\n', (4839, 4851), False, 'import os\n'), ((7524, 7537), 'nibabel.load', 'nb.load', (['file'], {}), '(file)\n', (7531, 7537), True, 'import nibabel as nb\n'), ((3973, 4036), 'os.path.join', 'os.path.join', (['args.bids_directory', '"""sub-*"""', '"""*"""', '"""sub-*.nii.gz"""'], {}), "(args.bids_directory, 'sub-*', '*', 'sub-*.nii.gz')\n", (3985, 4036), False, 'import os\n'), ((4059, 4137), 'os.path.join', 'os.path.join', (['args.bids_directory', '"""sub-*"""', '"""ses-*"""', '"""*"""', '"""sub-*_ses-*.nii.gz"""'], {}), "(args.bids_directory, 'sub-*', 'ses-*', '*', 'sub-*_ses-*.nii.gz')\n", (4071, 4137), False, 'import os\n'), ((4587, 4697), 'os.path.join', 'os.path.join', (['args.bids_directory', "('sub-' + sub)", "('ses-' + ses)", "('sub-' + sub + '_ses-' + ses + '_scans.tsv')"], {}), "(args.bids_directory, 'sub-' + sub, 'ses-' + ses, 'sub-' + sub +\n '_ses-' + ses + '_scans.tsv')\n", (4599, 4697), False, 'import os\n'), ((4735, 4811), 'os.path.join', 'os.path.join', (['args.bids_directory', "('sub-' + sub)", "('sub-' + sub + '_scans.tsv')"], {}), "(args.bids_directory, 'sub-' + sub, 'sub-' + sub + '_scans.tsv')\n", (4747, 4811), False, 'import os\n'), ((4876, 
4919), 'pandas.read_csv', 'pd.read_csv', (['scans_file'], {'header': '(0)', 'sep': '"""\t"""'}), "(scans_file, header=0, sep='\\t')\n", (4887, 4919), True, 'import pandas as pd\n'), ((5064, 5076), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (5072, 5076), False, 'import sys\n'), ((10523, 10542), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (10536, 10542), False, 'import os\n'), ((10627, 10672), 'os.path.join', 'os.path.join', (['args.output_directory', 'zip_name'], {}), '(args.output_directory, zip_name)\n', (10639, 10672), False, 'import os\n'), ((10703, 10727), 'os.path.exists', 'os.path.exists', (['zip_path'], {}), '(zip_path)\n', (10717, 10727), False, 'import os\n'), ((12364, 12389), 'os.path.exists', 'os.path.exists', (['bvec_file'], {}), '(bvec_file)\n', (12378, 12389), False, 'import os\n'), ((12729, 12754), 'os.path.exists', 'os.path.exists', (['bval_file'], {}), '(bval_file)\n', (12743, 12754), False, 'import os\n'), ((13579, 13629), 'os.path.join', 'os.path.join', (['args.output_directory', '"""image03.txt"""'], {}), "(args.output_directory, 'image03.txt')\n", (13591, 13629), False, 'import os\n'), ((13870, 13911), 'sys.stderr.write', 'sys.stderr.write', (["('error: %s\\n' % message)"], {}), "('error: %s\\n' % message)\n", (13886, 13911), False, 'import sys\n'), ((13954, 13965), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (13962, 13965), False, 'import sys\n'), ((7271, 7291), 'numpy.argmax', 'np.argmax', (['plane[:3]'], {}), '(plane[:3])\n', (7280, 7291), True, 'import numpy as np\n'), ((12247, 12272), 'os.path.exists', 'os.path.exists', (['bvec_file'], {}), '(bvec_file)\n', (12261, 12272), False, 'import os\n'), ((12302, 12347), 'os.path.join', 'os.path.join', (['args.bids_directory', '"""dwi.bvec"""'], {}), "(args.bids_directory, 'dwi.bvec')\n", (12314, 12347), False, 'import os\n'), ((12612, 12637), 'os.path.exists', 'os.path.exists', (['bval_file'], {}), '(bval_file)\n', (12626, 12637), False, 'import os\n'), ((12667, 
12712), 'os.path.join', 'os.path.join', (['args.bids_directory', '"""dwi.bval"""'], {}), "(args.bids_directory, 'dwi.bval')\n", (12679, 12712), False, 'import os\n'), ((12912, 12937), 'os.path.exists', 'os.path.exists', (['bval_file'], {}), '(bval_file)\n', (12926, 12937), False, 'import os\n'), ((12941, 12966), 'os.path.exists', 'os.path.exists', (['bvec_file'], {}), '(bvec_file)\n', (12955, 12966), False, 'import os\n'), ((10830, 10882), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(zip_path, 'w', zipfile.ZIP_DEFLATED)\n", (10845, 10882), False, 'import zipfile\n'), ((10962, 11008), 'json.dumps', 'json.dumps', (['metadata'], {'indent': '(4)', 'sort_keys': '(True)'}), '(metadata, indent=4, sort_keys=True)\n', (10972, 11008), False, 'import json\n'), ((11556, 11583), 'os.path.exists', 'os.path.exists', (['events_file'], {}), '(events_file)\n', (11570, 11583), False, 'import os\n'), ((4237, 4256), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (4250, 4256), False, 'import os\n'), ((11247, 11273), 'os.path.split', 'os.path.split', (['events_file'], {}), '(events_file)\n', (11260, 11273), False, 'import os\n'), ((11308, 11335), 'os.path.exists', 'os.path.exists', (['events_file'], {}), '(events_file)\n', (11322, 11335), False, 'import os\n'), ((11457, 11527), 'os.path.join', 'os.path.join', (['args.bids_directory', "('task-' + task_name + '_events.tsv')"], {}), "(args.bids_directory, 'task-' + task_name + '_events.tsv')\n", (11469, 11527), False, 'import os\n')]
from .base import * import os import dj_database_url ALLOWED_HOSTS = ['*'] DEBUG = False MIDDLEWARE += [ 'whitenoise.middleware.WhiteNoiseMiddleware' ] INSTALLED_APPS = [ 'whitenoise.runserver_nostatic', ] + INSTALLED_APPS DATABASES = { 'default': dj_database_url.config() } EMAIL_USE_TLS = True EMAIL_HOST = os.environ.get('EMAIL_HOST') EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER') EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD') EMAIL_PORT = os.environ.get('EMAIL_PORT')
[ "dj_database_url.config", "os.environ.get" ]
[((331, 359), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""'], {}), "('EMAIL_HOST')\n", (345, 359), False, 'import os\n'), ((378, 411), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_USER"""'], {}), "('EMAIL_HOST_USER')\n", (392, 411), False, 'import os\n'), ((434, 471), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_PASSWORD"""'], {}), "('EMAIL_HOST_PASSWORD')\n", (448, 471), False, 'import os\n'), ((485, 513), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PORT"""'], {}), "('EMAIL_PORT')\n", (499, 513), False, 'import os\n'), ((268, 292), 'dj_database_url.config', 'dj_database_url.config', ([], {}), '()\n', (290, 292), False, 'import dj_database_url\n')]
""" >>> G = Graph(6) >>> G.insert(0, 1, 3) >>> G.insert(0, 2, 7) >>> G.insert(0, 4, 8) >>> G.insert(0, 5, 1) >>> G.insert(1, 2, 2) >>> G.insert(1, 4, 13) >>> G.insert(2, 3, 15) >>> G.insert(3, 5, 17) >>> G.insert(4, 5, 9) >>> G.dijkstra(0)[0] [0, 3, 5, 20, 8, 1] >>> G.shortest_distance(1, 5) [1, 4, 5] """ """ Lazy implementation of Dijkstra's Algorithm. In this implementation we Lazily check all the (node, distance) pair even if a better distance for given node exists(i.e. duplicates exists). Priority queue which is always sorted in ascending order of distance. """ from sys import maxsize import heapq from collections import defaultdict as dd class Graph: def __init__(self, vertices): # Using defaultdict to avoid key error self.adjmat = dd(dict) self.vertices = vertices for i in range(vertices): self.adjmat[i] = dd(int) def insert(self, u, v, w=1): # Vertex from u to v because it is a directed graph self.adjmat[u][v] = w def dijkstra(self, source): vis = [False for i in range(self.vertices)] dist = [maxsize for i in range(self.vertices)] prev = [None for i in range(self.vertices)] dist[source] = 0 pq = list() pq.append((0, source)) while len(pq) > 0: # Pop the node with shortest distance mindist, node = heapq.heappop(pq) vis[node] = True if dist[node] < mindist: continue for i in self.adjmat[node].keys(): if vis[i]: continue new_dist = dist[node] + self.adjmat[node][i] # Add present distance with weight of the edge if new_dist < dist[i]: # If better path is found prev[i] = node dist[i] = new_dist pq.append((new_dist, i)) return dist, prev # Return minimum distance of each node from source. def shortest_distance(self, s, e): dist, prev = self.dijkstra(s) path = list() if dist[e] == maxsize: return path i = e while i is not None: path.append(i) i = prev[i] return path[::-1]
[ "heapq.heappop", "collections.defaultdict" ]
[((843, 851), 'collections.defaultdict', 'dd', (['dict'], {}), '(dict)\n', (845, 851), True, 'from collections import defaultdict as dd\n'), ((948, 955), 'collections.defaultdict', 'dd', (['int'], {}), '(int)\n', (950, 955), True, 'from collections import defaultdict as dd\n'), ((1453, 1470), 'heapq.heappop', 'heapq.heappop', (['pq'], {}), '(pq)\n', (1466, 1470), False, 'import heapq\n')]
# Example of faking classes with a closure import sys class ClosureInstance: def __init__(self, locals=None): if locals is None: locals = sys._getframe(1).f_locals # Update instance dictionary with callables self.__dict__.update((key,value) for key, value in locals.items() if callable(value) ) # Redirect special methods def __len__(self): return self.__dict__['__len__']() # Example use def Stack(): items = [] def push(item): items.append(item) def pop(): return items.pop() def __len__(): return len(items) return ClosureInstance() if __name__ == '__main__': s = Stack() print(s) s.push(10) s.push(20) s.push('Hello') print(len(s)) print(s.pop()) print(s.pop()) print(s.pop())
[ "sys._getframe" ]
[((163, 179), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (176, 179), False, 'import sys\n')]
''' Utilities useful for datasets ''' import os from functools import partial from urllib.request import urlretrieve import requests from tqdm import tqdm from torch.utils.data.dataloader import DataLoader from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler from data.sampler import SequenceLengthSampler # See https://github.com/tqdm/tqdm#hooks-and-callbacks class DownloadProgressBar(tqdm): """Provides `update_to(n)` which uses `tqdm.update(delta_n)`.""" def __init__(self, filename): ''' ''' super(DownloadProgressBar, self).__init__( unit='B', unit_scale=True, miniters=1, desc=filename) def update_to(self, blocks=1, block_size=1, total_size=None): """ blocks : int, optional Number of blocks transferred so far [default: 1]. block_size : int, optional Size of each block (in tqdm units) [default: 1]. total_size : int, optional Total size (in tqdm units). If [default: None] remains unchanged. """ if total_size: self.total = total_size self.update(blocks * block_size - self.n) # will also set self.n = blocks * block_size def maybe_download(filepath, url): ''' Download the requested URL to the requested path if it does not already exist ''' directory = os.path.dirname(filepath) if not os.path.exists(directory): os.makedirs(directory) if os.path.exists(filepath): return filepath if 'drive.google.com' in url: return download_from_google_drive(filepath, url) else: return download_url(filepath, url) def download_url(filepath, url): ''' Downloads the given url to the specified file path. ''' filename = os.path.basename(filepath) with DownloadProgressBar(filename) as progress: urlretrieve(url, filepath, reporthook=progress.update_to) return filepath def download_from_google_drive(filepath, url): ''' Downloads a file from Google Drive. Apparently Google Drive may issue a warning about scanning for viruses and require confirmation to continue the download. 
''' confirmation_token = None session = requests.Session() response = session.get(url, stream=True) for key, value in response.cookies.items(): if key.startswith("download_warning"): confirmation_token = value if confirmation_token: url = url + "&confirm=" + confirmation_token response = session.get(url, stream=True) total_size = int(response.headers.get('content-length', 0)) block_size = 16 * 1024 filename = os.path.basename(filepath) with open(filepath, "wb") as file: with DownloadProgressBar(filename) as progress: blocks = iter( file.write(block) for block in response.iter_content(block_size) if block ) for i, block in enumerate(blocks): progress.update_to(i, block_size, total_size) return filepath def get_dataloader(config, worker_init_fn=None, pin_memory=True, num_devices=1, shuffle=False): ''' Utility function that gets a data loader ''' dataset = config.dataset(config, split=config.split).load() if config.batch_method == 'token': # Calculate batch sizes for each device. Potentially reduce the batch size on device 0 as # the optimization step (all the gradients from all devices) happens on device 0. batch_sizes = [config.batch_size - config.batch_size_buffer] batch_sizes += [config.batch_size] * (num_devices - 1) batch_sampler = SequenceLengthSampler( batch_sizes, [(len(d['input']), len(d['target'])) for d in dataset.data], shuffle=shuffle, granularity=config.token_bucket_granularity ) elif config.batch_method == 'example': sampler_fn = RandomSampler if shuffle else SequentialSampler batch_sampler = BatchSampler( sampler_fn(dataset), config.batch_size, False ) else: raise ValueError('Unknown batch method!') return DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=partial(dataset.collate, sort=True), num_workers=num_devices, pin_memory=pin_memory, worker_init_fn=worker_init_fn )
[ "os.path.exists", "requests.Session", "os.makedirs", "urllib.request.urlretrieve", "os.path.dirname", "functools.partial", "os.path.basename" ]
[((1361, 1386), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (1376, 1386), False, 'import os\n'), ((1464, 1488), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1478, 1488), False, 'import os\n'), ((1773, 1799), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (1789, 1799), False, 'import os\n'), ((2219, 2237), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2235, 2237), False, 'import requests\n'), ((2651, 2677), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2667, 2677), False, 'import os\n'), ((1398, 1423), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1412, 1423), False, 'import os\n'), ((1433, 1455), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1444, 1455), False, 'import os\n'), ((1860, 1917), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'filepath'], {'reporthook': 'progress.update_to'}), '(url, filepath, reporthook=progress.update_to)\n', (1871, 1917), False, 'from urllib.request import urlretrieve\n'), ((4280, 4315), 'functools.partial', 'partial', (['dataset.collate'], {'sort': '(True)'}), '(dataset.collate, sort=True)\n', (4287, 4315), False, 'from functools import partial\n')]
#!/usr/bin/python3 # -*- coding: utf-8 -*- ##===-----------------------------------------------------------------------------*- Python -*-===## ## ## S E R I A L B O X ## ## This file is distributed under terms of BSD license. ## See LICENSE.txt for more information. ## ##===------------------------------------------------------------------------------------------===## ## ## This example demonstrates the asynchronous API of Serialbox which can improve the throughput of ## read operations. ## ##===------------------------------------------------------------------------------------------===## # # First, we have to make sure Python finds the Serialbox module. Alternatively, you can also set the # environment variable PYTHONPATH. # import os import sys import time sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../python') sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src/serialbox-python') # # Import Serialbox # import serialbox as ser import numpy as np def main(): N = 512; M = 512; K = 80 savepoint = ser.Savepoint('sp') # # First, we write some data to disk ... # serializer_write = ser.Serializer(ser.OpenModeKind.Write, "./async", "Field", "Binary") field_1 = np.random.rand(N, M, K) field_2 = np.random.rand(N, M, K) field_3 = np.random.rand(N, M, K) field_4 = np.random.rand(N, M, K) field_5 = np.random.rand(N, M, K) field_6 = np.random.rand(N, M, K) serializer_write.write('field_1', savepoint, field_1) serializer_write.write('field_2', savepoint, field_2) serializer_write.write('field_3', savepoint, field_3) serializer_write.write('field_4', savepoint, field_4) serializer_write.write('field_5', savepoint, field_5) serializer_write.write('field_6', savepoint, field_6) # # ... and read it again. 
# serializer_read = ser.Serializer(ser.OpenModeKind.Read, "./async", "Field", "Binary") start = time.time() field_1_rd = serializer_read.read('field_1', savepoint) field_2_rd = serializer_read.read('field_2', savepoint) field_3_rd = serializer_read.read('field_3', savepoint) field_4_rd = serializer_read.read('field_4', savepoint) field_5_rd = serializer_read.read('field_5', savepoint) field_6_rd = serializer_read.read('field_6', savepoint) print("Serializer.read : %8.2f s" % (time.time() - start)) # # Read operations are usually embarrassingly parallel and we can leverage this parallelism by # launching the operations asynchronously. If the archive is not thread-safe or if the library # was not configured with `SERIALBOX_ASYNC_API` the method falls back to synchronous execution. # To synchronize the tasks in the end, we can add a blocking Serializer.wait_for_all(). # start = time.time() field_1_rd_async = serializer_read.read_async('field_1', savepoint) field_2_rd_async = serializer_read.read_async('field_2', savepoint) field_3_rd_async = serializer_read.read_async('field_3', savepoint) field_4_rd_async = serializer_read.read_async('field_4', savepoint) field_5_rd_async = serializer_read.read_async('field_5', savepoint) field_6_rd_async = serializer_read.read_async('field_6', savepoint) serializer_read.wait_for_all() print("Serializer.read_async : %8.2f s" % (time.time() - start)) # # Finally, we verify the read operations actually do the same. # assert(np.allclose(field_1_rd, field_1_rd_async)) assert(np.allclose(field_2_rd, field_2_rd_async)) assert(np.allclose(field_3_rd, field_3_rd_async)) assert(np.allclose(field_4_rd, field_4_rd_async)) assert(np.allclose(field_5_rd, field_5_rd_async)) assert(np.allclose(field_6_rd, field_6_rd_async)) # # Remove directory # import shutil shutil.rmtree("./async") if __name__ == '__main__': main()
[ "numpy.allclose", "numpy.random.rand", "shutil.rmtree", "os.path.realpath", "time.time", "serialbox.Savepoint", "serialbox.Serializer" ]
[((1139, 1158), 'serialbox.Savepoint', 'ser.Savepoint', (['"""sp"""'], {}), "('sp')\n", (1152, 1158), True, 'import serialbox as ser\n'), ((1244, 1312), 'serialbox.Serializer', 'ser.Serializer', (['ser.OpenModeKind.Write', '"""./async"""', '"""Field"""', '"""Binary"""'], {}), "(ser.OpenModeKind.Write, './async', 'Field', 'Binary')\n", (1258, 1312), True, 'import serialbox as ser\n'), ((1330, 1353), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1344, 1353), True, 'import numpy as np\n'), ((1369, 1392), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1383, 1392), True, 'import numpy as np\n'), ((1408, 1431), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1422, 1431), True, 'import numpy as np\n'), ((1447, 1470), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1461, 1470), True, 'import numpy as np\n'), ((1486, 1509), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1500, 1509), True, 'import numpy as np\n'), ((1525, 1548), 'numpy.random.rand', 'np.random.rand', (['N', 'M', 'K'], {}), '(N, M, K)\n', (1539, 1548), True, 'import numpy as np\n'), ((1974, 2041), 'serialbox.Serializer', 'ser.Serializer', (['ser.OpenModeKind.Read', '"""./async"""', '"""Field"""', '"""Binary"""'], {}), "(ser.OpenModeKind.Read, './async', 'Field', 'Binary')\n", (1988, 2041), True, 'import serialbox as ser\n'), ((2057, 2068), 'time.time', 'time.time', ([], {}), '()\n', (2066, 2068), False, 'import time\n'), ((2931, 2942), 'time.time', 'time.time', ([], {}), '()\n', (2940, 2942), False, 'import time\n'), ((3587, 3628), 'numpy.allclose', 'np.allclose', (['field_1_rd', 'field_1_rd_async'], {}), '(field_1_rd, field_1_rd_async)\n', (3598, 3628), True, 'import numpy as np\n'), ((3642, 3683), 'numpy.allclose', 'np.allclose', (['field_2_rd', 'field_2_rd_async'], {}), '(field_2_rd, field_2_rd_async)\n', (3653, 3683), True, 'import numpy as np\n'), 
((3697, 3738), 'numpy.allclose', 'np.allclose', (['field_3_rd', 'field_3_rd_async'], {}), '(field_3_rd, field_3_rd_async)\n', (3708, 3738), True, 'import numpy as np\n'), ((3752, 3793), 'numpy.allclose', 'np.allclose', (['field_4_rd', 'field_4_rd_async'], {}), '(field_4_rd, field_4_rd_async)\n', (3763, 3793), True, 'import numpy as np\n'), ((3807, 3848), 'numpy.allclose', 'np.allclose', (['field_5_rd', 'field_5_rd_async'], {}), '(field_5_rd, field_5_rd_async)\n', (3818, 3848), True, 'import numpy as np\n'), ((3862, 3903), 'numpy.allclose', 'np.allclose', (['field_6_rd', 'field_6_rd_async'], {}), '(field_6_rd, field_6_rd_async)\n', (3873, 3903), True, 'import numpy as np\n'), ((3969, 3993), 'shutil.rmtree', 'shutil.rmtree', (['"""./async"""'], {}), "('./async')\n", (3982, 3993), False, 'import shutil\n'), ((864, 890), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (880, 890), False, 'import os\n'), ((941, 967), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (957, 967), False, 'import os\n'), ((2487, 2498), 'time.time', 'time.time', ([], {}), '()\n', (2496, 2498), False, 'import time\n'), ((3469, 3480), 'time.time', 'time.time', ([], {}), '()\n', (3478, 3480), False, 'import time\n')]
from dataclasses import dataclass from datetime import datetime, timedelta import json import os.path from dateutil.parser import parse import pytz import redis from redis.lock import LockError import requests from . import settings from .logger import logger UNCACHED_HEADERS = ( 'Age', 'Cache-Control', 'Date', 'X-Cache', ) def get_cache(): if settings.REDIS_URL: logger.info('Using Redis Cache.') return RedisCache(settings.REDIS_URL) logger.info('Using Local Cache.') return PersistedCache( os.path.join(settings.CACHE_LOCATION, settings.CACHE_NAME) ) class PersistedCache(object): store = {} def __init__(self, cache_location): self.cache_location = cache_location try: self.store.update(self.load(cache_location)) except IOError: logger.warn('No existing cache detected. Will create one.') except Exception: logger.error('Could not load cache. Removing and recreating.') self.save() finally: logger.info(f'Cache prepopulated with {len(self.store.keys())} items.') def get(self, key): return self.store.get(key, None) def set(self, key, value): self.store[key] = value try: self.save() except Exception: logger.error('Could not load cache. 
Dumping store and regenerating.') self.store = {} self.save() def save(self): with open(self.cache_location, 'w+') as f: json.dump({ key: cache_item.encode() for key, cache_item in self.store.items() }, f) def load(self, cache_location): with open(cache_location, 'r+') as f: return { key: CacheItem.decode(value) for key, value in json.load(f).items() } class RedisCache(object): def __init__(self, url): self.ttl = ( settings.MAX_CACHE_SECONDS if settings.MAX_CACHE_SECONDS > 0 else None ) self.client = redis.Redis.from_url(url) logger.info(f'Connected to redis: {url}') def get(self, key): value = self.client.get(key) if not value: return None return CacheItem.decode(json.loads(value)) def set(self, key, value): value = json.dumps(value.encode()) try: with self.client.lock(f'lock__{key}', blocking_timeout=6, timeout=2): self.client.set(key, value, ex=self.ttl) except LockError as e: logger.error(f'Failed to aquire lock for key {key}\n{e}') return None @dataclass class CacheItem: """ A record in the cache. 
""" url: str headers: dict etag: str expires: datetime last_modified: datetime created_at: datetime @property def is_expired(self): if settings.MAX_CACHE_SECONDS == 0: return False expires = ( self.created_at + timedelta(seconds=settings.MAX_CACHE_SECONDS) ) return expires < datetime.now(pytz.utc) @property def is_valid(self): logger.debug( f'Using: {self.url}\n' f'\tEtag: {self.etag}\n' f'\tExpires: {self.expires}\n' f'\tLast-Modified: {self.last_modified}\n' f'-------------------------------------' ) if not self.expires and not self.last_modified and not self.etag: logger.debug('No cache information.') return False if self.etag == '-1': logger.debug(f'Forcing uncached version due to Etag: {self.etag}') return False if self.is_expired: logger.debug('CacheItem has expired.') return False if self.expires and self.expires > datetime.now(pytz.utc): logger.debug('Using cached version due to Expires.') return True logger.debug(f'>>> HEAD {self.url}') try: head_check = requests.head( self.url, timeout=10, ) head_check.raise_for_status() except Exception as e: logger.error(f'>>> HEAD {self.url} failed with error: {e}') return False etag = head_check.headers.get('etag', None) logger.debug(f'Trying ETag... {etag}') if etag and etag == self.etag: return True last_modified = head_check.headers.get('last-modified', None) logger.debug(f'Trying Last-Modified... 
{last_modified}') if ( last_modified and self.last_modified and parse(last_modified) <= self.last_modified ): return True return False def encode(self): return [ self.url, self.headers, self.etag, self.expires.isoformat() if self.expires else None, self.last_modified.isoformat() if self.last_modified else None, self.created_at.isoformat(), ] @classmethod def decode(cls, value): url, headers, etag, expires_str, last_modified_str, created_at_str = value return CacheItem( url=url, headers=headers, etag=etag, expires=parse(expires_str) if expires_str else None, last_modified=parse(last_modified_str) if last_modified_str else None, created_at=parse(created_at_str) ) # Global Cache request_cache = get_cache() # Cache Functions def check(url): item = request_cache.get(url) if item is None: return None if not item.is_valid: return None return item def get(url): return request_cache.get(url) def add(url, response): expires = response.headers.get('expires') last_modified = response.headers.get('last-modified') etag = response.headers.get('etag') if etag: etag = ( etag .replace('W/', '') # replace weak comparison marker .replace('"', '') # replace quotes ) headers = { key: value for key, value in dict(response.headers).items() if key not in UNCACHED_HEADERS } request_cache.set(url, CacheItem( url=url, headers=headers, etag=etag, expires=parse(expires) if expires else None, last_modified=parse(last_modified) if last_modified else None, created_at=datetime.now(pytz.utc), )) logger.debug( f'Adding: {url}\n' f'\tEtag: {etag}\n' f'\tExpires: {expires}\n' f'\tLast-Modified: {last_modified}\n' f'-------------------------------------' )
[ "dateutil.parser.parse", "redis.Redis.from_url", "json.loads", "requests.head", "datetime.datetime.now", "json.load", "datetime.timedelta" ]
[((2121, 2146), 'redis.Redis.from_url', 'redis.Redis.from_url', (['url'], {}), '(url)\n', (2141, 2146), False, 'import redis\n'), ((2337, 2354), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (2347, 2354), False, 'import json\n'), ((3051, 3096), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'settings.MAX_CACHE_SECONDS'}), '(seconds=settings.MAX_CACHE_SECONDS)\n', (3060, 3096), False, 'from datetime import datetime, timedelta\n'), ((3132, 3154), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (3144, 3154), False, 'from datetime import datetime, timedelta\n'), ((4080, 4115), 'requests.head', 'requests.head', (['self.url'], {'timeout': '(10)'}), '(self.url, timeout=10)\n', (4093, 4115), False, 'import requests\n'), ((3883, 3905), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (3895, 3905), False, 'from datetime import datetime, timedelta\n'), ((4722, 4742), 'dateutil.parser.parse', 'parse', (['last_modified'], {}), '(last_modified)\n', (4727, 4742), False, 'from dateutil.parser import parse\n'), ((5524, 5545), 'dateutil.parser.parse', 'parse', (['created_at_str'], {}), '(created_at_str)\n', (5529, 5545), False, 'from dateutil.parser import parse\n'), ((6553, 6575), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (6565, 6575), False, 'from datetime import datetime, timedelta\n'), ((5373, 5391), 'dateutil.parser.parse', 'parse', (['expires_str'], {}), '(expires_str)\n', (5378, 5391), False, 'from dateutil.parser import parse\n'), ((5444, 5468), 'dateutil.parser.parse', 'parse', (['last_modified_str'], {}), '(last_modified_str)\n', (5449, 5468), False, 'from dateutil.parser import parse\n'), ((6426, 6440), 'dateutil.parser.parse', 'parse', (['expires'], {}), '(expires)\n', (6431, 6440), False, 'from dateutil.parser import parse\n'), ((6485, 6505), 'dateutil.parser.parse', 'parse', (['last_modified'], {}), '(last_modified)\n', (6490, 6505), False, 'from dateutil.parser 
import parse\n'), ((1868, 1880), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1877, 1880), False, 'import json\n')]
''' File: discretizer.py Description: function definition History: Date Programmer SAR# - Description ---------- ---------- ---------------------------- Author: <NAME> 29Apr2016 - Created ''' import numpy as np from . import pinm as pinm from stl import mesh from mpl_toolkits import mplot3d from matplotlib import pyplot from matplotlib import colors as Colors from matplotlib.widgets import Button import matplotlib.cm as cmx def stlImport(filePath,coords): # Create a new plot figure = pyplot.figure() pyplot.subplots_adjust(bottom=0.2) axes = mplot3d.Axes3D(figure) # Load the STL files and add the vectors to the plot modelMesh = mesh.Mesh.from_file(filePath) indexedTri=[] for n in range(len(modelMesh.vectors)): indexedTri.append(mplot3d.art3d.Poly3DCollection([modelMesh.vectors[n]],facecolors='b')) axes.add_collection3d(indexedTri[n]) indexedTri[0].set_facecolor('k') scale = modelMesh.points.flatten(-1) axes.auto_scale_xyz(scale, scale, scale) callback = DomainSelector(indexedTri) axprev = pyplot.axes([0.7, 0.05, 0.1, 0.075]) axnext = pyplot.axes([0.81, 0.05, 0.1, 0.075]) axselect = pyplot.axes([0.05, 0.05, 0.15, 0.075]) axaddToDomain = pyplot.axes([0.05, 0.85, 0.15, 0.075]) axswapSelected = pyplot.axes([0.8, 0.85, 0.15, 0.075]) bnext = Button(axnext, 'Next') bnext.on_clicked(callback.next) bprev = Button(axprev, 'Previous') bprev.on_clicked(callback.prev) bselect = Button(axselect, '(un)Select') bselect.on_clicked(callback.select) baddToDomain = Button(axaddToDomain, 'Add Domain') baddToDomain.on_clicked(callback.addToDomain) bswapSelected = Button(axswapSelected, 'Swap Selected') bswapSelected.on_clicked(callback.swapSelected) # Show the plot to the screen #pyplot.connect('key_press_event', callback.keyPressed) pyplot.show() maindomain=pinm.Domain('') subdomain=[] for domainNumber in range(callback.domainCount): subdomain.append(pinm.Domain('')) maindomain.addNode(subdomain[domainNumber]) normalVector=[] vertices=[] minvert={} maxvert={} for n in 
range(len(modelMesh.normals)): normalVector.append({}) vertices.append([]) for keyIndex in range(len(coords)): normalVector[n][coords[keyIndex]]=modelMesh.normals[n][keyIndex] for m in range(3): temp_vert={} for keyIndex in range(len(coords)): if coords[keyIndex] not in minvert: minvert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex] else: minvert[coords[keyIndex]]=min(minvert[coords[keyIndex]],modelMesh.vectors[n][m][keyIndex]) if coords[keyIndex] not in maxvert: maxvert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex] else: maxvert[coords[keyIndex]]=max(maxvert[coords[keyIndex]],modelMesh.vectors[n][m][keyIndex]) temp_vert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex] vertices[n].append(temp_vert) domainVertices=[] for n in range(8): temp_domainVertices={} for key in range(len(coords)): if (key==0 and (n in [1,2,5,6])) or (key==1 and (n in [2,3,6,7])) or (key==2 and (n in [4,5,6,7])): temp_domainVertices[coords[key]]=maxvert[coords[key]] else: temp_domainVertices[coords[key]]=minvert[coords[key]] domainVertices.append(temp_domainVertices) for n in range(len(callback.domainInfo)): temp_sub2domain=pinm.Domain('',norm=normalVector[n]) temp_sub2domain.setCentroid(vertices[n]) subdomain[callback.domainInfo[n]].addNode(temp_sub2domain) maindomain.setCentroid(domainVertices) return (maindomain,subdomain) def createMainDomain(minvert,maxvert,coords): maindomain=pinm.Domain('') domainVertices=[] for n in range(8): temp_domainVertices={} for key in range(len(coords)): if (key==0 and (n in [1,2,5,6])) or (key==1 and (n in [2,3,6,7])) or (key==2 and (n in [4,5,6,7])): temp_domainVertices[coords[key]]=maxvert[coords[key]] else: temp_domainVertices[coords[key]]=minvert[coords[key]] domainVertices.append(temp_domainVertices) maindomain.setCentroid(domainVertices) return maindomain def filterNodes(domainList,nodalDistribution,closeness=0.2): #first in list is prioritized to keep nodes=[] for domain in domainList: for node in domain.nodes(): nodes.append(node) for n 
in range(len(nodes)): if nodes[n].domain!=None: nodalSpacing=nodalDistribution(nodes[n].pos) closeNodalSpacing=multiplyDictionary(nodalSpacing,closeness) linkedNodes=findNodes(nodes[n].pos,domainList,distance=closeNodalSpacing,searchDepth=-1.) for temp_linkNode in linkedNodes: if temp_linkNode is not nodes[n]: temp_domain=temp_linkNode.domain temp_domain.removeNode(temp_linkNode) temp_linkNode.domain=None while len(temp_domain.subDomain)==0: if temp_domain.superDomain==None: break else: temp2_domain=temp_domain.superDomain temp2_domain.removeNode(temp_domain) temp_domain=temp2_domain return; def secondaryLinkNode(targetNode,primarylinkIdentifier,secondarylinkIdentifier='secondary'): nodes=[] targetNode.addLink(secondarylinkIdentifier,targetNode) for node in targetNode.link[primarylinkIdentifier]: for temp_node in node.link[primarylinkIdentifier]: targetNode.addLink(secondarylinkIdentifier,node) return; def primaryLinkNodes(domainList,nodalDistribution,linkIdentifier='primary',closeness=1.5):#influence is function with dictionary input and output nodes=[] for domain in domainList: for node in domain.nodes(): nodes.append(node) for n in range(len(nodes)): nodalSpacing=nodalDistribution(nodes[n].pos) expandedNodalSpacing=multiplyDictionary(nodalSpacing,closeness) linkedNodes=findNodes(nodes[n].pos,domainList,distance=expandedNodalSpacing,searchDepth=-1.) 
addNodesToLink=[] for temp_linkNode in linkedNodes: if temp_linkNode is not nodes[n]: addNodesToLink.append(temp_linkNode) addNodesToLink.insert(0,nodes[n]) nodes[n].addLink(linkIdentifier,addNodesToLink) return; def duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domainList,targetDomain): nodeInDomain=[] for domain in domainList: if type(domain) is pinm.Node: new_pos=node.pos.copy() new_pos[coordinateIdentifier]=value newNode=pinm.Node(new_pos) tempCopy=domain.variable.copy() for key in tempCopy: newNode.addvariable(key,tempCopy[key]) newNode.addLink('copied from',domain) tempCopy=node.linkBasis.copy() for key in tempCopy: newNode.setLinkBasis(key,tempCopy[key]) newNode.setNorm(node.norm.copy()) newNode.setNormLink(node.normLink) for n in range(len(node.material)): newNode.addMaterial(n,node.material[n]) tempCopy=node.variableLink.copy() for key in tempCopy: newNode.setVariableLink(key,tempCopy[key]) nodePorting[domain]=newNode newNodes.append(newNode) nodeInDomain.append(newNode) else: newDomain=pinm.Domain('') newDomain.pos=domain.pos.copy() newDomain.maxDistance=domain.maxDistance.copy() newDomain.pos[coordinateIdentifier]=value newDomain.maxDistance[coordinateIdentifier]=0. nodeInDomain.append(newDomain) duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domain.subDomain,newDomain) targetDomain.addNode(nodeInDomain) return nodeInDomain def extrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier='',nextLinkIdentifier=''): newDomain=[] prevNodes=[] for m in range(len(valueList)): newNodes=[] nodePorting={} newDomain.append([]) for domain in domainList: tempDomain=pinm.Domain('') tempDomain.pos=domain.pos.copy() tempDomain.maxDistance=domain.maxDistance.copy() tempDomain.pos[coordinateIdentifier]=value tempDomain.maxDistance[coordinateIdentifier]=0. 
newDomain[-1].append(tempDomain) duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domain.subDomain,tempDomain) for new_node in newNodes: for temp_linkIdentifier in new_node.link['copied from'][0].link: if temp_linkIdentifier!='copied from': tempList=[] for linkNode in new_node.link['copied from'][0].link[temp_linkIdentifier]: tempList.append(nodePorting[linkNode]) new_node.addLink(temp_linkIdentifier,tempList) if (prevLinkIdentifier!='' or nextLinkIdentifier!='') and len(prevNodes)!=0: for n in range(len(newNodes)): if prevLinkIdentifier!='': prevNodes[n].addLink(linkIdentifier,newNodes[n]) if nextLinkIdentifier!='': newNodes[n].addLink(linkIdentifier,prevNodes[n]) prevNodes=newNodes[:] return newDomain def arrangeExtrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier='',nextLinkIdentifier='',newDomainNameAddOn=' new',firstDomainNameAddOn='',lastDomainNameAddOn=''): nameList=[] for domain in domainList: nameList.append(domain.name) subDomain=extrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier=prevLinkIdentifier,nextLinkIdentifier=nextLinkIdentifier) newDomain=[] startDomain=[] endDomain=[] for n in range(len(subDomain[0])): startCount=0 endCountReduce=0 if firstDomainNameAddOn!='': if firstDomainNameAddOn!=lastDomainNameAddOn: subDomain[0][n].setDomainName(nameList[n]+firstDomainNameAddOn) startDomain.append(subDomain[0][n]) startCount=1 if lastDomainNameAddOn!='': if firstDomainNameAddOn!=lastDomainNameAddOn: subDomain[-1][n].setDomainName(nameList[n]+lastDomainNameAddOn) else: domainGroup=pinm.Domain(nameList[n]+firstDomainNameAddOn) domainGroup.addNode([subDomain[0][n],subDomain[-1][n]]) endDomain.append(subDomain[-1][n]) endCountReduce=1 leftOverDomain=[] for m in range(startCount,len(subDomain)-endCountReduce): leftOverDomain.append(subDomain[m][n]) if len(leftOverDomain)!=0: tempDomain=pinm.Domain(nameList[n]+newDomainNameAddOn) tempDomain.addNode(leftOverDomain) newDomain.append(tempDomain) return 
(newDomain,startDomain,endDomain) def meshSurfaceDomainTriangle(subDomain,nodalDistribution): for domain in subDomain: for sub2domain in domain.subDomain: toBeFurtherMeshed=meshTriangleSpliting(sub2domain,nodalDistribution) while len(toBeFurtherMeshed)>0: copy_toBeFurtherMeshed=toBeFurtherMeshed toBeFurtherMeshed=[] for new_domain in copy_toBeFurtherMeshed: for temp_domain in meshTriangleSpliting(new_domain,nodalDistribution): toBeFurtherMeshed.append(temp_domain) return; def meshMainDomain(mainDomain,boundaryDomainList,nodalDistribution,meshOuterNode=False): innerNodesDomain=pinm.Domain('') innerNodesDomain.setCentroid(mainDomain.vertices) mainDomain.addNode(innerNodesDomain) toBeFurtherMeshed=meshVolume(innerNodesDomain,boundaryDomainList,nodalDistribution,meshOuter=meshOuterNode) while len(toBeFurtherMeshed)>0: copy_toBeFurtherMeshed=toBeFurtherMeshed toBeFurtherMeshed=[] for new_domain in copy_toBeFurtherMeshed: for temp_domain in meshVolume(new_domain,boundaryDomainList,nodalDistribution,meshOuter=meshOuterNode): toBeFurtherMeshed.append(temp_domain) return innerNodesDomain def meshTriangleSpliting(domain,nodalDistribution): #nodalDistribution is a function with both i/o dictionary objects toBeFurtherMeshed=[] subDomain=[] #check for odd triangle sidelength=[0.,0.,0.] maxSideLength=0. minSideLength=float('inf') maxSideIndex=-1 minSideIndex=-1 for n in range(len(domain.vertices)): for coord in domain.vertices[0]: sidelength[n]+=(domain.vertices[n][coord]-domain.vertices[n-1][coord])**2. 
sidelength[n]=np.sqrt(sidelength[n]) if sidelength[n]>maxSideLength: maxSideLength=sidelength[n] maxSideIndex=n if sidelength[n]<minSideLength: minSideLength=sidelength[n] minSideIndex=n NodeSpacing=nodalDistribution(domain.pos) newPoint=multiplyDictionary(addDictionary([domain.vertices[maxSideIndex],domain.vertices[maxSideIndex-1]]),0.5) tri1Domain=pinm.Domain('',norm=domain.normalVector) tri1Domain.setCentroid([newPoint,domain.vertices[maxSideIndex],domain.vertices[maxSideIndex-2]]) tri2Domain=pinm.Domain('',norm=domain.normalVector) tri2Domain.setCentroid([newPoint,domain.vertices[maxSideIndex-2],domain.vertices[maxSideIndex-1]]) temp_total=0. for coord in NodeSpacing: temp_total+=NodeSpacing[coord]**2. nodeDis=np.sqrt(temp_total) if nodeDis<(sum(sidelength)/3.): subDomain.append(tri1Domain) subDomain.append(tri2Domain) toBeFurtherMeshed.append(tri1Domain) toBeFurtherMeshed.append(tri2Domain) else: subDomain.append(pinm.Node(tri1Domain.pos,norm=domain.normalVector)) subDomain.append(pinm.Node(tri2Domain.pos,norm=domain.normalVector)) domain.addNode(subDomain) return toBeFurtherMeshed def meshVolume(domain,boundaryDomainList,nodalDistribution,meshOuter=False): #nodalDistribution is a function with both i/o dictionary objects if meshOuter: meshOuterCoef=-1. else: meshOuterCoef=1. 
NodeSpacing=nodalDistribution(domain.pos) addNodeInstead=1 for coord in domain.maxDistance: if domain.maxDistance[coord]>NodeSpacing[coord]: addNodeInstead=0 centerPlane=[] centerPlaneMidPoints=[] for n in range(4): centerPlane.append(multiplyDictionary(addDictionary([domain.vertices[n],domain.vertices[4+n]]),0.5)) for n in range(3): centerPlaneMidPoints.append(multiplyDictionary(addDictionary([centerPlane[n],centerPlane[n+1]]),0.5)) centerPlaneMidPoints.append(multiplyDictionary(addDictionary([centerPlane[3],centerPlane[0]]),0.5)) planeCentroid=[] midPoints=[] for m in range(2): midPoints.append([]) for n in range(3): midPoints[m].append(multiplyDictionary(addDictionary([domain.vertices[m*4+n],domain.vertices[m*4+n+1]]),0.5)) midPoints[m].append(multiplyDictionary(addDictionary([domain.vertices[m*4+3],domain.vertices[m*4]]),0.5)) for m in range(2): planeCentroid.append(multiplyDictionary(addDictionary([midPoints[m][0],midPoints[m][2]]),0.5)) subDomain=[] toBeFurtherMeshed=[] for m in range(2): for n in range(4): temp_subdomain=pinm.Domain('') temp_vertices=[midPoints[m][n-1],domain.vertices[4*m+n],midPoints[m][n],planeCentroid[m], centerPlaneMidPoints[n-1],centerPlane[n],centerPlaneMidPoints[n],domain.pos] temp_subdomain.setCentroid(temp_vertices) temp_boundaryNode=findNodes(temp_subdomain.pos,boundaryDomainList) distancebetween={} for coord in temp_boundaryNode.pos: distancebetween[coord]=np.absolute(temp_boundaryNode.pos[coord]-temp_subdomain.pos[coord]) boundaryNodes=findNodes(temp_subdomain.pos,boundaryDomainList,distance=distancebetween) innerNode=True for boundaryNode in boundaryNodes: boundaryNodeCentroid=boundaryNode.pos boundaryNodeNorm=boundaryNode.norm dotProduct=0. normamplitude=0. for coords in temp_subdomain.pos: dotProduct+= (temp_subdomain.pos[coords]-boundaryNodeCentroid[coords])*boundaryNodeNorm[coords] normamplitude+=boundaryNodeNorm[coords]**2. 
dotProduct=dotProduct/np.sqrt(normamplitude) for coords in temp_subdomain.maxDistance: if (temp_subdomain.maxDistance[coords]*(1-addNodeInstead))<(meshOuterCoef*dotProduct): innerNode=False break if innerNode==False: break if innerNode: if addNodeInstead==1: temp_node=pinm.Node(temp_subdomain.pos,norm=domain.normalVector) subDomain.append(temp_node) else: toBeFurtherMeshed.append(temp_subdomain) subDomain.append(temp_subdomain) domain.addNode(subDomain) return toBeFurtherMeshed; def findNodes(position,domainList,distance=None,searchDepth=-1.):#assign search depth to -1 for nodes temp_searchDepth=searchDepth if distance==None: findNearest=True else: findNearest=False if findNearest: referenceDomain=None minDistanceSq=float("inf") otherDomain=[] for domain in domainList: temp_distSq=0. if bool(domain.pos): for coords in position: temp_distSq+=(position[coords]-domain.pos[coords])**2. if minDistanceSq>temp_distSq: minDistanceSq=temp_distSq referenceDomain=domain else: for allDomain in domain.subDomain: otherDomain.append(allDomain) if len(otherDomain)!=0: if type(referenceDomain) is pinm.Domain: for includeDomain in referenceDomain.subDomain: otherDomain.append(includeDomain) elif type(referenceDomain) is pinm.Node: otherDomain.append(referenceDomain) nodes=findNodes(position,otherDomain,searchDepth=temp_searchDepth) elif (type(referenceDomain) is not pinm.Node) and searchDepth!=0: nodes=findNodes(position,referenceDomain.subDomain,searchDepth=(temp_searchDepth-1)) else: nodes=referenceDomain return nodes else: nodes=[] for domain in domainList: toAdd=True if bool(domain.pos): if type(domain) is not pinm.Node: maxDistance=domain.maxDistance else: maxDistance={} for coords in position: maxDistance[coords]=0. 
for coords in position: if np.absolute(position[coords]-domain.pos[coords])>(maxDistance[coords]+distance[coords]): toAdd=False if toAdd: if type(domain) is not pinm.Node: for temp_nodes in findNodes(position,domain.subDomain,distance): nodes.append(temp_nodes) else: nodes.append(domain) return nodes def addDictionary(a): result={} for dicts in a: for key in dicts: if key in result: result[key]+=dicts[key] else: result[key]=dicts[key] return result def multiplyDictionary(a,b): result={} for key in a: result[key]=a[key]*b return result def plotNodes(nodes,coordinate=['x','y','z'],variableIdentifier='',complex='real'): figure = pyplot.figure() axes = mplot3d.Axes3D(figure) coordinateKey=[] var=[] numOfNodes=len(nodes) coords=np.zeros((3,numOfNodes)) for n in range(numOfNodes): for m in range(len(coords)): coords[m][n]=nodes[n].pos[coordinate[m]] if variableIdentifier!='': if complex=='real': var.append(nodes[n].variable[variableIdentifier].real) elif complex=='imag': var.append(nodes[n].variable[variableIdentifier].imag) elif complex=='abs': var.append(np.absolute(nodes[n].variable[variableIdentifier])) if variableIdentifier!='': cm = pyplot.get_cmap('jet') cNorm = Colors.Normalize(vmin=min(var), vmax=max(var)) scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm) axes.scatter(coords[0], coords[1], coords[2],c=scalarMap.to_rgba(var)) scalarMap.set_array(var) figure.colorbar(scalarMap) else: axes.scatter(coords[0], coords[1], coords[2]) pyplot.show() class DomainSelector: def __init__(self,collectionList): self.ind = 0 self.collectionList=collectionList self.selectedIndex=[] self.domainInfo=[] self.domainCount=1 self.end=False self.keyFunc={'l':self.nextFunc, 'k':self.prevFunc, 's':self.selectFunc, 'a':self.addToDomainFunc} for n in collectionList: self.selectedIndex.append(False) self.domainInfo.append(0) self.maxIndex=len(collectionList)-1 def next(self, event): self.nextFunc() def prev(self, event): self.prevFunc() def select(self, event): self.selectFunc() 
self.nextFunc() def addToDomain(self, event): self.addToDomainFunc() def swapSelected(self, event): self.swapSelectedFunc() # def keyPressed(self,event): # self.keyFunc[event.key]() #find code error def nextFunc(self): if not(self.end): if self.selectedIndex[self.ind]: self.collectionList[self.ind].set_facecolor('g') else: self.collectionList[self.ind].set_facecolor('b') self.ind += 1 if self.ind>self.maxIndex: self.ind = 0 while self.domainInfo[self.ind]!=0: self.ind += 1 if self.ind>self.maxIndex: self.ind = 0 if self.selectedIndex[self.ind]: self.collectionList[self.ind].set_facecolor('r') else: self.collectionList[self.ind].set_facecolor('k') pyplot.draw() def prevFunc(self): if not(self.end): if self.selectedIndex[self.ind]: self.collectionList[self.ind].set_facecolor('g') else: self.collectionList[self.ind].set_facecolor('b') self.ind -= 1 if self.ind<0: self.ind = self.maxIndex while self.domainInfo[self.ind]!=0: self.ind -= 1 if self.ind<0: self.ind = self.maxIndex if self.selectedIndex[self.ind]: self.collectionList[self.ind].set_facecolor('r') else: self.collectionList[self.ind].set_facecolor('k') pyplot.draw() def selectFunc(self): if not(self.end): if self.selectedIndex[self.ind]: self.collectionList[self.ind].set_facecolor('k') self.selectedIndex[self.ind]=False else: self.collectionList[self.ind].set_facecolor('r') self.selectedIndex[self.ind]=True pyplot.draw() def addToDomainFunc(self): for n in range(len(self.selectedIndex)): if self.selectedIndex[n]: self.selectedIndex[n]=False self.domainInfo[n]=self.domainCount self.collectionList[n].set_facecolor('none') self.domainCount +=1 self.end=True for n in range(len(self.domainInfo)): if self.domainInfo[n]==0: self.ind = n self.collectionList[self.ind].set_facecolor('k') self.end=False break pyplot.draw() def swapSelectedFunc(self): for n in range(len(self.selectedIndex)): if self.domainInfo[n]==0: if self.selectedIndex[n]: self.selectedIndex[n]=False if n==self.ind: 
self.collectionList[n].set_facecolor('k') else: self.collectionList[n].set_facecolor('b') else: self.selectedIndex[n]=True if n==self.ind: self.collectionList[n].set_facecolor('r') else: self.collectionList[n].set_facecolor('g') pyplot.draw()
[ "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.draw", "numpy.sqrt", "matplotlib.pyplot.show", "numpy.absolute", "matplotlib.widgets.Button", "matplotlib.pyplot.figure", "numpy.zeros", "matplotlib.pyplot.axes", "matplotlib.cm.ScalarMappable"...
[((522, 537), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (535, 537), False, 'from matplotlib import pyplot\n'), ((542, 576), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (564, 576), False, 'from matplotlib import pyplot\n'), ((588, 610), 'mpl_toolkits.mplot3d.Axes3D', 'mplot3d.Axes3D', (['figure'], {}), '(figure)\n', (602, 610), False, 'from mpl_toolkits import mplot3d\n'), ((684, 713), 'stl.mesh.Mesh.from_file', 'mesh.Mesh.from_file', (['filePath'], {}), '(filePath)\n', (703, 713), False, 'from stl import mesh\n'), ((1119, 1155), 'matplotlib.pyplot.axes', 'pyplot.axes', (['[0.7, 0.05, 0.1, 0.075]'], {}), '([0.7, 0.05, 0.1, 0.075])\n', (1130, 1155), False, 'from matplotlib import pyplot\n'), ((1169, 1206), 'matplotlib.pyplot.axes', 'pyplot.axes', (['[0.81, 0.05, 0.1, 0.075]'], {}), '([0.81, 0.05, 0.1, 0.075])\n', (1180, 1206), False, 'from matplotlib import pyplot\n'), ((1222, 1260), 'matplotlib.pyplot.axes', 'pyplot.axes', (['[0.05, 0.05, 0.15, 0.075]'], {}), '([0.05, 0.05, 0.15, 0.075])\n', (1233, 1260), False, 'from matplotlib import pyplot\n'), ((1281, 1319), 'matplotlib.pyplot.axes', 'pyplot.axes', (['[0.05, 0.85, 0.15, 0.075]'], {}), '([0.05, 0.85, 0.15, 0.075])\n', (1292, 1319), False, 'from matplotlib import pyplot\n'), ((1341, 1378), 'matplotlib.pyplot.axes', 'pyplot.axes', (['[0.8, 0.85, 0.15, 0.075]'], {}), '([0.8, 0.85, 0.15, 0.075])\n', (1352, 1378), False, 'from matplotlib import pyplot\n'), ((1391, 1413), 'matplotlib.widgets.Button', 'Button', (['axnext', '"""Next"""'], {}), "(axnext, 'Next')\n", (1397, 1413), False, 'from matplotlib.widgets import Button\n'), ((1462, 1488), 'matplotlib.widgets.Button', 'Button', (['axprev', '"""Previous"""'], {}), "(axprev, 'Previous')\n", (1468, 1488), False, 'from matplotlib.widgets import Button\n'), ((1539, 1569), 'matplotlib.widgets.Button', 'Button', (['axselect', '"""(un)Select"""'], {}), "(axselect, '(un)Select')\n", (1545, 
1569), False, 'from matplotlib.widgets import Button\n'), ((1629, 1664), 'matplotlib.widgets.Button', 'Button', (['axaddToDomain', '"""Add Domain"""'], {}), "(axaddToDomain, 'Add Domain')\n", (1635, 1664), False, 'from matplotlib.widgets import Button\n'), ((1735, 1774), 'matplotlib.widgets.Button', 'Button', (['axswapSelected', '"""Swap Selected"""'], {}), "(axswapSelected, 'Swap Selected')\n", (1741, 1774), False, 'from matplotlib.widgets import Button\n'), ((1930, 1943), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1941, 1943), False, 'from matplotlib import pyplot\n'), ((14065, 14084), 'numpy.sqrt', 'np.sqrt', (['temp_total'], {}), '(temp_total)\n', (14072, 14084), True, 'import numpy as np\n'), ((20515, 20530), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (20528, 20530), False, 'from matplotlib import pyplot\n'), ((20542, 20564), 'mpl_toolkits.mplot3d.Axes3D', 'mplot3d.Axes3D', (['figure'], {}), '(figure)\n', (20556, 20564), False, 'from mpl_toolkits import mplot3d\n'), ((20634, 20659), 'numpy.zeros', 'np.zeros', (['(3, numOfNodes)'], {}), '((3, numOfNodes))\n', (20642, 20659), True, 'import numpy as np\n'), ((21549, 21562), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (21560, 21562), False, 'from matplotlib import pyplot\n'), ((13247, 13269), 'numpy.sqrt', 'np.sqrt', (['sidelength[n]'], {}), '(sidelength[n])\n', (13254, 13269), True, 'import numpy as np\n'), ((21180, 21202), 'matplotlib.pyplot.get_cmap', 'pyplot.get_cmap', (['"""jet"""'], {}), "('jet')\n", (21195, 21202), False, 'from matplotlib import pyplot\n'), ((21286, 21325), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cm'}), '(norm=cNorm, cmap=cm)\n', (21304, 21325), True, 'import matplotlib.cm as cmx\n'), ((24903, 24916), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (24914, 24916), False, 'from matplotlib import pyplot\n'), ((25591, 25604), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', 
(25602, 25604), False, 'from matplotlib import pyplot\n'), ((802, 872), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['[modelMesh.vectors[n]]'], {'facecolors': '"""b"""'}), "([modelMesh.vectors[n]], facecolors='b')\n", (832, 872), False, 'from mpl_toolkits import mplot3d\n'), ((23238, 23251), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (23249, 23251), False, 'from matplotlib import pyplot\n'), ((23952, 23965), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (23963, 23965), False, 'from matplotlib import pyplot\n'), ((24324, 24337), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (24335, 24337), False, 'from matplotlib import pyplot\n'), ((16417, 16486), 'numpy.absolute', 'np.absolute', (['(temp_boundaryNode.pos[coord] - temp_subdomain.pos[coord])'], {}), '(temp_boundaryNode.pos[coord] - temp_subdomain.pos[coord])\n', (16428, 16486), True, 'import numpy as np\n'), ((17095, 17117), 'numpy.sqrt', 'np.sqrt', (['normamplitude'], {}), '(normamplitude)\n', (17102, 17117), True, 'import numpy as np\n'), ((19654, 19704), 'numpy.absolute', 'np.absolute', (['(position[coords] - domain.pos[coords])'], {}), '(position[coords] - domain.pos[coords])\n', (19665, 19704), True, 'import numpy as np\n'), ((21084, 21134), 'numpy.absolute', 'np.absolute', (['nodes[n].variable[variableIdentifier]'], {}), '(nodes[n].variable[variableIdentifier])\n', (21095, 21134), True, 'import numpy as np\n')]
from urllib.parse import parse_qs, urlencode, urlparse from .. import settings def normalize(request, url: str) -> str: parts = urlparse(url) url = f"{settings.BASE_URL}{parts.path}" if "background" in parts.query: background = parse_qs(parts.query)["background"][0] else: background = "" query = params(request, background=background) if query: url += "?" + urlencode(query) return clean(url) def params(request, **kwargs) -> dict: return {k: v for k, v in kwargs.items() if v} def clean(url: str) -> str: url = _unquote_slashes(url) url = _drop_trailing_spaces(url) return url def _unquote_slashes(url: str) -> str: return url.replace("%3A%2F%2F", "://").replace("%2F", "/") def _drop_trailing_spaces(url: str) -> str: while "/_." in url: url = url.replace("/_.", ".") return url
[ "urllib.parse.urlencode", "urllib.parse.parse_qs", "urllib.parse.urlparse" ]
[((135, 148), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (143, 148), False, 'from urllib.parse import parse_qs, urlencode, urlparse\n'), ((410, 426), 'urllib.parse.urlencode', 'urlencode', (['query'], {}), '(query)\n', (419, 426), False, 'from urllib.parse import parse_qs, urlencode, urlparse\n'), ((251, 272), 'urllib.parse.parse_qs', 'parse_qs', (['parts.query'], {}), '(parts.query)\n', (259, 272), False, 'from urllib.parse import parse_qs, urlencode, urlparse\n')]
from ...forms.checks import check_is_logged from django.shortcuts import redirect def no_login_required(view_function): def exec_view_function(*args, **kwargs): request = args[0] if check_is_logged(request): return redirect('/') return view_function(*args, **kwargs) return exec_view_function
[ "django.shortcuts.redirect" ]
[((255, 268), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (263, 268), False, 'from django.shortcuts import redirect\n')]
from collections import defaultdict from pathlib import Path from typing import Dict, Iterable, List, Optional, Tuple, Union import networkx as nx import onnx from . import graph_ir as g from .onnx_attr import get_node_shape, node_attr_to_dict, node_to_shape PathLike = Union[str, Path] GraphT = onnx.GraphProto NodeT = onnx.NodeProto NodeT.__hash__ = lambda self: id(self) NodeT.__repr__ = NodeT.__str__ = lambda self: self.name class MarkedSubGraph: """A subgraph with information on how it should replace a node in a super graph. subgraph: a nx.DiGraph subgraph entry_edges: a list of edges from nodes "outside" to nodes in self.subgraph exit: the exit node of the subgraph. When this subgraph replaces a node `n`, self.exit will be connected to whateven `n` is connected to. """ def __init__(self, subgraph: nx.DiGraph, entry_edges, exit) -> None: assert all(to in subgraph for _, to, _ in entry_edges) assert exit in subgraph self.subgraph, self.exit = subgraph, exit self.entry_edges = [(f, t, {"index": i}) for f, t, i in entry_edges] @classmethod def idiomatic_1to2(cls, node1, node2, predecessors): """Create an idiomatic replacement as follow: node(arg1, arg2, arg3) -> node2(node1(arg1, arg2), arg3)""" p0, p1, p2 = predecessors graph = nx.DiGraph() graph.add_edge(node1, node2, index=0) return cls(graph, [(p0, node1, 0), (p1, node1, 1), (p2, node2, 1)], node2) EmitNodeT = Union[MarkedSubGraph, g.DFGNode] class DFG(object): """ONNX model translated into DFG with `DFGNode`s. 
This class has a DFG, input/output information, and a clear traverse order (think dominant tree), and is easier for CodeGen classes to work with.""" def __init__(self, graph: GraphT): self._check_model(graph) self._var_count = 0 # Build explicit DFG with ONNX nodes onnx_graph = self._build_onnx_dfg(graph) # Convert ONNX dfg into DFGNode DFG self.graph = self._build_dfg(onnx_graph) # Find out input nodes and output node (unique) # removing dead nodes along the way if any self.inputs, self.output = self._dce_get_io_info() ################ Interfaces: @property def traverse_order(self) -> List[g.DFGNode]: """Get topological order of computational graph by use-def relation.""" return list(nx.topological_sort(self.graph)) def node_args(self, node: g.DFGNode): """Get input arguments of node.""" sorted_edges = sorted(self.graph.in_edges(node, "index"), key=lambda p: p[2]) return [e[0] for e in sorted_edges] def dump_weights(self, output_dir: PathLike) -> None: """Dump `WeightTensor`s into output_dir.""" output_dir = Path(output_dir) for node in self.graph.nodes: if not isinstance(node, g.WeightTensor): continue node.dump_weight(output_dir / (node.new_name + "_path.bin")) ################ Internal methods (high-level): @staticmethod def _check_model(onnx_graph: GraphT): """Check model validaty and single output (which is our limitation)""" import warnings from onnx import checker, onnx_cpp2py_export # try use onnx's own model checker before converting any model try: checker.check_graph(onnx_graph) except onnx_cpp2py_export.checker.ValidationError as e: warnings.warn(str(e)) if any(len(n.output) > 1 for n in onnx_graph.node): raise ValueError("All node must have single output") if len(onnx_graph.output) > 1: raise ValueError("Graph must have single output") @staticmethod def _build_onnx_dfg(graph: GraphT) -> nx.DiGraph: """Creates a DiGraph (by use-def relation) of onnx nodes from onnx GraphProto. 
DiGraph is easier to use as a graph compared to GraphProto where use-def is implicit.""" ret_graph = nx.DiGraph() onnx_defs, onnx_uses = def_use(graph.node) node_shape = node_to_shape(graph) node_and_attr = [(n, {"shape": shape}) for n, shape in node_shape.items()] ret_graph.add_nodes_from(node_and_attr) tensors = extract_tensors_from_graph(graph) tensor_and_attr = [(t, {"shape": t.output_shape}) for t in tensors.values()] ret_graph.add_nodes_from(tensor_and_attr) for onnx_value_name, use_nodes in onnx_uses.items(): def_node = onnx_defs.get(onnx_value_name) if def_node is None: def_node = tensors[onnx_value_name] for use_node, used_at_narg in use_nodes: ret_graph.add_edge(def_node, use_node, index=used_at_narg) return ret_graph def _build_dfg(self, onnx_graph: nx.DiGraph) -> nx.DiGraph: """Translate _build_onnx_dfg output into DFGNode DFG. First run some passes to process subgraphs that needs to be processed together, then each unprocessed node is generated into 1 or more nodes.""" # Gemm in tensor_runtime does reshape automatically # it also doesn't have a dedicated reshape operator onnx_graph = drop_reshape_before_gemm(onnx_graph) # For each onnx node, generate our nodes node_to_nodes, error_nodes = {}, [] for onnx_node in nx.topological_sort(onnx_graph): our_nodes = self._emit_node(onnx_graph, onnx_node) if our_nodes is None: error_nodes.append(onnx_node) else: node_to_nodes[onnx_node] = our_nodes if error_nodes: error_repr = [f"{n.name}({n.op_type})" for n in error_nodes] if len(error_nodes) > 10: # Magic number raise ValueError(f"Unsupported operators (first 10): {error_repr[:10]}") else: raise ValueError(f"Unsupported operators: {error_repr}") # Apply node_to_nodes replacement on onnx_graph to create a new DFG return build_graph_with_mapping(onnx_graph, node_to_nodes) def _dce_get_io_info(self): inputs = [n for n in self.graph if isinstance(n, g.InputTensor)] inputs_set = set(inputs) reachables = set() for component in 
nx.connected_components(self.graph.to_undirected()): # If any inputs goes into this subgraph, it's alive. if set(component).intersection(inputs_set): reachables.update(component) unreachables = set(self.graph) - reachables # Remove nodes unreachable from input self.graph.remove_nodes_from(unreachables) # Then outputs are nodes with out_degree = 0 outputs = [n for n in self.graph if self.graph.out_degree[n] == 0] assert len(outputs) == 1 return inputs, outputs[0] @staticmethod def _emit_node(in_graph: nx.DiGraph, node: NodeT) -> Optional[EmitNodeT]: output_shape = in_graph.nodes[node].get("shape") predec = sorted_inputs(in_graph, node) predec_shapes = [in_graph.nodes[n].get("shape") for n in predec] if isinstance(node, g.DFGNode): # Directly add node into return graph. return node attrs = node_attr_to_dict(node) attrs["input_shapes"] = predec_shapes attrs["output_shape"] = output_shape if node.op_type == "Conv": if not isinstance(predec[1], g.WeightTensor) or len(predec_shapes[1]) != 4: return None # Only supports 2D conv with rhs being constant # Only pass in the first 2 arguments' shapes attrs["input_shapes"] = predec_shapes[:2] conv_node = g.Conv2DNode(node.name, **attrs) if len(predec) == 2: return conv_node # Split into conv followed by an addition bias_node = g.BiasAddNode( f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape ) return MarkedSubGraph.idiomatic_1to2(conv_node, bias_node, predec) if node.op_type in ("MatMul", "Gemm"): attrs["input_shapes"] = predec_shapes[:2] mul_node = g.MatMulNode(node.name, **attrs) if node.op_type == "Gemm": mul_node.gemm_transpose(predec) if len(predec) == 2: return mul_node # Split into mul followed by an addition bias_node = g.BiasAddNode( f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape ) return MarkedSubGraph.idiomatic_1to2(mul_node, bias_node, predec) if node.op_type == "GlobalAveragePool": input0_shape = in_graph.nodes[predec[0]]["shape"] _, _, h, w = input0_shape return g.AveragePool2DNode( 
node.name, predec_shapes, output_shape, [1, 1], (h, w), [0, 0, 0, 0] ) one_to_one_nodes = { "MaxPool": g.MaxPool2DNode, "AveragePool": g.AveragePool2DNode, "Add": g.AddNode, "Softmax": g.SoftMaxNode, "Relu": g.ReluNode, "Tanh": g.TanhNode, "BatchNormalization": g.BatchNormalizationNode, "Pad": g.PadNode, "Identity": g.IdentityNode, "Flatten": g.FlattenNode, } if node.op_type not in one_to_one_nodes: return None try: return one_to_one_nodes[node.op_type](node.name, **attrs) except (TypeError, KeyError, ValueError, RuntimeError): node_class = one_to_one_nodes[node.op_type] raise ValueError(f"Node ({node_class}) creation failed") def def_use(nodes: Iterable) -> Tuple[dict, dict]: """Computes def/use relation from a list of node. This method is duck-typed and operates on any node defining .input and .output. """ defs, uses = {}, defaultdict(list) for n in nodes: for i, input_ in enumerate(n.input): uses[input_].append((n, i)) for output in n.output: defs[output] = n return defs, uses def drop_reshape_before_gemm(graph: nx.DiGraph) -> nx.DiGraph: """Look for a shape-gather-unsqueeze-concat-reshape chain and replace that with flatten.""" for node in list(graph.nodes): if node.op_type != "Reshape": continue reshape_input, target_shape = sorted_inputs(graph, node) if not isinstance(target_shape, g.WeightTensor): # Not constant shape, nope continue n_gemm = get_next_in_chain(graph, "Gemm", node) if n_gemm is None: continue # Must be an (n-1)-d flatten before gemm assert list(target_shape.input_data) == [1, -1] # Connect input of reshape to gemm, then remove reshape graph.add_edge(reshape_input, n_gemm, index=0) graph.remove_node(node) return graph def get_next_in_chain( graph: nx.DiGraph, type_: str, node: Optional[NodeT] ) -> Optional[NodeT]: """ Get a unique user node of the unique output of Node `node`, and return it if it has Type `type_`. 
""" if node is None or len(node.output) != 1: return None # Propagates None; Unique output users = list(graph.neighbors(node)) if len(users) != 1 or users[0].op_type != type_: return None # Unique user of the output; Correct type return users[0] def build_graph_with_mapping( graph: nx.DiGraph, node_mapping: Dict[NodeT, EmitNodeT] ) -> nx.DiGraph: graph = graph.copy() single_node, multi_node = {}, {} for replace_node, by_node in node_mapping.items(): if isinstance(by_node, g.DFGNode): single_node[replace_node] = by_node else: multi_node[replace_node] = by_node # We do one-to-many replacements first # because their predecessors are specified as onnx nodes. for replace_node, subgraph in multi_node.items(): # Add subgraph itself graph = nx.compose(graph, subgraph.subgraph) # Add in edges graph.add_edges_from(subgraph.entry_edges) # Add out edges succ = graph.out_edges(replace_node, "index") for _, to, index in succ: graph.add_edge(subgraph.exit, to, index=index) # Remove old node graph.remove_node(replace_node) # Then do all one-to-one replacements. graph = nx.relabel_nodes(graph, single_node) return graph def extract_tensors_from_graph(onnx_graph: GraphT) -> Dict[str, g.TensorNode]: tensors = {} # parse weight weight_cnt = 0 for weight_tensor in onnx_graph.initializer: tensors[weight_tensor.name] = g.WeightTensor( weight_tensor, f"weight_{weight_cnt}" ) weight_cnt += 1 # parse input input_cnt = 0 for input_ in onnx_graph.input: if input_.name in tensors: continue tensors[input_.name] = g.InputTensor( input_, get_node_shape(input_), f"input_{input_cnt}" ) input_cnt += 1 return tensors def sorted_inputs(graph: nx.DiGraph, node): sorted_edges = sorted(graph.in_edges(node, "index"), key=lambda p: p[2]) return [e[0] for e in sorted_edges] def draw_graph(graph: nx.DiGraph, output_to): from networkx.drawing.nx_agraph import to_agraph agraph = to_agraph(graph) agraph.layout("dot") agraph.draw(output_to)
[ "networkx.relabel_nodes", "networkx.topological_sort", "networkx.drawing.nx_agraph.to_agraph", "pathlib.Path", "onnx.checker.check_graph", "networkx.DiGraph", "collections.defaultdict", "networkx.compose" ]
[((12461, 12497), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['graph', 'single_node'], {}), '(graph, single_node)\n', (12477, 12497), True, 'import networkx as nx\n'), ((13407, 13423), 'networkx.drawing.nx_agraph.to_agraph', 'to_agraph', (['graph'], {}), '(graph)\n', (13416, 13423), False, 'from networkx.drawing.nx_agraph import to_agraph\n'), ((1369, 1381), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1379, 1381), True, 'import networkx as nx\n'), ((2825, 2841), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (2829, 2841), False, 'from pathlib import Path\n'), ((4034, 4046), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4044, 4046), True, 'import networkx as nx\n'), ((5413, 5444), 'networkx.topological_sort', 'nx.topological_sort', (['onnx_graph'], {}), '(onnx_graph)\n', (5432, 5444), True, 'import networkx as nx\n'), ((9964, 9981), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9975, 9981), False, 'from collections import defaultdict\n'), ((12058, 12094), 'networkx.compose', 'nx.compose', (['graph', 'subgraph.subgraph'], {}), '(graph, subgraph.subgraph)\n', (12068, 12094), True, 'import networkx as nx\n'), ((2444, 2475), 'networkx.topological_sort', 'nx.topological_sort', (['self.graph'], {}), '(self.graph)\n', (2463, 2475), True, 'import networkx as nx\n'), ((3400, 3431), 'onnx.checker.check_graph', 'checker.check_graph', (['onnx_graph'], {}), '(onnx_graph)\n', (3419, 3431), False, 'from onnx import checker, onnx_cpp2py_export\n')]
import csv import json import matplotlib.pyplot as plt import numpy as np if __name__ == '__main__': formats = ['png', 'pdf', 'svg', 'eps'] metrics = [ {'gmetric': 'groc', 'lmetric': 'lroc', 'metric': 'AUC'}, {'gmetric': 'gauc', 'lmetric': 'lauc', 'metric': 'PRAUC'}, ] datasets = [ {'name': 'HCC', 'file': '../../results/evaluation/hcc_multi_sites_100_each.csv'}, {'name': 'ILPD', 'file': '../../results/evaluation/ilpd_multi_sites_100_each.csv'}, {'name': 'LTD', 'file': '../../results/evaluation/tumor_multi_sites_100_each.csv'}, {'name': 'BCD', 'file': '../../results/evaluation/diag_multi_sites_100_each.csv'}, ] for metric in metrics: gmetric = metric['gmetric'] lmetric = metric['lmetric'] metric = metric['metric'] for ds in datasets: file = ds['file'] name = ds['name'] title = f'{name} | Multiple Local Models' stats = {} xs = ['1', '2', '5', '10', '20', '50', '100'] with open(file, newline='') as csvfile: data = csv.reader(csvfile, delimiter=';') headers = next(data) gauc_idx = headers.index(gmetric) lauc_idx = headers.index(lmetric) for row in data: stat = stats.get(row[1]) if not stat: stat = { gmetric: [], lmetric: [], } stats[row[1]] = stat # xs.append(row[1]) gvals = json.loads(row[gauc_idx]) lvals = json.loads(row[lauc_idx]) stat[gmetric].append(gvals) if len(lvals) > 0: stat[lmetric].extend(lvals) else: stat[lmetric].append(gvals) # datainfo = str(len(stats['100'][gmetric])) # title += ' | ' + datainfo y_gauc_median = [np.median(stats[x][gmetric]) for x in xs] y_gauc_q25 = [np.quantile(stats[x][gmetric], 0.25) for x in xs] y_gauc_q75 = [np.quantile(stats[x][gmetric], 0.75) for x in xs] y_lauc_median = [np.median(stats[x][lmetric]) for x in xs] y_lauc_q25 = [np.quantile(stats[x][lmetric], 0.25) for x in xs] y_lauc_q75 = [np.quantile(stats[x][lmetric], 0.75) for x in xs] xs = [int(x) for x in xs] regular_col = '#b0b0b0' global_col = '#424ef5' local_col = '#f57542' alpha_mean = 1.0 alpha_q = 0.25 alpha_area = 0.2 fig = plt.figure(figsize=(6, 4.5)) ax = 
fig.add_subplot() ax.hlines(y_gauc_q25[0], 1, 100, linestyles='dotted', colors=[regular_col]) ax.hlines(y_gauc_median[0], 1, 100, label='Centralized', colors=[regular_col]) ax.hlines(y_gauc_q75[0], 1, 100, linestyles='dotted', colors=[regular_col]) ax.fill_between(xs, y_gauc_q25, y_gauc_median, color=global_col, alpha=alpha_area) ax.fill_between(xs, y_gauc_q75, y_gauc_median, color=global_col, alpha=alpha_area) ax.fill_between(xs, y_lauc_q25, y_lauc_median, color=local_col, alpha=alpha_area) ax.fill_between(xs, y_lauc_q75, y_lauc_median, color=local_col, alpha=alpha_area) ax.plot(xs, y_gauc_q25, '_', color=global_col, alpha=alpha_q) ax.plot(xs, y_gauc_median, '.', label='Combined', color=global_col, alpha=alpha_mean) ax.plot(xs, y_gauc_q75, '_', color=global_col, alpha=alpha_q) ax.plot(xs, y_lauc_q25, '_', color=local_col, alpha=alpha_q) ax.plot(xs, y_lauc_median, '.', label='Local', color=local_col, alpha=alpha_mean) ax.plot(xs, y_lauc_q75, '_', color=local_col, alpha=alpha_q) plt.yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0]) plt.xscale('log') plt.xticks([1, 2, 5, 10, 20, 50, 100], ['Centralized', '2', '5', '10', '20', '50', '100']) plt.ylabel(metric) plt.xlabel('Number of Sites') plt.legend() plt.title(title) for format in formats: plt.savefig(f'../../results/plots/{name}_{metric}_sites.{format}', format=format, bbox_inches='tight')
[ "json.loads", "numpy.median", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.legend", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "numpy.quantile", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "csv.reader", "matpl...
[((2763, 2791), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4.5)'}), '(figsize=(6, 4.5))\n', (2773, 2791), True, 'import matplotlib.pyplot as plt\n'), ((3977, 4019), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.5, 0.6, 0.7, 0.8, 0.9, 1.0]'], {}), '([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n', (3987, 4019), True, 'import matplotlib.pyplot as plt\n'), ((4032, 4049), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (4042, 4049), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4156), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[1, 2, 5, 10, 20, 50, 100]', "['Centralized', '2', '5', '10', '20', '50', '100']"], {}), "([1, 2, 5, 10, 20, 50, 100], ['Centralized', '2', '5', '10', '20',\n '50', '100'])\n", (4072, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4183), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric'], {}), '(metric)\n', (4175, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4225), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Sites"""'], {}), "('Number of Sites')\n", (4206, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4238, 4250), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4248, 4250), True, 'import matplotlib.pyplot as plt\n'), ((4263, 4279), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4272, 4279), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1160), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (1136, 1160), False, 'import csv\n'), ((2095, 2123), 'numpy.median', 'np.median', (['stats[x][gmetric]'], {}), '(stats[x][gmetric])\n', (2104, 2123), True, 'import numpy as np\n'), ((2163, 2199), 'numpy.quantile', 'np.quantile', (['stats[x][gmetric]', '(0.25)'], {}), '(stats[x][gmetric], 0.25)\n', (2174, 2199), True, 'import numpy as np\n'), ((2239, 2275), 'numpy.quantile', 'np.quantile', (['stats[x][gmetric]', '(0.75)'], {}), '(stats[x][gmetric], 0.75)\n', (2250, 2275), 
True, 'import numpy as np\n'), ((2319, 2347), 'numpy.median', 'np.median', (['stats[x][lmetric]'], {}), '(stats[x][lmetric])\n', (2328, 2347), True, 'import numpy as np\n'), ((2387, 2423), 'numpy.quantile', 'np.quantile', (['stats[x][lmetric]', '(0.25)'], {}), '(stats[x][lmetric], 0.25)\n', (2398, 2423), True, 'import numpy as np\n'), ((2463, 2499), 'numpy.quantile', 'np.quantile', (['stats[x][lmetric]', '(0.75)'], {}), '(stats[x][lmetric], 0.75)\n', (2474, 2499), True, 'import numpy as np\n'), ((4332, 4439), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../../results/plots/{name}_{metric}_sites.{format}"""'], {'format': 'format', 'bbox_inches': '"""tight"""'}), "(f'../../results/plots/{name}_{metric}_sites.{format}', format=\n format, bbox_inches='tight')\n", (4343, 4439), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1694), 'json.loads', 'json.loads', (['row[gauc_idx]'], {}), '(row[gauc_idx])\n', (1679, 1694), False, 'import json\n'), ((1723, 1748), 'json.loads', 'json.loads', (['row[lauc_idx]'], {}), '(row[lauc_idx])\n', (1733, 1748), False, 'import json\n')]
# Copyright (c) 2020 StackHPC Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from ansible import errors import jinja2 def _get_hostvar(context, var_name, inventory_hostname=None): if inventory_hostname is None: namespace = context else: if inventory_hostname not in context['hostvars']: raise errors.AnsibleFilterError( "Inventory hostname '%s' not in hostvars" % inventory_hostname) namespace = context["hostvars"][inventory_hostname] return namespace.get(var_name) @jinja2.contextfilter def luks_mode(context, device): """Returns a string represent the mode""" if "mode" in device: return device["mode"] return "keyfile" @jinja2.contextfilter def luks_key(context, device): """Returns name of keyfile""" return device["device"].replace('/', '-')[1:] @jinja2.contextfilter def luks_keypath(context, device): """Returns full path to keyfile""" directory = _get_hostvar(context, "luks_keys_path") key = luks_key(context, device) return os.path.join(directory, key) class FilterModule(object): """Utility filters.""" def filters(self): return { 'luks_mode': luks_mode, 'luks_key': luks_key, 'luks_keypath': luks_keypath, }
[ "ansible.errors.AnsibleFilterError", "os.path.join" ]
[((1568, 1596), 'os.path.join', 'os.path.join', (['directory', 'key'], {}), '(directory, key)\n', (1580, 1596), False, 'import os\n'), ((848, 941), 'ansible.errors.AnsibleFilterError', 'errors.AnsibleFilterError', (['("Inventory hostname \'%s\' not in hostvars" % inventory_hostname)'], {}), '("Inventory hostname \'%s\' not in hostvars" %\n inventory_hostname)\n', (873, 941), False, 'from ansible import errors\n')]
#!/usr/bin/env python from flask import Flask, request,Response import logging import os import json import cognitoHelper as cog #logging config logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',level=logging.INFO,datefmt='%Y-%m-%d %H:%M:%S') logger = logging.getLogger(__name__) #globals MODULE = "section8" HOST = "0.0.0.0" PORT = "8080" PROFILE = "aws-dev" REGION = "eu-west-2" PROFILE = "aws-dev" REGION = "eu-west-2" COGNITO_CLIENT_ID = "5br85tkg2nmq8nn1v8pk71lkku" COGNITO_CLIENT_SECRET = "<KEY>" USER_POOL = "my-app-pool" #initiliase flask app = Flask(__name__) app.secret_key = os.urandom(24) cidp = cog.create_client(REGION) @app.route('/api/<string:version>/auth/login',methods=["POST"]) def loginUser(version): result = {} headers = {} username = request.authorization.username password = request.authorization.password authObject = cog.login(cidp,username,password,USER_POOL) if 'error' in authObject: if 'User is disabled' in str(authObject['error']): result['error'] = "user disabled" else: result['error'] = str(authObject['error']) status = 401 result['result'] = 'fail' else: result['result'] = "ok" result['data'] = authObject['AuthenticationResult'] status = 200 lresponse = Response(json.dumps(result), status=status, mimetype='application/json',headers=headers) if status == 200: lresponse.set_cookie("idtoken",authObject['AuthenticationResult']['IdToken'],httponly=True,expires=None) return lresponse @app.route('/api/<string:version>/content/warranty',methods=["POST"]) def secure(version): resource_path = request.path result = {} headers = {} idtoken = request.cookies.get("idtoken") if request.args.get('accesstoken'): access_token = request.args.get('accesstoken') try: tokenObject = cog.decode_cognito_token(access_token) except Exception as e: status = 500 result['error'] = str(e) else: if 'error' in tokenObject: result['error'] = tokenObject['error'] status = 403 result['result'] = 'fail' else: found = 0 if 
str(tokenObject['data']['scope']).find(resource_path) == 0: found = 1 if found == 1: result['result'] = "ok" result['data'] = tokenObject['data'] status = 200 else: status = 403 result['resource'] = resource_path result['result'] = 'fail' result['error'] = "Not in scope, scope=" + tokenObject['data']['scope'] else: result['error'] = "no accesstoken specified" status = 400 result['result'] = 'fail' lresponse = Response(json.dumps(result), status=status, mimetype='application/json', headers=headers) return lresponse @app.route('/api/<string:version>/auth/whoami',methods=["POST"]) def whoami(version): result = {} headers = {} idtoken = request.cookies.get("idtoken") tokenObject = cog.decode_cognito_token(idtoken) if 'error' in tokenObject: result['error'] = tokenObject['error'] status = 401 result['result'] = 'fail' else: result['result'] = "ok" result['data'] = tokenObject status = 200 lresponse = Response(json.dumps(result), status=status, mimetype='application/json',headers=headers) return lresponse def main (): print('Running:{}'.format(MODULE)) app.run(debug=True) #app.run(host='0.0.0.0',port=PORT) app.logger.info('Running:{}'.format(MODULE)) if __name__ == "__main__": main()
[ "logging.basicConfig", "logging.getLogger", "flask.request.args.get", "cognitoHelper.create_client", "flask.Flask", "os.urandom", "cognitoHelper.login", "json.dumps", "flask.request.cookies.get", "cognitoHelper.decode_cognito_token" ]
[((146, 269), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-8s %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='%(asctime)s %(levelname)-8s %(message)s', level\n =logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n", (165, 269), False, 'import logging\n'), ((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((575, 590), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'from flask import Flask, request, Response\n'), ((608, 622), 'os.urandom', 'os.urandom', (['(24)'], {}), '(24)\n', (618, 622), False, 'import os\n'), ((630, 655), 'cognitoHelper.create_client', 'cog.create_client', (['REGION'], {}), '(REGION)\n', (647, 655), True, 'import cognitoHelper as cog\n'), ((888, 934), 'cognitoHelper.login', 'cog.login', (['cidp', 'username', 'password', 'USER_POOL'], {}), '(cidp, username, password, USER_POOL)\n', (897, 934), True, 'import cognitoHelper as cog\n'), ((1747, 1777), 'flask.request.cookies.get', 'request.cookies.get', (['"""idtoken"""'], {}), "('idtoken')\n", (1766, 1777), False, 'from flask import Flask, request, Response\n'), ((1785, 1816), 'flask.request.args.get', 'request.args.get', (['"""accesstoken"""'], {}), "('accesstoken')\n", (1801, 1816), False, 'from flask import Flask, request, Response\n'), ((3168, 3198), 'flask.request.cookies.get', 'request.cookies.get', (['"""idtoken"""'], {}), "('idtoken')\n", (3187, 3198), False, 'from flask import Flask, request, Response\n'), ((3217, 3250), 'cognitoHelper.decode_cognito_token', 'cog.decode_cognito_token', (['idtoken'], {}), '(idtoken)\n', (3241, 3250), True, 'import cognitoHelper as cog\n'), ((1339, 1357), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (1349, 1357), False, 'import json\n'), ((1841, 1872), 'flask.request.args.get', 'request.args.get', (['"""accesstoken"""'], {}), "('accesstoken')\n", (1857, 
1872), False, 'from flask import Flask, request, Response\n'), ((2932, 2950), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2942, 2950), False, 'import json\n'), ((3509, 3527), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3519, 3527), False, 'import json\n'), ((1912, 1950), 'cognitoHelper.decode_cognito_token', 'cog.decode_cognito_token', (['access_token'], {}), '(access_token)\n', (1936, 1950), True, 'import cognitoHelper as cog\n')]
#!/usr/bin/env python3 # Copyright (c) 2019 Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """This script checks for checker for unwanted TCP/UDP open ports.""" import os import json import logging from enum import Enum import mbl.open_ports_checker.connection as connection import mbl.open_ports_checker.netstatutils as nsu __version__ = "1.0" class Status(Enum): """OpenPortsChecker operation status codes.""" SUCCESS = 0 BLACK_LISTED_CONNECTION = 1 class OpenPortsChecker: """Checker for unwanted open ports.""" def __init__(self, white_list_filename): """ Create and initialize OpenPortsChecker object. :param white_list_filename: white list .json file name """ self.logger = logging.getLogger("OpenPortsChecker") self.logger.info("Initializing OpenPortsChecker") self.logger.info("Version {}".format(__version__)) # Load connections white list JSON file with open(white_list_filename, "r") as in_file: self.white_list = json.load(in_file) def run_check(self): """ Run open ports check. :return: Status.SUCCESS if all open ports are white-listed otherwise Status.BLACK_LISTED_CONNECTION """ active_connections = self.__get_list_of_active_connections() self.logger.debug( "Found {} active connections".format(len(active_connections)) ) return self.__check_connections_against_white_list(active_connections) def __check_connection_against_white_list(self, connection): """ Check if a single connection is white listed. 
:param connection: connection objects to be checked against white list :return: Status.SUCCESS Status.BLACK_LISTED_CONNECTION """ check_result = Status.BLACK_LISTED_CONNECTION ports = self.white_list["ports"] for port_data in ports: protocol = port_data["protocol"] port = port_data["port"] if connection.is_equal_port(protocol, port): check_result = Status.SUCCESS break executables = self.white_list["executables"] for executable_data in executables: executable = executable_data["executable"] if connection.is_equal_executable(executable): check_result = Status.SUCCESS break return check_result def __check_connections_against_white_list(self, connections): """ Check list of connections against white list. If all connections are listed into white list, the function returns Status.SUCCESS overwise an error code will be returned. :param connections: list of connections objects to be checked against white list :return: Status.SUCCESS Status.BLACK_LISTED_CONNECTION """ self.logger.debug("***Checking connections against white list***") blacklisted_connections = 0 for connection in connections: self.logger.debug( "Checking connection status: {}".format(connection) ) connection_status = self.__check_connection_against_white_list( connection ) self.logger.debug( "Connection status: {}".format(connection_status) ) if connection_status != Status.SUCCESS: blacklisted_connections += 1 self.logger.info( "Connection {} is blacklisted".format(connection) ) self.logger.info( "Found {}/{} blacklisted connections".format( blacklisted_connections, len(connections) ) ) return ( Status.SUCCESS if blacklisted_connections == 0 else Status.BLACK_LISTED_CONNECTION ) def __get_list_of_active_connections(self): """ Get list of all active connections except loopback. :return: List of active connections """ self.logger.debug("Get list of active connections") active_connections = nsu.netstat() return active_connections
[ "logging.getLogger", "mbl.open_ports_checker.connection.is_equal_port", "mbl.open_ports_checker.netstatutils.netstat", "mbl.open_ports_checker.connection.is_equal_executable", "json.load" ]
[((797, 834), 'logging.getLogger', 'logging.getLogger', (['"""OpenPortsChecker"""'], {}), "('OpenPortsChecker')\n", (814, 834), False, 'import logging\n'), ((4284, 4297), 'mbl.open_ports_checker.netstatutils.netstat', 'nsu.netstat', ([], {}), '()\n', (4295, 4297), True, 'import mbl.open_ports_checker.netstatutils as nsu\n'), ((1086, 1104), 'json.load', 'json.load', (['in_file'], {}), '(in_file)\n', (1095, 1104), False, 'import json\n'), ((2110, 2150), 'mbl.open_ports_checker.connection.is_equal_port', 'connection.is_equal_port', (['protocol', 'port'], {}), '(protocol, port)\n', (2134, 2150), True, 'import mbl.open_ports_checker.connection as connection\n'), ((2387, 2429), 'mbl.open_ports_checker.connection.is_equal_executable', 'connection.is_equal_executable', (['executable'], {}), '(executable)\n', (2417, 2429), True, 'import mbl.open_ports_checker.connection as connection\n')]
# Copyright (C) <NAME> 2019. All rights reserved. import argparse from kitti_odometry import KittiEvalOdom parser = argparse.ArgumentParser(description='KITTI evaluation') parser.add_argument('--result', type=str, required=True, help="Result directory") parser.add_argument('--align', type=str, choices=['scale', 'scale_7dof', '7dof', '6dof'], default=None, help="alignment type") parser.add_argument('--seqs', nargs="+", type=int, help="sequences to be evaluated", default=None) args = parser.parse_args() eval_tool = KittiEvalOdom() gt_dir = "dataset/kitti_odom/gt_poses/" result_dir = args.result continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir)) if continue_flag == "y": eval_tool.eval( gt_dir, result_dir, alignment=args.align, seqs=args.seqs, ) else: print("Double check the path!")
[ "kitti_odometry.KittiEvalOdom", "argparse.ArgumentParser" ]
[((119, 174), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""KITTI evaluation"""'}), "(description='KITTI evaluation')\n", (142, 174), False, 'import argparse\n'), ((686, 701), 'kitti_odometry.KittiEvalOdom', 'KittiEvalOdom', ([], {}), '()\n', (699, 701), False, 'from kitti_odometry import KittiEvalOdom\n')]
import pygame import random import os import time import neat import visualize import pickle import bcolors as b pygame.font.init() SCORE_MAX = [0, 0, 0] WIN_WIDTH = 600 WIN_HEIGHT = 800 FLOOR = 730 STAT_FONT = pygame.font.SysFont("comicsans", 50) END_FONT = pygame.font.SysFont("comicsans", 70) DRAW_LINES = False WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT)) pygame.display.set_caption("IA LEARNS Flappy Bird") pipe_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","pipe.png")).convert_alpha()) bg_img = pygame.transform.scale(pygame.image.load(os.path.join("imgs","bg.png")).convert_alpha(), (600, 900)) bird_images = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","bird" + str(x) + ".png"))) for x in range(1,4)] base_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","base.png")).convert_alpha()) gen = 0 class Bird: MAX_ROTATION = 25 IMGS = bird_images ROT_VEL = 20 ANIMATION_TIME = 5 def __init__(self, x, y): self.x = x self.y = y self.tilt = 0 self.tick_count = 0 self.vel = 0 self.height = self.y self.img_count = 0 self.img = self.IMGS[0] def jump(self): self.vel = -10 self.tick_count = 0 self.height = self.y def move(self): self.tick_count += 1 displacement = self.vel*(self.tick_count) + 0.5*(3)*(self.tick_count)**2 if displacement >= 16: displacement = (displacement/abs(displacement)) * 16 if displacement < 0: displacement -= 2 self.y = self.y + displacement if displacement < 0 or self.y < self.height + 50: if self.tilt < self.MAX_ROTATION: self.tilt = self.MAX_ROTATION else: if self.tilt > -90: self.tilt -= self.ROT_VEL def draw(self, win): self.img_count += 1 if self.img_count <= self.ANIMATION_TIME: self.img = self.IMGS[0] elif self.img_count <= self.ANIMATION_TIME*2: self.img = self.IMGS[1] elif self.img_count <= self.ANIMATION_TIME*3: self.img = self.IMGS[2] elif self.img_count <= self.ANIMATION_TIME*4: self.img = self.IMGS[1] elif self.img_count == self.ANIMATION_TIME*4 + 1: self.img = self.IMGS[0] 
self.img_count = 0 if self.tilt <= -80: self.img = self.IMGS[1] self.img_count = self.ANIMATION_TIME*2 blitRotateCenter(win, self.img, (self.x, self.y), self.tilt) def get_mask(self): return pygame.mask.from_surface(self.img) class Pipe(): GAP = 200 VEL = 15 def __init__(self, x): self.x = x self.height = 0 self.top = 0 self.bottom = 0 self.PIPE_TOP = pygame.transform.flip(pipe_img, False, True) self.PIPE_BOTTOM = pipe_img self.passed = False self.set_height() def set_height(self): self.height = random.randrange(50, 450) self.top = self.height - self.PIPE_TOP.get_height() self.bottom = self.height + self.GAP def move(self): self.x -= self.VEL def draw(self, win): win.blit(self.PIPE_TOP, (self.x, self.top)) win.blit(self.PIPE_BOTTOM, (self.x, self.bottom)) def collide(self, bird, win): bird_mask = bird.get_mask() top_mask = pygame.mask.from_surface(self.PIPE_TOP) bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM) top_offset = (self.x - bird.x, self.top - round(bird.y)) bottom_offset = (self.x - bird.x, self.bottom - round(bird.y)) b_point = bird_mask.overlap(bottom_mask, bottom_offset) t_point = bird_mask.overlap(top_mask,top_offset) if b_point or t_point: return True return False class Base: VEL = 5 WIDTH = base_img.get_width() IMG = base_img def __init__(self, y): self.y = y self.x1 = 0 self.x2 = self.WIDTH def move(self): self.x1 -= self.VEL self.x2 -= self.VEL if self.x1 + self.WIDTH < 0: self.x1 = self.x2 + self.WIDTH if self.x2 + self.WIDTH < 0: self.x2 = self.x1 + self.WIDTH def draw(self, win): win.blit(self.IMG, (self.x1, self.y)) win.blit(self.IMG, (self.x2, self.y)) def blitRotateCenter(surf, image, topleft, angle): rotated_image = pygame.transform.rotate(image, angle) new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center) surf.blit(rotated_image, new_rect.topleft) def draw_window(win, birds, pipes, base, score, gen, pipe_ind): if gen == 0: gen = 1 win.blit(bg_img, (0,0)) for pipe in pipes: pipe.draw(win) base.draw(win) 
for bird in birds: if DRAW_LINES: try: pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_TOP.get_width()/2, pipes[pipe_ind].height), 5) pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_BOTTOM.get_width()/2, pipes[pipe_ind].bottom), 5) except: pass bird.draw(win) score_label = STAT_FONT.render("Pontuação: " + str(score),1,(255,255,255)) win.blit(score_label, (WIN_WIDTH - score_label.get_width() - 15, 10)) score_label = STAT_FONT.render("Geração: " + str(gen-1),1,(255,255,255)) win.blit(score_label, (10, 10)) score_label = STAT_FONT.render("Restantes: " + str(len(birds)),1,(255,255,255)) win.blit(score_label, (10, 50)) pygame.display.update() def eval_genomes(genomes, config): global WIN, gen win = WIN gen += 1 nets = [] birds = [] ge = [] for genome_id, genome in genomes: genome.fitness = 0 net = neat.nn.FeedForwardNetwork.create(genome, config) nets.append(net) birds.append(Bird(230,350)) ge.append(genome) base = Base(FLOOR) pipes = [Pipe(700)] score = 0 clock = pygame.time.Clock() run = True while run and len(birds) > 0: clock.tick(60) for event in pygame.event.get(): if event.type == pygame.QUIT: run = False pygame.quit() quit() break pipe_ind = 0 if len(birds) > 0: if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width(): pipe_ind = 1 for x, bird in enumerate(birds): ge[x].fitness += 0.1 bird.move() output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom))) if output[0] > 0.5: bird.jump() base.move() rem = [] add_pipe = False for pipe in pipes: pipe.move() for bird in birds: if pipe.collide(bird, win): ge[birds.index(bird)].fitness -= 1 nets.pop(birds.index(bird)) ge.pop(birds.index(bird)) birds.pop(birds.index(bird)) if pipe.x + pipe.PIPE_TOP.get_width() < 0: rem.append(pipe) if not 
pipe.passed and pipe.x < bird.x: pipe.passed = True add_pipe = True if add_pipe: score += 1 for genome in ge: genome.fitness += 5 pipes.append(Pipe(WIN_WIDTH)) for r in rem: pipes.remove(r) for bird in birds: if bird.y + bird.img.get_height() - 10 >= FLOOR or bird.y < -50: nets.pop(birds.index(bird)) ge.pop(birds.index(bird)) birds.pop(birds.index(bird)) draw_window(WIN, birds, pipes, base, score, gen, pipe_ind) if score > SCORE_MAX[0]: SCORE_MAX[0] = score SCORE_MAX[1] = gen - 1 SCORE_MAX[2] = genome.fitness print(b.HELP, 'ACTUAL SCORE:', score, 'from generation:', gen, 'with fitness:', genome.fitness, b.END) print(b.OKMSG, 'MAX SCORE FOR NOW:', SCORE_MAX[0], b.END, b.ERRMSG, 'by generation:', SCORE_MAX[1], b.END, b.BLUE, 'with fitness:', SCORE_MAX[2], b.END) def run(config_file): config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_file) p = neat.Population(config) p.add_reporter(neat.StdOutReporter(True)) stats = neat.StatisticsReporter() p.add_reporter(stats) winner = p.run(eval_genomes, 50) print('\nMelhor Genoma:\n{!s}'.format(winner)) if __name__ == '__main__': local_dir = os.path.dirname(__file__) config_path = os.path.join(local_dir, 'config-neat-flappybird.txt') run(config_path)
[ "pygame.quit", "neat.StatisticsReporter", "pygame.mask.from_surface", "pygame.display.set_mode", "neat.nn.FeedForwardNetwork.create", "pygame.font.init", "pygame.display.update", "pygame.transform.flip", "neat.StdOutReporter", "random.randrange", "os.path.dirname", "pygame.time.Clock", "neat...
[((113, 131), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (129, 131), False, 'import pygame\n'), ((213, 249), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""comicsans"""', '(50)'], {}), "('comicsans', 50)\n", (232, 249), False, 'import pygame\n'), ((261, 297), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""comicsans"""', '(70)'], {}), "('comicsans', 70)\n", (280, 297), False, 'import pygame\n'), ((324, 372), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIN_WIDTH, WIN_HEIGHT)'], {}), '((WIN_WIDTH, WIN_HEIGHT))\n', (347, 372), False, 'import pygame\n'), ((373, 424), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""IA LEARNS Flappy Bird"""'], {}), "('IA LEARNS Flappy Bird')\n", (399, 424), False, 'import pygame\n'), ((4496, 4533), 'pygame.transform.rotate', 'pygame.transform.rotate', (['image', 'angle'], {}), '(image, angle)\n', (4519, 4533), False, 'import pygame\n'), ((5798, 5821), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5819, 5821), False, 'import pygame\n'), ((6239, 6258), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (6256, 6258), False, 'import pygame\n'), ((8527, 8657), 'neat.config.Config', 'neat.config.Config', (['neat.DefaultGenome', 'neat.DefaultReproduction', 'neat.DefaultSpeciesSet', 'neat.DefaultStagnation', 'config_file'], {}), '(neat.DefaultGenome, neat.DefaultReproduction, neat.\n DefaultSpeciesSet, neat.DefaultStagnation, config_file)\n', (8545, 8657), False, 'import neat\n'), ((8726, 8749), 'neat.Population', 'neat.Population', (['config'], {}), '(config)\n', (8741, 8749), False, 'import neat\n'), ((8809, 8834), 'neat.StatisticsReporter', 'neat.StatisticsReporter', ([], {}), '()\n', (8832, 8834), False, 'import neat\n'), ((8996, 9021), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9011, 9021), False, 'import os\n'), ((9040, 9093), 'os.path.join', 'os.path.join', (['local_dir', '"""config-neat-flappybird.txt"""'], {}), 
"(local_dir, 'config-neat-flappybird.txt')\n", (9052, 9093), False, 'import os\n'), ((2641, 2675), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.img'], {}), '(self.img)\n', (2665, 2675), False, 'import pygame\n'), ((2861, 2905), 'pygame.transform.flip', 'pygame.transform.flip', (['pipe_img', '(False)', '(True)'], {}), '(pipe_img, False, True)\n', (2882, 2905), False, 'import pygame\n'), ((3047, 3072), 'random.randrange', 'random.randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (3063, 3072), False, 'import random\n'), ((3453, 3492), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.PIPE_TOP'], {}), '(self.PIPE_TOP)\n', (3477, 3492), False, 'import pygame\n'), ((3515, 3557), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.PIPE_BOTTOM'], {}), '(self.PIPE_BOTTOM)\n', (3539, 3557), False, 'import pygame\n'), ((6027, 6076), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (6060, 6076), False, 'import neat\n'), ((6354, 6372), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6370, 6372), False, 'import pygame\n'), ((8770, 8795), 'neat.StdOutReporter', 'neat.StdOutReporter', (['(True)'], {}), '(True)\n', (8789, 8795), False, 'import neat\n'), ((480, 512), 'os.path.join', 'os.path.join', (['"""imgs"""', '"""pipe.png"""'], {}), "('imgs', 'pipe.png')\n", (492, 512), False, 'import os\n'), ((580, 610), 'os.path.join', 'os.path.join', (['"""imgs"""', '"""bg.png"""'], {}), "('imgs', 'bg.png')\n", (592, 610), False, 'import os\n'), ((821, 853), 'os.path.join', 'os.path.join', (['"""imgs"""', '"""base.png"""'], {}), "('imgs', 'base.png')\n", (833, 853), False, 'import os\n'), ((6460, 6473), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6471, 6473), False, 'import pygame\n')]
import os import pytest from assertpy import assert_that from ..models.granule import Granule from ..models.granule_count import GranuleCount from ..models.status import Status from ..session import _get_url, get_session, get_session_maker @pytest.mark.usefixtures("db_connection_secret") def test_that_db_correctly_gets_db_connection_details(): url = _get_url() assert_that(url.drivername).is_equal_to("postgresql") assert_that(url.host).is_equal_to("localhost") assert_that(url.username).is_equal_to(os.environ["PG_USER"]) assert_that(url.password).is_equal_to(os.environ["PG_PASSWORD"]) assert_that(url.database).is_equal_to(os.environ["PG_DB"]) @pytest.mark.usefixtures("db_connection_secret") @pytest.mark.usefixtures("db_session") def test_that_db_can_create_successful_connection_with_granule(): session_maker = get_session_maker() with get_session(session_maker) as db: granules = db.query(Granule).all() assert_that(granules).is_length(0) @pytest.mark.usefixtures("db_connection_secret") @pytest.mark.usefixtures("db_session") def test_that_db_can_create_successful_connection_with_granule_count(): session_maker = get_session_maker() with get_session(session_maker) as db: granule_counts = db.query(GranuleCount).all() assert_that(granule_counts).is_length(0) @pytest.mark.usefixtures("db_connection_secret") @pytest.mark.usefixtures("db_session") def test_that_db_can_create_successful_connection_with_status(): session_maker = get_session_maker() with get_session(session_maker) as db: statuses = db.query(Status).all() assert_that(statuses).is_length(0)
[ "assertpy.assert_that", "pytest.mark.usefixtures" ]
[((245, 292), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_connection_secret"""'], {}), "('db_connection_secret')\n", (268, 292), False, 'import pytest\n'), ((680, 727), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_connection_secret"""'], {}), "('db_connection_secret')\n", (703, 727), False, 'import pytest\n'), ((729, 766), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_session"""'], {}), "('db_session')\n", (752, 766), False, 'import pytest\n'), ((1005, 1052), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_connection_secret"""'], {}), "('db_connection_secret')\n", (1028, 1052), False, 'import pytest\n'), ((1054, 1091), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_session"""'], {}), "('db_session')\n", (1077, 1091), False, 'import pytest\n'), ((1353, 1400), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_connection_secret"""'], {}), "('db_connection_secret')\n", (1376, 1400), False, 'import pytest\n'), ((1402, 1439), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""db_session"""'], {}), "('db_session')\n", (1425, 1439), False, 'import pytest\n'), ((375, 402), 'assertpy.assert_that', 'assert_that', (['url.drivername'], {}), '(url.drivername)\n', (386, 402), False, 'from assertpy import assert_that\n'), ((433, 454), 'assertpy.assert_that', 'assert_that', (['url.host'], {}), '(url.host)\n', (444, 454), False, 'from assertpy import assert_that\n'), ((484, 509), 'assertpy.assert_that', 'assert_that', (['url.username'], {}), '(url.username)\n', (495, 509), False, 'from assertpy import assert_that\n'), ((549, 574), 'assertpy.assert_that', 'assert_that', (['url.password'], {}), '(url.password)\n', (560, 574), False, 'from assertpy import assert_that\n'), ((618, 643), 'assertpy.assert_that', 'assert_that', (['url.database'], {}), '(url.database)\n', (629, 643), False, 'from assertpy import assert_that\n'), ((967, 988), 'assertpy.assert_that', 'assert_that', 
(['granules'], {}), '(granules)\n', (978, 988), False, 'from assertpy import assert_that\n'), ((1309, 1336), 'assertpy.assert_that', 'assert_that', (['granule_counts'], {}), '(granule_counts)\n', (1320, 1336), False, 'from assertpy import assert_that\n'), ((1638, 1659), 'assertpy.assert_that', 'assert_that', (['statuses'], {}), '(statuses)\n', (1649, 1659), False, 'from assertpy import assert_that\n')]
import numpy as np import pytest from arbol import aprint from dexp.processing.utils.normalise import Normalise from dexp.utils.backends import Backend from dexp.utils.testing.testing import execute_both_backends @execute_both_backends @pytest.mark.parametrize( "dexp_nuclei_background_data", [dict(length_xy=128, dtype=np.float32)], indirect=True, ) def test_normalise(dexp_nuclei_background_data): _, _, image = dexp_nuclei_background_data image = image.astype(np.uint16) # required to convert afterwards normalise = Normalise(image, low=-0.5, high=1, in_place=False, clip=True, dtype=np.float32) image_normalised = normalise.forward(image) image_denormalised = normalise.backward(image_normalised) assert image_normalised.dtype == np.float32 assert image_denormalised.dtype == image.dtype assert image_normalised.shape == image.shape assert image_denormalised.shape == image.shape assert image_normalised.min() >= -0.5 assert image_normalised.max() <= 1 assert image_normalised.max() - image_normalised.min() >= 1.5 assert image_denormalised.min() * (1 + 1e-3) >= image.min() assert image_denormalised.max() <= (1 + 1e-3) * image.max() assert (image_denormalised.max() - image_denormalised.min()) * (1 + 1e-3) >= image.max() - image.min() xp = Backend.get_xp_module() error = xp.median(xp.abs(image - image_denormalised)).item() aprint(f"Error = {error}") assert error < 1e-6
[ "dexp.processing.utils.normalise.Normalise", "arbol.aprint", "dexp.utils.backends.Backend.get_xp_module" ]
[((548, 627), 'dexp.processing.utils.normalise.Normalise', 'Normalise', (['image'], {'low': '(-0.5)', 'high': '(1)', 'in_place': '(False)', 'clip': '(True)', 'dtype': 'np.float32'}), '(image, low=-0.5, high=1, in_place=False, clip=True, dtype=np.float32)\n', (557, 627), False, 'from dexp.processing.utils.normalise import Normalise\n'), ((1334, 1357), 'dexp.utils.backends.Backend.get_xp_module', 'Backend.get_xp_module', ([], {}), '()\n', (1355, 1357), False, 'from dexp.utils.backends import Backend\n'), ((1427, 1453), 'arbol.aprint', 'aprint', (['f"""Error = {error}"""'], {}), "(f'Error = {error}')\n", (1433, 1453), False, 'from arbol import aprint\n')]
# -*- coding: utf-8 -*- import atexit import os import signal import time from flask_restful import Api from actinia_core.testsuite import ActiniaTestCaseBase, URL_PREFIX from actinia_core.core.common.config import global_config from actinia_core.core.common.app import flask_app, flask_api from actinia_statistic_plugin.endpoints import create_endpoints from actinia_core.endpoints import create_endpoints as create_actinia_endpoints __license__ = "GPLv3" __author__ = "<NAME>" __copyright__ = "Copyright 2016-2019, <NAME>" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" redis_pid = None server_test = False custom_actinia_cfg = False create_actinia_endpoints() create_endpoints(flask_api) # If this environmental variable is set, then a real http request will be send # instead of using the flask test_client. if "ACTINIA_SERVER_TEST" in os.environ: server_test = bool(os.environ["ACTINIA_SERVER_TEST"]) # Set this variable to use a actinia config file in a docker container if "ACTINIA_CUSTOM_TEST_CFG" in os.environ: custom_actinia_cfg = str(os.environ["ACTINIA_CUSTOM_TEST_CFG"]) def setup_environment(): global redis_pid # Set the port to the test redis server global_config.REDIS_SERVER_SERVER = "localhost" global_config.REDIS_SERVER_PORT = 7000 # home = os.getenv("HOME") # GRASS # Setup the test environment global_config.GRASS_GIS_BASE="/usr/local/grass79/" global_config.GRASS_GIS_START_SCRIPT="/usr/local/bin/grass79" # global_config.GRASS_DATABASE= "/usr/local/grass_test_db" # global_config.GRASS_DATABASE = "%s/actinia/grass_test_db" % home global_config.GRASS_TMP_DATABASE = "/tmp" if server_test is False and custom_actinia_cfg is False: # Start the redis server for user and logging management redis_pid = os.spawnl(os.P_NOWAIT, "/usr/bin/redis-server", "common/redis.conf", "--port %i" % global_config.REDIS_SERVER_PORT) time.sleep(1) if server_test is False and custom_actinia_cfg is not False: global_config.read(custom_actinia_cfg) def stop_redis(): if server_test is False: 
global redis_pid # Kill th redis server if redis_pid is not None: os.kill(redis_pid, signal.SIGTERM) # Register the redis stop function atexit.register(stop_redis) # Setup the environment setup_environment() class ActiniaResourceTestCaseBase(ActiniaTestCaseBase): pass
[ "actinia_statistic_plugin.endpoints.create_endpoints", "os.kill", "actinia_core.endpoints.create_endpoints", "time.sleep", "os.spawnl", "actinia_core.core.common.config.global_config.read", "atexit.register" ]
[((650, 676), 'actinia_core.endpoints.create_endpoints', 'create_actinia_endpoints', ([], {}), '()\n', (674, 676), True, 'from actinia_core.endpoints import create_endpoints as create_actinia_endpoints\n'), ((677, 704), 'actinia_statistic_plugin.endpoints.create_endpoints', 'create_endpoints', (['flask_api'], {}), '(flask_api)\n', (693, 704), False, 'from actinia_statistic_plugin.endpoints import create_endpoints\n'), ((2355, 2382), 'atexit.register', 'atexit.register', (['stop_redis'], {}), '(stop_redis)\n', (2370, 2382), False, 'import atexit\n'), ((1822, 1942), 'os.spawnl', 'os.spawnl', (['os.P_NOWAIT', '"""/usr/bin/redis-server"""', '"""common/redis.conf"""', "('--port %i' % global_config.REDIS_SERVER_PORT)"], {}), "(os.P_NOWAIT, '/usr/bin/redis-server', 'common/redis.conf', \n '--port %i' % global_config.REDIS_SERVER_PORT)\n", (1831, 1942), False, 'import os\n'), ((2006, 2019), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2016, 2019), False, 'import time\n'), ((2094, 2132), 'actinia_core.core.common.config.global_config.read', 'global_config.read', (['custom_actinia_cfg'], {}), '(custom_actinia_cfg)\n', (2112, 2132), False, 'from actinia_core.core.common.config import global_config\n'), ((2284, 2318), 'os.kill', 'os.kill', (['redis_pid', 'signal.SIGTERM'], {}), '(redis_pid, signal.SIGTERM)\n', (2291, 2318), False, 'import os\n')]
# -*- coding: utf-8 -*- from importlib import import_module def get_annotations_compiler_flag() -> int: future = import_module("__future__") assert future is not None annotations = getattr(future, "annotations") assert annotations is not None compiler_flag = getattr(annotations, "compiler_flag") assert isinstance(compiler_flag, int) return compiler_flag
[ "importlib.import_module" ]
[((120, 147), 'importlib.import_module', 'import_module', (['"""__future__"""'], {}), "('__future__')\n", (133, 147), False, 'from importlib import import_module\n')]
#! /usr/bin/env python3 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import argparse import hashlib import json import os import subprocess import sys parser = argparse.ArgumentParser(description="Generate an addon package") parser.add_argument( "-q", "--qt_path", default=None, dest="qtpath", help="The QT binary path. If not set, we try to guess.", ) args = parser.parse_args() build_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "build.py") addons_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), "addons", ) generated_path = os.path.join(addons_path, "generated") if not os.path.isdir(generated_path): os.mkdir(generated_path) generated_path = os.path.join(generated_path, "addons") if not os.path.isdir(generated_path): os.mkdir(generated_path) addons = [] for file in os.listdir(addons_path): addon_path = os.path.join(addons_path, file, "manifest.json") if not os.path.exists(addon_path): print(f"Ignoring path {file}.") continue build_cmd = [sys.executable, build_path, addon_path, generated_path] if args.qtpath: build_cmd.append("-q") build_cmd.append(args.qtpath) subprocess.call(build_cmd) generated_addon_path = os.path.join(generated_path, file + ".rcc") if not os.path.exists(generated_addon_path): exit(f"Expected addon file {generated_addon_path}") with open(generated_addon_path,"rb") as f: sha256 = hashlib.sha256(f.read()).hexdigest(); addons.append({ 'id': file, 'sha256': sha256 }) index = { 'api_version': '0.1', 'addons': addons, } with open(os.path.join(generated_path, "manifest.json"), "w") as f: f.write(json.dumps(index, indent=2))
[ "os.path.exists", "os.listdir", "argparse.ArgumentParser", "json.dumps", "os.path.join", "os.path.realpath", "os.path.isdir", "os.mkdir", "subprocess.call" ]
[((316, 380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate an addon package"""'}), "(description='Generate an addon package')\n", (339, 380), False, 'import argparse\n'), ((785, 823), 'os.path.join', 'os.path.join', (['addons_path', '"""generated"""'], {}), "(addons_path, 'generated')\n", (797, 823), False, 'import os\n'), ((908, 946), 'os.path.join', 'os.path.join', (['generated_path', '"""addons"""'], {}), "(generated_path, 'addons')\n", (920, 946), False, 'import os\n'), ((1039, 1062), 'os.listdir', 'os.listdir', (['addons_path'], {}), '(addons_path)\n', (1049, 1062), False, 'import os\n'), ((831, 860), 'os.path.isdir', 'os.path.isdir', (['generated_path'], {}), '(generated_path)\n', (844, 860), False, 'import os\n'), ((866, 890), 'os.mkdir', 'os.mkdir', (['generated_path'], {}), '(generated_path)\n', (874, 890), False, 'import os\n'), ((954, 983), 'os.path.isdir', 'os.path.isdir', (['generated_path'], {}), '(generated_path)\n', (967, 983), False, 'import os\n'), ((989, 1013), 'os.mkdir', 'os.mkdir', (['generated_path'], {}), '(generated_path)\n', (997, 1013), False, 'import os\n'), ((1081, 1129), 'os.path.join', 'os.path.join', (['addons_path', 'file', '"""manifest.json"""'], {}), "(addons_path, file, 'manifest.json')\n", (1093, 1129), False, 'import os\n'), ((1391, 1417), 'subprocess.call', 'subprocess.call', (['build_cmd'], {}), '(build_cmd)\n', (1406, 1417), False, 'import subprocess\n'), ((1446, 1489), 'os.path.join', 'os.path.join', (['generated_path', "(file + '.rcc')"], {}), "(generated_path, file + '.rcc')\n", (1458, 1489), False, 'import os\n'), ((599, 625), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (615, 625), False, 'import os\n'), ((1141, 1167), 'os.path.exists', 'os.path.exists', (['addon_path'], {}), '(addon_path)\n', (1155, 1167), False, 'import os\n'), ((1501, 1537), 'os.path.exists', 'os.path.exists', (['generated_addon_path'], {}), '(generated_addon_path)\n', (1515, 
1537), False, 'import os\n'), ((1826, 1871), 'os.path.join', 'os.path.join', (['generated_path', '"""manifest.json"""'], {}), "(generated_path, 'manifest.json')\n", (1838, 1871), False, 'import os\n'), ((1894, 1921), 'json.dumps', 'json.dumps', (['index'], {'indent': '(2)'}), '(index, indent=2)\n', (1904, 1921), False, 'import json\n'), ((720, 746), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (736, 746), False, 'import os\n')]
import os import sys # import common package in parent directory sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common')) import mongodb_client import news_topic_modeling_service_client if __name__ == '__main__': db = mongodb_client.get_db() cursor = db['news'].find({}) count = 0 for news in cursor: count += 1 print(count) if 'class' in news: print('Populating classes...') description = news['description'] if description is None: description = news['title'] topic = news_topic_modeling_service_client.classify(description) news['class'] = topic db['news'].replace_one({'digest': news['digest']}, news, upsert=True)
[ "mongodb_client.get_db", "os.path.dirname", "news_topic_modeling_service_client.classify" ]
[((241, 264), 'mongodb_client.get_db', 'mongodb_client.get_db', ([], {}), '()\n', (262, 264), False, 'import mongodb_client\n'), ((95, 120), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'import os\n'), ((593, 649), 'news_topic_modeling_service_client.classify', 'news_topic_modeling_service_client.classify', (['description'], {}), '(description)\n', (636, 649), False, 'import news_topic_modeling_service_client\n')]