id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8185921 | <filename>pyqldb/cursor/stream_cursor.py
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from amazon.ion.simpleion import loads
from ..errors import ResultClosedError
class StreamCursor:
    """
    An iterable class representing a stream cursor on a statement's result set.

    :type page: dict
    :param page: The page containing the initial result set data dictionary of the statement's execution.
    :type session: :py:class:`pyqldb.communication.session_client.SessionClient`
    :param session: The parent session that represents the communication channel to QLDB.
    :type transaction_id: str
    :param transaction_id: The ID of this cursor's parent transaction, required to fetch pages.
    """
    def __init__(self, page, session, transaction_id):
        self._page = page
        self._session = session
        self._index = 0
        self._is_open = True
        self._transaction_id = transaction_id

    def __iter__(self):
        """
        Iterator protocol: a cursor is its own iterator.
        """
        return self

    def __next__(self):
        """
        Iterator protocol: return the next row, pulling further pages from the
        service whenever the current page is exhausted.

        :rtype: :py:class:`amazon.ion.simple_types.IonSymbol`
        :return: The Ion value in the row that the cursor is on.

        :raises StopIteration: When there are no more results.
        """
        if not self._is_open:
            raise ResultClosedError(self._session.token)

        # Current page exhausted: fetch pages until one with data appears
        # (or the token chain ends, meaning the result set is finished).
        if self._index >= len(self._page.get('Values')):
            if not self._are_there_more_results():
                raise StopIteration
            self._next_page()
            while not self._page.get('Values') and self._are_there_more_results():
                self._next_page()
            if not self._page.get('Values'):
                raise StopIteration

        current_row = self._page.get('Values')[self._index]
        ion_value = self._value_holder_to_ion_value(current_row)
        self._index += 1
        return ion_value

    def close(self):
        """
        Close this stream cursor object.
        """
        self._is_open = False

    def _are_there_more_results(self):
        """
        Check if there are more results (a next-page token is present).
        """
        return self._page.get('NextPageToken') is not None

    def _next_page(self):
        """
        Fetch the next page via this cursor's session and rewind the row index.
        """
        fetched = self._session._fetch_page(self._transaction_id, self._page.get('NextPageToken'))
        self._page = fetched.get('Page')
        self._index = 0

    @staticmethod
    def _value_holder_to_ion_value(value):
        """
        Extract the Ion binary from a value holder and parse it into an Ion value.
        """
        return loads(value.get('IonBinary'))
| StarcoderdataPython |
1878358 | <reponame>bechtoldt/domainapi
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et
from xml.etree import ElementTree as ET
#import pprint
import re
class Helper:
"""
Helper class providing useful methods
"""
@staticmethod
def convert_json2xml(doc, root):
children = ET.Element(root)
if isinstance(doc, dict):
for key, value, in doc.items():
if isinstance(value, dict):
child = Helper.convert_json2xml(doc=value, root=key)
children.append(child)
elif isinstance(value, list):
for item in value:
child = Helper.convert_json2xml(doc=item, root=key)
children.append(child)
elif isinstance(value, (str, int, float)):
child = ET.Element(key)
child.text = str(value)
children.append(child)
elif value is None:
continue
else:
raise NotImplementedError('Type of {} is not implemented yet, is it a {}?'.format(key, type(value)))
return children
@staticmethod
def prettify(element):
"""Return a pretty-printed XML string for the Element.
"""
raw_string = ET.tostring(element, 'utf-8')
text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
return text_re.sub(r'>\g<1></', raw_string.decode())
| StarcoderdataPython |
1861861 | import os
from bs4 import BeautifulSoup
import socket
import sys
import requests
import re
# Global crawl state shared by the functions below.
docIdList=[]  # article titles extracted from the seed URL list
urlList=[]  # full seed URLs read from task1E_URLs.txt
graph={'':[]}  # adjacency map: article id -> list of article ids that link to it
def generateDocIds():
    """Crawl every seed Wikipedia article and build the in-link graph.

    Reads seed URLs from ``task1E_URLs.txt``, fetches each article, scans its
    anchors and, for links pointing back to another seed article, records the
    current article as an in-link of the target in the module-level ``graph``.

    :return: the populated ``graph`` dict (article id -> list of linking ids)
    """
    # Load the seed list; the last path component of each URL is the doc id.
    with open("task1E_URLs.txt","r") as f:
        for line in f:
            urlList.append(line.replace('\n',''))
            lineArray=line.split('/')
            docIdList.append(lineArray[len(lineArray)-1].replace('\n',''))
    #print urlList
    #print docIdList
    for line in docIdList:
        url='https://en.wikipedia.org/wiki/' + line
        #print url
        #source_txt=(urllib.request.urlopen(urlObj.url)).read()
        # Fetch and parse the article HTML.
        source_txt = requests.get(url)
        plain_txt = source_txt.text.encode('utf-8')
        soup = BeautifulSoup(plain_txt, "html.parser")
        for txt in soup.findAll('a'): # finding all the elements on the page
            var = txt.get('href')
            if var is not None:
                # we do not need images and colon and main page
                if checkUrl(var) and '.jpg' not in var and 'JPG' not in var and '.jpeg' not in var:
                    # NOTE(review): `is 0` relies on CPython small-int interning;
                    # `== 0` would be the robust comparison — confirm and fix.
                    if var.find('/wiki/') is 0:
                        print(var);
                        a = 'https://en.wikipedia.org' + var
                        #print a
                        # Only edges between seed articles are recorded.
                        if a in urlList:
                            docArr=a.split('/')
                            docId=docArr[len(docArr)-1].replace('\n','')
                            #print docId
                            if docId not in graph:
                                listD=[]
                                listD.append(line)
                                graph[docId]=listD
                                #print graph
                            else:
                                # Append the current article once per target.
                                listD=graph[docId]
                                if line not in listD:
                                    listD.append(line)
                                    graph[docId]=listD
    return graph
## Helper method to check if a given URL matches our conditions
## namely correct Wikipedia link, English link, non-administrative link,
## not a link of the same page and not the Wikipedia main page
def checkUrl(url):
    """Return True if *url* is a crawlable English Wikipedia article link.

    A link qualifies when it matches the wiki URL pattern and is not the
    main page, an administrative (colon) link, or a same-page (#) anchor.
    """
    link = str(url)
    # Return the boolean expression directly instead of the original
    # `if cond: return True else: return False` (and drop the stray
    # C-style semicolons).
    return (isWikiPattern(link)
            and not isMainPage(link)
            and not isAdministrativeLink(link)
            and not isSamePageLink(link))
## Returns true iff the given link is the Wikipedia main page
def isMainPage(link):
if("/wiki/Main_Page" in repr(link)):
return True;
else:
return False;
## Returns true iff the given link is an administrative link (:)
def isAdministrativeLink(link):
link = re.sub('https:', '', link);
if(":" in link):
return True;
else:
return False;
## Returns true iff the given link is a link of the same page (#)
def isSamePageLink(link):
if("#" in link):
return True
else:
return False
## Returns true iff the given URL has the correct wiki pattern
def isWikiPattern(url):
wikiPattern = "/wiki/"
wikipediaPattern = "//en.wikipedia.org"
fullPattern = wikipediaPattern + wikiPattern
if(url and wikiPattern in url[0:6] or fullPattern in url[0:50]):
return True
else:
return False
def main():
    """Crawl the seed articles and write the in-link graph to G1_2.txt.

    Each output line is: <doc id> <linking id> <linking id> ...
    """
    docGraph=generateDocIds()
    # `with` guarantees the output file is flushed and closed; the original
    # opened the file and never closed it (resource leak, possible data loss).
    with open("G1_2.txt","w") as fileWrite:
        for key in docGraph.keys():
            inlinks=docGraph.get(key)
            fileWrite.write(key+' ')
            for docs in inlinks:
                fileWrite.write(docs+' ')
            fileWrite.write('\n')
main()
| StarcoderdataPython |
12848628 | <filename>plugins/pattern_navigate.py
import sublime
import sublime_plugin
# Related Reading:
# https://forum.sublimetext.com/t/find-for-a-macro/57387/
#
# This example command allows you to jump the cursor to the next or previous
# location of a given pattern of text, which can be either a regex or not and
# case sensitive or not based on command arguments.
#
# A use case for this is implementing a specific Find operation in a macro in
# a repeatable way.
class PatternNavigateCommand(sublime_plugin.TextCommand):
    """
    Jump the selection in the file to the next or previous location of the
    given textual pattern based on the current cursor location. The search
    direction is controlled by the forward argument, and will wrap around the
    ends of the buffer.
    """
    def run(self, edit, pattern, literal=True, ignorecase=False, forward=True):
        # Translate the boolean arguments into Sublime search flags.
        flags = 0
        if literal:
            flags |= sublime.LITERAL
        if ignorecase:
            flags |= sublime.IGNORECASE

        # Collect every location where the pattern occurs; nothing to do if
        # there are no matches at all.
        matches = self.view.find_all(pattern, flags)
        if not matches:
            return

        # The search starts from the first selection. If no match lies in the
        # requested direction we will wrap around to the far end of the list.
        origin = self.view.sel()[0].b
        wrap_target = matches[0] if forward else matches[-1]

        # Remove all selections.
        self.view.sel().clear()

        # Walk the matches in search order and jump to the first one that is
        # strictly beyond the starting position.
        ordered = matches if forward else reversed(matches)
        for match in ordered:
            is_beyond = (origin < match.a) if forward else (origin > match.a)
            if is_beyond:
                return self.jump(match.a)

        # No match in the search direction, so wrap around.
        self.jump(wrap_target.a)

    def jump(self, point):
        # Select the given position and scroll it into view.
        self.view.sel().add(sublime.Region(point))
        self.view.show(point, True)
| StarcoderdataPython |
6548281 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.scylla import ScyllaCheck
from .common import INSTANCE_DEFAULT_METRICS
@pytest.mark.e2e
def test_check_ok(dd_agent_check):
    """End-to-end check: every default Scylla metric is reported, nothing
    unexpected is emitted, and the Prometheus health service check is OK."""
    aggregator = dd_agent_check(rate=True)
    # Each expected metric must have been collected at least once.
    for metric in INSTANCE_DEFAULT_METRICS:
        aggregator.assert_metric(metric)
    # Fail if the check produced any metric not in the expected list.
    aggregator.assert_all_metrics_covered()
    aggregator.assert_service_check('scylla.prometheus.health', ScyllaCheck.OK)
| StarcoderdataPython |
329981 | <reponame>xiaqi516/video-content-description-VCD
"""
VCD (Video Content Description) library v4.3.1
Project website: http://vcd.vicomtech.org
Copyright (C) 2021, Vicomtech (http://www.vicomtech.es/),
(Spain) all rights reserved.
VCD is a Python library to create and manage VCD content version 4.3.1.
VCD is distributed under MIT License. See LICENSE.
"""
# NOTE: This test is left out of v4.3.1
# TODO: Update proto to v4.3.1
import unittest
import vcd.core as core
import vcd.schema as schema
import vcd.serializer as serializer
# Version tag used in the on-disk test fixture file names, e.g. "vcd431".
vcd_version_name = "vcd" + schema.vcd_schema_version.replace(".", "")
class TestBasic(unittest.TestCase):
    """Round-trip test: JSON -> protobuf binary -> JSON must preserve content."""
    def test_json_proto(self):
        # Input JSON fixture, intermediate protobuf binary, and the JSON
        # rebuilt from that binary.
        vcd_json_file_name = './etc/' + vcd_version_name + '_test_create_search_simple_nopretty.json'
        vcd_proto_file_name = './etc/' + vcd_version_name + '_test_create_search_simple_nopretty_proto_from_json.txt'
        vcd_json_file_name_rebuilt = './etc/' + vcd_version_name + '_test_create_search_simple_nopretty_from_proto.json'
        # Convert to protobuf binary and back to JSON on disk.
        serializer.json2proto_bin(vcd_json_file_name, vcd_proto_file_name)
        serializer.proto_bin2json(vcd_proto_file_name, vcd_json_file_name_rebuilt)
        # Both files must load into VCD objects with identical serializations.
        vcd_src = core.VCD(vcd_json_file_name)
        vcd_dst = core.VCD(vcd_json_file_name_rebuilt)
        self.assertEqual(vcd_src.stringify(False), vcd_dst.stringify(False))
if __name__ == '__main__':  # This changes the command-line entry point to call unittest.main()
    print("Running test_serializer.py...")
    unittest.main()
| StarcoderdataPython |
1703129 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""A pipeline for crawling BIDS exported files from a LORIS database"""
# Import necessary nodes
from ..nodes.crawl_url import crawl_url
from datalad.utils import updated
from ..nodes.annex import Annexificator
from datalad_crawler.consts import DATALAD_SPECIAL_REMOTE
from os.path import basename, join
import json
# Possibly instantiate a logger if you would like to log
# during pipeline creation
from logging import getLogger
lgr = getLogger("datalad.crawler.pipelines.kaggle")
class LorisAPIBIDSExtractor(object):
    """Pipeline node that turns a LORIS API BIDS listing into crawlable URLs.

    The instance is called with the JSON response of the LORIS BIDS endpoint
    and yields one record per downloadable file, each with an absolute ``url``
    and the BIDS-relative ``path`` where it should be stored. Image metadata
    is accumulated and written to git-annex in :meth:`finalize`.
    """
    def __init__(self, apibase=None, annex=None):
        # Base URL that the relative links in the API response are joined to.
        self.apibase = apibase
        # filename -> raw API file dict, applied as annex metadata at the end.
        self.meta = {}
        self.repo = annex.repo

    def __call__(self, data):
        bids_root_dir = 'BIDS_dataset'
        jsdata = json.loads(data["response"])
        # Dataset-level files live at the BIDS root.
        for key in ['DatasetDescription', 'README', 'BidsValidatorConfig']:
            if key in jsdata.keys():
                yield updated(data, {
                    'url' : self.apibase + jsdata[key]['Link'],
                    'path': bids_root_dir
                })
        # participants.tsv / participants.json at the BIDS root.
        if 'Participants' in jsdata.keys():
            yield updated(data, {
                'url' : self.apibase + jsdata['Participants']['TsvLink'],
                'path': bids_root_dir
            })
            yield updated(data, {
                'url' : self.apibase + jsdata['Participants']['JsonLink'],
                'path': bids_root_dir
            })
        # Per-session scans.tsv / scans.json under sub-*/ses-*/.
        if 'SessionFiles' in jsdata.keys():
            for file_dict in jsdata['SessionFiles']:
                candid = 'sub-' + file_dict['Candidate']
                visit = 'ses-' + file_dict['Visit']
                yield updated(data, {
                    'url' : self.apibase + file_dict['TsvLink'],
                    'path': join(bids_root_dir, candid, visit)
                })
                yield updated(data, {
                    'url' : self.apibase + file_dict['JsonLink'],
                    'path': join(bids_root_dir, candid, visit)
                })
        # NIfTI images plus their sidecar files (JSON, bval, bvec, events).
        if 'Images' in jsdata.keys():
            for file_dict in jsdata["Images"]:
                candid = 'sub-' + file_dict["Candidate"]
                visit = 'ses-' + file_dict["Visit"]
                subfolder = file_dict['Subfolder']
                filename = basename(file_dict["NiftiLink"])
                # Remember the full API record so finalize() can attach it
                # as git-annex metadata for the image file.
                self.meta[filename] = file_dict
                yield updated(data, {
                    "url" : self.apibase + file_dict["NiftiLink"],
                    "path": join(bids_root_dir, candid, visit, subfolder)
                })
                for associated_file in ['JsonLink', 'BvalLink', 'BvecLink', 'EventLink']:
                    if associated_file in file_dict:
                        yield updated(data, {
                            "url" : self.apibase + file_dict[associated_file],
                            "path": join(bids_root_dir, candid, visit, subfolder)
                        })
        return

    def finalize(self):
        """Return a node that writes the collected image metadata to annex."""
        def _finalize(data):
            for filename in self.meta:
                # set_metadata returns a generator; iterate it to actually
                # apply the metadata to the annexed file.
                metadata_setter = self.repo.set_metadata(
                    filename,
                    reset=self.meta[filename],
                )
                for meta in metadata_setter:
                    lgr.info("Appending metadata to %s", filename)
            yield data
        return _finalize
def pipeline(url=None, apibase=None):
    """Pipeline to crawl/annex a LORIS database via the LORIS API.

    It will crawl every file matching the format of the $API/project/images/
    endpoint as documented in the LORIS API. Requires a LORIS version
    which has API v0.0.3-dev (or higher).

    :param url: LORIS API endpoint returning the BIDS file listing
    :param apibase: base URL that relative links in the listing are joined to
    """
    if apibase is None:
        raise RuntimeError("Must set apibase that links are relative to.")
    lgr.info("Creating a pipeline to crawl data files from %s", url)
    # Annexificator stores crawled files in git-annex; the largefiles rule
    # keeps small text/config files in git proper.
    annex = Annexificator(
        create=False,
        statusdb='json',
        skip_problematic=True,
        special_remotes=[DATALAD_SPECIAL_REMOTE],
        options=[
            "-c",
            "annex.largefiles="
            "exclude=README.md and exclude=DATS.json and exclude=logo.png"
            " and exclude=.datalad/providers/loris.cfg"
            " and exclude=.datalad/crawl/crawl.cfg"
            " and exclude=*scans.json"
            " and exclude=*bval"
            " and exclude=BIDS_dataset/dataset_description.json"
            " and exclude=BIDS_dataset/participants.json"
        ]
    )
    lorisapi = LorisAPIBIDSExtractor(apibase, annex)
    return [
        # Get the list of images
        [
            crawl_url(url),
            lorisapi,
            annex,
        ],
        annex.finalize(),
        # Attach the collected per-image metadata after annexing.
        lorisapi.finalize(),
    ]
| StarcoderdataPython |
332472 | import numpy as np
from numpy import einsum
class Linear:
    """Class for generic linear transformation of the Wigner distribution"""
    def __init__(self, matrix, name):
        """Initialize the element

        Args:
            matrix : numpy array
                The matrix that represents this generic element
            name : str
                The name of the element
        """
        # isinstance is the idiomatic type check (and accepts str subclasses,
        # which `type(name) == str` wrongly rejected).
        assert isinstance(name, str), "Linear element name arg not a str"
        self._elem_name = name
        self.matrix = matrix
        self._elem_type = 'Matrix'

    @property
    def name(self):
        """Get the name of the element"""
        return self._elem_name

    @property
    def elem_type(self):
        """Get the type of the element (always 'Matrix' for this class)"""
        return self._elem_type

    def propagate(self, wd):
        """Propagate a Wigner distribution through this element

        Args:
        -----
        wd : WignerDistribution class
            The Wigner distribution to be propagated
        """
        # Apply the matrix to every coordinate vector on the grid:
        # (i,j) x (a,b,j) -> (a,b,i), i.e. a per-grid-point matrix product.
        new_coords = einsum('ij, abj -> abi', self.matrix, wd.coord_grid)
        wd.update_weights(new_coords)
| StarcoderdataPython |
12825830 | <reponame>MicroMedIAn/MicroMind
#!/usr/bin/env python
"""Provides Generic Classes to make an image analysis.
"""
from abc import ABC, abstractmethod
import pandas as pd
class InputData(ABC):
    """Abstract holder for analysis input; subclasses decide how to read it."""

    def __init__(self, data):
        # The wrapped payload, exposed to subclasses via self._content.
        self._content = data

    @abstractmethod
    def read(self):
        """Produce the stored content; must be implemented by subclasses."""
        pass
class Cohort(InputData):
    """A cohort backed by a dataframe with ``path``, ``id`` and ``todo``
    columns; :meth:`read` yields the rows that are still pending.
    """
    def __init__(self, dataframe, workdir=None):
        super().__init__(dataframe)
        # Optional base directory (path-like) that relative paths resolve to.
        self.workdir = workdir

    def read(self):
        """Yield (name, filepath) for every row with ``todo == 1`` and a
        non-zero ``path``; paths are joined to ``workdir`` when one is set."""
        for _, row in self._content.iterrows():
            filepath = row.path
            name = row.id
            if row.todo == 1 and filepath != 0:
                if self.workdir:
                    filepath = str(self.workdir / filepath)
                # Removed leftover debug output (`print(type(filepath))`)
                # that polluted stdout on every workdir-resolved row.
                yield (name, filepath)
class AnalysisCV(object):
    '''
    Runs an image-analysis procedure over every item of an input collection,
    collecting one DataFrame of results per item and writing it to CSV.
    '''
    def __init__(self, procedure):
        # Object exposing run(filepath, name) -> list of result dicts.
        self.procedure = procedure

    def run(self, input_data):
        """Process every (name, filepath) pair from input_data.read() and
        return a dict mapping each name to its results DataFrame."""
        print('running analysis !!')
        all_results = {}
        for (name, filepath) in input_data.read():
            rows = self.procedure.run(filepath, name)
            frame = pd.DataFrame(rows, columns=rows[0].keys())
            all_results[name] = frame
            # Persist each item's results next to the working directory.
            frame.to_csv(name + '.csv')
        return all_results
| StarcoderdataPython |
3505567 | <filename>lsh/build_table.py<gh_stars>0
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import io
import random
import pickle
import sys
import os
import lsh
def main():
    """Build an LSH table file from a collection of input files.

    Writes (via pickle, protocol 2): the xor masks, the shingle length, then
    one (minhash, filename) pair per input file.
    """
    parser = argparse.ArgumentParser(
        description="Generates a locality sensitive hashing table "
                    "from a collection of files.")
    parser.add_argument('-s', '--size', type=int, default=256,
        help='length of hash to generate (default: %(default)d)')
    parser.add_argument('-l', '--length', type=int, default=8,
        help='length of substring to use for hashing (default: %(default)d)')
    parser.add_argument('-r', '--random', type=int, default=None,
        help='use this specific random seed')
    parser.add_argument('outfile',
        help='file in which to generate table')
    parser.add_argument('infile', nargs='+',
        help='file(s) to index in table')
    args = parser.parse_args()

    # Refuse to overwrite an existing table.
    if os.path.isfile(args.outfile):
        parser.error('{0} already exists! Exiting.'.format(args.outfile))
    if args.random:
        random.seed(args.random)

    # One random xor mask per hash slot.
    xors = [random.randint(0, lsh.MAX_HASH_CODE)
            for _ in range(args.size)]

    # `with` guarantees the table file is closed even if hashing raises;
    # the original held the handle open across the loop and leaked it on
    # error. Input files likewise use `with`.
    with io.open(args.outfile, 'wb') as out:
        pickler = pickle.Pickler(out, 2) # Py2 compatibility
        pickler.dump(xors)
        pickler.dump(args.length)
        for fname in args.infile:
            with io.open(fname, 'rb') as fp:
                data = fp.read()
            minhash = lsh.minhash(data, args.length, xors)
            pickler.dump(minhash)
            pickler.dump(fname)
            print(fname, file=sys.stderr)

if __name__=='__main__':
    main()
| StarcoderdataPython |
89061 | <reponame>nahumj/CSE-431<gh_stars>0
import collections
import pprint
import copy
class Node:
    """Graph node identified by ``id_``; ``data`` carries an optional payload.

    Equality and hashing use only ``id_`` so nodes with different payloads
    still compare (and hash) equal — the graph algorithms rely on this to
    look nodes up by id via ``Node(id)``.
    """
    def __init__(self, id_, data=None):
        self.id_ = id_
        self.data = data

    def __eq__(self, other):
        if isinstance(other, Node):
            return self.id_ == other.id_
        # Returning NotImplemented (not False) lets Python try the other
        # operand's reflected comparison before concluding inequality.
        return NotImplemented

    def __hash__(self):
        return hash(self.id_)

    def __repr__(self):
        return f"Node(id_ = {self.id_}, data = {self.data})"
class Graph:
    """Directed graph: ``nodes`` is a list of Node objects, ``edges`` maps a
    node id to a list of (end_node_id, edge_data) tuples. An undirected graph
    is modeled by adding each edge in both directions."""
    def __init__(self):
        self.nodes = []
        self.edges = collections.defaultdict(list)

    def add_node(self, node):
        """Add a Node; duplicates are rejected (asserts in debug builds)."""
        assert node not in self.nodes
        assert isinstance(node, Node)
        self.nodes.append(node)

    def add_edge(self, start_node_id, end_node_id, data=None):
        """Add a directed edge with optional payload ``data``."""
        assert end_node_id not in self.edges[start_node_id]
        self.edges[start_node_id].append((end_node_id, data))

    def __repr__(self):
        return f"Graph(nodes={pprint.pformat(self.nodes)}, edges={pprint.pformat(self.edges)})"

    def breadth_first_traversal(self, start_node_id):
        """Return node ids reachable from ``start_node_id`` in BFS order."""
        discovered = collections.deque()  # FIFO frontier
        fully_explored = []
        done_with = set()  # ids ever enqueued, prevents re-visiting
        discovered.append(start_node_id)
        done_with.add(start_node_id)
        while discovered:
            node_id = discovered.popleft()
            for child_node_id, _ in self.edges[node_id]:
                if child_node_id not in done_with:
                    discovered.append(child_node_id)
                    done_with.add(child_node_id)
            fully_explored.append(node_id)
        return fully_explored

    def depth_first_traversal(self, start_node_id):
        """Return reachable node ids in DFS *post-order* (finish order)."""
        discovered = []  # explicit stack of the current DFS path
        fully_explored = []
        done_with = set()
        discovered.append(start_node_id)
        done_with.add(start_node_id)
        while discovered:
            node_id = discovered[-1]
            for child_node_id, _ in self.edges[node_id]:
                if child_node_id not in done_with:
                    discovered.append(child_node_id)
                    done_with.add(child_node_id)
                    break
            else:
                # No unvisited child: the node is finished, pop it.
                pop_id = discovered.pop()
                assert pop_id == node_id
                fully_explored.append(node_id)
        return fully_explored

    def has_edge(self, start_id, end_id):
        """True iff a directed edge start_id -> end_id exists."""
        for existing_end_id, _ in self.edges[start_id]:
            if end_id == existing_end_id:
                return True
        return False

    def depth_first_search_tree(self, start_node_id):
        """Build a DFS tree rooted at ``start_node_id``. Tree edges carry no
        data; non-tree edges to ancestors are tagged {"back-edge": True}.
        Each tree node's ``data`` is then filled with depth/leaf/children/
        back-edge bookkeeping used by articulation_nodes_fast."""
        def decorate_dfs_tree(dfs_tree, node_id, depth=0):
            # Partition outgoing edges into tree children and back-edges.
            children = [child_ids for child_ids, data in dfs_tree.edges[node_id] if data is None or "back-edge" not in data]
            back_edges = [child_ids for child_ids, data in dfs_tree.edges[node_id] if data is not None and data["back-edge"]]
            is_leaf = not children
            # save to data
            nodes_index = dfs_tree.nodes.index(Node(node_id))
            assert dfs_tree.nodes[nodes_index].id_ == node_id
            dfs_tree.nodes[nodes_index].data = {"depth": depth, "is_leaf": is_leaf, "children": children, "back-edges": back_edges}
            for child_id in children:
                decorate_dfs_tree(dfs_tree, child_id, depth + 1)

        dfs_tree = Graph()
        dfs_tree.add_node(Node(start_node_id))
        discovered = []
        fully_explored = []
        done_with = set()
        discovered.append(start_node_id)
        done_with.add(start_node_id)
        while discovered:
            node_id = discovered[-1]
            for child_node_id, _ in self.edges[node_id]:
                if child_node_id not in done_with:
                    discovered.append(child_node_id)
                    done_with.add(child_node_id)
                    dfs_tree.add_node(Node(child_node_id))
                    dfs_tree.add_edge(node_id, child_node_id)
                    break
            else:
                # NOTE(review): `child_node_id` here is whatever the for-loop
                # left bound — only the *last* neighbor is considered for a
                # back-edge, and an isolated start node would leave it unbound.
                if child_node_id in discovered and not dfs_tree.has_edge(node_id, child_node_id) and not dfs_tree.has_edge(child_node_id, node_id):
                    dfs_tree.add_edge(node_id, child_node_id, data={"back-edge": True})
                else:
                    pop_id = discovered.pop()
                    assert pop_id == node_id
                    fully_explored.append(node_id)
        decorate_dfs_tree(dfs_tree, start_node_id)
        return dfs_tree

    def is_connected(self):
        """True iff every node is reachable from the first node (empty graph
        counts as connected)."""
        if not self.nodes:
            return True
        traversal_order = self.breadth_first_traversal(start_node_id=self.nodes[0].id_)
        return len(traversal_order) == len(self.nodes)

    def remove_node(self, node):
        """Remove ``node`` and every edge touching it."""
        # remove edges with node as start
        del self.edges[node.id_]
        # remove edges with node as end
        for start_id, end_list in self.edges.items():
            end_list = [(end_id, edge_data) for end_id, edge_data in end_list if end_id != node.id_]
            self.edges[start_id] = end_list
        self.nodes.remove(node)

    def articulation_nodes_slow(self):
        """
        For each vertex
            Remove vertex from graph
            If still connected (do BFS or DFS to see if connected)
                vertex is an articulation
        """
        articulation_nodes = []
        for node in self.nodes:
            # Work on a deep copy so the original graph is untouched.
            graph_without_node = copy.deepcopy(self)
            graph_without_node.remove_node(node)
            if not graph_without_node.is_connected():
                articulation_nodes.append(node)
        return articulation_nodes

    def articulation_nodes_fast(self):
        """DFS-tree based articulation detection (work in progress — see the
        TODO below; direct back-edges only, so results may differ from
        articulation_nodes_slow on some graphs)."""
        dfs_tree = self.depth_first_search_tree(self.nodes[0].id_)
        print(dfs_tree)
        articulation_nodes = []
        for node in dfs_tree.nodes:
            if node.data["depth"] == 0:
                # root
                # The root is an articulation iff it has 2+ DFS children.
                if len(node.data["children"]) > 1:
                    articulation_nodes.append(node)
                continue
            if node.data["is_leaf"]:
                continue
            # hard case
            # An internal node is an articulation unless every child can
            # reach above it via a back-edge.
            for child_id in node.data["children"]:
                child_node_index = dfs_tree.nodes.index(Node(child_id))
                child_node = dfs_tree.nodes[child_node_index]
                is_safe_child = False
                # TODO: Need to check all children in subtree, not just direct children
                for back_edge in child_node.data["back-edges"]:
                    back_edge_index = dfs_tree.nodes.index(Node(back_edge))
                    back_edge_node = dfs_tree.nodes[back_edge_index]
                    if back_edge_node.data["depth"] < node.data["depth"]:
                        is_safe_child = True
                if not is_safe_child:
                    articulation_nodes.append(node)
                    break
        return articulation_nodes
def main():
    """Build the sample undirected graph and print its articulation nodes."""
    letter_nodes = "ABCDEFGHI"
    list_of_tuple_edges = [
        ("A", "B"), ("A", "C"), ("A", "F"),
        ("B", "D"), ("B", "E"), ("B", "I"), ("B", "G"),
        ("D", "F"), ("D", "G"),
        ("E", "H"), ("E", "I"),
    ]
    graph = Graph()
    for letter_id in letter_nodes:
        graph.add_node(Node(letter_id))
    # Insert each edge in both directions to model an undirected graph.
    for start_node, end_node in list_of_tuple_edges:
        graph.add_edge(start_node, end_node)
        graph.add_edge(end_node, start_node)
    pprint.pprint(graph.articulation_nodes_fast())


if __name__ == "__main__":
    main()
9709850 | <filename>mtml/toy/_echo_args.py<gh_stars>1-10
__doc__ = "Provides the ``echo_args`` function used in ``mtml.toy`` unit tests."
def echo_args(*args, **kwargs):
    """Returns a dict containing all positional and keyword arguments.

    The dict has keys ``"args"`` and ``"kwargs"``, corresponding to positional
    and keyword arguments respectively.

    :param *args: Positional arguments
    :param **kwargs: Keyword arguments
    :rtype: dict
    """
    return dict(args=args, kwargs=kwargs)
# Demo: run the three PyATE term-extraction algorithms over a sample abstract.
import spacy
# NOTE(review): the loaded model is discarded; presumably this just verifies
# the Portuguese model is installed before PyATE uses it — confirm intent.
spacy.load('pt_core_news_sm')
from pyate import combo_basic, term_extractor, cvalues
# source: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1994795/
string = """Central to the development of cancer are genetic changes that endow these “cancer cells” with many of the
hallmarks of cancer, such as self-sufficient growth and resistance to anti-growth and pro-death signals. However, while the
genetic changes that occur within cancer cells themselves, such as activated oncogenes or dysfunctional tumor suppressors,
are responsible for many aspects of cancer development, they are not sufficient. Tumor promotion and progression are
dependent on ancillary processes provided by cells of the tumor environment but that are not necessarily cancerous
themselves. Inflammation has long been associated with the development of cancer. This review will discuss the reflexive
relationship between cancer and inflammation with particular focus on how considering the role of inflammation in physiologic
processes such as the maintenance of tissue homeostasis and repair may provide a logical framework for understanding the
connection between the inflammatory response and cancer."""
verbose=False
# Each extractor returns a pandas Series of candidate terms scored by the
# respective algorithm; print them sorted from highest to lowest score.
print('Combo Basic:')
print(combo_basic(string, verbose=verbose).sort_values(ascending=False))
print('')
print('Term Extractor:')
print(term_extractor(string, verbose=verbose).sort_values(ascending=False))
print('')
print('C-Values:')
print(cvalues(string, verbose=verbose).sort_values(ascending=False))
117872 | #!/usr/bin/env python
import sys, os
import argparse
import numpy as np
#import atomsinmolecule as mol
import topology as topo
import math
import pandas as pd
class topologyDiff(object):
def __init__(self, molecule1, molecule2, covRadFactor=1.3):
errors = {}
requirements_for_comparison(molecule1, molecule2)
self.molecule1 = molecule1
self.molecule2 = molecule2
self.topology1 = topo.topology(molecule1, covRadFactor)
self.topology2 = topo.topology(molecule2, covRadFactor)
self.orderedBonds1 = self.topology1.order_convalentBondDistances()
self.orderedBonds2 = self.topology2.order_convalentBondDistances()
#print "\n".join([str(elem) for elem in self.orderedBonds2])
self.orderedAngles1 = self.topology1.order_angles()
self.orderedAngles2 = self.topology2.order_angles()
self.orderedDihedral1 = self.topology1.order_dihedralAngles()
self.orderedDihedral2 = self.topology2.order_dihedralAngles()
self.error_bonds = self.compare_bonds(percentLargest = 1.5)
self.error_angles = self.compare_angles()
self.error_dihedrals = self.compare_dihedralAngles()
# print "error_bonds", self.error_bonds
# print "error_angles", self.error_angles
# print "error_dihedrals", self.error_dihedrals
def compare_bonds(self, percentLargest = -1):
## Keep all data toghether and filter/sort on it
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_IDs = "uniquePairID"
nameCol_dist = "distance [A]"
nameCol_dist1 = "Mol1 dist. [A]"
nameCol_dist2 = "Mol2 dist. [A]"
nameCol_errors = "Dist.error [A]"
nameCol_absError = "absError [A]"
# same nb. of bonds?
if len(self.orderedBonds1) != len(self.orderedBonds2):
msg = "Not as many covalents bonds detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## error in distance (Angstrom) for each bond
## checking that the unique ID is the same, if not as many bonds, exit with an error
id1 = np.array(self.orderedBonds1[1:])[:,0]
id2 = np.array(self.orderedBonds2[1:])[:,0]
diffIDs = np.sum(np.absolute(np.subtract(id1, id2)))
if diffIDs > 0:
msg = "As many covalents bonds detected, but not between the same atoms comparing structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedBonds1[1:], columns=self.orderedBonds1[0])
df2 = pd.DataFrame(self.orderedBonds2[1:], columns=self.orderedBonds2[0])
## convert string to float/int
for header in [nameCol_dist]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
for header in [nameCol_IDs, nameCol_i, nameCol_j]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
df1 = df1.rename(columns={nameCol_dist:nameCol_dist1})
df2 = df2.rename(columns={nameCol_dist:nameCol_dist2})
df = df1
df[nameCol_dist2] = df2[nameCol_dist2]
df[nameCol_errors] = df[nameCol_dist1] - df[nameCol_dist2]
###df = df.sort([nameCol_errors, nameCol_IDs], ascending=[False,True])
df[nameCol_absError] = df[nameCol_errors].abs()
df = df.sort([nameCol_absError], ascending=[False])
# print df
## STATISTICS
return get_statistics(df, nameCol_errors, unit="angstrom")
def compare_angles(self):
## Keep all data toghether and filter/sort on it
nameCol_IDs = "uniqueID"
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_k = "index_k"
nameCol_anglDeg = 'Angle IJK [deg]'
nameCol_anglDeg1 = 'Angle1 IJK [deg]'
nameCol_anglDeg2 = 'Angle2 IJK [deg]'
nameCol_errors = "Angle error [deg]"
nameCol_absError = "absError [deg]"
nameCol_relError = "relError [deg]"
# same nb. of angles?
if len(self.orderedAngles1) != len(self.orderedAngles2):
msg = "Not as many covalents angles detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedAngles1[1:], columns=self.orderedAngles1[0])
df2 = pd.DataFrame(self.orderedAngles2[1:], columns=self.orderedAngles2[0])
## convert string to float/int
for header in [nameCol_IDs, nameCol_i, nameCol_j, nameCol_k]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
for header in [nameCol_anglDeg]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
df1 = df1.rename(columns={nameCol_anglDeg:nameCol_anglDeg1})
df2 = df2.rename(columns={nameCol_anglDeg:nameCol_anglDeg2})
df = df1
df[nameCol_anglDeg2] = df2[nameCol_anglDeg2]
df[nameCol_errors] = df[nameCol_anglDeg1] - df[nameCol_anglDeg2]
## checking that the unique ID is the same, if not as many angles, exit with an error
diffIDs = pd.DataFrame(df1[nameCol_IDs].values - df2[nameCol_IDs].values).abs().sum()
if diffIDs.values[0] > 0:
msg = "As many covalents angles detected, but not between the same atoms comparing structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
###df = df.sort([nameCol_errors, nameCol_IDs], ascending=[False,True])
df[nameCol_absError] = df[nameCol_errors].abs()
df[nameCol_relError] = df[nameCol_errors].map( lambda x: x if abs(x) < 180. else np.sign(x)*(abs(x)-360.))
df = df.sort([nameCol_relError, nameCol_IDs], ascending=[False,True])
#print pd.DataFrame(d8.values-d7.values)
## STATISTICS
return get_statistics(df, nameCol_relError, unit="degrees")
def compare_dihedralAngles(self):
## Keep all data toghether and filter/sort on it
nameCol_IDs = "uniqueID"
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_k = "index_k"
nameCol_l = "index_l"
nameCol_dihedDeg = "Dihedral IJ-KL [deg]"
nameCol_dihedDeg1 = "Dihedral1 IJ-KL [deg]"
nameCol_dihedDeg2 = "Dihedral2 IJ-KL [deg]"
nameCol_errors = "Dihedral angle error [deg]"
nameCol_absError = "absError [deg]"
nameCol_relError = "relError [deg]"
# same nb. of dihedral angles?
if len(self.orderedDihedral1) != len(self.orderedDihedral2):
msg = "Not as many covalents dihedral angles detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedDihedral1[1:], columns=self.orderedDihedral1[0])
df2 = pd.DataFrame(self.orderedDihedral2[1:], columns=self.orderedDihedral2[0])
## convert string to float/int
for header in [nameCol_IDs, nameCol_i, nameCol_j, nameCol_k, nameCol_l]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
for header in [nameCol_dihedDeg]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
df1 = df1.rename(columns={nameCol_dihedDeg:nameCol_dihedDeg1})
df2 = df2.rename(columns={nameCol_dihedDeg:nameCol_dihedDeg2})
df = df1
df[nameCol_dihedDeg2] = df2[nameCol_dihedDeg2]
df[nameCol_errors] = df[nameCol_dihedDeg1] - df[nameCol_dihedDeg2]
## checking that the unique ID is the same, if not as many angles, exit with an error
diffIDs = pd.DataFrame(df1[nameCol_IDs].values - df2[nameCol_IDs].values).abs().sum()
if diffIDs.values[0] > 0:
msg = "As many covalents dihedral angles detected, but not between the same atoms comparin structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
df[nameCol_absError] = df[nameCol_errors].abs()
df[nameCol_relError] = df[nameCol_errors].map( lambda x: x if abs(x) < 180. else np.sign(x)*(abs(x)-360.))
df = df.sort([nameCol_relError, nameCol_IDs], ascending=[False,True])
#print pd.DataFrame(d8.values-d7.values)
## STATISTICS
return get_statistics(df, nameCol_relError, unit="degrees")
def get_object(self):
obj = {}
obj["molecule1"] = self.molecule1.get_object()
obj["molecule2"] = self.molecule2.get_object()
# obj["atomEntities"] = [e.get_object() for e in self.atomEntities]
# obj["atomicPairs"] = [p.get_object() for p in self.atomicPairs]
# obj["covalentBonds"] = [b.get_object() for b in self.covalentBonds]
# obj["covalentBondAngles"] = [b.get_object() for b in self.covalentBondAngles]
# obj["covalentDihedralAngles"] = [b.get_object() for b in self.covalentDihedralAngles]
return obj
    def __str__(self):
        """Human-readable multi-line summary of the topology comparison.

        NOTE: the backslash-continued string literal keeps each
        continuation line's leading indentation inside the returned text;
        left byte-identical on purpose.
        """
        return "COMPARISON OF TOPOLOGIES (summary):\
        \n\tmolecules compared:\
        \n\t\t- {} ({} atoms)\
        \n\t\t- {} ({} atoms)\
        \n\tCovalent radius factor: {}\
        \n\tCovalents bonds errors:\
        \n\t\t- mean: {:-.1e} {}\
        \n\t\t- std: {:-.1e} {}\
        \n\tCovalents angles errors:\
        \n\t\t- mean: {:-.1e} {}\
        \n\t\t- std: {:-.1e} {}\
        \n\tDihedral angles errors:\
        \n\t\t- mean: {:-.1e} {}\
        \n\t\t- std: {:-.1e} {}\
        ".format(self.molecule1.shortname,
                 self.molecule1.nbAtomsInMolecule,
                 self.molecule2.shortname,
                 self.molecule2.nbAtomsInMolecule,
                 self.topology1.covRadFactor,
                 self.error_bonds['mean'],self.error_bonds['unit'],
                 self.error_bonds['stdDev'],self.error_bonds['unit'],
                 self.error_angles['mean'],self.error_angles['unit'],
                 self.error_angles['stdDev'],self.error_angles['unit'],
                 self.error_dihedrals['mean'],self.error_dihedrals['unit'],
                 self.error_dihedrals['stdDev'],self.error_dihedrals['unit'])
def get_as_JSON(self):
topoComparison = self.get_object()
import json
return json.dumps(topo, sort_keys=True, indent=4)
def requirements_for_comparison(molecule1, molecule2):
    """Abort via sys.exit unless the two molecules are comparable.

    Comparable means: same number of atoms, same molecular charge and,
    pairwise in order, atoms with identical symbol, charge and distance
    unit. Returns None when every requirement is satisfied.
    """
    msg = ""
    # Molecule-level requirements, checked in the original order.
    molecule_checks = (
        ("nbAtomsInMolecule", "Not the same number of atoms comparing:\n-{} and\n-{}"),
        ("charge", "Not the same molecular charge comparing:\n-{} and\n-{}"),
    )
    for attr, template in molecule_checks:
        if getattr(molecule1, attr) != getattr(molecule2, attr):
            msg = template.format(molecule1.shortname, molecule2.shortname)
            sys.exit(msg)
    # Per-atom requirements: the molecules must list atoms in the same order.
    atom_checks = (
        ("atomSymbol", "Not the same atom symbols: comparing:\n-{} and\n-{}"),
        ("atomCharge", "Not the same atom charge: comparing:\n-{} and\n-{}"),
        ("unitDistance", "Not the same atom unitDistance: comparing:\n-{} and\n-{}"),
    )
    for atom1, atom2 in zip(molecule1.listAtoms, molecule2.listAtoms):
        for attr, template in atom_checks:
            if getattr(atom1, attr) != getattr(atom2, attr):
                msg = template.format(str(atom1), str(atom2))
                sys.exit(msg)
def read_arguments():
    """Parse the command-line arguments of the topology-comparison script.

    Returns:
        argparse.Namespace: file_mol1, file_mol2, out, covRadFactor, verbose.
    """
    arg_parser = argparse.ArgumentParser()
    # Two mandatory geometry files.
    arg_parser.add_argument("file_mol1",
                        help="First molecular geometry in .XYZ format.")
    arg_parser.add_argument("file_mol2",
                        help="Second molecular geometry in .XYZ format.")
    # Optional output stream (defaults to stdout).
    arg_parser.add_argument('-out', nargs='?', type=argparse.FileType('w'),
                        default=sys.stdout,
                        help="optional output filename,\
                             if not, default is mol1_vs_mol2.top")
    arg_parser.add_argument("-crf", "--covRadFactor", type=float,
                        help="optional covalent radius factor,\
                            equal to 1 by default")
    arg_parser.add_argument("-v", "--verbose", action="store_true",
                        help="increase output verbosity")
    return arg_parser.parse_args()
def get_statistics(dataFrame, nameData, unit=""):
    """Compute summary statistics for one column of a DataFrame.

    Args:
        dataFrame: pandas DataFrame holding the data.
        nameData: name of the column to summarize.
        unit: unit label stored alongside the numbers.

    Returns:
        dict with keys unit, mean, variance, stdDev, mad, maxAbs.
    """
    series = dataFrame[nameData]
    mean = series.mean()
    variance = series.var()
    stdDev = series.std()
    # BUG FIX: Series.mad() was removed in pandas 2.0; compute the mean
    # absolute deviation around the mean explicitly (same definition).
    mad = (series - mean).abs().mean()
    maxAbs = series.abs().max()
    return {
        "unit": unit,
        "mean": mean,
        "variance": variance,
        "stdDev": stdDev,
        "mad": mad,  ## mean/average absolute deviation
        "maxAbs": maxAbs}
def example_valinomycin_pureLinK_vs_LinKwithDF():
    """Compare two pre-computed valinomycin geometries (with / without DF)."""
    # Hard-coded example inputs; see read_arguments() for the CLI variant.
    file_with_df = "/home/ctcc2/Documents/CODE-DEV/xyz2top/xyz2top/tests/files/valinomycin_geomOpt_DFT-b3lyp_cc-pVTZ.xyz"
    file_without_df = "/home/ctcc2/Documents/CODE-DEV/xyz2top/xyz2top/tests/files/valinomycin_geomOpt_DFT-b3lyp-noDF_cc-pVTZ.xyz"
    import xyz2molecule as xyz
    first_molecule = xyz.parse_XYZ(file_with_df)
    second_molecule = xyz.parse_XYZ(file_without_df)
    topologyDiff(first_molecule, second_molecule, covRadFactor=1.3)
if __name__ == "__main__":
    # Run the bundled valinomycin example when executed as a script.
    example_valinomycin_pureLinK_vs_LinKwithDF()
| StarcoderdataPython |
1808060 | <filename>wlauto/tests/test_agenda.py<gh_stars>1-10
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E0611
# pylint: disable=R0201
import os
from StringIO import StringIO
from unittest import TestCase
from nose.tools import assert_equal, assert_in, raises
from wlauto.core.agenda import Agenda
from wlauto.exceptions import ConfigError
YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')
invalid_agenda_text = """
workloads:
- id: 1
workload_parameters:
test: 1
"""
invalid_agenda = StringIO(invalid_agenda_text)
invalid_agenda.name = 'invalid1'
duplicate_agenda_text = """
global:
iterations: 1
workloads:
- id: 1
workload_name: antutu
workload_parameters:
test: 1
- id: 1
workload_name: andebench
"""
duplicate_agenda = StringIO(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'
short_agenda_text = """
workloads: [antutu, linpack, andebench]
"""
short_agenda = StringIO(short_agenda_text)
short_agenda.name = 'short'
default_ids_agenda_text = """
workloads:
- antutu
- id: 1
name: linpack
- id: test
name: andebench
params:
number_of_threads: 1
- vellamo
"""
default_ids_agenda = StringIO(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'
sectioned_agenda_text = """
sections:
- id: sec1
runtime_params:
dp: one
workloads:
- antutu
- andebench
- name: linpack
runtime_params:
dp: two
- id: sec2
runtime_params:
dp: three
workloads:
- antutu
workloads:
- nenamark
"""
sectioned_agenda = StringIO(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'
dup_sectioned_agenda_text = """
sections:
- id: sec1
workloads:
- antutu
- id: sec1
workloads:
- andebench
workloads:
- nenamark
"""
dup_sectioned_agenda = StringIO(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'
caps_agenda_text = """
config:
device: TC2
global:
runtime_parameters:
sysfile_values:
/sys/test/MyFile: 1
/sys/test/other file: 2
workloads:
- id: 1
name: linpack
"""
caps_agenda = StringIO(caps_agenda_text)
caps_agenda.name = 'caps'
bad_syntax_agenda_text = """
config:
# tab on the following line
reboot_policy: never
workloads:
- antutu
"""
bad_syntax_agenda = StringIO(bad_syntax_agenda_text)
bad_syntax_agenda.name = 'bad_syntax'
section_ids_test_text = """
config:
device: TC2
reboot_policy: never
workloads:
- name: bbench
id: bbench
- name: audio
sections:
- id: foo
- id: bar
"""
section_ids_agenda = StringIO(section_ids_test_text)
section_ids_agenda.name = 'section_ids'
class AgendaTest(TestCase):
    """Unit tests for wlauto.core.agenda.Agenda parsing and validation."""

    def test_yaml_load(self):
        # The on-disk test agenda defines exactly four workloads.
        agenda = Agenda(YAML_TEST_FILE)
        assert_equal(len(agenda.workloads), 4)

    def test_duplicate_id(self):
        try:
            Agenda(duplicate_agenda)
        # MODERNIZED: "except X, e" is Python-2-only syntax; "as" works on
        # Python 2.6+ and Python 3 with identical behavior.
        except ConfigError as e:
            assert_in('duplicate', e.message.lower())  # pylint: disable=E1101
        else:
            raise Exception('ConfigError was not raised for an agenda with duplicate ids.')

    def test_yaml_missing_field(self):
        try:
            # NOTE(review): this passes the raw YAML *text*, not the
            # `invalid_agenda` StringIO fixture defined above -- confirm
            # whether Agenda accepts literal text or whether this should
            # be Agenda(invalid_agenda).
            Agenda(invalid_agenda_text)
        except ConfigError as e:
            assert_in('workload name', e.message)
        else:
            raise Exception('ConfigError was not raised for an invalid agenda.')

    def test_defaults(self):
        # Workloads listed by bare name get name + auto-assigned numeric id.
        agenda = Agenda(short_agenda)
        assert_equal(len(agenda.workloads), 3)
        assert_equal(agenda.workloads[0].workload_name, 'antutu')
        assert_equal(agenda.workloads[0].id, '1')

    def test_default_id_assignment(self):
        # Auto ids must skip ids that are already taken explicitly.
        agenda = Agenda(default_ids_agenda)
        assert_equal(agenda.workloads[0].id, '2')
        assert_equal(agenda.workloads[3].id, '3')

    def test_sections(self):
        agenda = Agenda(sectioned_agenda)
        assert_equal(agenda.sections[0].workloads[0].workload_name, 'antutu')
        assert_equal(agenda.sections[1].runtime_parameters['dp'], 'three')

    @raises(ConfigError)
    def test_dup_sections(self):
        Agenda(dup_sectioned_agenda)

    @raises(ConfigError)
    def test_bad_syntax(self):
        Agenda(bad_syntax_agenda)
| StarcoderdataPython |
3500088 | <reponame>czbiohub/opencell-portal-pub
"""rename abundance table and columns
Revision ID: <KEY>
Revises: 14dc4c67ea56
Create Date: 2021-09-27 17:27:21.463837
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# (the down_revision chain links this migration to 14dc4c67ea56)
revision = '<KEY>'
down_revision = '14dc4c67ea56'
branch_labels = None
depends_on = None
def upgrade():
    """Rename protein_abundance to abundance_measurement with renamed columns.

    The old table is dropped and a new one created because column renames
    were combined with table rename in the autogenerated diff.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'abundance_measurement',
        sa.Column(
            'date_created',
            sa.DateTime(timezone=True),
            server_default=sa.text('now()'),
            nullable=True,
        ),
        sa.Column('uniprot_id', sa.String(), nullable=False),
        # renamed from measured_rna_abundance
        sa.Column('measured_transcript_expression', sa.Float(), nullable=True),
        # renamed from measured_protein_abundance
        sa.Column('measured_protein_concentration', sa.Float(), nullable=True),
        sa.ForeignKeyConstraint(
            ['uniprot_id'],
            ['uniprot_metadata.uniprot_id'],
            name=op.f('fk_abundance_measurement_uniprot_id_uniprot_metadata'),
            ondelete='CASCADE',
        ),
        sa.PrimaryKeyConstraint('uniprot_id', name=op.f('pk_abundance_measurement')),
    )
    op.drop_table('protein_abundance')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original protein_abundance table and drop the new one."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'protein_abundance',
        sa.Column(
            'date_created',
            postgresql.TIMESTAMP(timezone=True),
            server_default=sa.text('now()'),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column('uniprot_id', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column(
            'measured_rna_abundance',
            postgresql.DOUBLE_PRECISION(precision=53),
            autoincrement=False,
            nullable=True,
        ),
        sa.Column(
            'measured_protein_abundance',
            postgresql.DOUBLE_PRECISION(precision=53),
            autoincrement=False,
            nullable=True,
        ),
        sa.ForeignKeyConstraint(
            ['uniprot_id'],
            ['uniprot_metadata.uniprot_id'],
            name='fk_protein_abundance_uniprot_id_uniprot_metadata',
            ondelete='CASCADE',
        ),
        sa.PrimaryKeyConstraint('uniprot_id', name='pk_protein_abundance'),
    )
    op.drop_table('abundance_measurement')
    # ### end Alembic commands ###
| StarcoderdataPython |
3492176 | from os import path
from pathlib import Path
from shutil import rmtree
from typing import Union
from pyspark.sql import DataFrame
from pyspark.sql import SparkSession
from pyspark.sql.types import DataType
from pyspark.sql.types import StructType
from spark_fhir_schemas.r4.resources.patient import PatientSchema
from tests.spark_json_helpers import create_jsonl_files
def test_can_load_patient(spark_session: SparkSession) -> None:
    """Load a minified FHIR Patient fixture with the generated schema and
    verify selected fields through both the DataFrame API and Spark SQL."""
    # -- Arrange -----------------------------------------------------------
    test_dir: Path = Path(__file__).parent.joinpath("./")
    source_patient_json: Path = test_dir.joinpath("test_files").joinpath("patient.json")
    scratch_dir = test_dir.joinpath("./temp")
    # Start from a clean scratch directory for the minified output.
    if path.isdir(scratch_dir):
        rmtree(scratch_dir)
    jsonl_path: Path = create_jsonl_files(
        src_file=source_patient_json,
        dst_folder=scratch_dir.joinpath("minified_patient"),
        dst_file_name="1.json",
    )
    empty_schema = StructType([])
    seed_frame: DataFrame = spark_session.createDataFrame(
        spark_session.sparkContext.emptyRDD(), empty_schema
    )
    patient_schema: Union[StructType, DataType] = PatientSchema.get_schema()
    assert isinstance(patient_schema, StructType)
    # -- Act ---------------------------------------------------------------
    loaded: DataFrame = seed_frame.sql_ctx.read.schema(patient_schema).json(
        str(jsonl_path)
    )
    loaded.printSchema()
    loaded.show(truncate=False)
    loaded.createOrReplaceTempView("result_view")
    # -- Assert ------------------------------------------------------------
    assert loaded.where("id == 27384972").select("gender").collect()[0][0] == "male"
    assert (
        spark_session.sql(
            "SELECT identifier[0].type.coding[0].code FROM result_view where id = '27384972'"
        ).collect()[0][0]
        == "2"
    )
    # now make sure we can persist it
| StarcoderdataPython |
1792271 | <filename>UserInfo.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Userinfo.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pymysql
from PyQt5.QtWidgets import QMessageBox
class Ui_User(object):
    """User-information window: shows the signed-in user's record and lets
    them edit and persist it back to the MySQL `signup` table."""

    def setupUi(self, MainWindow):
        """Build all widgets, apply the stylesheet and wire the signals."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(672, 435)
        MainWindow.setStyleSheet("*{\n"
"    background:rgb(46, 52, 54);\n"
"}\n"
"QLabel{\n"
"color:rgb(238, 238, 236);\n"
"}\n"
"QLineEdit{\n"
"    color:rgb(238, 238, 236);\n"
"    border:2px solid white;\n"
"    border-radius:10px;  \n"
"    font: 13pt \"URW Gothic L\";\n"
"    background:transparent;\n"
"}\n"
"QLineEdit:hover{\n"
"    border:2px solid rgb(0, 0, 0);\n"
"}\n"
"QRadioButton{\n"
"    border:none;\n"
"    background:none;\n"
"    color:rgb(238, 238, 236);\n"
"font: 13pt \"URW Gothic L\";\n"
"}\n"
"QPushButton{\n"
"    background:rgb(186, 189, 182);\n"
"}\n"
"QPushButton:hover{\n"
"    border:1px solid rgb(85, 87, 83);\n"
"}\n"
"")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(200, 20, 301, 41))
        self.label.setStyleSheet("QLabel{\n"
"    font: 25pt \"URW Gothic L\";\n"
"    background:none;\n"
"    border:none;\n"
"}")
        self.label.setObjectName("label")
        self.lineEdit_fname = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_fname.setGeometry(QtCore.QRect(40, 90, 271, 41))
        self.lineEdit_fname.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_fname.setObjectName("lineEdit_fname")
        self.lineEdit_lname = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_lname.setGeometry(QtCore.QRect(350, 90, 281, 41))
        self.lineEdit_lname.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_lname.setObjectName("lineEdit_lname")
        self.lineEdit_age = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_age.setGeometry(QtCore.QRect(40, 160, 271, 41))
        self.lineEdit_age.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_age.setObjectName("lineEdit_age")
        self.radioButton_m = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButton_m.setGeometry(QtCore.QRect(450, 170, 61, 23))
        self.radioButton_m.setObjectName("radioButton_m")
        self.radioButton_f = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButton_f.setGeometry(QtCore.QRect(540, 170, 81, 23))
        self.radioButton_f.setObjectName("radioButton_f")
        self.lineEdit_uname = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_uname.setGeometry(QtCore.QRect(40, 230, 271, 41))
        self.lineEdit_uname.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_uname.setObjectName("lineEdit_uname")
        self.lineEdit_pwd = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_pwd.setGeometry(QtCore.QRect(360, 230, 271, 41))
        self.lineEdit_pwd.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_pwd.setObjectName("lineEdit_pwd")
        self.lineEdit_ea = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_ea.setGeometry(QtCore.QRect(150, 300, 371, 41))
        self.lineEdit_ea.setAlignment(QtCore.Qt.AlignCenter)
        self.lineEdit_ea.setObjectName("lineEdit_ea")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(350, 170, 91, 21))
        self.label_2.setStyleSheet("QLabel{\n"
"    font: 14pt \"URW Gothic L\";\n"
"    background:none;\n"
"    border:none;\n"
"}")
        self.label_2.setObjectName("label_2")
        self.btn_edit = QtWidgets.QPushButton(self.centralwidget)
        self.btn_edit.setGeometry(QtCore.QRect(190, 380, 121, 31))
        self.btn_edit.setObjectName("btn_edit")
        self.btn_save = QtWidgets.QPushButton(self.centralwidget)
        self.btn_save.setGeometry(QtCore.QRect(360, 380, 121, 31))
        self.btn_save.setObjectName("btn_save")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Fields start locked; "Edit" unlocks them, "Save" writes to MySQL.
        self.lock(True)
        # NOTE(review): clicked.connect(slot, extra) passes the signal's
        # `checked` bool (False for a non-checkable button) into lock(),
        # which happens to unlock the fields -- confirm the intent was
        # `lambda: self.lock(False)`.
        self.btn_edit.clicked.connect(self.lock,False)
        self.btn_save.clicked.connect(self.update_record)
        self.lineEdit_pwd.setEchoMode(QtWidgets.QLineEdit.Password)

    def lock(self, boolean):
        """Enable (False) or disable (True) every editable field."""
        self.lineEdit_lname.setDisabled(boolean)
        self.lineEdit_fname.setDisabled(boolean)
        self.lineEdit_age.setDisabled(boolean)
        self.radioButton_f.setDisabled(boolean)
        self.radioButton_m.setDisabled(boolean)
        self.lineEdit_uname.setDisabled(boolean)
        self.lineEdit_pwd.setDisabled(boolean)
        self.lineEdit_ea.setDisabled(boolean)

    def qmsg(self, msg, check):
        """Show a styled message box: check 0 -> information, 1 -> critical."""
        qmsgBox = QMessageBox()
        qmsgBox.move(((qmsgBox.width()) // 2+60) , ((qmsgBox.height()) // 2- 50))
        qmsgBox.setStyleSheet(
            'QMessageBox {background-color: #2b5b84; color: white;}\nQLabel{color: white;}\nQPushButton{color: white; font-size: 16px; background-color: #1d1d1d; border-radius: 10px; padding: 10px; text-align: center;}\n QPushButton:hover{color: #2b5b84;}')
        if check == 0:
            QMessageBox.information(qmsgBox, 'PyQt5 message', msg)
        if check == 1:
            QMessageBox.critical(qmsgBox, 'PyQt5 message', msg)

    def update_record(self):
        """Persist the form fields back to the `signup` row for this user."""
        FNAME = self.lineEdit_fname.text()
        LNAME = self.lineEdit_lname.text()
        AGE = self.lineEdit_age.text()
        GENDER = None
        if self.radioButton_m.isChecked():
            GENDER="Male"
        else:
            GENDER="Female"
        UNAME = self.lineEdit_uname.text()
        PWD = self.lineEdit_pwd.text()
        EA = self.lineEdit_ea.text()
        # Keyword args: PyMySQL 1.0 removed positional connect() arguments.
        connection = pymysql.connect(host="localhost", user="root",
                                     password="<PASSWORD>", database="project")
        cursor = connection.cursor()
        # SECURITY FIX: use driver-side parameter binding instead of
        # %-formatting user input into the SQL string (SQL injection).
        update_query = ("update signup set fname = %s, lname = %s, age = %s, "
                        "gender = %s, uname = %s, pwd = %s, email = %s "
                        "where uname = %s")
        cursor.execute(update_query,
                       (FNAME, LNAME, int(AGE), GENDER, UNAME, PWD, EA, UNAME))
        connection.commit()
        connection.close()
        self.qmsg('Updated Successfully',0)

    def userinfo(self, UNAME):
        """Load the `signup` row for UNAME and populate the form widgets."""
        connection = pymysql.connect(host="localhost", user="root",
                                     password="<PASSWORD>", database="project")
        cursor = connection.cursor()
        # SECURITY FIX: parameterized query (was %-formatted, injectable).
        select_query = "select * from signup where uname = %s"
        cursor.execute(select_query, (UNAME,))
        row = cursor.fetchone()
        # FIX: close the connection (it previously leaked).
        connection.close()
        self.lineEdit_fname.setText(str(row[0]))
        self.lineEdit_lname.setText(row[1])
        self.lineEdit_age.setText(str(row[2]))
        self.lineEdit_uname.setText(row[4])
        self.lineEdit_pwd.setText(row[5])
        self.lineEdit_ea.setText(row[6])
        if self.radioButton_m.text() == row[3]:
            self.radioButton_m.setChecked(True)
        else:
            self.radioButton_f.setChecked(True)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (standard pyuic-generated hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "User Information"))
        self.label.setText(_translate("MainWindow", "ᴜsᴇʀ ɪɴғᴏʀᴍᴀᴛɪᴏɴ"))
        self.lineEdit_fname.setPlaceholderText(_translate("MainWindow", "Firstname"))
        self.lineEdit_lname.setPlaceholderText(_translate("MainWindow", "Lastname"))
        self.lineEdit_age.setPlaceholderText(_translate("MainWindow", "Age"))
        self.radioButton_m.setText(_translate("MainWindow", "Male"))
        self.radioButton_f.setText(_translate("MainWindow", "Female"))
        self.lineEdit_uname.setPlaceholderText(_translate("MainWindow", "Username"))
        self.lineEdit_pwd.setPlaceholderText(_translate("MainWindow", "Password"))
        self.lineEdit_ea.setPlaceholderText(_translate("MainWindow", "Email Address"))
        self.label_2.setText(_translate("MainWindow", "Gender:"))
        self.btn_edit.setText(_translate("MainWindow", "Edit"))
        self.btn_save.setText(_translate("MainWindow", "Save"))
if __name__=="__main__":
    # Launch the user-info window as a standalone application.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_User()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
3500200 | import math
class Reward:
    """Stateful AWS DeepRacer reward: remembers the racing-line index where
    the episode started so lap time can be projected from partial progress."""

    def __init__(self, verbose=False):
        # Racing-line index closest to the car on step 1 of the episode.
        self.first_racingpoint_index = None
        # When True, print per-step diagnostics.
        self.verbose = verbose

    def reward_function(self, params):
        """Compute the reward for one simulation step.

        `params` is the standard DeepRacer input dict. The reward combines:
        distance to a precomputed racing line, closeness to that line's
        optimal speed, a projected-lap-time term and a finish bonus, and is
        zeroed for wrong heading, driving too slow, or leaving the track.
        """
        # Import package (needed for heading)
        import math

        ################## HELPER FUNCTIONS ###################

        def dist_2_points(x1, x2, y1, y2):
            # Euclidean distance between (x1, y1) and (x2, y2).
            return abs(abs(x1-x2)**2 + abs(y1-y2)**2)**0.5

        def closest_2_racing_points_index(racing_coords, car_coords):
            # Indexes of the closest and second-closest racing points.
            # Calculate all distances to racing points
            distances = []
            for i in range(len(racing_coords)):
                distance = dist_2_points(x1=racing_coords[i][0], x2=car_coords[0],
                                         y1=racing_coords[i][1], y2=car_coords[1])
                distances.append(distance)
            # Get index of the closest racing point
            closest_index = distances.index(min(distances))
            # Get index of the second closest racing point
            distances_no_closest = distances.copy()
            distances_no_closest[closest_index] = 999
            second_closest_index = distances_no_closest.index(
                min(distances_no_closest))
            return [closest_index, second_closest_index]

        def dist_to_racing_line(closest_coords, second_closest_coords, car_coords):
            # Perpendicular distance from the car to the racing-line segment
            # through the two closest racing points (Heron-style formula).
            # Calculate the distances between 2 closest racing points
            a = abs(dist_2_points(x1=closest_coords[0],
                                  x2=second_closest_coords[0],
                                  y1=closest_coords[1],
                                  y2=second_closest_coords[1]))
            # Distances between car and closest and second closest racing point
            b = abs(dist_2_points(x1=car_coords[0],
                                  x2=closest_coords[0],
                                  y1=car_coords[1],
                                  y2=closest_coords[1]))
            c = abs(dist_2_points(x1=car_coords[0],
                                  x2=second_closest_coords[0],
                                  y1=car_coords[1],
                                  y2=second_closest_coords[1]))
            # Calculate distance between car and racing line (goes through 2 closest racing points)
            # try-except in case a=0 (rare bug in DeepRacer)
            try:
                distance = abs(-(a**4) + 2*(a**2)*(b**2) + 2*(a**2)*(c**2) -
                               (b**4) + 2*(b**2)*(c**2) - (c**4))**0.5 / (2*a)
            except:
                distance = b
            return distance

        # Calculate which one of the closest racing points is the next one and which one the previous one
        def next_prev_racing_point(closest_coords, second_closest_coords, car_coords, heading):
            # Virtually set the car more into the heading direction
            heading_vector = [math.cos(math.radians(
                heading)), math.sin(math.radians(heading))]
            new_car_coords = [car_coords[0]+heading_vector[0],
                              car_coords[1]+heading_vector[1]]
            # Calculate distance from new car coords to 2 closest racing points
            distance_closest_coords_new = dist_2_points(x1=new_car_coords[0],
                                                        x2=closest_coords[0],
                                                        y1=new_car_coords[1],
                                                        y2=closest_coords[1])
            distance_second_closest_coords_new = dist_2_points(x1=new_car_coords[0],
                                                               x2=second_closest_coords[0],
                                                               y1=new_car_coords[1],
                                                               y2=second_closest_coords[1])
            if distance_closest_coords_new <= distance_second_closest_coords_new:
                next_point_coords = closest_coords
                prev_point_coords = second_closest_coords
            else:
                next_point_coords = second_closest_coords
                prev_point_coords = closest_coords
            return [next_point_coords, prev_point_coords]

        def racing_direction_diff(closest_coords, second_closest_coords, car_coords, heading):
            # Absolute angle (degrees, 0-180) between car heading and the
            # local direction of the racing line.
            # Calculate the direction of the center line based on the closest waypoints
            next_point, prev_point = next_prev_racing_point(closest_coords,
                                                            second_closest_coords,
                                                            car_coords,
                                                            heading)
            # Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians
            track_direction = math.atan2(
                next_point[1] - prev_point[1], next_point[0] - prev_point[0])
            # Convert to degree
            track_direction = math.degrees(track_direction)
            # Calculate the difference between the track direction and the heading direction of the car
            direction_diff = abs(track_direction - heading)
            if direction_diff > 180:
                direction_diff = 360 - direction_diff
            return direction_diff

        # Gives back indexes that lie between start and end index of a cyclical list
        # (start index is included, end index is not)
        def indexes_cyclical(start, end, array_len):
            if end < start:
                end += array_len
            return [index % array_len for index in range(start, end)]

        # Calculate how long car would take for entire lap, if it continued like it did until now
        def projected_time(first_index, closest_index, step_count, times_list):
            # Calculate how much time has passed since start
            # (15 simulation steps per second)
            current_actual_time = (step_count-1) / 15
            # Calculate which indexes were already passed
            indexes_traveled = indexes_cyclical(first_index, closest_index, len(times_list))
            # Calculate how much time should have passed if car would have followed optimals
            current_expected_time = sum([times_list[i] for i in indexes_traveled])
            # Calculate how long one entire lap takes if car follows optimals
            total_expected_time = sum(times_list)
            # Calculate how long car would take for entire lap, if it continued like it did until now
            try:
                projected_time = (current_actual_time/current_expected_time) * total_expected_time
            except:
                projected_time = 9999
            return projected_time

        #################### RACING LINE ######################

        # Optimal racing line for the Spain track
        # Each row: [x,y,speed,timeFromPreviousPoint]
        racing_track = [[0.3312, 2.82902, 1.30028, 0.1125],
                        [0.33882, 2.68171, 1.30028, 0.11344],
                        [0.36236, 2.53659, 1.30028, 0.11307],
                        [0.40087, 2.39698, 1.30507, 0.11097],
                        [0.45275, 2.26508, 1.31176, 0.10805],
                        [0.51635, 2.142, 1.31921, 0.10502],
                        [0.59024, 2.02819, 1.33692, 0.10149],
                        [0.67311, 1.92365, 1.35733, 0.09828],
                        [0.76393, 1.82822, 1.38414, 0.09517],
                        [0.8618, 1.74165, 1.41847, 0.09212],
                        [0.96597, 1.66362, 1.46121, 0.08907],
                        [1.07578, 1.59378, 1.52061, 0.08558],
                        [1.19062, 1.53169, 1.59994, 0.08159],
                        [1.30986, 1.47678, 1.65938, 0.07912],
                        [1.43296, 1.42844, 1.62186, 0.08154],
                        [1.55936, 1.38595, 1.59191, 0.08376],
                        [1.68849, 1.34851, 1.56949, 0.08567],
                        [1.8198, 1.31519, 1.55379, 0.08719],
                        [1.9527, 1.28493, 1.5382, 0.08861],
                        [2.08658, 1.25661, 1.53483, 0.08916],
                        [2.22298, 1.22686, 1.53483, 0.09096],
                        [2.35884, 1.19553, 1.53483, 0.09084],
                        [2.49393, 1.16191, 1.53483, 0.0907],
                        [2.628, 1.12536, 1.53483, 0.09054],
                        [2.76085, 1.08528, 1.53483, 0.09041],
                        [2.89229, 1.04114, 1.53483, 0.09033],
                        [3.02211, 0.99244, 1.53483, 0.09034],
                        [3.15014, 0.93869, 1.53483, 0.09047],
                        [3.27618, 0.87939, 1.53483, 0.09075],
                        [3.40004, 0.81404, 1.53483, 0.09125],
                        [3.52152, 0.742, 1.53483, 0.09202],
                        [3.64034, 0.66262, 1.53483, 0.09311],
                        [3.76074, 0.59177, 1.53483, 0.09102],
                        [3.88233, 0.52963, 1.53483, 0.08897],
                        [4.00501, 0.47617, 1.53483, 0.08719],
                        [4.12874, 0.43135, 1.53483, 0.08574],
                        [4.25349, 0.39509, 1.53483, 0.08464],
                        [4.37923, 0.36738, 1.53483, 0.08389],
                        [4.50595, 0.3482, 1.53483, 0.0835],
                        [4.6336, 0.33768, 1.53483, 0.08345],
                        [4.76212, 0.33589, 1.54, 0.08346],
                        [4.89139, 0.34292, 1.54759, 0.08365],
                        [5.02124, 0.35891, 1.56493, 0.0836],
                        [5.15142, 0.38395, 1.58956, 0.08339],
                        [5.2816, 0.41807, 1.62477, 0.08283],
                        [5.4114, 0.4612, 1.65551, 0.08262],
                        [5.54035, 0.5133, 1.69861, 0.08188],
                        [5.66795, 0.57414, 1.74608, 0.08096],
                        [5.79366, 0.6434, 1.80192, 0.07965],
                        [5.91696, 0.7206, 1.8709, 0.07776],
                        [6.03741, 0.80512, 1.94201, 0.07577],
                        [6.15464, 0.89629, 2.02687, 0.07327],
                        [6.26838, 0.99337, 2.12691, 0.07031],
                        [6.37852, 1.09559, 2.23799, 0.06714],
                        [6.48505, 1.20221, 2.13052, 0.07074],
                        [6.58809, 1.31255, 1.96143, 0.07697],
                        [6.68784, 1.42601, 1.83003, 0.08255],
                        [6.78459, 1.54207, 1.72188, 0.08775],
                        [6.87867, 1.6603, 1.6428, 0.09198],
                        [6.97035, 1.78041, 1.57631, 0.09586],
                        [7.05971, 1.90227, 1.52045, 0.09938],
                        [7.1468, 2.02575, 1.47813, 0.10223],
                        [7.23169, 2.15076, 1.4419, 0.1048],
                        [7.31445, 2.27719, 1.4134, 0.10691],
                        [7.39504, 2.40502, 1.39004, 0.10871],
                        [7.47341, 2.53422, 1.37567, 0.10984],
                        [7.5495, 2.66476, 1.36693, 0.11054],
                        [7.62327, 2.79664, 1.36328, 0.11084],
                        [7.69465, 2.92982, 1.36328, 0.11084],
                        [7.76358, 3.06429, 1.36328, 0.11084],
                        [7.82997, 3.20002, 1.36328, 0.11084],
                        [7.89374, 3.33701, 1.36328, 0.11084],
                        [7.95479, 3.47524, 1.36328, 0.11084],
                        [8.01207, 3.61501, 1.36328, 0.1108],
                        [8.06441, 3.75637, 1.36328, 0.11057],
                        [8.11059, 3.89904, 1.36328, 0.11],
                        [8.14944, 4.04252, 1.36328, 0.10904],
                        [8.1799, 4.18616, 1.36328, 0.10771],
                        [8.20114, 4.32922, 1.36328, 0.10608],
                        [8.2126, 4.47094, 1.36328, 0.1043],
                        [8.21392, 4.6106, 1.36328, 0.10245],
                        [8.20493, 4.74752, 1.36328, 0.10065],
                        [8.18559, 4.88108, 1.36328, 0.09899],
                        [8.15597, 5.01071, 1.36328, 0.09753],
                        [8.1162, 5.13587, 1.36328, 0.09633],
                        [8.06644, 5.25605, 1.36328, 0.09541],
                        [8.00692, 5.37079, 1.36328, 0.09481],
                        [7.93789, 5.47964, 1.36328, 0.09455],
                        [7.85959, 5.58214, 1.36531, 0.09447],
                        [7.77232, 5.67786, 1.37127, 0.09446],
                        [7.67635, 5.76633, 1.38082, 0.09453],
                        [7.57197, 5.84706, 1.39066, 0.09488],
                        [7.45949, 5.91949, 1.40348, 0.09533],
                        [7.33925, 5.98304, 1.41835, 0.09589],
                        [7.2117, 6.03706, 1.437, 0.0964],
                        [7.07742, 6.0809, 1.45157, 0.09731],
                        [6.93719, 6.11382, 1.46888, 0.09806],
                        [6.79213, 6.13516, 1.48699, 0.0986],
                        [6.64383, 6.14441, 1.50369, 0.09881],
                        [6.49436, 6.14136, 1.51987, 0.09836],
                        [6.3459, 6.12623, 1.54239, 0.09675],
                        [6.20016, 6.09972, 1.56605, 0.09459],
                        [6.05822, 6.06266, 1.59331, 0.09207],
                        [5.92067, 6.01592, 1.63208, 0.08901],
                        [5.78774, 5.96039, 1.60394, 0.08982],
                        [5.65947, 5.89687, 1.56189, 0.09164],
                        [5.53577, 5.82614, 1.52974, 0.09315],
                        [5.41646, 5.74889, 1.5097, 0.09415],
                        [5.30127, 5.66582, 1.4917, 0.09521],
                        [5.18985, 5.57764, 1.48342, 0.09579],
                        [5.08179, 5.48502, 1.48113, 0.09609],
                        [4.97664, 5.38868, 1.48113, 0.09629],
                        [4.87391, 5.28929, 1.48113, 0.09651],
                        [4.77307, 5.18754, 1.48113, 0.09671],
                        [4.67361, 5.08412, 1.48113, 0.09688],
                        [4.57497, 4.97969, 1.48113, 0.09699],
                        [4.47987, 4.87962, 1.4601, 0.09455],
                        [4.38389, 4.78101, 1.4257, 0.09652],
                        [4.28641, 4.68486, 1.39322, 0.09827],
                        [4.18688, 4.59216, 1.3655, 0.09961],
                        [4.08481, 4.50378, 1.34119, 0.10067],
                        [3.97981, 4.4205, 1.32388, 0.10123],
                        [3.87157, 4.34304, 1.31182, 0.10146],
                        [3.75985, 4.27207, 1.30536, 0.10139],
                        [3.64453, 4.20816, 1.30188, 0.10128],
                        [3.52552, 4.15187, 1.30124, 0.10117],
                        [3.40284, 4.10371, 1.3, 0.10138],
                        [3.27657, 4.0642, 1.3, 0.10177],
                        [3.14689, 4.03382, 1.3, 0.10246],
                        [3.01402, 4.0131, 1.3, 0.10344],
                        [2.87828, 4.00255, 1.3, 0.10472],
                        [2.74013, 4.00268, 1.3, 0.10628],
                        [2.6001, 4.01393, 1.3, 0.10806],
                        [2.45888, 4.0368, 1.3, 0.11004],
                        [2.31735, 4.07166, 1.3, 0.11213],
                        [2.17655, 4.11882, 1.3, 0.11422],
                        [2.03352, 4.15406, 1.3, 0.11332],
                        [1.89032, 4.17652, 1.3, 0.1115],
                        [1.74811, 4.18603, 1.3, 0.10964],
                        [1.60798, 4.18259, 1.3, 0.10783],
                        [1.47095, 4.16632, 1.3, 0.10615],
                        [1.33799, 4.13745, 1.3, 0.10466],
                        [1.20995, 4.09635, 1.3, 0.10344],
                        [1.08765, 4.04344, 1.3, 0.10251],
                        [0.97178, 3.97921, 1.3, 0.1019],
                        [0.86304, 3.90417, 1.3, 0.10163],
                        [0.7621, 3.81879, 1.3, 0.1017],
                        [0.66966, 3.72352, 1.30028, 0.10209],
                        [0.58645, 3.61884, 1.30028, 0.10284],
                        [0.51326, 3.50525, 1.30028, 0.10392],
                        [0.45096, 3.38332, 1.30028, 0.10531],
                        [0.4005, 3.25369, 1.30028, 0.10698],
                        [0.36297, 3.11721, 1.30028, 0.10886],
                        [0.33953, 2.97506, 1.30028, 0.1108]]

        ################## INPUT PARAMETERS ###################

        # Read all input parameters
        all_wheels_on_track = params['all_wheels_on_track']
        x = params['x']
        y = params['y']
        distance_from_center = params['distance_from_center']
        is_left_of_center = params['is_left_of_center']
        heading = params['heading']
        progress = params['progress']
        steps = params['steps']
        speed = params['speed']
        steering_angle = params['steering_angle']
        track_width = params['track_width']
        waypoints = params['waypoints']
        closest_waypoints = params['closest_waypoints']
        is_offtrack = params['is_offtrack']

        ############### OPTIMAL X,Y,SPEED,TIME ################

        # Get closest indexes for racing line (and distances to all points on racing line)
        closest_index, second_closest_index = closest_2_racing_points_index(
            racing_track, [x, y])
        # Get optimal [x, y, speed, time] for closest and second closest index
        optimals = racing_track[closest_index]
        optimals_second = racing_track[second_closest_index]
        # Save first racingpoint of episode for later
        if self.verbose == True:
            self.first_racingpoint_index = 0 # this is just for testing purposes
        if steps == 1:
            self.first_racingpoint_index = closest_index

        ################ REWARD AND PUNISHMENT ################

        ## Define the default reward ##
        reward = 1
        ## Reward if car goes close to optimal racing line ##
        DISTANCE_MULTIPLE = 1
        dist = dist_to_racing_line(optimals[0:2], optimals_second[0:2], [x, y])
        distance_reward = max(1e-3, 1 - (dist/(track_width*0.5)))
        reward += distance_reward * DISTANCE_MULTIPLE
        ## Reward if speed is close to optimal speed ##
        SPEED_DIFF_NO_REWARD = 1
        SPEED_MULTIPLE = 2
        speed_diff = abs(optimals[2]-speed)
        if speed_diff <= SPEED_DIFF_NO_REWARD:
            # we use quadratic punishment (not linear) bc we're not as confident with the optimal speed
            # so, we do not punish small deviations from optimal speed
            speed_reward = (1 - (speed_diff/(SPEED_DIFF_NO_REWARD))**2)**2
        else:
            speed_reward = 0
        reward += speed_reward * SPEED_MULTIPLE
        # Reward if less steps
        REWARD_PER_STEP_FOR_FASTEST_TIME = 1
        STANDARD_TIME = 37
        FASTEST_TIME = 27
        times_list = [row[3] for row in racing_track]
        # NOTE: this rebinds the local name `projected_time` from the helper
        # function to its float result; the helper cannot be called again
        # after this line.
        projected_time = projected_time(self.first_racingpoint_index, closest_index, steps, times_list)
        try:
            steps_prediction = projected_time * 15 + 1
            reward_prediction = max(1e-3, (-REWARD_PER_STEP_FOR_FASTEST_TIME*(FASTEST_TIME) /
                                           (STANDARD_TIME-FASTEST_TIME))*(steps_prediction-(STANDARD_TIME*15+1)))
            steps_reward = min(REWARD_PER_STEP_FOR_FASTEST_TIME, reward_prediction / steps_prediction)
        except:
            steps_reward = 0
        reward += steps_reward
        # Zero reward if obviously wrong direction (e.g. spin)
        direction_diff = racing_direction_diff(
            optimals[0:2], optimals_second[0:2], [x, y], heading)
        if direction_diff > 30:
            reward = 1e-3
        # Zero reward of obviously too slow
        speed_diff_zero = optimals[2]-speed
        if speed_diff_zero > 0.5:
            reward = 1e-3
        ## Incentive for finishing the lap in less steps ##
        REWARD_FOR_FASTEST_TIME = 1500 # should be adapted to track length and other rewards
        STANDARD_TIME = 37  # seconds (time that is easily done by model)
        FASTEST_TIME = 27  # seconds (best time of 1st place on the track)
        if progress == 100:
            finish_reward = max(1e-3, (-REWARD_FOR_FASTEST_TIME /
                      (15*(STANDARD_TIME-FASTEST_TIME)))*(steps-STANDARD_TIME*15))
        else:
            finish_reward = 0
        reward += finish_reward
        ## Zero reward if off track ##
        if all_wheels_on_track == False:
            reward = 1e-3

        ####################### VERBOSE #######################

        if self.verbose == True:
            print("Closest index: %i" % closest_index)
            print("Distance to racing line: %f" % dist)
            print("=== Distance reward (w/out multiple): %f ===" % (distance_reward))
            print("Optimal speed: %f" % optimals[2])
            print("Speed difference: %f" % speed_diff)
            print("=== Speed reward (w/out multiple): %f ===" % speed_reward)
            print("Direction difference: %f" % direction_diff)
            print("Predicted time: %f" % projected_time)
            print("=== Steps reward: %f ===" % steps_reward)
            print("=== Finish reward: %f ===" % finish_reward)

        #################### RETURN REWARD ####################

        # Always return a float value
        return float(reward)
# Shared stateful instance: must persist across calls so the episode's
# first racing-line index is remembered.
reward_object = Reward() # add parameter verbose=True to get noisy output for testing
def reward_function(params):
    """AWS DeepRacer entry point: delegate to the shared Reward instance."""
    return reward_object.reward_function(params)
| StarcoderdataPython |
3406334 | import os
from dateutil.tz import gettz
from celery.schedules import crontab
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Base Flask/Celery configuration; environment subclasses override below."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # NOTE(review): placeholder secret -- must be overridden from the
    # environment / a secret store before any real deployment.
    SECRET_KEY = '<PASSWORD>-<PASSWORD>-<PASSWORD>-to-be-<PASSWORD>'
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL',
                                        'mysql://root:@localhost:3306/gfe')
    REDIS_URL = os.getenv('REDIS_URL', 'redis://localhost:6379')
    BASE_URL = 'http://www.mybidmatch.com'
    QUERY_URL = '/go?sub=7F604A76-0EF9-48F1-A83F-ABC17511B6FC'
    SCRAPER_BASE_URL = BASE_URL + QUERY_URL
    # NOTE(review): 'America/Atlanta' is not a valid IANA zone name, so
    # gettz() would return None for the default -- confirm the intended zone
    # (e.g. 'America/New_York').
    TIME_ZONE = gettz(os.getenv('TIME_ZONE', 'America/Atlanta'))
    # Daily run time as 'HH:MM'; split into HOUR/MINUTE below.
    RUN_AT = os.getenv('RUN_AT', '17:15')
    RQ_POLL_INTERVAL = 5
    HOUR, MINUTE = RUN_AT.split(':')
    CELERY_BROKER_URL = REDIS_URL
    CELERY_RESULT_BACKEND = REDIS_URL
    CELERY_TIMEZONE = TIME_ZONE
    # NOTE(review): crontab(minute=10) fires every hour at minute 10 and does
    # not use HOUR/MINUTE from RUN_AT -- confirm whether
    # crontab(hour=HOUR, minute=MINUTE) was intended for a once-per-evening
    # scrape, as the schedule name suggests.
    CELERYBEAT_SCHEDULE = {
        'scrape-every-evening': {
            'task': 'scraper.scraper',
            'schedule': crontab(minute=10)
        }
    }
class ProductionConfig(Config):
    """Production settings: debugging explicitly disabled."""
    DEBUG = False
class StagingConfig(Config):
    """Staging settings: development flags on, with debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class DevelopmentConfig(Config):
    """Local development settings: debug on and SQLAlchemy change tracking
    enabled (useful for debugging, has runtime overhead)."""
    DEVELOPMENT = True
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    DEBUG = True
class TestingConfig(Config):
    """Test-suite settings: enables Flask testing mode."""
    TESTING = True
| StarcoderdataPython |
9658924 | import yaml
import math
import ray
import wandb
import itertools
import matplotlib.pyplot as plt
from collections import defaultdict
from utils.visualization import visualize_semantic_processed
class Logger:
    """Thin wrapper around Weights & Biases logging for training runs.

    Loads a YAML config, copies every top-level key onto the instance as an
    attribute (e.g. ``self.seg_channels`` -- assumes the config defines the
    keys later methods read; confirm against the YAML schema), and starts a
    wandb run with that config.
    """
    def __init__(self, wandb_project, args):
        # Read configs
        with open(args.config_path, 'rb') as f:
            config = yaml.safe_load(f)
        # Expose every config entry as an instance attribute.
        for key, value in config.items():
            setattr(self, key, value)
        wandb.init(project=wandb_project, config=config)

    @property
    def save_dir(self):
        """Directory wandb assigned to the current run (for artifacts etc.)."""
        return wandb.run.dir

    def log_ego_info(self, it, opt_info):
        """Plot ground-truth vs. predicted poses for 4 samples and log to wandb.

        `opt_info` must contain 'locs', 'yaws', 'pred_locs', 'pred_yaws'
        (popped here); all remaining keys are logged as plain wandb values.
        """
        width = 0.1   # arrow shaft width in plot units
        length = 0.5  # arrow length in plot units
        locs = opt_info.pop('locs')
        yaws = opt_info.pop('yaws')
        pred_locs = opt_info.pop('pred_locs')
        pred_yaws = opt_info.pop('pred_yaws')
        # Normalize the locations: every pose becomes relative to the first
        # (index 0) location of its sample so subplots share one frame.
        pred_locs = pred_locs - locs[:,0:1]
        locs = locs - locs[:,0:1]
        f, axes = plt.subplots(2,2,figsize=(10,10))
        # One subplot per sample; blue = ground truth (skipping the reference
        # pose at index 0), red = prediction.
        for i, ax in enumerate(itertools.chain(*axes)):
            ax.set_xlim([-10,10])
            ax.set_ylim([-10,10])
            for loc, yaw, pred_loc, pred_yaw in zip(locs[i,1:], yaws[i,1:], pred_locs[i], pred_yaws[i]):
                ax.arrow(*loc,length*math.cos(yaw),length*math.sin(yaw), color='blue', width=width)
                ax.arrow(*pred_loc,length*math.cos(pred_yaw),length*math.sin(pred_yaw), color='red', width=width)
        opt_info.update({'it': it, 'viz': wandb.Image(plt)})
        wandb.log(opt_info)
        plt.close('all')

    def log_main_info(self, it, opt_info):
        """Log a 2x3 panel: camera views, predicted vs. GT segmentation, and
        predicted vs. GT action-probability maps. Remaining keys in
        `opt_info` are logged as plain wandb values."""
        wide_rgb = opt_info.pop('wide_rgb')
        narr_rgb = opt_info.pop('narr_rgb')
        # NOTE(review): spd is popped (removed from the logged scalars) but
        # never displayed -- confirm whether it should appear in a title.
        spd = opt_info.pop('spd')
        cmd = opt_info.pop('cmd')
        pred_seg = opt_info.pop('pred_seg')
        gt_seg = opt_info.pop('gt_seg')
        act_prob = opt_info.pop('act_prob')
        act_brak = opt_info.pop('act_brak')
        pred_act_prob = opt_info.pop('pred_act_prob')
        pred_act_brak = opt_info.pop('pred_act_brak')
        # Colorize the segmentation maps for display.
        pred_seg = visualize_semantic_processed(pred_seg, self.seg_channels)
        gt_seg = visualize_semantic_processed(gt_seg, self.seg_channels)
        f, [[ax1,ax2,ax3], [ax4,ax5,ax6]] = plt.subplots(2,3,figsize=(30,10))
        ax1.imshow(narr_rgb);
        ax4.imshow(wide_rgb); ax4.set_title({0:'Left',1:'Right',2:'Straight',3:'Follow',4:'Change Left',5:'Change Right'}.get(cmd))
        ax2.imshow(pred_seg); ax2.set_title('predicted sem')
        ax5.imshow(gt_seg); ax5.set_title('gt sem')
        ax3.imshow(pred_act_prob); ax3.set_title(f'(pred) brake: {pred_act_brak:.3f}')
        ax6.imshow(act_prob); ax6.set_title(f'(gt) brake: {act_brak:.3f}')
        opt_info.update({'it': it, 'viz': wandb.Image(plt)})
        wandb.log(opt_info)
        plt.close('all')

    def log_label_info(self, label_info):
        """Log one labeling sample: wide camera frame next to its
        action-value map, titled with the command and brake value."""
        act_val_norm = label_info.pop('act_val_norm')
        act_val_brak = label_info.pop('act_val_brak')
        cmd = label_info.pop('cmd')
        wide_rgb = label_info.pop('wide_rgb')
        f, [ax1, ax2] = plt.subplots(1,2,figsize=(30,10))
        ax1.imshow(wide_rgb)
        ax2.imshow(act_val_norm)
        ax1.set_title({0:'Left',1:'Right',2:'Straight',3:'Follow',4:'Change left',5:'Change right'}.get(cmd))
        ax2.set_title(act_val_brak)
        wandb.log({'viz': wandb.Image(plt)})
        plt.close('all')
@ray.remote
class RemoteLogger(Logger):
    """Ray-actor variant of Logger that also tracks per-worker frame counts."""
    def __init__(self, wandb_project, config):
        super().__init__(wandb_project, config)
        # worker_id -> latest reported frame count (defaults to 0)
        self.worker_counts = defaultdict(lambda: 0)

    def log_label_info(self, label_info, worker_count, worker_id=0):
        """Log the sample and record the reporting worker's frame count."""
        super().log_label_info(label_info)
        self.worker_counts[worker_id] = worker_count

    def total_frames(self):
        """Total frames reported across all workers so far."""
        return sum(self.worker_counts.values())
| StarcoderdataPython |
6792 | <reponame>hhdMrLion/mxshop-api<filename>apps/users/adminx.py
import xadmin
from users.models import VerifyCode
from xadmin import views
class BaseSetting(object):
    """xadmin view setting: enable theme switching for the admin UI."""
    # Enable the theme-switching feature (Bootswatch themes)
    enable_themes = True
    user_bootswatch = True
class GlobalSettings(object):
    """xadmin global configuration: admin site title and footer."""
    site_title = '天天生鲜后台管理'
    site_footer = 'https://www.qnmlgb.top/'
    # Collapse the left-hand menu into an accordion
    menu_style = 'accordion'
class VerifyCodeAdmin(object):
    """Admin options for the VerifyCode model."""
    # Columns shown in the change-list view
    list_display = ['code', 'mobile', 'add_time']
# Register the VerifyCode model and hook the theme / global settings into xadmin.
xadmin.site.register(VerifyCode, VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
| StarcoderdataPython |
1730202 | import numpy as np
import scipy.sparse.linalg
import scipy.sparse as sparse
from time import time as tm
from einsum_tools import *
# a class to store the operators with the bottom and left indices,
# and bottom and right indices moved to the right for contraction
# while sparse
class SparseOperator():
    """Holds an MPO tensor in two sparse layouts pre-permuted for contraction.

    l_form groups axes [0, 2], r_form groups axes [1, 2] -- per the comments
    below, these correspond to the (bottom, left) and (bottom, right) index
    pairs moved to the right for contraction. NOTE(review): the exact
    semantics of NDSparse(Op, axes) live in einsum_tools -- confirm there.
    """
    def __init__(self, Op):
        self.shape = Op.shape
        self.dtype = Op.dtype
        # l_form mujd
        self.l_form = NDSparse(Op, [0, 2])
        # r_form jumd
        self.r_form = NDSparse(Op, [1, 2])
# contracts terms into the left tensor
def contract_left(Op, Md, Mu, L):
    """Absorb one site into the left environment tensor.

    :param Op: site MPO tensor -- dense ndarray or SparseOperator
    :param Md: bra-side MPS tensor (einsum indices ``dil``)
    :param Mu: ket-side MPS tensor (einsum indices ``ukn``)
    :param L: current left environment (einsum indices ``jik``)
    :return: updated left environment with indices ``mln``
    """
    # Mu = Mu.conj()
    L = einsum("dil,jik->dljk", Md, L)
    # Sparse operators use the pre-permuted l_form layout; dense ones are
    # contracted directly with the opposite argument order.
    if type(Op) == SparseOperator:
        L = einsum("mujd,dljk->lkmu", Op.l_form, L)
    else:
        L = einsum("dljk,jmdu->lkmu", L, Op)
    L = einsum("lkmu,ukn->mln", L, Mu)
    return L
# contracts terms into the right tensor
def contract_right(Op, Md, Mu, R):
    """Absorb one site into the right environment tensor (mirror of
    contract_left).

    :param Op: site MPO tensor -- dense ndarray or SparseOperator
    :param Md: bra-side MPS tensor (einsum indices ``dil``)
    :param Mu: ket-side MPS tensor (einsum indices ``ukn``)
    :param R: current right environment (einsum indices ``mln``)
    :return: updated right environment with indices ``jik``
    """
    # Mu = Mu.conj()
    R = einsum("dil,mln->dimn", Md, R)
    # Sparse operators use the pre-permuted r_form layout.
    if type(Op) == SparseOperator:
        R = einsum("jumd,dimn->inju", Op.r_form, R)
    else:
        R = einsum("dimn,jmdu->inju", R, Op)
    R = einsum("inju,ukn->jik", R, Mu)
    return R
# returns the expectation of an MPO
def expectation(MPS, MPO):
    """Return the expectation value <MPS|MPO|MPS> by sweeping the
    transfer tensor across the chain from left to right."""
    env = np.array([[[1]]])
    for site_tensor, op_tensor in zip(MPS, MPO):
        env = contract_left(op_tensor, site_tensor, site_tensor, env)
    return env[0, 0, 0]
# contracts two mpos by the common indices
def contract_mpo(MPO1, MPO2):
    """Multiply two MPOs site-by-site (used below to build H^2 from H).

    The shared physical index ``q`` is contracted and the bond index pairs
    are fused, so each resulting bond dimension is the product of the two
    inputs' bond dimensions.
    """
    MPO = []
    for i in range(len(MPO1)):
        # ijqu (top) x kldq (bottom): contract q, then fuse bonds (i,k), (j,l).
        MPO += [einsum("ijqu,kldq->ikjldu", MPO1[i], MPO2[i])
                .reshape(MPO1[i].shape[0] * MPO2[i].shape[0],
                         MPO1[i].shape[1] * MPO2[i].shape[1],
                         MPO2[i].shape[2], MPO1[i].shape[3])]
    return MPO
# A liear operator for the sparse eigenvalue problem
class SparseHamProd(sparse.linalg.LinearOperator):
    """Implicit effective Hamiltonian for a two-site DMRG optimization.

    Wraps the contraction L -- OL -- OR -- R as a scipy LinearOperator so
    eigsh can find the ground state without materializing the dense matrix.
    """
    def __init__(self, L, OL, OR, R):
        self.L = L    # left environment tensor
        self.OL = OL  # MPO tensor on the left of the two optimized sites
        self.OR = OR  # MPO tensor on the right of the two optimized sites
        self.R = R    # right environment tensor
        self.dtype = OL.dtype
        self.issparse = type(OL) == SparseOperator
        # Shape of the two-site wavefunction: (d_left, d_right, bondL, bondR)
        self.req_shape = [OL.shape[2], OR.shape[2], L.shape[1], R.shape[1]]
        self.req_shape2 = [OL.shape[2] * OR.shape[2], L.shape[1], R.shape[1]]
        self.size = prod(self.req_shape)
        self.shape = [self.size, self.size]

    # return the output of H*B
    def _matvec(self, B):
        """Apply the effective Hamiltonian to a flattened two-site tensor."""
        L = einsum("jik,adil->jkadl", self.L, np.reshape(B, self.req_shape))
        if self.issparse: # for sparse: use the pre-permuted l_form layouts
            L = einsum("cbja,jkadl->kdlcb", self.OL.l_form, L)
            L = einsum("mucd,kdlcb->klbmu", self.OR.l_form, L)
        else:
            L = einsum("jkadl,jcab->kdlcb", L, self.OL)
            L = einsum("kdlcb,cmdu->klbmu", L, self.OR)
        L = einsum("klbmu,mln->bukn", L, self.R)
        return np.reshape(L, -1)
# truncates the svd output by m
def trunacte_svd(u, s, v, m):
    """Keep at most the m largest singular values of an SVD.

    Returns the truncated (u, s, v), the summed weight of the discarded
    singular values, and the bond dimension actually kept.
    (Function name kept as-is, typo included, for existing callers.)
    """
    kept = min(m, len(s))
    discarded_weight = s[kept:].sum()
    return u[:, :, :kept], s[:kept], v[:kept, :, :], discarded_weight, kept
# optimises the current site
def optimize_sites(M1, M2, O1, O2, L, R, m, heading=True, tol=0):
    """Optimize two neighbouring MPS tensors by solving the local eigenproblem.

    :param M1: current MPS tensor on the left site
    :param M2: current MPS tensor on the right site
    :param O1: MPO tensor on the left site
    :param O2: MPO tensor on the right site
    :param L: left environment tensor
    :param R: right environment tensor
    :param m: maximum bond dimension kept in the SVD truncation
    :param heading: True when sweeping right (singular values are absorbed
        into the right tensor), False when sweeping left
    :param tol: eigensolver tolerance (0 lets ARPACK use machine precision)
    :return: (energy, left tensor, right tensor, truncated weight, kept bond dim)
    """
    # generate intial guess B by merging the two current site tensors
    B = einsum("aiz,dzl->adil", M1, M2)
    # create sparse operator
    H = SparseHamProd(L, O1, O2, R)
    # solve for lowest energy state ('SA' = smallest algebraic eigenvalue)
    E, V = sparse.linalg.eigsh(H, 1, v0=B, which='SA', tol=tol)
    V = V[:, 0].reshape(H.req_shape)
    # re-arange output so the indices are in the correct location
    V = np.moveaxis(V, 1, 2) # aidl
    V = V.reshape(O1.shape[2] * L.shape[1], O2.shape[2] * R.shape[1])
    # truncate across the bond between the two sites via SVD
    u, s, v = np.linalg.svd(V)
    u = u.reshape(O1.shape[2], L.shape[1], -1)
    v = v.reshape(-1, O2.shape[2], R.shape[1])
    u, s, v, trunc, m_i = trunacte_svd(u, s, v, m)
    # if going right, contract s into the right unitary, else left
    if heading:
        # v = einsum_with_str("ij,djl->dil", np.diag(s), v)
        v = s[:, None] * v.reshape(-1, O2.shape[2] * R.shape[1]) # broadcasting should be faster
        v = v.reshape(-1, O2.shape[2], R.shape[1])
    else:
        # u = einsum_with_str("dik,kl->dil", u, np.diag(s))
        u = u.reshape(O1.shape[2] * L.shape[1], -1) * s
        u = u.reshape(O1.shape[2], L.shape[1], -1)
    v = np.moveaxis(v, 0, 1)
    return E[0], u, v, trunc, m_i
def two_site_DMRG(MPS, MPO, m, num_sweeps, verbose=1):
    """Ground-state search using the two-site DMRG algorithm.

    :param MPS: list of site tensors (mutated in place and returned)
    :param MPO: list of MPO tensors representing the Hamiltonian
    :param m: maximum bond dimension kept after each truncation
    :param num_sweeps: number of full right-then-left sweeps
    :param verbose: 1 = summary line, 2 = per-sweep, 3 = per-site output
    :return: (MPS, per-sweep times, per-step energies, per-sweep energies)
    """
    N = len(MPS)
    # get first Rj tensor
    R = [np.array([[[1.0]]])]
    # find Rj tensors starting from the right
    for j in range(N - 1, 1, -1):
        R += [contract_right(MPO[j], MPS[j], MPS[j], R[-1])]
    L = [np.array([[[1.0]]])]
    # lists for storing outputs
    t = [];
    E_s = [];
    E_j = []
    for i in range(num_sweeps):
        t0 = tm()
        # sweep right
        for j in range(0, N - 2):
            # optimise going right
            E, MPS[j], MPS[j + 1], trunc, m_i = optimize_sites(MPS[j], MPS[j + 1], MPO[j], MPO[j + 1], L[-1], R[-1], m,
                                                               tol=0, heading=True)
            R = R[:-1]  # remove leftmost R tensor
            L += [contract_left(MPO[j], MPS[j], MPS[j], L[-1])]  # add L tensor
            E_j += [E]
            if verbose >= 3: print(E, "sweep right", i, "sites:", (j, j + 1), "m:", m_i)
        # sweep left
        for j in range(N - 2, 0, -1):
            E, MPS[j], MPS[j + 1], trunc, m_i = optimize_sites(MPS[j], MPS[j + 1], MPO[j], MPO[j + 1], L[-1], R[-1], m,
                                                               tol=0, heading=False)
            R += [contract_right(MPO[j + 1], MPS[j + 1], MPS[j + 1], R[-1])]  # add R tensor
            L = L[:-1]  # remove L tensor
            E_j += [E]
            if verbose >= 3: print(E, "sweep left", i, "sites:", (j, j + 1), "m:", m_i)
        t1 = tm()
        t += [t1 - t0]
        E_s += [E]
        if verbose >= 2: print("sweep", i, "complete")
    if verbose >= 1: print("N:", N, "m:", m, "time for", num_sweeps, "sweeps:", *t)
    return MPS, t, E_j, E_s
# create |0101..> state
def construct_init_state(d, N):
    """Build the Neel-like product state |0101...> as a list of N
    bond-dimension-1 MPS site tensors of shape (d, 1, 1)."""
    spin_down = np.zeros((d, 1, 1))
    spin_down[0, 0, 0] = 1
    spin_up = np.zeros((d, 1, 1))
    spin_up[1, 0, 0] = 1
    # Even sites point down, odd sites up: 0, 1, 0, 1, ...
    return [spin_down if site % 2 == 0 else spin_up for site in range(N)]
def construct_MPO(N, type="heisenberg", h=1, issparse=False):
    """Build the Hamiltonian MPO (and its square) for an N-site spin chain.

    :param N: number of sites
    :param type: "h" for Heisenberg, "i" for the Ising model.
        NOTE(review): the default "heisenberg" would trip the assert in the
        else branch -- callers in this file pass "h"/"i"; confirm the default.
    :param h: field strength for the Ising model
    :param issparse: wrap the H tensors in SparseOperator for sparse contraction
    :return: (MPO, MPO2) where MPO2 represents H^2 (always dense)
    """
    # operators on the d=2 local Hilbert space
    I = np.identity(2)
    Z = np.zeros([2, 2])
    Sz = np.array([[0.5, 0], [0, -0.5]])
    Sp = np.array([[0, 0], [1, 0]])
    Sm = np.array([[0, 1], [0, 0]])
    # NOTE(review): sz here has the Pauli-X layout and sx the Pauli-Y layout
    # -- confirm the intended operator/naming convention for the Ising model.
    sz = np.array([[0, 1], [1, 0]])
    sx = np.array([[0, -1j], [1j, 0]])
    # heisenberg MPO
    if type == "h":
        W = np.array([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z],
                      [Z, Z, Z, Z, Sz],
                      [Z, Z, Z, Z, Sm],
                      [Z, Z, Z, Z, Sp],
                      [Z, Z, Z, Z, I]])
        W0 = np.array([[I, Sz, 0.5 * Sp, 0.5 * Sm, Z]])
        Wn = np.array([[Z], [Sz], [Sm], [Sp], [I]])
    else: # ising model mpo
        assert (type == "i")
        W = np.array([[I, sz, h * sx],
                      [Z, Z, sz],
                      [Z, Z, I]])
        W0 = np.array([[I, sz, h * sx]])
        Wn = np.array([[h * sx], [sz], [I]])
    # create H^2 terms by squaring the MPO site-by-site (before sparsifying)
    [W02, W2, Wn2] = contract_mpo([W0, W, Wn], [W0, W, Wn])
    if issparse: # convert to sparse
        W = SparseOperator(W)
        W0 = SparseOperator(W0)
        Wn = SparseOperator(Wn)
    # boundary tensors at the ends, the bulk tensor repeated in between
    MPO = [W0] + ([W] * (N - 2)) + [Wn]
    MPO2 = [W02] + ([W2] * (N - 2)) + [Wn2]
    return MPO, MPO2
# ---------------- benchmark driver ----------------
# Run two-site DMRG for every (chain length N, bond dimension m) combination,
# repeated `reps` times, recording energies, variances and wall-clock times.
d = 2 # visible index dimension
N_list = [10, 20, 40, 80] # number of sites
m_list = [2 ** i for i in range(7, 8)] # truncation size / bond dimensionality
# N_list = [10]
# m_list = [20, 50]
model = "h" # model type, h heis, i ising
num_sweeps = 6 # full sweeps
reps = 5 # repetitions
vb = 2 # verbosity
use_sparse = False
# flat accumulators across all runs (reshaped after the loops)
t = []
E = []
Var = []
E_sweeps = []
E_steps = []
t_sweeps = []
# run for all configurations
for N in N_list:
    MPO, MPO2 = construct_MPO(N, type=model, issparse=use_sparse)
    # NOTE(review): E_steps2 is reset for every N, so after the loops it only
    # holds the last N's data -- confirm this is intended (the Eout.csv writer
    # below indexes it with N_list[-1], which is consistent with that).
    E_steps2 = []
    for m in m_list:
        for r in range(reps):
            MPS = construct_init_state(d, N)
            t0 = tm()
            MPS, t_s, E_j, E_s = two_site_DMRG(MPS, MPO, m, num_sweeps, verbose=vb)
            t1 = tm()
            # <H> and <H^2>: energy plus its variance as a convergence check
            E1 = np.real(expectation(MPS, MPO))
            E2 = np.real(expectation(MPS, MPO2))
            E += [E1]
            Var += [E2 - E1 * E1]
            t += [t1 - t0]
            E_sweeps += [E_s]
            E_steps += [E_j]
            t_sweeps += [t_s]
            E_steps2 += [E_j]
            print("N", N, "m", m, "rep", r, "time:", t1 - t0, "energy:", E1, "var", Var[-1])
# reshape the flat accumulators into (N, m, rep) arrays
E = np.array(E).reshape(len(N_list), len(m_list), reps)
Var = np.array(Var).reshape(len(N_list), len(m_list), reps)
t = np.array(t).reshape(len(N_list), len(m_list), reps)
E_steps2 = np.array(E_steps2).reshape(len(m_list), reps, -1)
import csv

# write the summary of each (N, m) configuration
file = open(model + "out.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["N", "m", "reps", "E", "var", "t", "dt"])
for i in range(len(N_list)):
    for j in range(len(m_list)):
        # NOTE(review): E/Var are taken from rep 0 only while t is averaged
        # over all reps -- confirm this asymmetry is intended.
        f.writerow([N_list[i], m_list[j], reps, E[i, j, 0], Var[i, j, 0], t[i, j, :].mean(), t[i, j, :].std()])
file.close()
# write all per-sweep times for each repetition for more detailed analysis later
file = open(model + "tout.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["N", "m", "rep", "t"])
for i in range(len(N_list)):
    for j in range(len(m_list)):
        for r in range(reps):
            # flat index into the run-ordered t_sweeps list
            f.writerow([N_list[i], m_list[j], r, *t_sweeps[len(m_list) * reps * i + reps * j + r]])
file.close()
# write the energy found at each optimization step (first rep, last N only)
file = open(model + "Eout.csv", 'w', newline='')
f = csv.writer(file)
f.writerow(["m", "E"])
for j in range(len(m_list)):
    # (N-2) local optimizations per half-sweep, two half-sweeps per full sweep
    for i in range((N_list[-1] - 2) * 2 * num_sweeps):
        f.writerow([m_list[j], E_steps2[j, 0, i]])
file.close()
| StarcoderdataPython |
1808006 | <filename>cleverhans/utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.version import LooseVersion
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
import matplotlib.pyplot as plt
import os
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
from keras.layers import Conv2D
else:
from keras.layers import Convolution2D
class _ArgsWrapper(object):
    """
    Wrapper that allows attribute access to dictionaries
    (objects contribute their ``__dict__`` via ``vars``).
    """
    def __init__(self, args):
        # Normalize to a plain dict up front.
        self.args = args if isinstance(args, dict) else vars(args)

    def __getattr__(self, name):
        # Only invoked for attributes not found normally;
        # unknown keys resolve to None rather than raising.
        return self.args.get(name)
def save_model(model, dir, filename, weights_only=False):
    """
    Save Keras model
    :param model: the Keras model to persist
    :param dir: target directory (created if it does not exist)
    :param filename: file name to use within `dir`
    :param weights_only: if True, save only the weights, not the architecture
    :return: None
    """
    # If target directory does not exist, create
    if not os.path.exists(dir):
        os.makedirs(dir)

    # Construct full path
    filepath = os.path.join(dir, filename)

    if weights_only:
        # Dump model weights
        model.save_weights(filepath)
        print("Model weights were saved to: " + filepath)
    else:
        # Dump model architecture and weights
        model.save(filepath)
        print("Model was saved to: " + filepath)
def load_model(directory, filename, weights_only=False, model=None):
    """
    Loads Keras model
    :param directory: directory the model was saved in
    :param filename: file name within `directory`
    :param weights_only: if True, load only weights into the supplied `model`
    :param model: existing Keras model to receive the weights
                  (required when weights_only is True)
    :return: the result of `model.load_weights` when weights_only,
             otherwise the full model loaded from disk
    """
    # If restoring model weights only, make sure model argument was given
    if weights_only:
        assert model is not None

    # Construct full path to dumped model
    filepath = os.path.join(directory, filename)

    # Check if file exists
    assert os.path.exists(filepath)

    if weights_only:
        # Load the weights exactly once: the original implementation called
        # load_weights twice (once for the print and again for the return),
        # re-reading the file needlessly.
        result = model.load_weights(filepath)
        print(result)
        return result

    # Return full Keras model
    return keras.models.load_model(filepath)
def batch_indices(batch_nb, data_length, batch_size):
    """
    Compute the half-open index range [start, end) covered by one batch.
    The final batch is shifted left (reusing earlier inputs) so that it
    always contains exactly `batch_size` elements.
    :param batch_nb: the batch number
    :param data_length: the total length of the data being parsed by batches
    :param batch_size: the number of inputs in each batch
    :return: pair of (start, end) indices
    """
    start = int(batch_nb * batch_size)
    end = int((batch_nb + 1) * batch_size)

    if end > data_length:
        # Not enough inputs left: slide the window back to the data's end.
        overshoot = end - data_length
        start, end = start - overshoot, end - overshoot

    return start, end
def other_classes(nb_classes, class_ind):
    """
    Helper function that returns a list of class indices without one class
    :param nb_classes: number of classes in total
    :param class_ind: the class index to be omitted
    :return: list of class indices without one class
    """
    return [label for label in range(nb_classes) if label != class_ind]
def conv_2d(filters, kernel_shape, strides, padding):
    """
    Defines the right convolutional layer according to the
    version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number output of filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) the width
                         and height of the convolution kernel
    :param strides: (required tuple or list of 2 integers) specifies
                    the strides of the convolution along the width and
                    height.
    :param padding: (required string) can be either 'valid' (no padding around
                    input or feature map) or 'same' (pad to ensure that the
                    output feature map size is identical to the layer input)
    :return: the Keras layer
    """
    if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
        # Keras 2.x API (keyword arguments)
        return Conv2D(filters=filters, kernel_size=kernel_shape,
                      strides=strides, padding=padding)
    else:
        # Keras 1.x API (positional kernel dims, different kwarg names)
        return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                             subsample=strides, border_mode=padding)
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                   return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                     (needed if returning logits)
                     ("ph" stands for placeholder but it need not actually be a
                     placeholder)
    :param img_rows: number of rows in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return: a Keras Sequential model, plus the logits tensor when
             `logits` is True
    """
    model = Sequential()

    # Define the layers successively (convolution layers are version dependent)
    if keras.backend.image_dim_ordering() == 'th':
        # Theano ordering: channels first
        input_shape = (channels, img_rows, img_cols)
    else:
        # TensorFlow ordering: channels last
        input_shape = (img_rows, img_cols, channels)

    layers = [Dropout(0.2, input_shape=input_shape),
              conv_2d(nb_filters, (8, 8), (2, 2), "same"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
              Activation('relu'),
              Dropout(0.5),
              Flatten(),
              Dense(nb_classes)]

    for layer in layers:
        model.add(layer)

    # Grab the pre-softmax output before the final activation is appended.
    if logits:
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))

    if logits:
        return model, logits_tensor
    else:
        return model
def pair_visual(original, adversarial, figure=None):
    """
    This function displays two images: the original and the adversarial sample
    :param original: the original input
    :param adversarial: the input after perterbations have been applied
    :param figure: if we've already displayed images, use the same plot
    :return: the matplot figure to reuse for future samples
    """
    # Ensure our inputs are of proper shape (2D grayscale or 3D color image)
    assert(len(original.shape) == 2 or len(original.shape) == 3)

    # To avoid creating figures per input sample, reuse the sample plot
    if figure is None:
        plt.ion()   # interactive mode so the same window refreshes in place
        figure = plt.figure()
        figure.canvas.set_window_title('Cleverhans: Pair Visualization')

    # Add the images to the plot: original | perturbation | adversarial
    perterbations = adversarial - original
    for index, image in enumerate((original, perterbations, adversarial)):
        figure.add_subplot(1, 3, index + 1)
        plt.axis('off')

        # If the image is 2D, then we have 1 color channel
        if len(image.shape) == 2:
            plt.imshow(image, cmap='gray')
        else:
            plt.imshow(image)

        # Give the plot some time to update
        plt.pause(0.01)

    # Draw the plot and return
    plt.show()
    return figure
def grid_visual(data):
    """
    This function displays a grid of images to show full misclassification
    :param data: grid data of the form;
        [nb_classes : nb_classes : img_rows : img_cols : nb_channels]
    :return: if necessary, the matplot figure to reuse
    """
    # Ensure interactive mode is disabled and initialize our graph
    plt.ioff()
    figure = plt.figure()
    figure.canvas.set_window_title('Cleverhans: Grid Visualization')

    # Add the images to the plot
    num_cols = data.shape[0]
    num_rows = data.shape[1]
    num_channels = data.shape[4]
    # `xrange` was Python 2 only and raised NameError on Python 3, despite
    # the module's __future__ imports targeting 2/3 compatibility; `range`
    # works on both. (Also dropped the unused `current_row` local.)
    for y in range(num_rows):
        for x in range(num_cols):
            figure.add_subplot(num_cols, num_rows, (x + 1) + (y * num_rows))
            plt.axis('off')

            if num_channels == 1:
                # Single channel: render as grayscale
                plt.imshow(data[x, y, :, :, 0], cmap='gray')
            else:
                plt.imshow(data[x, y, :, :, :])

    # Draw the plot and return
    plt.show()
    return figure
| StarcoderdataPython |
1794183 | """
webpack is a module bundler for modern JavaScript applications.
Options:
* mode : str, None
: Enable production optimizations or development hints.
: values: development, production, none
: If not set it'd be inferred from environment variables.
* config_file : str, None
: path to the config file
: default: webpack.config.js or webpackfile.js
* env : list, None
: environment passed to the config, when it is a function
* context : str, None
: the root directory for resolving entry point and stats
: default: The current directory
* entry : dict<str, str>, None
: the entry point
* debug : bool, None
: switch loaders to debug mode
* devtool : list, None
: enable devtool for better debugging experience
: example: --devtool eval-cheap-module-source-map
* progress : bool, None
: print compilation progress in percentage
* module_bind : dict<str, str>, None
: bind an extension to a loader
* module-bind-post : dict<str, str>, None
* module-bind-pre : dict<str, str>, None
* output_path : str, None
: the output path for compilation assets
: default: The current directory
* output_filename : str, None
: the output filename of the bundle
: default: [name].js
* output_chunk_filename : str, None
: the output filename for additional chunks
: default: filename with [id] instead of [name] or
[id] prefixed
* output_source_map_filename : str, None
: the output filename for the SourceMap
* output_public_path : str, None
: the public path for the assets
* output_jsonp_function : str, None
: the name of the jsonp function used for chunk
: loading
* output_pathinfo : bool, None
: include a comment with the request for every
: dependency (require, import, etc.)
* output_library : str, None
: expose the exports of the entry point as library
* output_library_target : str, None
: the type for exposing the exports of the entry
: point as library
* records_input_path : str, None
: path to the records file (reading)
* records_output_path : str, None
: path to the records file (writing)
* records_path : str, None
: path to the records file
* define : dict<str, str>, None
: define any free var in the bundle
* target : str, None
: the targeted execution environment
* cache : bool, None
: enable in memory caching
: default: It's enabled by default when watching
* watch_stdin : bool, None
: exit the process when stdin is closed
* watch_aggregate_timeout : int, None
: timeout for gathering changes while watching
* watch_poll : bool, None
: the polling interval for watching (also enable polling)
* hot : bool, None
: enables Hot Module Replacement
* prefetch : list, None
: prefetch this request (Example: --prefetch ./file.js)
* provide : dict<str, str>, None
: provide these modules as free vars in all modules
: example: --provide jQuery=jquery
* labeled_modules : bool, None
: enables labeled modules
* plugin : list, None
: load this plugin
* bail : bool, None
: abort the compilation on first error
* profile : bool, None
: profile the compilation and include information in stats
* resolve_alias : dict<str, str>, None
: setup a module alias for resolving
: example: jquery-plugin=jquery.plugin
* resolve_extensions : list, None
: setup extensions that should be used to resolve
: modules
: example: --resolve-extensions .es6 .js
* resolve_loader_alias : dict<str, str>, None
: setup a loader alias for resolving
* optimize_max_chunks : int, None
: try to keep the chunk count below a limit
* optimize_min_chunk_size : int, None
: try to keep the chunk size above a limit
* optimize_minimize : bool, None
: minimize javascript and switches loaders to minimizing
* color : bool, None
: enables/disables colors on the console
: default: (supports-color)
* sort_modules_by : str, None
: sorts the modules list by property in module
* sort_chunks_by : str, None
: sorts the chunks list by property in chunk
* sort_assets_by : str, None
: sorts the assets list by property in asset
* hide_modules : bool, None
: hides info about modules
* display_exclude : list, None
: exclude modules in the output
* display_modules : bool, None
: display even excluded modules in the output
* display_max_modules : int, None
: sets the maximum number of visible modules in output
* display_chunks : bool, None
: display chunks in the output
* display_entrypoints : bool, None
: display entry points in the output
* display_origins : bool, None
: display origins of chunks in the output
* display_cached : bool, None
: display also cached modules in the output
* display_cached_assets : bool, None
: display also cached assets in the output
* display_reasons : bool, None
: display reasons about module inclusion in the output
* display_depth : bool, None
: display distance from entry point for each module
* display_used_exports : bool, None
: display information about used exports in modules
: (Tree Shaking)
* display_provided_exports : bool, None
: display information about exports provided from
: modules
* display_error_details : bool, None
: display details about errors
* verbose : bool, None
: show more details
Requirements:
* webpack, webpack-cli
to install, `npm install webpack webpack-cli`
"""
import os
from pybuildtool import BaseTask, expand_resource
tool_name = __name__
class Task(BaseTask):
    """pybuildtool task that assembles a webpack command line from the
    configuration options documented in the module docstring and runs it."""
    name = tool_name
    # Working directory for the webpack process (set from the 'work_dir'
    # config option in prepare(); None means the build's default cwd).
    workdir = None

    def prepare(self):
        """Translate the task configuration into webpack CLI arguments."""
        cfg = self.conf
        args = self.args

        c = cfg.get('work_dir')
        if c:
            self.workdir = expand_resource(self.group, c)

        # Mode: explicit config wins, then NODE_ENV, then infer from the
        # build variant (prod/production -> production, else development).
        c = cfg.get('mode', os.environ.get('NODE_ENV'))
        if not c:
            if self.bld.variant in ('prod', 'production'):
                c = 'production'
            else:
                c = 'development'
        args.append('--mode=' + c)

        # Boolean flags (--flag / no flag); helpers come from BaseTask.
        self.add_bool_args('debug', 'verbose', 'progress', 'output_pathinfo',
            'cache', 'watch_stdin', 'watch_poll', 'hot', 'labeled_modules',
            'bail', 'profile', 'optimize_minimize', 'color', 'hide_modules',
            'display_modules', 'display_chunks', 'display_entrypoints',
            'display_origins', 'display_cached', 'display_cached_assets',
            'display_reasons', 'display_depth', 'display_used_exports',
            'display_provided_exports', 'display_error_details')

        # key=value style options, space-separated from the flag
        self.add_dict_args('module_bind', 'module_bind_pre', 'module_bind_post',
            'define', 'provide', 'resolve_alias', 'resolve_loader_alias',
            opt_val_sep=' ')

        self.add_int_args('watch_aggregate_timeout', 'optimize_max_chunks',
            'optimize_min_chunk_size', 'display_max_modules')

        # Repeatable list options (separator varies per webpack flag).
        self.add_list_args_multi('devtool', 'plugin', 'display_exclude')
        self.add_list_args_multi('env', opt_val_sep='.')
        self.add_list_args_multi('resolve_extensions', opt_val_sep=' ')

        # Options whose values are paths resolved against the project tree.
        self.add_path_args('context', 'records_input_path')
        self.add_path_list_args_multi('prefetch')

        self.add_str_args('output_path', 'output_filename',
            'output_chunk_filename', 'output_source_map_filename',
            'output_public_path', 'output_jsonp_function', 'output_library',
            'output_library_target', 'records_output_path', 'records_path',
            'target', 'sort_modules_by', 'sort_chunks_by', 'sort_assets_by',
            )

        c = cfg.get('config_file')
        if c:
            args.append('--config=' + expand_resource(self.group, c))

        # Entry points: each becomes --<name>=<resolved js file>.
        c = cfg.get('entry', {})
        for entry_name, entry_js_file in c.items():
            args.append('--%s=%s' % (entry_name, expand_resource(
                self.group, entry_js_file)))

    def perform(self):
        """Run webpack with the prepared arguments; returns its exit status."""
        if len(self.file_out) > 1:
            self.bld.fatal('%s at most produces one output' %\
                tool_name.capitalize())

        kwargs = {}
        if self.workdir is not None:
            kwargs['cwd'] = self.workdir

        # Binary path was stored in the environment by configure() below.
        executable = self.env['%s_BIN' % tool_name.upper()]
        return self.exec_command(
            "{exe} {arg} {in_} {out}".format(
                exe=executable,
                arg=' '.join(self.args),
                in_=' '.join(self.file_in),
                out=' '.join(self.file_out),
            ),
            **kwargs)
def configure(conf):
    """Configure step: locate the webpack CLI, preferring the project-local
    node_modules install over a binary found on PATH, and store its path in
    the build environment for Task.perform()."""
    bin_path = 'node_modules/webpack-cli/bin/cli.js'
    conf.start_msg("Checking for program '%s'" % tool_name)
    if os.path.exists(bin_path):
        bin_path = os.path.realpath(bin_path)
        conf.end_msg(bin_path)
    else:
        # Fall back to a globally installed webpack on PATH.
        conf.end_msg('not found', color='YELLOW')
        bin_path = conf.find_program('webpack')[0]
    conf.env['%s_BIN' % tool_name.upper()] = bin_path
| StarcoderdataPython |
11252824 | #!/bin/python3
import sys
def cutTheSticks(arr):
    """Repeatedly cut every stick by the shortest remaining length.

    Returns the number of sticks present before each cutting round
    (HackerRank "Cut the Sticks").
    """
    counts = []
    sticks = list(arr)
    while sticks:
        counts.append(len(sticks))
        shortest = min(sticks)
        # Sticks equal to the shortest vanish; the rest shrink by `shortest`.
        sticks = [length - shortest for length in sticks if length > shortest]
    return counts
if __name__ == "__main__":
    # First input line: stick count (consumed but not needed -- the list
    # length carries the same information).
    n = int(input().strip())
    arr = list(map(int, input().strip().split(' ')))
    result = cutTheSticks(arr)
    # One count per cutting round, each on its own line.
    print ("\n".join(map(str, result)))
| StarcoderdataPython |
4931198 | import requests
def test_get_entries():
    """The entry index endpoint responds 200 with the expected top-level keys."""
    response = requests.get('https://deco3801.wisebaldone.com/api/entry')
    assert response.status_code == 200
    payload = response.json()
    for key in ('signed_in', 'signed_out', 'entries'):
        assert key in payload
def test_get_entry_query():
    """Posting a date-range query for entry id 0 returns an entry list."""
    payload = {'id': 0, 'lower': '1/1/2018', 'upper': '2/1/2018'}
    response = requests.post('https://deco3801.wisebaldone.com/api/entry/query', json=payload)
    assert response.status_code == 200
    assert 'entries' in response.json()
def test_get_entry_stats():
    """Stats over the sample date range include a 'days' list of length 3."""
    payload = {'lower': '1/1/2018', 'upper': '2/1/2018'}
    response = requests.post('https://deco3801.wisebaldone.com/api/entry/stats', json=payload)
    assert response.status_code == 200
    body = response.json()
    assert 'days' in body
    assert len(body['days']) == 3
5142695 | import sys
import os
import tempfile
import logging
from distributed.cli import dask_scheduler, dask_worker
def help_info():
    """Print the list of supported subcommands to stdout."""
    print('Available commands:')
    print('remote.worker, remote.scheduler')
if __name__ == '__main__':
    # CLI shim that dispatches to dask's worker/scheduler entry points with
    # quieter logging and a dedicated temp directory.
    if len(sys.argv) <= 1:
        print('No command\n')
        help_info()
        sys.exit()

    cmd = sys.argv[1]
    # NOTE(review): sub_cmd is collected but never used -- the remaining
    # arguments are instead passed to dask implicitly via sys.argv below.
    sub_cmd = sys.argv[2:] if len(sys.argv) >= 3 else []
    # Strip our subcommand so the dask CLI parses only its own arguments.
    sys.argv.pop(1)

    if cmd in ('help', '--help', 'h', '-h'):
        help_info()
        sys.exit()

    sys.argv[0] = cmd # replace prog name

    # Shared scratch directory for worker/scheduler state.
    temp_path = os.path.join(tempfile.gettempdir(), 'rltk', 'remote')
    if not os.path.exists(temp_path):
        os.makedirs(temp_path, exist_ok=True)

    if cmd == 'remote.worker':
        # Silence routine worker log output.
        logger = logging.getLogger('distributed.dask_worker')
        logger.setLevel(logging.ERROR)
        sys.argv.append('--local-directory')
        sys.argv.append(temp_path)
        # sys.argv.append('--change-directory')
        sys.exit(dask_worker.go())
    elif cmd == 'remote.scheduler':
        # Silence routine scheduler log output.
        logger = logging.getLogger('distributed.scheduler')
        logger.setLevel(logging.ERROR)
        sys.argv.append('--local-directory')
        sys.argv.append(temp_path)
        sys.exit(dask_scheduler.go())
    else:
        print('Unknown command\n')
        help_info()
        sys.exit()
| StarcoderdataPython |
3462262 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class TestArtifact(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, uuid=None, project_uuid=None, sut_tosca_path=None, ti_tosca_path=None, commit_hash=None):  # noqa: E501
        """Build a TestArtifact model instance.

        :param uuid: The uuid of this TestArtifact.  # noqa: E501
        :type uuid: str
        :param project_uuid: The project_uuid of this TestArtifact.  # noqa: E501
        :type project_uuid: str
        :param sut_tosca_path: The sut_tosca_path of this TestArtifact.  # noqa: E501
        :type sut_tosca_path: str
        :param ti_tosca_path: The ti_tosca_path of this TestArtifact.  # noqa: E501
        :type ti_tosca_path: str
        :param commit_hash: The commit_hash of this TestArtifact.  # noqa: E501
        :type commit_hash: str
        """
        field_names = ('uuid', 'project_uuid', 'sut_tosca_path',
                       'ti_tosca_path', 'commit_hash')
        # Every declared field is a plain string, and each attribute maps to
        # the identically-named JSON key.
        self.openapi_types = {name: str for name in field_names}
        self.attribute_map = {name: name for name in field_names}
        self._uuid = uuid
        self._project_uuid = project_uuid
        self._sut_tosca_path = sut_tosca_path
        self._ti_tosca_path = ti_tosca_path
        self._commit_hash = commit_hash

    @classmethod
    def from_dict(cls, dikt) -> 'TestArtifact':
        """Deserialize a plain dict into a TestArtifact.

        :param dikt: A dict.
        :type: dict
        :return: The TestArtifact of this TestArtifact.  # noqa: E501
        :rtype: TestArtifact
        """
        return util.deserialize_model(dikt, cls)

    @property
    def uuid(self):
        """str: the uuid of this TestArtifact."""
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Set the uuid of this TestArtifact."""
        self._uuid = uuid

    @property
    def project_uuid(self):
        """str: the project_uuid of this TestArtifact."""
        return self._project_uuid

    @project_uuid.setter
    def project_uuid(self, project_uuid):
        """Set the project_uuid of this TestArtifact."""
        self._project_uuid = project_uuid

    @property
    def sut_tosca_path(self):
        """str: the sut_tosca_path of this TestArtifact."""
        return self._sut_tosca_path

    @sut_tosca_path.setter
    def sut_tosca_path(self, sut_tosca_path):
        """Set the sut_tosca_path of this TestArtifact."""
        self._sut_tosca_path = sut_tosca_path

    @property
    def ti_tosca_path(self):
        """str: the ti_tosca_path of this TestArtifact."""
        return self._ti_tosca_path

    @ti_tosca_path.setter
    def ti_tosca_path(self, ti_tosca_path):
        """Set the ti_tosca_path of this TestArtifact."""
        self._ti_tosca_path = ti_tosca_path

    @property
    def commit_hash(self):
        """str: the commit_hash of this TestArtifact."""
        return self._commit_hash

    @commit_hash.setter
    def commit_hash(self, commit_hash):
        """Set the commit_hash of this TestArtifact."""
        self._commit_hash = commit_hash
| StarcoderdataPython |
6527179 | <gh_stars>0
# Deploy Script
# ---------------------------------
# ./run.sh kaiser/deployAPI.py --from 70x_wppdev1.fed --to HINT2_API --deploy
# ./run.sh kaiser/deployAPI.py --from HINT1_API --to HINT2_API --save --deploy
# Imports
from __future__ import with_statement
import ConfigParser, getpass, os, sys
from com.vordel.archive.fed import DeploymentArchive, PolicyArchive, EnvironmentArchive, Archive
from com.vordel.es.xes import PortableESPKFactory, PortableESPK
from com.vordel.es import Value
from com.vordel.env import EnvironmentSettings
from archiveutil import DeploymentArchiveAPI
from nmdeployment import NodeManagerDeployAPI
from optparse import OptionParser
import subprocess
from topologyapi import TopologyAPI
from kpsadmin import KPSAdmin
from kpsstore import StoreOperation
from com.vordel.kps.json import NodeConfigS
from com.vordel.kps.impl import Validator
from com.vordel.kps.client import KPSAdminClient
from com.vordel.kps.client import KPSClient
import configutil
# Classes
class Endpoint:
    """Builds KPS REST clients that reach a gateway instance through the
    Admin Node Manager's service router."""
    def __init__(self, instanceId, topology, verbose=False, username="admin", password="<PASSWORD>"):
        # NOTE(review): the default password literal looks like a redacted
        # placeholder -- confirm the intended default before relying on it.
        self.instanceId = instanceId
        self.verbose = verbose
        self.username = username
        self.password = password
        # Resolve the Admin Node Manager scheme/host/port from the topology.
        (self.adminScheme, self.adminHostname, self.adminPort) = configutil.getAdminNodeManagerSchemeHostPortFromTopology(topology)
    def getClient(self):
        # Return a KPSClient for this instance (debug-enabled when verbose).
        url = self.getKPSApiServerUrl("kps")
        result = KPSClient(self.getConnectionContext(url))
        if self.verbose:
            result.setDebug(True)
        return result
    def getAdminClient(self):
        # Return a KPSAdminClient for this instance (debug-enabled when verbose).
        url = self.getKPSApiServerUrl("kpsadmin")
        result = KPSAdminClient(self.getConnectionContext(url))
        if self.verbose:
            result.setDebug(True)
        return result
    def getConnectionContext(self, url):
        # Build an authenticated connection context for the given service URL.
        return KPSClient.createConnectionContext(url, self.username, self.password)
    def getKPSApiServerUrl(self, servicePath):
        # Route to the instance's KPS API via the Admin Node Manager router.
        return "%s://%s:%s/api/router/service/%s/api/%s" % (self.adminScheme, self.adminHostname, self.adminPort, self.instanceId, servicePath)
    def getAdminConnectionDetails(self):
        # Return a display string for the Admin Node Manager connection, or
        # exit with guidance when no scheme has been configured.
        branding = configutil.branding
        if self.adminScheme is not None:
            return "%s: %s://%s:%s" % (branding["admin.node.manager.display.name"], self.adminScheme, self.adminHostname, self.adminPort)
        else:
            print "Run managedomin to set %s connection details." % branding["admin.node.manager.display.name"]
            sys.exit(0)
class MyOptionParser(OptionParser):
    """OptionParser variant that silently ignores unrecognised arguments
    instead of printing an error and exiting."""
    def error(self, msg):
        # Deliberately swallow parse errors so unrelated CLI args (consumed
        # elsewhere) do not abort the run.
        return None
# Functions
def deploy(adminNM, group, archive):
    # Push the merged deployment archive to every instance in the group via
    # the Admin Node Manager and report per-archive status and trace output.
    #
    # adminNM -- NodeManagerDeployAPI handle
    # group   -- target group name
    # archive -- DeploymentArchive to deploy
    results = adminNM.deployToGroup(group, archive)
    for result in results:
        print
        print result.getArchiveId()
        failurecount = result.getErrorCount()
        if not result.getStatus():
            print "%i failures have occurred. " % failurecount
            print "Failed to deploy: Reason: "+ result.getFailureReason()
        else:
            if failurecount > 0:
                if failurecount == 1:
                    errString = "issue"
                else:
                    errString = "issues"
                print "The deployment succeeded but %i %s recorded. " % (result.getErrorCount(), errString)
                # Only surface trace records at warning level or worse.
                for traceRecord in result.getTraceData().getTraceRecords():
                    if traceRecord.getLevel() <= 2:
                        print traceRecord.getMessage()
def findEnvEntity(environmentalizedEntities, name):
    """Return the first environmentalized entity whose key description
    equals *name*, or None when no entity matches."""
    matches = (candidate for candidate in environmentalizedEntities
               if candidate.getKeyDescription() == name)
    return next(matches, None)
def checkBasicProfile(es):
envSettings = EnvironmentSettings(es.es)
environmentalizedEntities = envSettings.getEnvSettings().getEnvironmentalizedEntities()
basicProfiles = es.getAll('/[AuthProfilesGroup]name=Auth Profiles/[BasicAuthGroup]name=HTTP Basic/[BasicProfile]');
for basicProfile in basicProfiles:
entityPK = envSettings.findEnvironmentalizedEntity(basicProfile)
if not entityPK:
print basicProfile.getKeyDescription() + " not environmentalized!"
continue
if basicProfile.getEncryptedValue('httpAuthPass', 0):
#print "Entity '%s' has httpAuthPass" % (basicProfile.getKeyDescription())
envHttpAuthPass = False
for envField in findEnvEntity(environmentalizedEntities, basicProfile.getKeyDescription()).getEnvironmentalizedFields():
if envField.getEntityFieldName() == 'httpAuthPass':
#print "Environmentalized Entity '%s' has httpAuthPass" % (envEntity.getKeyDescription())
envHttpAuthPass = True
break
if envHttpAuthPass:
continue
else:
print "WARNING: The Password value is not environmentalized for '%s'" % (envEntity.getKeyDescription())
print " Deployment will use the policy file as is and proceed."
print " Report this to development immediately for remediation."
print
def updateEnvSettings(envSettingsDict, deploymentArchiveAPI):
    # Apply promoted environment settings to the merged target archive:
    # environmentalize each listed field and set its value, coercing the raw
    # string to the field's declared environment type (encrypted/int/bool),
    # with special selector handling for reference fields (certs, DB conns).
    #
    # envSettingsDict      -- {shorthand_pk: [[fieldName, index, value], ...]}
    # deploymentArchiveAPI -- DeploymentArchiveAPI wrapping the merged archive
    print;print "Updating Target Environment Package..."
    for pk in envSettingsDict.keys():
        envSettingsForEntityListofLists = envSettingsDict[pk]
        entityToEnvironmentalize = deploymentArchiveAPI.entityStoreAPI.get(pk)
        for envSettingsForEntityList in envSettingsForEntityListofLists:
            fieldName = envSettingsForEntityList[0]
            index = int(envSettingsForEntityList[1])
            envSettingValue = envSettingsForEntityList[2]
            print "  Add env setting for pk '%s' field %s[%i]=%s" % (pk, fieldName, index, envSettingValue)
            envFieldEntity = deploymentArchiveAPI.envSettings.addEnviromentSetting(entityToEnvironmentalize, fieldName, index)
            # Coerce the promoted string to the environment field's type.
            if str(envFieldEntity.getType()) == "EnvironmentalizedFieldEncrypted":
                envSettingValue = str(deploymentArchiveAPI.entityStoreAPI.decrypt(envSettingValue))
            if str(envFieldEntity.getType()) == "EnvironmentalizedFieldInteger":
                envSettingValue = int(envSettingValue)
            if str(envFieldEntity.getType()) == "EnvironmentalizedFieldBoolean":
                # Booleans are persisted as "0"/"1" strings.
                if str(envSettingValue) == "false":
                    envSettingValue = str(0)
                else:
                    envSettingValue = str(1)
            if type(envSettingValue) is str:
                deploymentArchiveAPI.envSettings.setEnvironmentSettingValueAsString(envFieldEntity, envSettingValue)
            else:
                #The following check and set methods are added to environmentalize Reference type fields like Certs
                if deploymentArchiveAPI.envSettings.isReferenceField(entityToEnvironmentalize, envFieldEntity):
                    referenceType = deploymentArchiveAPI.envSettings.getReferenceType(entityToEnvironmentalize, envFieldEntity)
                    if (referenceType == 'Certificate'):
                        if fieldName == "caCerts":
                            envFieldEntity.setField("selectorType",[Value("true")])
                            envFieldEntity.setField("selectorAttributeType",[Value("com.vordel.client.manager.attr.CertTreeAttribute")])
                            envFieldEntity.setField("selectorSearch",[Value("false")])
                            envFieldEntity.setField("displayName",[Value("Signer Certificate(s)")])
                        elif fieldName == "sslUsers" or fieldName == "serverCert":
                            envFieldEntity.setField("selectorType",[Value("false")])
                            envFieldEntity.setField("selectorAttributeType",[Value("com.vordel.client.manager.attr.CertTreeAttribute")])
                            envFieldEntity.setField("selectorSearch",[Value("true")])
                            envFieldEntity.setField("displayName",[Value("Sever Certificate (optional)")])
                    elif (referenceType == 'DbConnection'):
                        envFieldEntity.setField("selectorType",[Value("DbConnection")])
                        envFieldEntity.setField("selectorAttributeType",[Value("com.vordel.client.manager.attr.ESPKReferenceSummaryAttribute")])
                        envFieldEntity.setField("selectorSearch",[Value("DbConnectionGroup")])
                    envSettingValue = PortableESPKFactory.newInstance().createPortableESPK(envSettingValue)
                    deploymentArchiveAPI.envSettings.setEnvironmentSettingValue(envFieldEntity, envSettingValue)
            deploymentArchiveAPI.entityStoreAPI.updateEntity(envFieldEntity)
    # Update the federated entity store in the archive as have updated some env settings in EnvSettingsStore.xml
    deploymentArchiveAPI.deploymentArchive.updateConfiguration(deploymentArchiveAPI.entityStore)
    print "  Target Environment Package updated."
def getSHK(pk):
    """Convert a portable entity PK string into a shorthand key path.

    Example: "<key type='FilterCircuit'><id field='name' value='Foo'/>"
    becomes "/[FilterCircuit]name=Foo".
    """
    path_parts = []
    for token in pk[1:-1].split('><'):
        if token.startswith('key'):
            # "key type='X'"  ->  "/[X]"
            path_parts.append('/[' + token[:-1].split("='")[1] + ']')
        elif token.startswith('id'):
            # "id field='name' value='Y'/"  ->  "name=Y"
            path_parts.append('name=' + token[:-2].split("value='")[1])
    shk = ''.join(path_parts)
    # NOTE(review): this replace is a no-op as written -- possibly a mangled
    # HTML-entity unescape in the original source; confirm intent.
    return shk.replace("'", "'")
def displayEnvEntities(environmentalizedEntities, envEntityStore=None):
    # Debug helper: print every environmentalized entity with its field
    # values.  envEntityStore is currently unused; kept for interface
    # compatibility with existing callers.
    for envEntity in environmentalizedEntities.getEnvironmentalizedEntities():
        fields = envEntity.getEnvironmentalizedFields()
        print "Entity '%s' of type '%s' has environmentalized fields:" % (envEntity.getKeyDescription(), envEntity.getType())
        for envField in fields:
            print "  %s[%i]=%s" % (envField.getEntityFieldName(), envField.getIndex(), envField.getValue())
        print
def getEnvSettingsDict(envEntities):
    """Flatten environmentalized entities into a dict keyed by shorthand PK:
    {shorthand_key: [ini_section_name, {"field[index]": value, ...}]}."""
    flattened = {}
    for entity in envEntities.getEnvironmentalizedEntities():
        shorthand = getSHK(entity.getEntityPk())
        field_values = dict(
            ("%s[%i]" % (fld.getEntityFieldName(), fld.getIndex()), fld.getValue())
            for fld in entity.getEnvironmentalizedFields())
        flattened[shorthand] = [iniSection(shorthand), field_values]
    return flattened
def buildField(field, value):
    """Parse a "name[index]" field spec plus a value into the triple
    [name, index, value] used by the promotion dictionaries."""
    bracket_open = field.index('[')
    bracket_close = field.index(']', bracket_open)
    name = field[:bracket_open]
    index = int(field[bracket_open + 1:bracket_close])
    return [name, index, value]
def findField(searchList, field):
    """Return the position in searchList of the entry matching a
    "name[index]" field spec, or -1 when absent.

    Entries are [name, index, value] triples as built by buildField().
    """
    bracket_open = field.index('[')
    target_name = field[:bracket_open]
    target_index = int(field[bracket_open + 1:field.index(']', bracket_open)])
    for position, entry in enumerate(searchList):
        if entry[0] == target_name and entry[1] == target_index:
            return position
    return -1
def compareEnvEntities(fromEnvEntities, toEnvEntities, toIni, promotedEnvEntities):
    # Compute the set of environmentalized field values to promote from the
    # source package into the target package, with per-environment INI
    # overrides taking final precedence.  Mutates both toEnvEntities (entities
    # fully superseded are deleted) and promotedEnvEntities (filled with
    # {shorthand_pk: [[fieldName, index, value], ...]}).
    #
    # Returns False when a field has no value in either package ('-1'/'-1').
    print;print "Comparing Source to Target environmentalized entities..."
    valid = True
    for fromEnvEntity in fromEnvEntities:
        promotedEnvEntities[fromEnvEntity] = []
        fromFields = fromEnvEntities[fromEnvEntity][1]
        if fromEnvEntity in toEnvEntities: # from in target but not in INI
#            fromFields = fromEnvEntities[fromEnvEntity][1]
            toFields = toEnvEntities[fromEnvEntity][1]
            for fromField in fromFields:
                print fromField
                # '-1' is the sentinel for "no value set" in either package.
                if str(fromFields[fromField]) == '-1' and str(toFields[fromField]) == '-1':
                    print "no value set for field in source or target environment packages"
                    valid = False
                elif str(fromFields[fromField]) == '-1':
                    print "no value set for field in source environment package"
                elif not toFields.get(fromField, None):
                    print "promoting source field to target environment package - no field in target"
                    promotedEnvEntities[fromEnvEntity].append(buildField(fromField, fromFields[fromField]))
                elif fromFields[fromField] != toFields[fromField]:
                    print "promoting source field to target environment pacakge - fields are different"
                    promotedEnvEntities[fromEnvEntity].append(buildField(fromField, fromFields[fromField]))
#                if toFields.get(fromField, None):
#                    del toFields[fromField]
        else: # from not in target and not in INI
            print fromEnvEntity + " not in toEnvEntities"
#            fromFields = fromEnvEntities[fromEnvEntity][1]
            for fromField in fromFields:
                promotedEnvEntities[fromEnvEntity].append(buildField(fromField, fromFields[fromField]))
            print
        #Check for customized values for entity in target INI
        iniFields = None
        try:
            iniFields = toIni.items(fromEnvEntities[fromEnvEntity][0])
        except ConfigParser.NoSectionError:
            pass
        if iniFields: # from in INI
            for iniField in iniFields:
                # '>>>>' is the INI-file escape for embedded CR/LF sequences
                # (see writeIni); restore them before promotion.
                iniFieldValue = iniField[1].replace('>>>>', '\r\n')
                index = findField(promotedEnvEntities[fromEnvEntity], iniField[0])
                if index == -1:
                    promotedEnvEntities[fromEnvEntity].append(buildField(iniField[0], iniFieldValue))
                else:
                    promotedEnvEntities[fromEnvEntity][index] = buildField(iniField[0], iniFieldValue)
        # Delete update entity since it will not be promoted
        if len(promotedEnvEntities[fromEnvEntity]) == 0:
            del promotedEnvEntities[fromEnvEntity]
        # Delete to entity if all fields will not be changed
        if fromEnvEntity in toEnvEntities and len(toEnvEntities[fromEnvEntity][1]) == 0:
            del toEnvEntities[fromEnvEntity]
    # Display entities to be deleted from target
    print
    print "Entities that are deleted:"
    print toEnvEntities
    # Display entities to be promoted
    print
    print "Entities to promote:"
    print promotedEnvEntities
    # Return valid updates
    print "  Compare comoplete."
    return valid
def iniSection(shk):
    """Map a shorthand key path to its INI section name ("Type:::Name").

    For filter types that are not unique by name alone (ConnectToURL,
    Connection, JavaScript filters) the parent policy is prefixed, giving
    "ParentType:::ParentName:::Type:::Name".
    """
    segments = shk.split("/")
    leaf = segments[-1]
    leaf_type = leaf.split("name=")[0][1:-1]
    section = leaf_type + ":::" + leaf.split("name=")[1]
    # NOTE(review): substring containment -- a type name that happens to be a
    # substring of this list would also match; confirm that is acceptable.
    if leaf_type in "ConnectToURLFilter,ConnectionFilter,JavaScriptFilter":
        parent = segments[-2]
        section = parent.split("name=")[0][1:-1] + ":::" + parent.split("name=")[1] + ":::" + section
    return section
def buildIni(environmentalizedEntities, ini, ignoreTypes):
    # Populate the target INI with every environmentalized field value,
    # optionally pruning sections whose entity type is ignored.
    #   ignoreTypes == 'none' -> ignore nothing
    #   ignoreTypes == ''     -> use the built-in default ignore list
    #   anything else         -> comma-separated type names to ignore
    print;print "Building Target INI..."
    if ignoreTypes == 'none':
        ignoreTypes = None
    elif ignoreTypes == '':
#        ignoreTypes = 'ConnectToURLFilter,ConnectionFilter,BasicProfile,JavaScriptFilter'
        ignoreTypes = 'ConnectToURLFilter,ConnectionFilter,BasicProfile'
    # Clean up ignored Sections
    if ignoreTypes:
        print "  Ignoring Entity Types: " + ignoreTypes
        sections = sorted(ini.sections())
        ignores = ignoreTypes.split(',')
        for section in sections:
            for ignore in ignores:
                if ignore in section:
                    ini.remove_section(section)
    else:
        print "  Adding all environmentalized entities to INI file"
    for envEntity in environmentalizedEntities.getEnvironmentalizedEntities():
        # NOTE(review): substring membership -- an entity type that is a
        # substring of an ignored type name is also skipped; confirm intent.
        if ignoreTypes and envEntity.getType() in ignoreTypes:
            continue
        section = iniSection(getSHK(envEntity.getEntityPk()))
        if not ini.has_section(section):
            ini.add_section(section)
        fields = envEntity.getEnvironmentalizedFields()
        for envField in fields:
            ini.set(section, "%s[%i]" % (envField.getEntityFieldName(), envField.getIndex()), envField.getValue())
    print "  INI built."
    return ini
def writeIni(ini, inifile):
    # Serialize the INI to disk with [Locations] written first and the
    # remaining sections in sorted order.  Embedded newlines in values are
    # escaped as '>>>>' (restored by compareEnvEntities on read-back).
    print;print "Writing Target INI..."
    f = open(inifile, 'w')
    f.truncate()
    f.write("[Locations]\n")
    for item in ini.items("Locations"):
        f.write("%s = %s\n" % (item[0], item[1]))
    f.write("\n")
    sections = sorted(ini.sections())
    for section in sections:
        if section != "Locations":
            f.write("[%s]\n" % section)
            for item in ini.items(section):
                if item[1]:
                    # Flatten CR/LF so multi-line values survive the
                    # round-trip through ConfigParser.
                    value = item[1].replace('\r\n', '>>>>')
                    value = value.replace('\n', '>>>>')
                else:
                    value = ''
                f.write("%s = %s\n" % (item[0], value))
            f.write("\n")
    f.close()
    print "  Target INI written to " + inifile
def getIniFile(scriptDir, envName):
    """Derive (ini_path, env_dir, lower_env_name) for an environment name.

    The lowercase environment name joins the underscore-separated pieces of
    envName (up to but excluding the "API" marker) with hyphens, e.g.
    "KP_PROD_API" -> "kp-prod".
    """
    lowerEnvName = ""
    for piece in envName.split("_"):
        if piece == "API":
            break
        # Mirrors the original accumulation exactly: an empty accumulator is
        # replaced rather than hyphen-joined.
        lowerEnvName = piece.lower() if lowerEnvName == "" else lowerEnvName + "-" + piece.lower()
    envDir = "%s/environments/%s/" % (scriptDir, lowerEnvName)
    iniFile = "%s%s.ini" % (envDir, envName)
    return iniFile, envDir, lowerEnvName
def importConfigs(es, importsDir):
    # Import every .xml config fragment (certs, policies) found directly in
    # importsDir into the entity store; return the updated store handle.
    print;print "Importing custom Target certs & policies..."
    for configFile in os.listdir(importsDir):
        if configFile.endswith(".xml"):
            configXml = os.path.join(importsDir, configFile)
            es.importConf(configXml)
            print "  Successfully imported: %s " %(configXml)
    return es
def updateKPS(nmURL, nmUserId, nmPassword, kpsSource, ini):
    # Copy KPS JSON backups to the group's primary node, restore them into
    # the running instance, then remove the staged files from the host.
    # NOTE(review): the Axway install path is hard-coded below -- confirm it
    # matches every target host before reuse.
    if not os.path.isdir(kpsSource):
        print 'The KPS JSON source directory ' + kpsSource + ' does not exist'
        return
    groupName = ini.get('Locations', 'group')
    instanceName = ini.get('Locations', 'server')
    primaryHost = ini.get('Locations', 'node1')
    topologyAPI = TopologyAPI.create(nmURL, nmUserId, nmPassword)
    instance = topologyAPI.getServiceByName(groupName, instanceName).getId()
    # Only restore (and clean up) when the copy to the host succeeded.
    if copyKPSJSON(kpsSource, '/apps/Axway-7.4/apigateway/instances/' + groupName + '/conf/kps/backup', primaryHost):
        restoreKPS(instance, nmUserId, nmPassword, topologyAPI.getTopology())
        deleteKPSJSON('/apps/Axway-7.4/apigateway/instances/' + groupName + '/conf/kps/backup', primaryHost)
def restoreKPS(instance, nmUserId, nmPassword, topology):
#kpsadmin = KPSAdmin(verbose=True,username=nmUserId, password=<PASSWORD>)
kpsadmin = KPSAdmin(username=nmUserId, password=<PASSWORD>)
client = Endpoint(instance, topology, username=nmUserId, password=nmPassword).getClient()
adminClient = Endpoint(instance, topology, username=nmUserId, password=nmPassword).getAdminClient()
model = client.getModel()
stores = model.stores
uniquePackages = set()
for store in stores:
uniquePackages.add(store.config.get("package"))
packages = list(uniquePackages)
kpPackage = None
for package in packages:
if package == "Consumer_Authorization":
print "Package: %s" %(package)
kpPackage = package
kpsadmin.model = model
kpsadmin.package = kpPackage
for s in kpsadmin.getStoresInPackage():
print "Store alias: %s" %(s.alias)
op = StoreOperation(adminClient, s, False)
op.clear(False)
uuid = ""
safeid = Validator.createSafeId("%s_%s.json" % (uuid, s.identity))
print "Safeid: %s" %(safeid)
op.restore(safeid)
def copyKPSJSON(kpsSource, kpsBackupLocation, primaryHost):
    """scp all KPS JSON files from kpsSource to the backup location on
    primaryHost; return True when scp exits with status 0."""
    command = 'scp ' + kpsSource + '/*json wasadm@' + primaryHost + ':' + kpsBackupLocation
    return os.system(command) == 0
def deleteKPSJSON(kpsBackupLocation, primaryHost):
    """Delete the staged KPS JSON files from primaryHost over ssh; return
    True when the remote rm exits with status 0."""
    command = 'ssh wasadm@' + primaryHost + ' "rm -f ' + kpsBackupLocation + '/*json"'
    return os.system(command) == 0
def exportAPICJSON(apicInput, kpsDir, debug):
    """Run apicExport.py to dump API Connect data as KPS JSON into kpsDir."""
    arguments = ["apicExport.py", "--input", apicInput, "-j", kpsDir]
    if debug:
        arguments.append("--debug")
    execCommand('python', " ".join(arguments))
def copyJars(jarsDir, ini):
    # rsync the JAR directory to every node listed as node1, node2, ... in
    # the INI's [Locations] section, stopping at the first missing nodeN.
    index = 1
    while True:
        if ini.has_option('Locations', 'node' + str(index)):
            host = ini.get('Locations', 'node' + str(index))
            execCommand('rsync', '-a ' + jarsDir + ' wasadm@' + host + ':/apps/Axway-7.4/apigateway/ext/lib')
            index += 1
        else:
            break
def execCommand(command, args):
    # Run `command` with whitespace-split args via subprocess; return True on
    # exit status 0, False (after printing the error) otherwise.
    # NOTE: splitting on whitespace means arguments containing spaces are not
    # supported by this helper.
    cmd = []
    arguments = args.split()
    cmd.append(command)
    for arg in arguments:
        cmd.append(arg)
    try:
        result = subprocess.check_call(cmd)
    except subprocess.CalledProcessError, e:
        print "Execution failed:", e
        return False
    print "The command completed successfully: %s %s" % (command, args)
    return True
def buildDirectories(scriptDir):
    # Derive the working directory layout from the script's location and
    # echo it for the operator.  Returns (scriptDir, fedDir, backupDir,
    # svnDir, jarsDir, kpsDir, apicExport).
    print "Composing required directories..."
    fedDir = scriptDir + "/FEDS/"
    backupDir = scriptDir + "/BACKUPS/"
    svnDir = scriptDir + "/SVN/"
    jarsDir = scriptDir + "/JARS/"
    kpsDir = scriptDir + "/KPS/"
    # apic-export lives two path levels above the script directory.
    apicExport = '/'.join(scriptDir.split('/')[:-2]) + '/apic-export/'
    print "  Saved Deployment Packages: " + fedDir
    print "  Deployment Package Backups: " + backupDir
    print "  SVN: " + svnDir
    print "  JAR Files: " + jarsDir
    print "  KPS JSON Files: " + kpsDir
    print "  APIC Export Input: " + apicExport
    return scriptDir, fedDir, backupDir, svnDir, jarsDir, kpsDir, apicExport
def parseOptions(copyargs):
    # Parse command-line flags with the lenient MyOptionParser (unknown
    # arguments are silently ignored) and echo the selected actions.
    print;print "Parsing options..."
    parser = MyOptionParser()
    parser.add_option("-f", "--from", dest="fromSource", help="Promoting from - URL or file name")
    parser.add_option("-t", "--to", dest="toTarget", help="Promoting to - URL or file name")
    parser.add_option("-d", "--deploy", action="store_true", dest="deploy", help="Deploy updates to group", default=False)
    parser.add_option("-s", "--save", action="store_true", dest="save", help="Save the target fed file", default=False)
    parser.add_option("-m", "--month", dest="month", help="Release Month")
    parser.add_option("-c", "--drop", dest="drop", help="Release Drop")
    parser.add_option("", "--create-ini", action="store_true", dest="createIni", help="Create / update an .ini file", default=False)
    parser.add_option("", "--ini-ignore-types", dest="ignoreTypes", help="Ignore entity types", default="")
    parser.add_option("", "--env-props", action="store_true", dest="envProps", help="Copy envSettings.props", default=False)
    parser.add_option("", "--kps", action="store_true", dest="kps", help="Export KPS JSON files from API Connect and load into KPS", default=False)
#    parser.add_option("", "--kps", dest="kpsSource", help="Export KPS JSON files from API Connect and load into KPS")
    parser.add_option("", "--kpsDebug", action="store_true", dest="kpsDebug", help="Debug Export of KPS JSON files from APIC", default=False)
#    parser.add_option("", "--copy-jars", action="store_true", dest="copyJars", help="Copy JAR files to target hosts", default=False)
    (options, args) = parser.parse_args(args=copyargs)
    # Echo the effective settings so the run is self-documenting in logs.
    if options.fromSource:
        print "  Promoting from source: " + options.fromSource
    if options.toTarget:
        print "  Promoting to target: " + options.toTarget
    if options.deploy:
        print "  Deploy to target environment"
    if options.save:
        print "  Target deployment package will be saved"
    if options.month:
        print "  Release month: " + options.month
    if options.drop:
        print "  Release drop: " + options.drop
    if options.createIni:
        print "  INI file will be created for target"
    if options.ignoreTypes:
        print "  Entitiy types to ignore during INI creation: " + options.ignoreTypes
    if options.envProps:
        print "  Deploy envSettings.props to target"
    if options.kps:
        print "  Export KPS JSON files from API Connect and load into KPS"
    return options
### MAIN ###
# Setup directories
scriptDir, fedDir, backupDir, svnDir, jarsDir, kpsDir, apicExport = buildDirectories(os.path.dirname(os.path.realpath(sys.argv[0])))
# Parse the input arguments
options = parseOptions(sys.argv[:])
# Check for the target to deploy to
if not options.toTarget:
print;print "Please provide a target for the deployment package."
exit()
if options.envProps or options.deploy:
to_inifile, to_envDir, to_envName = getIniFile(scriptDir, options.toTarget)
try:
print;print "Copying envSettings.props to target..."
command = []
command.append(scriptDir + "/copyEnvSettings.sh")
command.append(to_envName)
copyEnvSettings = subprocess.check_call(command)
print " Copy is complete."
except subprocess.CalledProcessError:
print " Copy failed."
print " " + str(subprocess.CalledProcessError.returncode)
print " " + subprocess.CalledProcessError.cmd
print " " + subprocess.CalledProcessError.output
exit()
if not (options.save or options.deploy or options.createIni):
print;print "No action given. Execute 'jython deployAPI.py -h' to display options."
exit()
if options.save or options.deploy or options.kps or options.kpsDebug:
# Check for release information
if options.month:
releaseMonth = options.month
elif options.save or options.deploy:
print "Provide a release month (eg. Sept2016)"
exit()
if options.drop:
releaseDrop = options.drop
elif options.save or options.deploy:
print "Provide a release code drop (eg. CD3)"
exit()
svnDir = svnDir + releaseMonth.lower() + '/' + releaseDrop.lower() + '/'
if not os.path.isdir(svnDir):
print 'The directory ' + svnDir + ' is not found.'
exit()
if options.kps or options.kpsDebug:
if 'PROD' in options.toTarget:
kpsEnv = 'Prod'
else:
kpsEnv = 'NonProd'
kpsSource = svnDir + 'KPS/' + kpsEnv + '/'
if not os.path.isdir(kpsSource):
print 'The directory ' + kpsSource + ' is not found.'
exit()
apicExportInput = apicExport + releaseMonth.lower() + '/' + releaseDrop.lower() + '/' + kpsEnv + '/APIC_Scripts_Input'
if not os.path.isfile(apicExportInput):
print 'The file ' + apicExportInput + ' is not found.'
exit()
if options.save or options.deploy:
# Check for source to deploy from
if not options.fromSource:
print "Please provide a source for the deployment package. It may be a file name (.fed or .ini)."
exit()
# Get the source
if ".fed" in options.fromSource:
# Input is a fed file
if os.path.isfile(options.fromSource):
from_fed = options.fromSource
elif os.path.isfile(svnDir + options.fromSource):
from_fed = svnDir + options.fromSource
else:
print;print "The " + options.fromSource + " file is not found."
exit()
print;print "Source FED file: " + from_fed
from_nm = ""
else:
# Input is an ini file
from_inifile, from_envDir, from_envName = getIniFile(scriptDir, options.fromSource)
if os.path.isfile(from_inifile):
print;print "Source INI file: " + from_inifile
from_ini = ConfigParser.RawConfigParser()
from_ini.optionxform = str
from_ini.read(from_inifile)
from_nm = from_ini.get('Locations', 'admin_node_mgr')
print " Source Admin Node Manager: " + from_nm
fromGroup = from_ini.get('Locations', 'group')
print " Source Group: " + fromGroup
fromServer = from_ini.get('Locations', 'server')
print " Source Server: " + fromServer
print
from_nmUser = raw_input(' Enter admin user for ' + fromGroup + ': ')
from_nmPassword = getpass.getpass(' Enter password for ' + fromGroup + ': ')
else:
print "The " + from_inifile + " file is not found."
exit()
else:
from_nm = ''
# Get the target
to_inifile, to_envDir, to_envName = getIniFile(scriptDir, options.toTarget)
if os.path.isfile(to_inifile):
print;print "Target INI file: " + to_inifile
to_ini = ConfigParser.RawConfigParser()
to_ini.optionxform = str
to_ini.read(to_inifile)
to_nm = to_ini.get('Locations', 'admin_node_mgr')
print " Target Admin Node Manager: " + from_nm
toGroup = to_ini.get('Locations', 'group')
print " Target Group: " + toGroup
toServer = to_ini.get('Locations', 'server')
print " Target Server: " + toServer
to_envFile = to_envDir + toGroup + '.env'
print " Environment Package File to save: " + to_envFile
if to_nm == from_nm:
print;print " Already have Admin Node Manager credentials from Source"
to_nmUser = from_nmUser
to_nmPassword = <PASSWORD>
else:
print
to_nmUser = raw_input(' Enter admin user for ' + toGroup + ': ')
to_nmPassword = getpass.getpass(' Enter password for ' + toGroup + ': ')
else:
print;print "The " + to_inifile + " file is not found."
exit()
# Load target deployment package
if to_nm != "" and (options.save or options.deploy or options.createIni):
# Connects to the Admin Node Manager and downloads a configuration from it
print;print "Download Target deployment package..."
to_adminNM = NodeManagerDeployAPI.create(to_nm, to_nmUser, to_nmPassword)
# check for valid connection
to_depArchive = to_adminNM.getDeploymentArchiveForServerByName(toGroup, toServer)
print " Download completed."
# Initialize the target Deployment Archive API object
to_depArchiveAPI = DeploymentArchiveAPI(to_depArchive, "")
# Import custom certificates for target
if options.save or options.deploy or options.createIni:
es = to_depArchiveAPI.getEntityStoreAPI()
es = importConfigs(es, scriptDir + to_ini.get('Locations', 'importConfigs'))
to_depArchiveAPI.deploymentArchive.updateConfiguration(es.es)
es.close
print " Import completed."
# Get the environment package
if options.save or options.deploy or options.createIni:
toEnv = EnvironmentArchive(to_depArchive)
# Get Environmentalized Values in target Deployment Package
to_envEntities = to_depArchiveAPI.getEnvSettings().getEnvSettings()
# Create the target INI file if requested
if options.createIni:
to_ini = buildIni(to_envEntities, to_ini, options.ignoreTypes)
writeIni(to_ini, to_inifile)
exit()
# Load the FED from the node manager or from a file
if options.save or options.deploy:
if options.toTarget == options.fromSource:
print;print "Source and Target are the same - setting Source deployment package to Target deployment package."
from_depArchive = to_depArchive
elif from_nm != "":
# Connects to the Admin Node Manager and download deployment package
print;print "Download Source deployment package..."
from_adminNM = NodeManagerDeployAPI.create(from_nm, from_nmUser, from_nmPassword)
from_depArchive = from_adminNM.getDeploymentArchiveForServerByName(fromGroup, fromServer)
print " Download completed."
else:
# Open the FED file
print;print "Open Source FED file..."
from_depArchive = DeploymentArchive(from_fed)
print " Opened FED " + from_fed
# Get the policy package
fromPol = PolicyArchive(from_depArchive)
# Initialize the source Deployment Archive API object
from_depArchiveAPI = DeploymentArchiveAPI(from_depArchive, "")
# Get Environmentalized Values in source Deployment Package
from_envEntities = from_depArchiveAPI.getEnvSettings().getEnvSettings()
# Generate environmentalized entities as dictionaries
print;print "Get Source environmentalized settings..."
fromEnvDict = getEnvSettingsDict(from_envEntities)
print;print "Get Target environmentalized settings..."
toEnvDict = getEnvSettingsDict(to_envEntities)
# Generate the delta comparison between the environmentalized entities
updateEnvEntities = {}
compareEnvEntities(fromEnvDict, toEnvDict, to_ini, updateEnvEntities)
# Merge the source policy package with the target environment package
# to create a new target deployment package
print;print "Merging Source Policy Package and Target Environment Package to create the Target Deployment Package..."
mergedArchive = DeploymentArchive(fromPol, toEnv)
to_depArchiveAPI = DeploymentArchiveAPI(mergedArchive, "")
# Update the target deployment package with the changed environmentalized entities
updateEnvSettings(updateEnvEntities, to_depArchiveAPI)
# Check Basic HTTP Auth Passwords are environmentalized
checkBasicProfile(to_depArchiveAPI.getEntityStoreAPI())
# Display the target environmentalized entities
#displayEnvEntities(to_depArchiveAPI.getEnvSettings().getEnvSettings())
# Update Environment properties
if options.save or options.deploy:
print; print "Updating enviroment properties..."
to_depArchiveAPI.updateEnvironmentProps(dict([(Archive.NAME_DEFAULT_PROPERTY, to_ini.get('Locations', 'env_name'))]))
to_depArchiveAPI.updateEnvironmentProps(dict([(Archive.DESCRIPTION_DEFAULT_PROPERTY, "Environment Settings for " + to_ini.get('Locations', 'env_name'))]))
to_depArchiveAPI.updateEnvironmentProps(dict([(Archive.VERSION_DEFAULT_PROPERTY, 'v' + to_ini.get('Locations', 'env_name') + releaseMonth + releaseDrop)]))
to_depArchiveAPI.updateEnvironmentProps(dict([(Archive.VERSIONCOMMENT_DEFAULT_PROPERTY, "Updated Environment Settings for " + releaseMonth + " " + releaseDrop)]))
es = to_depArchiveAPI.getEntityStoreAPI()
es = importConfigs(es, scriptDir + to_ini.get('Locations', 'importConfigs'))
to_depArchiveAPI.deploymentArchive.updateConfiguration(es.es)
es.close
print " Import completed."
# Save the updated target environment package file
print;print "Saving Target Environment Package..."
environmentArchive = EnvironmentArchive(mergedArchive)
environmentArchive.writeToArchiveFile(to_envFile)
print " Saved environment package to %s " % (to_envFile)
# Save the updated target deployment package file
if options.save:
print;print "Saving Target Deployment Package..."
outFedFile = fedDir + options.toTarget + '.fed'
mergedArchive.writeToArchiveFile(outFedFile)
print " Saved deployment package to %s " % (outFedFile)
# Deploy to To Environment
if options.deploy:
print;print "Deploying Target Deployment Package..."
print " Backing up current target deployment package..."
backupFed = backupDir + options.toTarget.split('.ini')[0] + '_bak.fed'
to_depArchive.writeToArchiveFile(backupFed)
print " Current target deployment package backed up to " + backupFed
print " Deploying..."
deploy(to_adminNM, toGroup, mergedArchive)
print " Deployment complete"
# Update KPS
if options.kpsDebug:
# exportAPICJSON("APIC_Scripts_Input", kpsDir, options.kpsDebug)
exportAPICJSON(apicExportInput, kpsDir, options.kpsDebug)
elif options.kps or options.deploy:
# Copy other non-APIC KPS JSON files to temp
#kpsSource = svnDir + options.kpsSource
os.system('cp ' + kpsSource + '/*json ' + kpsDir)
# Export KPS JSON from APIC
# exportAPICJSON("APIC_Scripts_Input", kpsDir, options.kpsDebug)
exportAPICJSON(apicExportInput, kpsDir, options.kpsDebug)
# backup KPS file system
groupName = to_ini.get('Locations', 'group')
print groupName
i = 1
while True:
try:
os.system('ssh wasadm@' + to_ini.get('Locations', 'node' + str(i)) + ' "cd /apps/Axway-7.4/apigateway/instances/' + groupName + '/conf/kps;rm -f cassandra-bak.tgz;tar -zcvf cassandra-bak.tgz cassandra"')
except ConfigParser.NoOptionError:
break
i += 1
print i
# Update KPS
updateKPS(to_nm, to_nmUser, to_nmPassword, kpsDir, to_ini)
# Delete APIC KPS JSON files from kpsDir
os.system('rm -f ' + kpsDir + '/*')
# Copy JAR Files
#if options.copyJars:
# copyJars(jarsDir)
| StarcoderdataPython |
1666761 | from datetime import datetime
from multiprocessing import Queue
from threading import Semaphore, Thread, Lock
from undine.utils.system import System
from undine.utils import logging
from time import sleep
import subprocess
class TaskThread:
    """Runs a single shell command and captures its exit status and output.

    The command is executed synchronously in :meth:`run`; the captured
    stdout/stderr and return code are then exposed through properties.
    """
    def __init__(self):
        # returncode of the last run (None until run() completes).
        self._state = None
        # Raw captured stdout/stderr. Initialized to empty *bytes* so the
        # *_message properties are safe to read before run() is called;
        # the previous 'Empty' str sentinel made bytes(...).decode() raise
        # a TypeError in Python 3.
        self._result = b''
        self._error = b''
    def run(self, cmd):
        """Execute *cmd* through the shell and block until it finishes."""
        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=True)
        self._result, self._error = process.communicate()
        self._state = process.returncode
    @property
    def result_message(self):
        """Captured stdout decoded as UTF-8 ('' before the first run)."""
        return bytes(self._result).decode('utf-8')
    @property
    def error_message(self):
        """Captured stderr decoded as UTF-8 ('' before the first run)."""
        return bytes(self._error).decode('utf-8')
    @property
    def success(self):
        """True when the command exited with status 0."""
        return self._state == 0
class _SchedulerStats:
def __init__(self, worker_count):
self._worker_count = worker_count
self._lock = Lock()
self._on_the_fly = dict()
def query(self):
with self._lock:
run = len(self._on_the_fly)
return {'run_workers': run,
'utilization': run / self._worker_count * 100.0,
'list': [{'id': task, 'time': str(datetime.now() - time)}
for task, time in self._on_the_fly.items()]}
def add(self, tid):
with self._lock:
self._on_the_fly[tid] = datetime.now()
def remove(self, tid):
with self._lock:
self._on_the_fly.pop(tid, None)
class TaskScheduler:
    """Dispatches tasks onto a bounded pool of worker threads.

    The pool size is min(system CPU count - 1, config 'max_cpu'); a Semaphore
    enforces the bound and _SchedulerStats tracks what is in flight.
    """
    _SCHEDULER_LOGGER_NAME = 'undine-scheduler'
    _SCHEDULER_LOGGER_PATH = '/tmp/{}.log'.format(_SCHEDULER_LOGGER_NAME)
    _SCHEDULER_LOGGER_LEVEL = 'ERROR'
    # Stored as a string; converted with int() in __init__.
    _SCHEDULER_TASK_INTERVAL = '1'
    # Sentinel meaning "no CPU limit configured".
    _MAX_CPU = 999999
    def _log_string(self, name, task):
        # Include the full command only at debug level to keep logs compact.
        if logging.is_debug(self._logger):
            return "tid({1}) {0}\n\tcommand -> {2}".format(name,
                                                           task.tid, task.cmd)
        else:
            return "tid({1}) {0}".format(name, task.tid)
    def __init__(self, manager, config):
        """Build the scheduler.

        manager: owner notified via task_complete() when a task finishes.
        config: dict-like settings; missing keys get defaults via setdefault.
        """
        # Leave one core free for the rest of the system.
        system_cpu = System.cpu_cores() - 1
        config_cpu = int(config.setdefault('max_cpu', self._MAX_CPU))
        interval = int(config.setdefault('task_interval',
                                         self._SCHEDULER_TASK_INTERVAL))
        # If the configured # of CPUs is 0, change the config_cpu to MAX_CPU.
        # value
        if config_cpu == 0:
            config_cpu = self._MAX_CPU
        self._workers = min(system_cpu, config_cpu)
        self._manager = manager
        self._pool = Semaphore(self._workers)
        self._task_interval = interval
        # Initialize SchedulerState
        self._state = _SchedulerStats(self._workers)
        # Create logger instance
        log_path = config.setdefault('log_file', self._SCHEDULER_LOGGER_PATH)
        log_level = config.setdefault('log_level', self._SCHEDULER_LOGGER_LEVEL)
        self._logger = logging.get_logger(self._SCHEDULER_LOGGER_NAME,
                                          log_path, log_level)
        # ==================================================
        # TODO Check thread table is useful.
        # Worker-slot "tickets": each slot id cycles through the queue so the
        # thread that previously used a slot is joined before reuse.
        self._ticket = Queue()
        self._thread = dict()
        for thread_id in range(0, self._workers):
            self._ticket.put(thread_id)
        # ==================================================
    @property
    def max_cpu(self):
        # Number of concurrent worker slots in the pool.
        return self._workers
    def is_ready(self):
        # Blocks until at least one worker slot is free, then releases it.
        self._pool.acquire()
        self._pool.release()
        return True
    def wait_all(self):
        # Get all semaphore pool
        # Acquiring every slot implies all running tasks have completed.
        for _ in range(0, self._workers):
            self._pool.acquire()
    def run(self, task):
        """Start *task* on a worker thread; blocks while the pool is full."""
        # Get a worker resource from pool
        self._pool.acquire()
        # ==================================================
        # TODO Check thread table is useful.
        thread_id = self._ticket.get()
        if thread_id in self._thread:
            # Join the finished thread that previously held this slot.
            self._thread[thread_id].join()
            del self._thread[thread_id]
        thread = Thread(target=TaskScheduler._procedure,
                        args=(self, task, thread_id))
        self._thread[thread_id] = thread
        # ==================================================
        # else condition method
        # thread = Thread(target=TaskScheduler._procedure, args=(self, task))
        # ==================================================
        self._state.add(task.tid)
        thread.start()
        # Sleep during task_interval to avoid the task interference.
        sleep(self._task_interval)
    def stats_procedure(self, *_args, **_kwargs):
        # Currently args and kwargs not in use.
        return self._state.query()
    # ==================================================
    # TODO Check thread table is useful.
    # @staticmethod
    # def _procedure(self, task):
    # ==================================================
    @staticmethod
    def _procedure(self, task, worker_id):
        # Worker body: run the command, report success/failure to the task and
        # the manager, then recycle the slot ticket and release the semaphore.
        thread = TaskThread()
        self._logger.info(self._log_string("Task start", task))
        thread.run(task.cmd)
        if thread.success:
            task.success(thread.result_message)
            self._logger.info(self._log_string("Task complete", task))
        else:
            task.fail(thread.error_message)
            self._logger.info(self._log_string("Task fail", task))
        self._state.remove(task.tid)
        self._manager.task_complete(task)
        # ==================================================
        # TODO Check thread table is useful.
        self._ticket.put(worker_id)
        self._pool.release()
        # ==================================================
| StarcoderdataPython |
3982 | #MenuTitle: Copy Layer to Layer
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""
import GlyphsApp
import vanilla
import math
def getComponentScaleX_scaleY_rotation( self ):
	"""Decompose a component's 2x2 affine transform into
	[scale_x, scale_y, rotation] (rotation in degrees)."""
	a, b = self.transform[0], self.transform[1]
	c, d = self.transform[2], self.transform[3]
	scale_x = math.hypot(a, b)
	scale_y = math.hypot(c, d)
	# A vertical flip is assumed when both shear terms are negative.
	if b < 0 and c < 0:
		scale_y = -scale_y
	rotation = math.degrees(math.atan2(b, a))
	return [scale_x, scale_y, rotation]
class MasterFiller( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 280
windowHeight = 155
windowWidthResize = 120 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Copy layer to layer", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.MasterFiller.mainwindow" # stores last window position and size
)
self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), "Copy paths from", sizeStyle='small')
self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), "into selection of", sizeStyle='small')
self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), "Include components", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), "Include anchors", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), "Include metrics", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.keep_window_open = vanilla.CheckBox((15, 52+56, -100, 20), "Keep window open", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.copybutton = vanilla.Button((-80, -30, -15, -10), "Copy", sizeStyle='small', callback=self.buttonCallback)
self.w.setDefaultButton( self.w.copybutton )
# Load Settings:
if not self.LoadPreferences():
print "Note: 'Copy Layer to Layer' could not load preferences. Will resort to defaults."
self.w.open()
self.w.makeKey()
self.w.master_into.set(1)
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] = self.w.include_components.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] = self.w.include_anchors.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] = self.w.include_metrics.get()
Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] = self.w.keep_window_open.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.MasterFiller.include_components" : "1",
"com.mekkablue.MasterFiller.include_anchors" : "1",
"com.mekkablue.MasterFiller.include_metrics" : "1",
"com.mekkablue.MasterFiller.keep_window_open" : "1"
}
)
self.w.include_components.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] )
self.w.include_anchors.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] )
self.w.include_metrics.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] )
self.w.keep_window_open.set( Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] )
except:
return False
return True
def GetMasterNames( self ):
myMasterList = []
for i in range( len( Glyphs.currentDocument.font.masters ) ):
x = Glyphs.currentDocument.font.masters[i]
myMasterList.append( '%i: %s' % (i, x.name) )
return myMasterList
def MasterChangeCallback( self, sender ):
if self.w.master_from.get() == self.w.master_into.get():
self.w.copybutton.enable( False )
else:
self.w.copybutton.enable( True )
def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all paths from sourceLayer to targetLayer"""
num_from = len( sourceLayer.paths )
num_into = len( targetLayer.paths )
if num_into != 0:
print "- Cleaning out paths in target layer"
for i in range( num_into )[::-1]:
del targetLayer.paths[i]
if num_from > 0:
print "- Copying paths"
for thisPath in sourceLayer.paths:
newPath = GSPath()
for n in thisPath.nodes:
newNode = GSNode()
newNode.type = n.type
newNode.connection = n.connection
newNode.setPosition_( (n.x, n.y) )
newPath.addNode_( newNode )
newPath.closed = thisPath.closed
targetLayer.paths.append( newPath )
def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all components from sourceLayer to targetLayer."""
comp_from = len( sourceLayer.components )
comp_into = len( targetLayer.components )
if comp_into != 0:
print "- Cleaning out components in target layer"
for i in range( comp_into )[::-1]:
del targetLayer.components[i]
if comp_from > 0:
print "- Copying components:"
for thisComp in sourceLayer.components:
compName = str( thisComp.componentName ) # str() probably not necessary anymore, but once fixed a problem
newComp = GSComponent( compName )
newComp.setPosition_( (thisComp.x, thisComp.y) )
ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp)
newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2])
print "-- Component: %s" % ( compName )
targetLayer.components.append( newComp )
def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all anchors from sourceLayer to targetLayer."""
anch_from = len( sourceLayer.anchors )
anch_into = len( targetLayer.anchors )
if anch_into != 0:
print "- Cleaning out anchors in target layer"
sourceLayer.setAnchors_( None )
if anch_from > 0:
print "- Copying anchors from source layer:"
for thisAnchor in sourceLayer.anchors:
anchorName = thisAnchor.name
anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y )
newAnchor = GSAnchor( anchorName, anchorPosition )
print "-- %s (%i, %i)" % ( anchorName, anchorPosition.x, anchorPosition.y )
targetLayer.addAnchor_( newAnchor )
def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies width of sourceLayer to targetLayer."""
sourceWidth = sourceLayer.width
if targetLayer.width != sourceWidth:
targetLayer.width = sourceWidth
print "- Copying width (%.1f)" % sourceWidth
else:
print "- Width not changed (already was %.1f)" % sourceWidth
def buttonCallback( self, sender ):
Glyphs.clearLog()
Glyphs.showMacroWindow()
print "Copy Layer to Layer Protocol:"
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedGlyphs = [ x.parent for x in Font.selectedLayers ]
index_from = self.w.master_from.get()
index_into = self.w.master_into.get()
compYesNo = self.w.include_components.get()
anchYesNo = self.w.include_anchors.get()
metrYesNo = self.w.include_metrics.get()
for thisGlyph in selectedGlyphs:
try:
print "\nProcessing", thisGlyph.name
sourcelayer = thisGlyph.layers[ index_from ]
targetlayer = thisGlyph.layers[ index_into ]
Font.disableUpdateInterface()
# copy paths:
self.copyPathsFromLayerToLayer( sourcelayer, targetlayer )
# copy components:
if compYesNo:
self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer )
# copy anchors:
if anchYesNo:
self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer )
# copy metrics:
if metrYesNo:
self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer )
Font.enableUpdateInterface()
except Exception, e:
print e
if not self.w.keep_window_open.get():
self.w.close()
MasterFiller()
| StarcoderdataPython |
12817292 | import numpy
import math
_EPS = numpy.finfo(float).eps * 4.0
# Downloaded on 6 August, 2013 from
# http://www.lfd.uci.edu/~gohlke/code/transformations.py
#
# Author: <NAME>
# License: BSD
def quaternion_matrix(quaternion):
    """Return the 4x4 homogeneous rotation matrix for *quaternion* (w, x, y, z).

    A quaternion of (near-)zero norm yields the identity matrix.

    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    norm_sq = numpy.dot(q, q)
    if norm_sq < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / norm_sq)
    outer = numpy.outer(q, q)
    # Index names for readability: quaternion is ordered (w, x, y, z).
    w, x, y, z = 0, 1, 2, 3
    return numpy.array([
        [1.0 - outer[y, y] - outer[z, z], outer[x, y] - outer[z, w], outer[x, z] + outer[y, w], 0.0],
        [outer[x, y] + outer[z, w], 1.0 - outer[x, x] - outer[z, z], outer[y, z] - outer[x, w], 0.0],
        [outer[x, z] - outer[y, w], outer[y, z] + outer[x, w], 1.0 - outer[x, x] - outer[y, y], 0.0],
        [0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.
    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.
    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
    True
    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
    >>> q = quaternion_from_matrix(R)
    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
    True
    """
    # Use only the upper-left 4x4; copy=False avoids copying ndarray input.
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        q = numpy.empty((4, ))
        t = numpy.trace(M)
        if t > M[3, 3]:
            # Trace-dominant case: the scalar component is the largest.
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Choose (i, j, k) so that M[i, i] is the largest diagonal entry,
            # keeping the square-root argument below well-conditioned.
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                         [m01+m10, m11-m00-m22, 0.0, 0.0],
                         [m02+m20, m12+m21, m22-m00-m11, 0.0],
                         [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        # Scaling does not change the eigenvectors, only the eigenvalues.
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = numpy.linalg.eigh(K)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    # Canonicalize the sign: prefer a non-negative scalar component.
    if q[0] < 0.0:
        numpy.negative(q, q)
    return q
| StarcoderdataPython |
94583 | <filename>transforms/__init__.py<gh_stars>1-10
from .normalize import Normalize
from .transforms import Transforms
from .random_flips import RandomHorizontalFlip,RandomVerticalFlip
from .resize import Resize
from .color_shift import ColorShift | StarcoderdataPython |
1633178 | # Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
'''
`craft-py-ext` is an experimental tool for generating the boilerplate to build simple CPython extensions.
It should be considered a work in progress.
To use it, you write a `.pyi` type declaration file, and then generate the boilerplate from that.
The boilerplate comes in two layers.
An outer function provides the C extension interface (e.g. PyObject* types),
and does a certain amount of error checking and unwrapping to native C types.
Then, a corresponding inner function is called, which is where the actual implementation goes.
The idea is to let the implementor fill out the inner funcntion, and keep most of the generated boilerplate separate.
'''
import re
from argparse import ArgumentParser
from ast import (AST, AnnAssign, Assign, AsyncFunctionDef, ClassDef, Expr as ExprStmt, FunctionDef, Import, ImportFrom, Module,
Name, Str, parse, stmt as Stmt)
from dataclasses import dataclass
from enum import Enum
from functools import singledispatch
from inspect import Parameter, Signature, signature
from typing import Any, ByteString, Callable, Dict, Iterator, List, NoReturn, Optional, TextIO, Tuple, Type, Union
from mypy_extensions import VarArg
from pithy.io import errL, errSL, read_from_path, read_line_from_path
from pithy.path import path_name, path_stem
# Short module-level aliases for inspect.Parameter kinds and the empty sentinel.
KEYWORD_ONLY = Parameter.KEYWORD_ONLY
POSITIONAL_ONLY = Parameter.POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = Parameter.POSITIONAL_OR_KEYWORD
VAR_KEYWORD = Parameter.VAR_KEYWORD
empty = Parameter.empty
def main() -> None:
  'Entry point: validate the command line, then generate stubs for each .pyi path.'
  parser = ArgumentParser(description='Generate CPython extension stubs from .pyi files.')
  parser.add_argument('paths', nargs='+', default=[])
  parser.add_argument('-dbg', action='store_true')
  parsed = parser.parse_args()
  if not parsed.paths:
    exit('No paths specified.')
  for interface_path in parsed.paths:
    if not interface_path.endswith('.pyi'):
      exit(f'interface path does not end with `.pyi`: {interface_path}')
    generate_ext(path=interface_path)
# Python type mappings.
@dataclass
class TypeInfo:
  'Maps Python types, as parsed from .pyi files, to C extension types and associated metadata.'
  type:Any                   # The Python-level type (or typing construct) being mapped.
  c_type:str                 # C declaration type, including any trailing space/pointer.
  c_init:str                 # C initializer expression for a local of this type.
  c_arg_parser_fmt:str       # PyArg_Parse* format unit used to unpack this type.
  return_conv:Optional[str]  # Cast applied to convert to PyObject* for return; None if not returnable.
# Fallback mapping for any type without an explicit entry in `type_infos`.
type_info_any = TypeInfo(Any,
  c_type='PyObject *', c_init='NULL', c_arg_parser_fmt='o', return_conv='(PyObject*)')
# Registry of known type mappings, keyed by the Python type.
type_infos = { t.type : t for t in [
  type_info_any,
  TypeInfo(None,
    c_type='void ', c_init='', c_arg_parser_fmt='', return_conv=None),
  TypeInfo(bytes,
    c_type='PyBytesObject *', c_init='NULL', c_arg_parser_fmt='S', return_conv='(PyObject *)'),
  TypeInfo(Union[str,ByteString],
    c_type='Py_buffer ', c_init='{.buf=NULL, .obj=NULL, .len=0}', c_arg_parser_fmt='s*', return_conv=None),
]}
# An annotation as parsed: None, a forward-reference string, or an actual type.
TypeAnn = Union[None,str,Type[Any]]
@dataclass
class Par:
  'Function parameter info, as parsed from Python annotations.'
  name:str       # Parameter name.
  type:TypeAnn   # Annotated type (string annotations resolved by the caller where possible).
  dflt:Any       # Default value, or inspect.Parameter.empty.
  @property
  def c_arg_cleanup(self) -> Optional[str]:
    'Optional C argument cleanup code.'
    # Py_buffer arguments must be released after use to avoid leaking the view.
    if self.ti.c_type == 'Py_buffer ': return f'if ({self.name}.obj) PyBuffer_Release(&{self.name})'
    return None
  @property
  # Fall back to the generic PyObject* mapping for unregistered types.
  def ti(self) -> TypeInfo: return type_infos.get(self.type, type_info_any)
class FuncKind(Enum):
  'Kind of function declaration, deduced from decorators and the enclosing scope.'
  Plain = 0   # Module-level function.
  Method = 1  # Instance method.
  Class = 2   # classmethod.
  Static = 3  # staticmethod.
@dataclass
class Func:
  'Function info, as parsed from Python annotations.'
  name:str                 # Function name.
  type_name:Optional[str]  # Enclosing class name for methods; None for plain functions.
  sig:Signature            # inspect.Signature of the declaration.
  pars:List[Par]           # Parsed parameters (includes self/cls for methods).
  ret:TypeAnn              # Resolved return annotation.
  doc:str                  # Docstring, or '' when absent.
  kind:FuncKind            # Plain/Method/Class/Static.
@dataclass
class Var:
  'A variable declaration parsed from an annotated assignment.'
  name:str
  type:Type[Any]
# Any declaration that can appear inside a scope.
Decl = Union['Class',Func,Var]
class SourceReporter:
  'Base class that can report source diagnostics from an AST node.'
  path:str  # Path of the source file, used to render diagnostics.
  def warn(self, node:AST, msg:str) -> None:
    'Print a warning located at *node* and continue.'
    errSL('warning:', node_diagnostic(path=self.path, node=node, msg=msg))
  def error(self, node:AST, msg:str) -> NoReturn:
    'Print an error located at *node* and exit the process.'
    exit('error: ' + node_diagnostic(path=self.path, node=node, msg=msg))
class Scope(SourceReporter):
  'Scope base class is either a ExtMod (whole module being generated) or a Class.'
  def __init__(self, path:str, name:str, doc:str) -> None:
    self.path = path
    self.name = name
    self.doc:str = doc
    self.decls:List[Decl] = []  # Declarations parsed into this scope, in source order.
class ExtMod(Scope):
  'The parsed/generated extension module.'
class Class(Scope):
  'Class scope; both a Scope and a Decl, which is what makes the whole thing complicated.'
def generate_ext(path:str) -> None:
  'Top level parsing and code generation for a path.'
  errL('\n', path)
  stem = path_stem(path)
  name = path_name(stem)
  mod_source = parse_pyi_module(path=path) # Input.
  mod = ExtMod(path=path, name=name, doc=mod_source.doc)
  # NOTE(review): the loop rebinds `name` (previously the module name) to each
  # declaration's name; harmless since `name` is not used afterwards — confirm.
  for name, syntax, obj in mod_source:
    parse_decl(syntax, name=name, obj=obj, scope=mod, global_vals=mod_source.vals)
  dst_c = stem + '.gen.cpp'
  dst_h = stem + '.gen.h'
  # Emit the C source and its header side by side.
  with open(dst_c, 'w') as c, open(dst_h, 'w') as h:
    write_module(mod, c=c, h=h)
# AST node types that can introduce a scope.
ScopeNode = Union[ClassDef,Module]
@dataclass
class ScopeSource(SourceReporter):
  'The source of a module or class scope. Contains both the syntactic and dynamic representations.'
  path:str            # Source file path.
  node:ScopeNode      # AST of the module or class body.
  vals:Dict[str,Any]  # Runtime namespace obtained by exec'ing the source.
  @property
  def body(self) -> List[Stmt]: return self.node.body
  @property
  def doc(self) -> str:
    # Require a leading docstring expression statement; error out otherwise.
    body = self.body
    if not (body and isinstance(body[0], ExprStmt) and isinstance(body[0].value, Str)):
      self.error(self.node, 'missing docstring')
    doc_expr = body[0].value
    doc = doc_expr.s
    assert isinstance(doc, str)
    # Reject characters that cannot appear in a generated C string literal line.
    m = invalid_doc_re.search(doc)
    if m:
      # NOTE(review): s/e are currently unused; span kept for future caret rendering.
      s, e = m.span()
      self.error(doc_expr, f'invalid docstring: {m[0]!r}')
    return doc
  def __iter__(self) -> Iterator[Tuple[str,AST,Any]]:
    'Iterate over a source and return (name, AST statement, runtime value) triples.'
    for stmt in self.body:
      name:str
      if isinstance(stmt, AnnAssign) and isinstance(stmt.target, Name):
        name = stmt.target.id
      elif isinstance(stmt, (AsyncFunctionDef, ClassDef, FunctionDef)):
        name = stmt.name
      elif isinstance(stmt, (Assign, Import, ImportFrom)):
        # Plain assignments and imports are not declarations; skip silently.
        continue
      elif isinstance(stmt, ExprStmt) and isinstance(stmt.value, Str):
        continue # Docstring.
      else:
        type_name = type(stmt).__name__
        self.warn(stmt, msg=f'unexpected interface statement: {type_name}')
        continue
      yield (name, stmt, self.vals[name])
def parse_pyi_module(path:str) -> ScopeSource:
  '''
  Parse .pyi declarations by both execing the source, and also parsing it into an AST.
  The former lets us inspect the dynamic objects;
  the latter lets us distinguish between declarations and imports.
  '''
  src = read_from_path(path)
  # Parse src into an AST Module.
  module = parse(src, filename=path)
  # Compile.
  try: code = compile(module, filename=path, mode='exec', optimize=1)
  except SyntaxError as e:
    line1 = e.lineno or 0 # If lineno is None, then line0 in our diagnostic becomes -1, which will print as '0'.
    exit(src_diagnostic(path, line0=line1-1, col0=(e.offset or 0), msg=str(e)))
  except ValueError as e: exit(src_diagnostic(path, line0=0, col0=0, msg=str(e)))
  # Exec.
  # NOTE: this executes the .pyi file; only run the tool on trusted input.
  globals:Dict[str,Any] = {'__builtins__': __builtins__}
  exec(code, globals) # As of python3.7, passing separate locals does not work because type annotation lookup is broken.
  return ScopeSource(path=path, node=module, vals=globals)
# Parsing is dispatched over syntax type.
@singledispatch
def parse_decl(syntax:AST, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
  'Default implementation raises.'
  raise Exception(f'unknown syntax type: {name}; type: {syntax}')
@parse_decl.register
def _(syntax:AnnAssign, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
  'Parse an annotated variable declaration.'
  scope.warn(syntax, f'assignment not implemented')
@parse_decl.register
def _(syntax:AsyncFunctionDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
  'Async function.'
  scope.warn(syntax, f'async function def is not implemented')
@parse_decl.register
def _(syntax:FunctionDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
  'Function declaration.'
  # Classify the declaration from the enclosing scope and any decorator wrapper.
  is_method = isinstance(scope, Class)
  if is_method:
    if isinstance(obj, classmethod):
      kind = FuncKind.Class
    elif isinstance(obj, staticmethod):
      kind = FuncKind.Static
    else: # Instance method.
      kind = FuncKind.Method
  else: # Plain module function.
    kind = FuncKind.Plain
  is_class_method = isinstance(obj, (classmethod, staticmethod)) # Not sure if it is correct to handle both kinds the same way.
  if is_class_method:
    func = obj.__func__
  else:
    func = obj
  doc = func.__doc__ or ''
  sig = signature(func)
  pars:List[Par] = []
  for i, p in enumerate(sig.parameters.values()):
    n = p.name
    t = p.annotation
    d = p.default
    #k = p.kind # POSITIONAL_ONLY, POSITIONAL_OR_KEYWORD, KEYWORD_ONLY, VAR_KEYWORD.
    # Resolve string (forward-reference) annotations against the module globals.
    if isinstance(t, str):
      try: t = global_vals[t]
      except KeyError: scope.error(syntax, f'parameter {n!r} has invalid string annotation: {t!r}')
    if i == 0 and is_method:
      # The leading method parameter must follow convention and needs no annotation.
      expected_name = 'cls' if is_class_method else 'self'
      if n != expected_name: scope.error(syntax, f'parameter {n!r} has unexpected name; expected {expected_name!r}')
    elif t == empty: scope.error(syntax, f'parameter {n!r} has no type annotation')
    pars.append(Par(name=n, type=t, dflt=d))
  ret = sig.return_annotation
  if isinstance(ret, str):
    try: ret = global_vals[ret]
    except KeyError: scope.error(syntax, f'return type has invalid string annotation: {ret!r}')
  ret_ti = type_infos.get(ret, type_info_any)
  if ret is not None and ret_ti.return_conv is None:
    scope.error(syntax, f'return type is mapped to a C type that cannot be converted to a return value: {ret!r}')
  type_name = scope.name if is_method else None
  scope.decls.append(Func(name=name, type_name=type_name, sig=sig, pars=pars, ret=ret, doc=doc, kind=kind))
@parse_decl.register
def _(syntax:ClassDef, name:str, obj:Any, scope:Scope, global_vals:Dict[str,Any]) -> None:
  'Class declaration.'
  # Recurse into the class body, collecting its members into a Class scope.
  class_source = ScopeSource(path=scope.path, node=syntax, vals=vars(obj))
  c = Class(path=scope.path, name=name, doc=class_source.doc)
  for member_name, decl_syntax, member in class_source:
    parse_decl(decl_syntax, name=member_name, obj=member, scope=c, global_vals=global_vals)
  # Register this custom type in our global dictionary.
  type_infos[obj] = TypeInfo(obj,
    c_type=f'{name} *', c_init='NULL', c_arg_parser_fmt='o', return_conv='(PyObject*)')
  scope.decls.append(c)
# Code generation.
_Writers = Tuple[Callable[[VarArg(str)],None],...] # Cheap hack to provide convenience writer functions.
def write_module(mod:ExtMod, c:TextIO, h:TextIO) -> None:
  '''Generate code for a module: C source into *c*, header into *h*.

  Defines six writer closures (b/c/h x Z/L: both/source/header, raw/line)
  that all downstream write_* functions share via a `_Writers` tuple.
  '''
  def bZ(*strings:str) -> None:
    'Write to both the source and the header.'
    for s in strings:
      c.write(s)
      h.write(s)
  def bL(*strings:str) -> None:
    bZ(*strings, '\n')
  def cZ(*strings:str) -> None:
    'C source only.'
    for s in strings: c.write(s)
  def cL(*strings:str) -> None:
    cZ(*strings, '\n')
  def hZ(*strings:str) -> None:
    'Header only.'
    for s in strings: h.write(s)
  def hL(*strings:str) -> None:
    hZ(*strings, '\n')
  writers = (bZ, bL, cZ, cL, hZ, hL)
  bL('// Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.') # TODO: license config.
  bL()
  hL('#define PY_SSIZE_T_CLEAN')
  hL('#include "Python.h"')
  cL(f'#include "{mod.name}.h"')
  write_scope(scope=mod, prefix='', writers=writers)
  cL()
  cL()
  cL( 'static struct PyModuleDef module_def = {')
  cL( '  PyModuleDef_HEAD_INIT,')
  cL(f'  .m_name = "{mod.name}",')
  cL(f'  .m_doc = {mod.name}_doc,')
  cL( '  .m_size = 0,')
  cL(f'  .m_methods = {mod.name}_methods,')
  cL( '  .m_slots = NULL, // Single-phase initialization.')
  cL( '};')
  cL()
  cL()
  cL('PyMODINIT_FUNC')
  # BUG FIX: the init function name was hard-coded as `PyInit_hashing_cpy`;
  # CPython requires it to be `PyInit_<module name>` for the import to succeed.
  cL(f'PyInit_{mod.name}(void) {{')
  cL()
  cL('  PyObject *module = PyModule_Create(&module_def);')
  cL('  if (!module) return NULL;')
  for decl in mod.decls:
    if not isinstance(decl, Class): continue
    type_obj = decl.name + '_type'
    cL()
    cL(f'  if (PyType_Ready(&{type_obj}) < 0) return NULL;')
    # BUG FIX: previously INCREF'd the hard-coded `Aquahash_type` regardless
    # of which class was being registered; use the current type object.
    cL(f'  Py_INCREF(&{type_obj});')
    cL(f'  PyModule_AddObject(module, {type_obj}.tp_name, (PyObject *)&{type_obj});')
  cL()
  cL('  return module;')
  cL('}')
def write_doc(name:str, doc:str, writers:_Writers) -> None:
  'Emit a PyDoc_STRVAR declaration named `{name}_doc` for *doc* into the C source.'
  bZ, bL, cZ, cL, hZ, hL = writers
  cL()
  cL(f'PyDoc_STRVAR({name}_doc,')
  # One C string literal per docstring line; adjacent literals concatenate.
  for doc_line in doc.strip().split('\n'):
    cL(c_quote(doc_line))
  cL(');')
def write_scope(scope:Scope, prefix:str, writers:_Writers) -> None:
  'Emit the docstring, member declarations, and PyMethodDef table for *scope*.'
  (bZ, bL, cZ, cL, hZ, hL) = writers
  cL()
  write_doc(name=scope.name, doc=scope.doc, writers=writers)
  # Collect PyMethodDef entries returned by write_func ('' entries are skipped).
  methods:List[str] = []
  for decl in scope.decls:
    if isinstance(decl, Var):
      write_var(decl, writers=writers)
    elif isinstance(decl, Func):
      method = write_func(decl, prefix=prefix, writers=writers)
      if method: methods.append(method)
    elif isinstance(decl, Class):
      write_class(decl, writers=writers)
    else: raise NotImplementedError
  cL()
  cL()
  cL(f'static struct PyMethodDef {scope.name}_methods[] = {{')
  for method in methods:
    cL('  ', method, ',')
  # Sentinel entry terminating the method table.
  cL('  {NULL, NULL}')
  cL('};')
invalid_doc_re = re.compile(r'[^\n -~]+')
def c_quote(s:str) -> str:
  '''Return *s* as a double-quoted C string literal.

  BUG FIX: escaping was previously unimplemented (`q = s`), so docstrings
  containing quotes or backslashes produced invalid C. Characters are now
  escaped via the `c_chars` table below.
  '''
  q = ''.join(c_chars.get(ch, ch) for ch in s)
  return f'"{q}"'
# Characters that must be escaped inside a C string literal.
c_chars = {
  '\n' : '\\n',
  '\\' : '\\\\',
  '"' : '\\"',
}
def write_var(var:Var, writers:_Writers) -> None:
  'Emit a placeholder comment for a variable declaration (codegen not yet implemented).'
  bZ, bL, cZ, cL, hZ, hL = writers
  bL()
  bL()
  bL(f'// `{var.name}:{var.type}`.')
def write_func(func:Func, prefix:str, writers:_Writers) -> str:
  '''Emit the header stub and C wrapper for *func*.

  Returns the PyMethodDef entry for the function, or '' for __new__ (which
  is wired into the type object rather than the method table).
  '''
  (bZ, bL, cZ, cL, hZ, hL) = writers
  name = prefix + func.name
  bL()
  bL()
  bL(f'// `def {func.name}{func.sig}`.')
  write_doc(name=name, doc=func.doc, writers=writers)
  # The leading C parameter depends on the function kind: module, instance, or type.
  if func.kind == FuncKind.Plain:
    lead_c_par = 'PyObject *module'
    lead_h_par:List[str] = []
  elif func.kind == FuncKind.Method:
    lead_c_par = f'{func.type_name} *{func.pars[0].name}'
    lead_h_par = [lead_c_par]
  else:
    lead_c_par = f'PyTypeObject *{func.pars[0].name}'
    lead_h_par = [lead_c_par]
  pars = func.pars
  if func.kind != FuncKind.Plain: pars = pars[1:] # Drop the leading variable, which is handled above.
  has_pars = bool(pars)
  # Header stub: the inner function the implementor fills out by hand.
  h_pars = ', '.join(lead_h_par + [f'{p.ti.c_type}{p.name}' for p in pars])
  ret_ti = type_infos.get(func.ret, type_info_any)
  h_ret_type = ret_ti.c_type
  hL(f'inline static {h_ret_type}{name}({h_pars}) {{')
  hL('}')
  cL()
  cL()
  # Outer wrapper: parses Python arguments, calls the inner function, converts the result.
  c_args = 'PyObject *args, PyObject *kwargs' if has_pars else 'PyObject *noargs'
  cL(f'static PyObject *_{name}({lead_c_par}, {c_args}) {{')
  if ret_ti.c_type != 'void ':
    cL(f'  {ret_ti.c_type}_ret = {ret_ti.c_init};')
  cL('  PyObject *ret = NULL;')
  # Generate argument local variables.
  for par in pars:
    cL(f'  {par.ti.c_type}{par.name} = {par.ti.c_init};')
  if has_pars:
    cL()
    c_arg_strs = ', '.join(f'"{p.name}"' for p in pars)
    cL(f'  static const char * const _keywords[] = {{{c_arg_strs}, NULL}};')
    # Build the parser format string; '|$' marks where optional keyword args begin.
    parser_fmts:List[str] = []
    fmt_dflt = False
    for p in pars:
      if not fmt_dflt and p.dflt is not empty: # TODO: this would be better determined by value of Parameter.kind.
        fmt_dflt = True
        parser_fmts.append('|$')
      parser_fmts.append(p.ti.c_arg_parser_fmt)
    parser_fmt = ''.join(parser_fmts)
    fmt_name = func.type_name if (func.type_name and func.name == '__new__') else func.name
    cL(f'  static _PyArg_Parser _parser = {{"{parser_fmt}:{fmt_name}", _keywords, 0}};')
    c_arg_addrs = ', '.join(f'&{p.name}' for p in pars)
    cL(f'  if (!_PyArg_ParseTupleAndKeywordsFast(args, kwargs, &_parser, {c_arg_addrs})) goto cleanup;')
  cL()
  h_args = ', '.join(p.name for p in func.pars) # Note: original list includes the first argument.
  if ret_ti.type is None:
    # void inner function: return None from the wrapper.
    cL(f'  {name}({h_args});')
    cL( '  ret = Py_None;')
    cL(f'  Py_INCREF(ret);')
  else:
    cL(f'  _ret = {name}({h_args});')
    cL(f'  ret = {ret_ti.return_conv}(_ret);')
  if has_pars:
    # Cleanup section releases any owned argument resources (e.g. Py_buffer).
    cL('\n  cleanup:')
    for par in pars:
      cleanup = par.c_arg_cleanup
      if cleanup: cL(f'  {cleanup};')
  cL('  return ret;')
  cL('}')
  if func.type_name and func.name == '__new__': return '' # Not a member of the method table.
  method_kind = 'METH_VARARGS|METH_KEYWORDS' if has_pars else 'METH_NOARGS'
  return f'{{"{func.name}", (PyCFunction)_{name}, {method_kind}, {name}_doc}}'
def write_class(class_:Class, writers:_Writers) -> None:
    'Emit the C boilerplate for one extension class: dealloc pair, scoped members, and the PyTypeObject.'
    (bZ, bL, cZ, cL, hZ, hL) = writers
    name = class_.name
    # Banner in the bridge output.
    bL()
    bL()
    bL(f'// `class {name}`.')
    bL()
    # User-overridable dealloc hook in the header; the C wrapper calls it and
    # then releases the object.
    hL(f'inline static void {name}_dealloc({name} *self) {{')
    hL('}')
    cL(f'static void _{name}_dealloc({name} *self) {{')
    cL(f'  {name}_dealloc(self);')
    cL( '  PyObject_Del(self);')
    cL( '}')
    # Emit the class members with names prefixed by `<ClassName>_`.
    write_scope(class_, prefix=name + '_', writers=writers)
    bL()
    # Static type object registering the class with CPython.
    cL(f'static PyTypeObject {name}_type = {{')
    cL(' PyVarObject_HEAD_INIT(NULL, 0)')
    cL(f' .tp_name = "{name}",')
    cL(f' .tp_basicsize = sizeof({name}),')
    cL(f' .tp_doc = {name}_doc,')
    cL(f' .tp_dealloc = (destructor)_{name}_dealloc,')
    cL( ' .tp_flags = Py_TPFLAGS_DEFAULT,')
    cL(f' .tp_methods = {name}_methods,')
    cL(f' .tp_new = _{name}___new__,')
    cL( '};')
def src_diagnostic(path:str, line0:int, col0:int, msg:str, text:str=None) -> str:
    '''Format a compiler-style diagnostic: "path:line:col: msg", the offending
    source line, and a caret marking the column. Positions are 0-based on
    input and reported 1-based.'''
    pad = ' ' * col0
    if text is None:
        # No line text supplied: read it from the file on demand.
        text = read_line_from_path(path, line_index=line0, default='<MISSING>')
    return f'{path}:{line0+1}:{col0+1}: {msg}.\n  {text}\n  {pad}^'
def node_diagnostic(path:str, node:AST, msg:str) -> str:
    'Render a source diagnostic for an AST node; nodes without position info (e.g. Module) map to 1:1.'
    line = getattr(node, 'lineno', 1) - 1  # AST line numbers are 1-based.
    col = getattr(node, 'col_offset', 0)
    return src_diagnostic(path=path, line0=line, col0=col, msg=msg)
# Script entry point.
if __name__ == '__main__': main()
| StarcoderdataPython |
11287962 | <filename>msl/qt/io.py
"""
I/O helper functions.
"""
import os
import sys
import fnmatch
from . import QtWidgets, QtGui, QtCore, application, prompt
__all__ = (
'get_drag_enter_paths',
'get_icon',
'icon_to_base64',
'rescale_icon'
)
def get_icon(obj, size=None, mode=QtCore.Qt.KeepAspectRatio):
    """Convert *obj* to a :class:`QtGui.QIcon`.

    Accepted types for *obj*:

    * :class:`QtGui.QIcon`, :class:`QtGui.QPixmap` or :class:`QtGui.QImage`;
    * a ``QtWidgets.QStyle.StandardPixmap`` enum (or its plain :class:`int`
      value) naming one of Qt's built-in pixmaps;
    * :class:`bytes`/:class:`bytearray`/:class:`QtCore.QByteArray` holding a
      Base64-encoded icon (see :func:`icon_to_base64`);
    * a :class:`str`: either the path to an icon file (a bare filename is also
      searched for in :obj:`sys.path` and ``os.environ['PATH']``), or
      ``'<dll_or_exe>|<icon index>'`` for an icon embedded in a Windows
      DLL/EXE file.

    Parameters
    ----------
    obj : :class:`object`
        The object to convert (see above).
    size : :class:`int`, :class:`float`, :class:`tuple` of :class:`int` or :class:`QtCore.QSize`, optional
        Rescale the result: int -> width == height, float -> scale factor,
        tuple -> (width, height), :obj:`None` -> keep the original size.
    mode : `QtCore.Qt.AspectRatioMode <https://doc.qt.io/qt-5/qt.html#AspectRatioMode-enum>`_, optional
        How the aspect ratio is handled while rescaling.

    Returns
    -------
    :class:`QtGui.QIcon`
        The converted icon.

    Raises
    ------
    :exc:`IOError`
        If the icon cannot be found.
    :exc:`TypeError`
        If the data type of `obj` or `size` is not supported.
    """
    result = None
    if isinstance(obj, QtGui.QIcon):
        result = obj
    elif isinstance(obj, str):
        if '|' in obj:
            # Icon embedded in a Windows DLL/EXE: extract via Base64 round trip.
            result = get_icon(icon_to_base64(obj))
        elif os.path.isfile(obj):
            result = QtGui.QIcon(obj)
        else:
            # Bare filename: look through sys.path and the PATH variable.
            for directory in sys.path + os.environ['PATH'].split(os.pathsep):
                candidate = os.path.join(directory, obj)
                if os.path.isfile(candidate):
                    result = QtGui.QIcon(candidate)
                    break
            if result is None:
                raise IOError("Cannot find icon file '{}'".format(obj))
    elif isinstance(obj, QtWidgets.QStyle.StandardPixmap):
        result = QtGui.QIcon(application().style().standardIcon(obj))
    elif isinstance(obj, int):
        # A plain int is accepted when it matches one of the QStyle.SP_* values.
        standard_values = [value for name, value in vars(QtWidgets.QStyle).items()
                           if name.startswith('SP_')]
        if obj not in standard_values:
            raise IOError('Invalid QStyle.StandardPixmap enum value of {}'.format(obj))
        result = QtGui.QIcon(application().style().standardIcon(obj))
    elif isinstance(obj, QtGui.QPixmap):
        result = QtGui.QIcon(obj)
    elif isinstance(obj, QtGui.QImage):
        result = QtGui.QIcon(QtGui.QPixmap.fromImage(obj))
    elif isinstance(obj, (bytes, bytearray, QtCore.QByteArray)):
        image = QtGui.QImage()
        image.loadFromData(QtCore.QByteArray.fromBase64(obj))
        result = QtGui.QIcon(QtGui.QPixmap.fromImage(image))
    if result is None:
        raise TypeError('Icon object has unsupported data type {}'.format(type(obj)))
    if size is None:
        return result
    return QtGui.QIcon(rescale_icon(result, size, mode))
def icon_to_base64(icon=None, size=None, mode=QtCore.Qt.KeepAspectRatio, fmt='PNG'):
    """Convert *icon* to a Base64-encoded :class:`QtCore.QByteArray`.

    Useful for storing icons in a database, a data-URI scheme, or directly in
    a Python module as Base64 variables instead of shipping icon files.

    Parameters
    ----------
    icon : :class:`object`, optional
        Anything accepted by :func:`get_icon`. If :obj:`None`, a file dialog
        asks the user to pick an icon file.
    size : :class:`int`, :class:`float`, :class:`tuple` of :class:`int` or :class:`QtCore.QSize`, optional
        Rescale the icon before encoding (int -> width == height,
        float -> scale factor, tuple -> (width, height), None -> keep size).
    mode : QtCore.Qt.AspectRatioMode, optional
        Aspect-ratio handling while rescaling.
    fmt : :class:`str`, optional
        Image format: ``BMP``, ``JPG``, ``JPEG`` or ``PNG``.

    Returns
    -------
    :class:`QtCore.QByteArray`
        The Base64 representation of the icon (an empty array if the user
        cancelled the dialog or the chosen file was not a valid icon).

    Raises
    ------
    :exc:`IOError`
        If the icon file cannot be found.
    :exc:`ValueError`
        If `fmt` is not one of the supported formats.
    """
    fmt = fmt.upper()
    ALLOWED_FORMATS = ['BMP', 'JPG', 'JPEG', 'PNG']
    if fmt not in ALLOWED_FORMATS:
        raise ValueError('Invalid format {}. Must be one of: {}'.format(fmt, ', '.join(ALLOWED_FORMATS)))

    if isinstance(icon, str) and '|' in icon:
        # extract an icon from a Windows DLL/EXE file
        # uses ctypes and the .NET Framework to convert the icon to base64
        # import here in case pythonnet is not installed
        import clr
        import ctypes
        clr.AddReference('System.Drawing')
        from System.Drawing.Imaging import ImageFormat
        shell32 = ctypes.windll.shell32
        # Map our format names onto the .NET ImageFormat enumeration.
        img_fmts = {
            'BMP': ImageFormat.Bmp,
            'JPG': ImageFormat.Jpeg,
            'JPEG': ImageFormat.Jpeg,
            'PNG': ImageFormat.Png,
        }
        # "path|index": the DLL/EXE file and the zero-based icon index in it.
        s = icon.split('|')
        path = s[0]
        icon_index = int(s[1])
        if icon_index < 0:
            raise IOError('The icon index must be >= 0')
        if not os.path.isfile(path):
            err_msg = "Cannot find DLL/EXE file '{}'".format(s[0])
            if os.path.split(path)[0]:  # then it wasn't just the filename that was specified
                raise IOError(err_msg)
            # Bare name: try the default Windows locations, DLL first, then EXE.
            filename = os.path.splitext(os.path.basename(path))[0]
            path = 'C:/Windows/System32/{}.dll'.format(filename)
            if not os.path.isfile(path):
                path = 'C:/Windows/{}.exe'.format(filename)
                if not os.path.isfile(path):
                    raise IOError(err_msg)
        # extract the handle to the "large" icon
        path_ptr = ctypes.c_char_p(path.encode())
        handle_large = ctypes.c_int()
        res = shell32.ExtractIconExA(path_ptr, icon_index, ctypes.byref(handle_large), ctypes.c_void_p(), 1)
        if res != 1:
            # Check if the icon index is valid
            # (an index of -1 makes ExtractIconExA return the icon count).
            max_index = shell32.ExtractIconExA(path_ptr, -1, ctypes.c_void_p(), ctypes.c_void_p(), 0) - 1
            if icon_index > max_index:
                msg = 'Requested icon {}, the maximum icon index allowed is {}'.format(icon_index, max_index)
            else:
                msg = "ExtractIconExA: Cannot extract icon {} from '{}'".format(icon_index, path)
            raise IOError(msg)
        # get the icon bitmap and convert it to base64 via the .NET runtime
        handle = clr.System.Int32(handle_large.value)
        handle_ptr = clr.System.IntPtr.op_Explicit(handle)
        bmp = clr.System.Drawing.Bitmap.FromHicon(handle_ptr)
        stream = clr.System.IO.MemoryStream()
        bmp.Save(stream, img_fmts[fmt])
        base = QtCore.QByteArray(clr.System.Convert.ToBase64String(stream.GetBuffer()).encode())
        # clean up
        ctypes.windll.user32.DestroyIcon(handle_large)
        stream.Dispose()
        return base

    # ensure that a QApplication exists in order to access Qt classes
    app = application()

    if icon is None:
        # No icon supplied: ask the user to pick a file.
        title = 'Select an icon file to convert to Base64'
        filters = {'Images': ('bmp', 'jpg', 'jpeg', 'png'), 'All files': '*'}
        icon = prompt.filename(title=title, filters=filters)
        if icon is None:
            return QtCore.QByteArray()
        icon = str(icon)

    icon = get_icon(icon)
    try:
        default_size = icon.availableSizes()[-1]  # use the largest size as the default size
    except IndexError:
        prompt.critical('Invalid icon file.')
        return QtCore.QByteArray()

    pixmap = icon.pixmap(default_size)
    if size is not None:
        pixmap = rescale_icon(pixmap, size, mode)

    # Render the pixmap into an in-memory buffer, then Base64-encode it.
    array = QtCore.QByteArray()
    buffer = QtCore.QBuffer(array)
    buffer.open(QtCore.QIODevice.WriteOnly)
    pixmap.save(buffer, fmt)
    buffer.close()
    return array.toBase64()
def get_drag_enter_paths(event, pattern=None):
    """Return the local file paths carried by a :class:`QtGui.QDragEnterEvent`.

    Parameters
    ----------
    event : :class:`QtGui.QDragEnterEvent`
        The drag-enter event.
    pattern : :class:`str`, optional
        When given, only paths matching it (see :func:`fnmatch.fnmatch`)
        are returned.

    Returns
    -------
    :class:`list` of :class:`str`
        The file paths (empty when the event carries no URLs).
    """
    mime = event.mimeData()
    if not mime.hasUrls():
        return []
    # Keep only valid file:// URLs, converted to local paths.
    paths = [str(url.toLocalFile()) for url in mime.urls()
             if url.isValid() and url.scheme() == 'file']
    return paths if pattern is None else fnmatch.filter(paths, pattern)
def rescale_icon(icon, size, mode=QtCore.Qt.KeepAspectRatio):
    """Rescale *icon* and return it as a :class:`QtGui.QPixmap`.

    Parameters
    ----------
    icon : :class:`object`
        Anything accepted by :func:`~msl.qt.io.get_icon`.
    size : :class:`int`, :class:`float`, :class:`tuple` of :class:`int` or :class:`QtCore.QSize`
        Target size: int -> width == height, float -> scale factor,
        tuple -> (width, height), :obj:`None` -> no rescaling.
    mode : QtCore.Qt.AspectRatioMode, optional
        How the aspect ratio is handled while rescaling.

    Returns
    -------
    :class:`QtGui.QPixmap`
        The (possibly rescaled) pixmap.

    Raises
    ------
    :exc:`TypeError`
        If the data type of `size` is not supported.
    """
    if isinstance(icon, QtGui.QIcon):
        # Start from the largest rendering the icon provides.
        try:
            base_size = icon.availableSizes()[-1]
        except IndexError:
            base_size = QtCore.QSize(16, 16)
        pixmap = icon.pixmap(base_size)
    elif isinstance(icon, QtGui.QPixmap):
        pixmap = icon
    else:
        # Any other type: convert through get_icon() first.
        return rescale_icon(get_icon(icon), size, mode)
    if size is None:
        return pixmap
    current = pixmap.size()
    if isinstance(size, int):
        target = QtCore.QSize(size, size)
    elif isinstance(size, float):
        target = QtCore.QSize(int(current.width()*size), int(current.height()*size))
    elif isinstance(size, (list, tuple)):
        if len(size) == 0:
            target = current
        elif len(size) == 1:
            target = QtCore.QSize(size[0], size[0])
        else:
            target = QtCore.QSize(size[0], size[1])
    elif isinstance(size, QtCore.QSize):
        target = size
    else:
        raise TypeError('Unsupported "size" data type of "{}"'.format(type(size)))
    if (target.width() != current.width()) or (target.height() != current.height()):
        pixmap = pixmap.scaled(target, aspectRatioMode=mode)
    return pixmap
| StarcoderdataPython |
3358974 | <gh_stars>0
from .centerline import centerline
import numpy as np
import vtk
from vtk.util import numpy_support
import matplotlib.pyplot as plt
class COGridGen():
    """Curvilinear orthogonal grid generator.

    Builds an nx-by-ny structured grid (:class:`vtk.vtkStructuredGrid`) whose
    rows follow a spline-smoothed channel centerline and whose columns extend
    *width* units across the channel, normal to the local tangent.
    """

    def __init__(self, cl_geom, nx, ny, width, tension=10.0):
        # cl_geom: centerline geometry source, forwarded to `centerline`.
        # nx, ny: number of grid points along / across the centerline.
        # width:  total cross-channel width of the grid.
        # tension: spline tension forwarded to the centerline interpolator.
        self.cl_geom = cl_geom
        self.tension = tension
        self.width = width
        self.nx = nx
        self.ny = ny
        self.cl = centerline(cl_geom, self.nx, self.tension)
        self.xgrid = np.zeros(self.nx*self.ny, dtype=np.double)
        self.ygrid = np.zeros(self.nx*self.ny, dtype=np.double)
        self.sgrid = vtk.vtkStructuredGrid()
        self.sg_points = vtk.vtkPoints()
        self.__buildXYGrid()

    def description(self):
        """Return a human-readable summary of the grid parameters."""
        return r"Uses centerline {} and width {} with nx = {} and ny = {} " \
               "to generate curvilinear orthogonal grid".format(self.cl.getCLShapeFile(), self.width, self.nx, self.ny)

    def getGridDims(self):
        """Return the (nx, ny) grid dimensions."""
        return self.nx, self.ny

    def __buildXYGrid(self):
        """Populate xgrid/ygrid and the VTK point set from the centerline."""
        clx, cly = self.cl.getinterppts()
        delt = np.double(self.width/(self.ny - 1))  # cross-channel spacing
        # Middle-row index: sideways offsets are symmetric about the centerline.
        # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # integer floor division gives the identical value for positive ny.
        nm = (self.ny + 1) // 2
        self.sgrid.SetDimensions(self.nx, self.ny, 1)
        for i in range(0, self.nx):
            for j in range(0, self.ny):
                index = (i*self.ny) + j
                # Offset the centerline point along the local normal direction.
                self.xgrid[index] = clx[i] + delt * (nm-j-1) * np.sin(self.cl.getphiinterp(i))
                self.ygrid[index] = cly[i] - delt * (nm-j-1) * np.cos(self.cl.getphiinterp(i))
                self.sg_points.InsertNextPoint(self.xgrid[index], self.ygrid[index], 0.0)
        self.sgrid.SetPoints(self.sg_points)

    def getXYGrid(self):
        """Return the flat x and y coordinate arrays."""
        return self.xgrid, self.ygrid

    def getGrid(self):
        """Return the centerline object and the VTK structured grid."""
        return self.cl, self.sgrid

    def plotmapgrid(self, valname):
        """Draw the named point-data array of the grid as a filled contour map."""
        vtknodes = self.sgrid.GetPoints().GetData()
        npnodes = numpy_support.vtk_to_numpy(vtknodes)
        x, y, z = npnodes[:, 0], npnodes[:, 1], npnodes[:, 2]
        nx, ny, nz = self.sgrid.GetDimensions()
        # Reshape the flat VTK arrays back to the logical (nx, ny) layout.
        x = np.reshape(x, (nx, ny))
        y = np.reshape(y, (nx, ny))
        val = self.sgrid.GetPointData().GetArray(valname)
        val = np.reshape(val, (nx, ny))
        plt.contourf(x, y, val)
        plt.show()
| StarcoderdataPython |
3491027 | <reponame>ARQtty/parqser
from .base_session import BaseSession
from .empty_session import EmptySession
| StarcoderdataPython |
6446910 | <reponame>hpgl/hpgl<filename>src/geo_testing/test_scripts/psgs_test_regions.py
#
#
# Copyright 2009 HPGL Team
#
# This file is part of HPGL (High Perfomance Geostatistics Library).
#
# HPGL is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2 of the License.
#
# HPGL is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with HPGL. If not, see http://www.gnu.org/licenses/.
#
from geo import *
from sys import *
import os
# Load the continuous input property; -99 marks undefined (masked) cells.
prop_con = load_cont_property("test_data/BIG_SOFT_DATA_CON_160_141_20.INC", -99)
# NOTE(review): grid nx is 166 while the data file name says 160 -- confirm
# which dimension is correct.
grid = SugarboxGrid(166, 141, 20)
# Parameters for sequential Gaussian simulation with simple kriging ("sk").
sgs_params = {
    "radiuses": (20, 20, 20),
    "max_neighbours": 12,
    "covariance_type": covariance.exponential,
    "ranges": (10, 10, 10),
    "sill": 0.4,
    "kriging_type": "sk"}
# Run SGS using the new parallel region-based implementation on 2 workers.
sgs_result1 = sgs_simulation(prop_con, grid, seed=3439275, workers_count=2, use_new_psgs = True, region_size = (20,20,20), **sgs_params)
| StarcoderdataPython |
1666057 | <filename>kerrokantasi/settings/util.py
import copy
import os
from importlib.util import find_spec
def load_local_settings(settings, module_name):
    """
    Execute the module named *module_name* inside the *settings* dict.

    Every key already present in *settings* is visible to the executed module
    as a global, and anything the module assigns is written back into
    *settings*. A module that cannot be found is silently ignored.
    """
    spec = find_spec(module_name)
    if spec is None:
        return
    code = spec.loader.get_code(module_name)
    exec(code, settings)
def get_settings(settings_module):
    """
    Return a deep copy of the settings (upper-cased attributes) of a module.

    :param settings_module: A settings module
    :type settings_module: module
    :return: Dict of settings
    :rtype: dict[str, object]
    """
    upper_cased = {name: value for name, value in vars(settings_module).items()
                   if name.isupper()}
    return copy.deepcopy(upper_cased)
| StarcoderdataPython |
85799 | from flask_marshmallow import Schema
from marshmallow import ValidationError
from .models import EnumUploadFileType
from .models import FileField
from app.models.enum_field import EnumField
def file_validate(file):
    """Reject any uploaded file whose MIME type is not JPEG or PNG."""
    allowed_types = ('image/jpeg', 'image/png')
    if file.mimetype not in allowed_types:
        raise ValidationError('File format not allowed')
class FileUploadFormParameters(Schema):
    """Marshmallow schema for the multipart upload form: the upload type plus the image file."""

    # Which kind of document is being uploaded (invoice / id / evidence),
    # read from the form part of the request.
    upload_type = EnumField(
        EnumUploadFileType,
        by_value=True,
        required=True,
        description='upload type(invoice/id/evidence)',
        location='form',
    )
    # The uploaded picture itself; file_validate restricts it to JPEG/PNG.
    pic_file = FileField(
        required=True,
        validate=file_validate,
        description='image file',
        location='files',
    )
| StarcoderdataPython |
3337316 | <gh_stars>1-10
import nearest_n
import numpy as np
import pandas as pd
def test_NP_removal():
    """NP_removal must return a non-empty DataFrame for an N+P-removal customer."""
    customer = pd.DataFrame(
        data={'LoadEntering': [6200.0],
              'Longitude': [17.0],
              'Latitude': [47.0],
              'NRemoval': [True],
              'PRemoval': [True]})
    df_NP = nearest_n.NP_removal(customer)
    if not isinstance(df_NP, pd.DataFrame):
        raise Exception('Bad type, not a DataFrame!')
    # Idiom fix: `x is True` identity-compare is fragile; DataFrame.empty is
    # already a bool, so test it directly.
    if df_NP.empty:
        raise Exception('Wrong append method!')
def test_nearest_point():
    """nearest_point must find a match for the sample customer in the cleaned data."""
    df = pd.read_csv('clean.csv')
    # NOTE(review): sample() is unseeded, so every run exercises a different
    # 100-row subset -- consider passing random_state for reproducibility.
    df_sample = df.sample(100)
    customer = pd.DataFrame(
        data={'LoadEntering': [6200.0],
              'Longitude': [17.0],
              'Latitude': [47.0],
              'NRemoval': [True],
              'PRemoval': [True]})
    nearest = nearest_n.nearest_point(df_sample, customer)
    # Idiom fix: test truthiness instead of identity with True.
    if nearest.empty:
        raise Exception('Function is broken!')
| StarcoderdataPython |
8193773 | <reponame>towadroid/t3f<gh_stars>100-1000
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from t3f import variables
from t3f import ops
from t3f import initializers
class _VariablesTest():
    """Shared t3f variable tests; `dtype` is supplied by the concrete subclasses below."""

    def testAttributes(self):
        # Test that after converting an initializer into a variable all the
        # attributes stays the same.
        tens = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype)
        tens_v = variables.get_variable('tt_tens', initializer=tens)
        mat = initializers.random_matrix([[3, 2, 2], [3, 3, 3]], tt_rank=3,
                                         dtype=self.dtype)
        mat_v = variables.get_variable('tt_mat', initializer=mat)
        for (init, var) in [[tens, tens_v], [mat, mat_v]]:
            # Shape, TT-rank and matrix-ness must all survive the conversion.
            self.assertEqual(init.get_shape(), var.get_shape())
            self.assertEqual(init.get_raw_shape(), var.get_raw_shape())
            self.assertEqual(init.ndims(), var.ndims())
            self.assertEqual(init.get_tt_ranks(), var.get_tt_ranks())
            self.assertEqual(init.is_tt_matrix(), var.is_tt_matrix())

    def testAssign(self):
        # Assign a fresh random tensor into an existing TT variable and check
        # that the assign op's value matches the variable's new value.
        old_init = initializers.random_tensor([2, 3, 2], tt_rank=2,
                                              dtype=self.dtype)
        tt = variables.get_variable('tt', initializer=old_init)
        new_init = initializers.random_tensor([2, 3, 2], tt_rank=2,
                                              dtype=self.dtype)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        init_value = self.evaluate(ops.full(tt))
        assigner = variables.assign(tt, new_init)
        assigner_value = self.evaluate(ops.full(assigner))
        after_value = ops.full(tt)
        after_value = self.evaluate(after_value)
        self.assertAllClose(assigner_value, after_value)
        # Assert that the value actually changed:
        abs_diff = np.linalg.norm((init_value - after_value).flatten())
        rel_diff = abs_diff / np.linalg.norm((init_value).flatten())
        self.assertGreater(rel_diff, 0.2)
# Concrete test cases: run the shared suite at both float precisions.
class VariablesTestFloat32(tf.test.TestCase, _VariablesTest):
    dtype = tf.float32


class VariablesTestFloat64(tf.test.TestCase, _VariablesTest):
    dtype = tf.float64


if __name__ == "__main__":
    tf.test.main()
| StarcoderdataPython |
9761680 | <reponame>abarnert/floatliteralhack
import ast
import importlib
import importlib.machinery
import decimal
import sys
def _call_with_frames_removed(f, *args, **kwargs):
return f(*args, **kwargs)
class FloatLiteral(float):
    """A float that remembers the literal source text it was built from.

    When constructed from a single string, the original spelling is kept in
    ``_str`` so that Decimal can later reconstruct the exact literal.
    """

    def __new__(cls, *args):
        instance = super().__new__(cls, *args)
        if len(args) == 1 and isinstance(args[0], str):
            instance._str = args[0]
        return instance
    # optionally use _str in repr/str
class Decimal(decimal.Decimal):
    """decimal.Decimal that understands FloatLiteral: when the value carries a
    remembered source spelling (``_str``), build from that exact text."""

    def __new__(cls, value="0", *args, **kwargs):
        # Prefer the original literal text when present; otherwise use the
        # value unchanged.
        value = getattr(value, '_str', value)
        return super().__new__(cls, value, *args, **kwargs)


# Route every later decimal.Decimal(...) call through the literal-aware subclass.
decimal.Decimal = Decimal
class FloatNodeWrapper(ast.NodeTransformer):
    """Rewrites every float literal in a tree into ``FloatLiteral('<text>')``
    so the literal's spelling survives into the runtime value.
    """

    def visit_Constant(self, node):
        # ast.parse produces Constant nodes since Python 3.8; the original
        # visit_Num hook only fired through a deprecated compatibility shim.
        # bool is an int subclass, so isinstance(..., float) excludes it.
        if isinstance(node.value, float):
            return ast.Call(func=ast.Name(id='FloatLiteral', ctx=ast.Load()),
                            args=[ast.Constant(value=str(node.value))], keywords=[])
        return node

    def visit_Num(self, node):
        # Backwards compatibility for trees built with the legacy ast.Num node.
        if isinstance(node.n, float):
            return ast.Call(func=ast.Name(id='FloatLiteral', ctx=ast.Load()),
                            args=[ast.Str(s=str(node.n))], keywords=[])
        return node
class FloatLiteralLoader(importlib.machinery.SourceFileLoader):
    """Source loader that rewrites float literals via FloatNodeWrapper before compiling."""

    def source_to_code(self, data, path, *, _optimize=-1):
        # Decode the raw bytes to source text (handles PEP 263 declarations).
        source = importlib._bootstrap.decode_source(data)
        # First pass: parse only (PyCF_ONLY_AST) so the tree can be rewritten.
        tree = _call_with_frames_removed(compile, source, path, 'exec',
                                         dont_inherit=True,
                                         optimize=_optimize,
                                         flags=ast.PyCF_ONLY_AST)
        # Wrap every float literal in a FloatLiteral(...) call.
        tree = FloatNodeWrapper().visit(tree)
        # The synthesized nodes carry no positions yet; fill them in so the
        # tree compiles.
        ast.fix_missing_locations(tree)
        # Second pass: compile the transformed tree to a code object.
        return _call_with_frames_removed(compile, tree, path, 'exec',
                                         dont_inherit=True,
                                         optimize=_optimize)
# Keep a reference to the default PathFinder (the last entry on sys.meta_path).
_real_pathfinder = sys.meta_path[-1]


class FloatLiteralFinder(type(_real_pathfinder)):
    """Meta-path finder that delegates to the real PathFinder and then swaps
    the returned loader's class so imports compile through FloatLiteralLoader.
    """

    @classmethod
    def find_module(cls, fullname, path=None):
        # NOTE(review): find_module is the legacy finder API (superseded by
        # find_spec) -- confirm the targeted Python versions still call it.
        spec = _real_pathfinder.find_spec(fullname, path)
        if not spec: return spec
        loader = spec.loader
        # HACK: mutate the loader's class in place so source_to_code is ours.
        loader.__class__ = FloatLiteralLoader
        return loader


# Replace the default finder so subsequent imports get the float-literal hook.
sys.meta_path[-1] = FloatLiteralFinder
| StarcoderdataPython |
3526574 | <gh_stars>1-10
import ruamel.yaml
import os
# Sub-directories that every experiment root directory must contain.
BASIC_DIRS=["tensorboard", "exps", "conf", "data", "decoded"]
def load_yaml_config(config_path):
    """Read a YAML experiment config, validate the paths it references, and
    return the parsed dict."""
    with open(config_path, 'r') as handle:
        config = ruamel.yaml.safe_load(handle)
    check_validation(config)
    return config
def check_validation(input):
    """Assert that the experiment directory layout and every referenced file exist.

    :param input: parsed config dict with a 'basic' section (exp_root_path,
        optional augmentation_config_name) and a 'data' section mapping keys
        to file names under <root>/data.
    :raises AssertionError: naming the first missing directory or file.
    """
    # NOTE(review): the parameter shadows the builtin `input`; kept for
    # backward compatibility with existing keyword callers.
    exp_root_path = input["basic"]["exp_root_path"]
    # Bug fix: the failure message previously formatted input["exp_root_path"]
    # (a missing key), so a missing root raised KeyError instead of a readable
    # AssertionError; "exit" was also a typo for "exist".
    assert os.path.isdir(exp_root_path), "{} does not exist".format(exp_root_path)
    for i in BASIC_DIRS:
        file = os.path.join(exp_root_path, i)
        assert os.path.isdir(file), "{} does not exist".format(file)
    for i in input["data"]:
        file = os.path.join(exp_root_path, "data", input["data"][i])
        assert os.path.isfile(file), "{} does not exist".format(file)
    if input["basic"]["augmentation_config_name"]:
        file = os.path.join(exp_root_path, "conf", input["basic"]["augmentation_config_name"])
        assert os.path.isfile(file), "{} does not exist".format(file)
| StarcoderdataPython |
1835061 | import sys
# Module-wide shortcut for stdout writes.
# NOTE(review): bound once at import time -- later rebinding or redirection of
# sys.stdout (e.g. contextlib.redirect_stdout) will not affect this alias.
write = sys.stdout.write
def print_table(data, schema, titles = None):
    '''Prints a list of rows as an ASCII table.

    params: data:   list/tuple of rows; each row is a dict/list/tuple,
                    possibly nested.
            schema: list/tuple describing how to extract each column from a
                    row. An entry is either a single key/index, or a
                    list/tuple of keys forming a path into nested containers,
                    e.g. [key1, key2, [key3, key3-1], [key3, key3-3, key3-3-1]].
            titles: optional list/tuple of column headers; when None, the last
                    key of each schema entry is used as the header.
    '''
    # Verify param types
    if type(data) not in (list, tuple):
        raise TypeError("data must be a list or a tuple")
    if type(schema) not in (list, tuple):
        raise TypeError("schema must be a list or a tuple")
    # Bug fix: the original check was `type(titles) not in (list, tuple, None)`.
    # None is a value, not a type, so that membership test was always True for
    # the default titles=None and every default call raised TypeError.
    if titles is not None and type(titles) not in (list, tuple):
        raise TypeError("titles must be a list or a tuple or None")

    # Use keys as titles if needed
    if titles is None:
        titles = _create_titles(schema)

    lens = _find_lengths(data, schema, titles)

    def _border():
        # One horizontal rule across all columns (width + 4 for padding/pipes).
        for l in lens:
            write(('|{:-<%d}' % (l+4)).format(""))
        write("|\n")

    _border()
    # Column headers.
    for c in range(len(lens)):
        write(('| {:<%d} ' % lens[c]).format(titles[c]))
    write("|\n")
    _border()
    # Data rows.
    for line in data:
        for c in range(len(lens)):
            write(('| {:<%d} ' % lens[c]).format(_col_value(line, schema, c)))
        write("|\n")
    _border()
def _create_titles(schema):
'''Returns a list of column names filled with the keys from schema'''
ret = []
for k in schema:
if type(k) == str:
ret.append(k)
else:
if type(k) in (list, tuple):
ret.append(k[-1])
else:
raise TypeError("Invalid type in schema: Expected list or tuple, got %s." % str(type(k)))
return ret
def _find_lengths(data, schema, titles):
    '''Width of each column: the longest of its title and all of its
    stringified data values.'''
    widths = [len(title) for title in titles]
    for line in data:
        for col in range(len(schema)):
            value_len = len(_col_value(line, schema, col))
            if value_len > widths[col]:
                widths[col] = value_len
    return widths
def _col_value(line, schema, col):
'''Returns the value of col-th column of line according to the schema'''
if type(schema[col]) in (str, int, bool, tuple, float):
return str(line[schema[col]])
else:
val = line
for k in schema[col]:
val = val[k]
return str(val)
return ""
# test code:
#a = [[1,2,3], [4,5,6], [7,8,9]]
#s = [0,1,2]
#t = ["a", "b", "c"]
#
#print_table(a,s,t)
#
#print("")
#
#a = [["teste 1", ["sub 1-1", "sub 1-2"], "aaaa", {"asd":321, "bbb":"bla bla", "c":[1,2,3]}]]
#s = [0, [1,0], [1,1], 2, [3, "asd"], [3,"bbb"], [3,"c",1]]
#t = ["A", "Very long title :)", "3", "TTTT", "5555", "6666", "7777"]
#
#print_table(a,s,t)
| StarcoderdataPython |
241595 | import os
import six
import pytest
import pdf_hunter
if six.PY2:
import mock
else:
from unittest import mock
TEST_URL = "https://github.com/EbookFoundation/free-programming-books/blob/master/free-programming-books.md" # noqa: E501
def test_url_transforms():
pdf_url = "https://github.com/.../blob/master/....pdf"
new_url = pdf_hunter.UrlTransforms.apply(pdf_url)
assert new_url == "https://github.com/.../raw/master/....pdf"
pdf_url = "https://this_will_not_change.pdf"
new_url = pdf_hunter.UrlTransforms.apply(pdf_url)
assert new_url == "https://this_will_not_change.pdf"
def test_get_pdf_urls():
pdf_urls = pdf_hunter.get_pdf_urls(TEST_URL)
assert isinstance(pdf_urls, list)
assert len(pdf_urls) > 0
def test_validate_pdf_url():
pdf_hunter.validate_pdf_url("https://test_site/books/somefile.pdf")
with pytest.raises(TypeError):
pdf_hunter.validate_pdf_url(42) # must be string
with pytest.raises(ValueError):
pdf_hunter.validate_pdf_url("xxx.txt") # must end with '.pdf'
def test_get_pdf_name():
pdf_url = "https://people.gnome.org/~swilmet/glib-gtk-dev-platform.pdf"
file_name = pdf_hunter.get_pdf_name(pdf_url)
assert file_name == "glib-gtk-dev-platform.pdf"
def remove_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def test_download_file():
pdf_url = "https://people.gnome.org/~swilmet/glib-gtk-dev-platform.pdf"
folder_path = os.path.dirname(os.path.abspath(__file__))
file_name = pdf_hunter.get_pdf_name(pdf_url)
file_path = os.path.join(folder_path, file_name)
remove_file(file_path)
pdf_hunter.download_file(pdf_url, folder_path)
assert os.path.isfile(file_path)
remove_file(file_path)
def test_download_file_warning():
pdf_url = "this_will_404.pdf"
folder_path = os.path.dirname(os.path.abspath(__file__))
with pytest.warns(UserWarning):
pdf_hunter.download_file(pdf_url, folder_path)
def get_pdf_urls_test(*args):
return [
"https://people.gnome.org/~swilmet/glib-gtk-dev-platform.pdf",
"http://safehammad.com/downloads/python-idioms-2014-01-16.pdf",
]
@mock.patch("pdf_hunter.search.get_pdf_urls", side_effect=get_pdf_urls_test)
def test_download_pdf_files(mock_func):
folder_path = os.path.dirname(os.path.abspath(__file__))
def remove_test_pdfs():
for pdf_url in get_pdf_urls_test():
file_path = os.path.join(
folder_path, pdf_hunter.get_pdf_name(pdf_url)
)
if os.path.isfile(file_path):
os.remove(file_path)
remove_test_pdfs()
pdf_hunter.download_pdf_files(TEST_URL, folder_path)
assert os.path.isfile(
os.path.join(folder_path, "glib-gtk-dev-platform.pdf")
)
assert os.path.isfile(
os.path.join(folder_path, "python-idioms-2014-01-16.pdf")
)
remove_test_pdfs()
| StarcoderdataPython |
1895712 | <filename>retic/services/general/json.py<gh_stars>1-10
# Json
from json import dumps, loads
# Retic
from retic.services.exceptions import get_file_error_exception
def jsonify(object: any):
"""Convert a object to a JSON string.
:param object: is the client response object, if the object is str,
it returns this value, otherwise it creates a jsonify of the object
"""
try:
# if is none, return a empty string
if not object:
return ""
# if is a bytesm return the same value
elif isinstance(object, bytes):
return object
# if is a string, return the same value
elif isinstance(object, str):
return object
# return the value in string format
elif isinstance(object, dict):
return dumps(object)
# return a error message
else:
raise "error: The format of the object for the response is invalid."
except Exception as e:
return dumps({
u"path": get_file_error_exception(3),
u"error": str(e)
})
def parse(content: str):
"""Deserialize (a str, bytes, or bytearray instance that contains
a JSON document) to a Python object.
:param content: Content of type str, bytes, or bytearray that contains a valid JSON.
"""
return loads(content)
| StarcoderdataPython |
4933118 | from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
import urllib.request
import time
from PIL import Image, ImageOps
from tkinter.filedialog import askopenfilename
import pyperclip
def pose_character():
    """Rotate the loaded character on the pose tab, then return to the gallery tab.

    NOTE(review): relies on a module-level selenium `driver` created elsewhere
    in this script -- confirm it is initialized before calling.
    """
    # Somewhat arbitrary numbers for rotation sliders
    rotate_x = -1
    rotate_y = 13
    # Click the pose tab
    nav_pose_button = driver.find_element_by_css_selector("#pose")
    nav_pose_button.click()
    # Mode X Slider
    rotation_slider_x_node = driver.find_element_by_css_selector("div.slider:nth-of-type(1) > a")
    # Drag the slider handle by (rotate_x, 0) pixels.
    ActionChains(driver).click_and_hold(rotation_slider_x_node) \
        .move_to_element_with_offset(rotation_slider_x_node, rotate_x, 0) \
        .release().perform()
    # Move Y Slider
    rotation_slider_y_node = driver.find_element_by_css_selector("div.slider:nth-of-type(2) > a")
    ActionChains(driver).click_and_hold(rotation_slider_y_node) \
        .move_to_element_with_offset(rotation_slider_y_node, rotate_y, 0) \
        .release().perform()
    # Back to the gallery tab.
    driver.find_element_by_css_selector("#gallery").click()
def prepare_render(input_username):
    """Search Nova Skin for *input_username*, load the skin, and screenshot the render.

    Operates on the module-level Selenium ``driver``. Returns the ``src`` URL
    of the rendered screenshot image, leaving the page back on the gallery tab.
    """
    # Enter the username into the search box
    skin_search_box = driver.find_element_by_id("gallery-search")
    skin_search_box.send_keys(Keys.CONTROL, "a")
    skin_search_box.send_keys(Keys.DELETE)
    # Use the clipboard to avoid pressing the shift button
    pyperclip.copy("@" + input_username)
    skin_search_box.send_keys(Keys.CONTROL, 'v')
    skin_search_box.send_keys(Keys.RETURN)
    # Wait until the search result return
    wait = WebDriverWait(driver, 10)
    wait.until(expected_conditions.presence_of_all_elements_located((By.CLASS_NAME, "partdetail")))
    # Click the load skin button
    skin_load_button = driver.find_element_by_css_selector("div.partzoom.base > button.load")
    skin_load_button.click()
    # The website has a race condition, just give 'er a second ... literally
    time.sleep(1)
    # Click picture button
    driver.find_element_by_css_selector("#screenshot")nothing.click() if False else driver.find_element_by_css_selector("#screenshot").click()
    # Get image src
    wait.until(expected_conditions.presence_of_all_elements_located((By.CLASS_NAME, "fancybox-image")))
    return_value = driver.find_element_by_css_selector("img.fancybox-image").get_attribute("src")
    # Reset to gallery tab
    wait.until(expected_conditions.presence_of_all_elements_located((By.CLASS_NAME, "fancybox-close")))
    driver.find_element_by_css_selector("div.fancybox-close").click()
    wait.until(expected_conditions.invisibility_of_element_located((By.ID, "fancybox-overlay")))
    driver.find_element_by_css_selector("#gallery").click()
    return return_value
def save_with_backgrounds(input_username, input_image, image_size):
    """Composite *input_image* onto five solid-color backgrounds and save each one.

    Output files are written to ``image-output/skin-<username>-<color>.png``.

    :param input_username: Username used in the output file names.
    :param input_image: PIL image with an alpha channel (it is used as its own
        paste mask so transparency is respected).
    :param image_size: Width/height in pixels of the square background canvas.
    """
    # Palette; dict insertion order fixes the save order (blue .. red),
    # matching the original hand-unrolled sequence.
    backgrounds = {
        "blue": "#264653",
        "green": "#2A9D8F",
        "yellow": "#E9C46A",
        "orange": "#F4A261",
        "red": "#E76F51",
    }
    for color_name, color_hex in backgrounds.items():
        canvas = Image.new("RGB", (image_size, image_size), color_hex)
        # Third argument makes input_image its own transparency mask.
        canvas.paste(input_image, (0, 0), input_image)
        canvas.save('image-output/skin-' + input_username + '-' + color_name + '.png')
# Get list of usernames
# One username per line; the file is chosen interactively by the user.
username_list = []
temp_name_file = askopenfilename()
# NOTE(review): this file handle is never closed — consider a with-block.
for line in open(temp_name_file):
    username_list.append(line.strip())
# Load the website
driver = webdriver.Firefox()
driver.get("https://minecraft.novaskin.me/")
assert "Nova Skin" in driver.title
pose_character()
# For each username in the list
for username in username_list:
    # Get image src from page
    imageSrc = prepare_render(username)
    # Save image from src
    response = urllib.request.urlopen(imageSrc)
    with open('novaskin-download/skin-' + username + '.png', 'wb') as f:
        f.write(response.file.read())
    # Make image with transparency
    # Inner expand: 10px transparent margin; fit: crop/scale to 210x210
    # anchored top-left; outer expand: 5px black border -> 220x220 total,
    # matching the 220 canvas passed to save_with_backgrounds below.
    transparent_head = ImageOps.expand(
        ImageOps.fit(
            ImageOps.expand(
                Image.open('novaskin-download/skin-' + username + '.png'),
                border=10, fill=(255, 255, 255, 0)), size=(210, 210), centering=(0, 0)
        ), border=5, fill="black")
    # Save image with different backgrounds
    save_with_backgrounds(username, transparent_head, 220)
print('Done')
| StarcoderdataPython |
1637456 | # data __init__.py
__author__ = ", ".join(["<NAME>"])
__email__ = ", ".join(["<EMAIL>",])
from ._data_tools._data_loader_utils import _create_data_cache as create_data_cache
# from .load_EMT_simulation import load_EMT_simulation
# from .load_LARRY import load_LARRY
# from .load_share_seq import load_share_seq
| StarcoderdataPython |
3323991 | <reponame>staghuntrpg/rpg
#!/usr/bin/env python
import copy
import glob
import os
import time
import numpy as np
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from envs import GridWorldAdaptiveEnv, MGAdaptiveEnv
from algorithm.ppo import PPO
from algorithm.model import Policy
from config import get_config
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv, SingleSubprocVecEnv, SingleDummyVecEnv, SingleSubprocVecEnv2, SingleDummyVecEnv2
from utils.util import update_linear_schedule
from utils.storage import RolloutStorage
import shutil
def make_parallel_env(args, policy_candidates=None):
    """Build the vectorized adaptive environment described by *args*.

    Returns a dummy (single-thread) or subprocess vector env; the ``*2``
    variants additionally expose the critic's full observation.
    """
    def get_env_fn(rank):
        def init_env():
            name = args.env_name
            if name == "StagHuntAdaptive":
                assert args.num_agents == 1, ("only 1 agents is supported, check the config.py.")
                env = MGAdaptiveEnv(args, policy_candidates)
            elif name == "StagHuntGWAdaptive":
                assert args.num_agents == 2, ("only 2 agents is supported, check the config.py.")
                env = GridWorldAdaptiveEnv(args, policy_candidates)
            else:
                print("Can not support the " + args.env_name + "environment." )
                raise NotImplementedError
            # Distinct seed per worker keeps parallel rollouts decorrelated.
            env.seed(args.seed + rank * 1000)
            return env
        return init_env
    thunks = [get_env_fn(rank) for rank in range(args.n_rollout_threads)]
    if args.n_rollout_threads == 1:
        vec_cls = SingleDummyVecEnv2 if args.critic_full_obs else SingleDummyVecEnv
    else:
        vec_cls = SingleSubprocVecEnv2 if args.critic_full_obs else SingleSubprocVecEnv
    return vec_cls(thunks)
def main():
    """Train one adaptive PPO agent against a pool of previously-trained fixed policies.

    Pipeline: parse config -> seed numpy/torch -> create fresh log/model dirs ->
    load the fixed opponent policy candidates from disk -> build vectorized
    adaptive envs -> PPO training loop with per-opponent TensorBoard logging
    and periodic checkpointing of the adaptive agent's model.
    """
    args = get_config()
    # cuda
    if args.cuda and torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.set_num_threads(args.n_training_threads)
        if args.cuda_deterministic:
            # Reproducibility at the cost of cudnn autotuning speed.
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
    else:
        device = torch.device("cpu")
        torch.set_num_threads(args.n_training_threads)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # path
    model_dir = Path('./results') / args.env_name / args.algorithm_name / ("run"+str(args.seed))
    if args.critic_full_obs:
        run_dir = model_dir / 'adaptive'
    else:
        run_dir = model_dir / 'adaptive_only'
    log_dir = run_dir / 'logs'
    save_dir = run_dir / 'models'
    # NOTE(review): os.makedirs raises if the directory already exists —
    # presumably each run is expected to use a fresh run directory.
    os.makedirs(str(log_dir))
    os.makedirs(str(save_dir))
    logger = SummaryWriter(str(log_dir))
    print("\n Now we have %i fixed policy! Train Single Adaptive Policy... \n" %args.num_policy_candidates)
    # Append "Adaptive" so make_parallel_env and the stats branches below
    # select the adaptive environment variants.
    args.env_name = args.env_name + "Adaptive"
    # Load the frozen opponent policies saved by the earlier training stage.
    policy_candidates = []
    for i in range(args.num_policy_candidates):
        ac = torch.load(str(model_dir) +("/models/Policy%i" %(i+1)) + "-agent0_model.pt")['model'].cpu()
        policy_candidates.append(ac)
    # env
    envs = make_parallel_env(args, policy_candidates)
    #Policy network
    # agent 0
    actor_critic = Policy(envs.observation_space[0],
                  envs.action_space[0],
                  num_agents = args.num_agents,
                  base_kwargs={'lstm': args.lstm,
                               'naive_recurrent': args.naive_recurrent_policy,
                               'recurrent': args.recurrent_policy,
                               'hidden_size': args.hidden_size})
    actor_critic.to(device)
    agent0 = PPO(actor_critic,
                 0,
                 args.clip_param,
                 args.ppo_epoch,
                 args.num_mini_batch,
                 args.data_chunk_length,
                 args.value_loss_coef,
                 args.entropy_coef,
                 logger,
                 lr=args.lr,
                 eps=args.eps,
                 max_grad_norm=args.max_grad_norm,
                 use_clipped_value_loss= args.use_clipped_value_loss)
    #replay buffer
    rollout = RolloutStorage(args.num_agents,
                0,
                args.episode_length,
                args.n_rollout_threads,
                envs.observation_space[0],
                envs.action_space[0],
                actor_critic.recurrent_hidden_state_size)
    # reset
    if args.critic_full_obs:
        obs, obs_critic, select_opponent = envs.reset()
    else:
        obs, select_opponent = envs.reset()
    # rollout
    # Seed step 0 of the buffer with the initial observation; only flat
    # (1-D) observation spaces are supported.
    if len(envs.observation_space[0]) == 1:
        if args.critic_full_obs:
            rollout.share_obs[0].copy_(torch.tensor(obs_critic.reshape(args.n_rollout_threads, -1)))
        else:
            rollout.share_obs[0].copy_(torch.tensor(obs.reshape(args.n_rollout_threads, -1)))
        rollout.obs[0].copy_(torch.tensor(obs[:,0,:]))
        rollout.recurrent_hidden_states.zero_()
        rollout.recurrent_hidden_states_critic.zero_()
        rollout.recurrent_c_states.zero_()
        rollout.recurrent_c_states_critic.zero_()
    else:
        raise NotImplementedError
    rollout.to(device)
    # run
    collective_return = []
    apple_consumption = []
    waste_cleared = []
    sustainability = []
    fire = []
    start = time.time()
    episodes = int(args.num_env_steps) // args.episode_length // args.n_rollout_threads
    all_episode = 0
    # Per-opponent episode counters, used as TensorBoard x-axis values.
    all_episode_adaptive = np.zeros(args.num_policy_candidates)
    for episode in range(episodes):
        if args.use_linear_lr_decay:
            update_linear_schedule(agent0.optimizer,
                                   episode,
                                   episodes,
                                   args.lr)
        for step in range(args.episode_length):
            with torch.no_grad():
                value, action0, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic, recurrent_c_states, recurrent_c_states_critic = actor_critic.act(rollout.share_obs[step],
                    rollout.obs[step],
                    rollout.recurrent_hidden_states[step],
                    rollout.recurrent_hidden_states_critic[step],
                    rollout.recurrent_c_states[step],
                    rollout.recurrent_c_states_critic[step],
                    rollout.masks[step])
            # rearrange action
            # Convert discrete action indices to one-hot vectors per thread.
            actions_env = []
            for i in range(args.n_rollout_threads):
                one_hot_action = np.zeros((1, envs.action_space[0].n))
                one_hot_action[0][action0[i]] = 1
                actions_env.append(one_hot_action)
            # Obser reward and next obs
            if args.critic_full_obs:
                obs, obs_critic, select_opponent, reward, done, infos = envs.step(actions_env)
            else:
                obs, select_opponent, reward, done, infos = envs.step(actions_env)
            # If done then clean the history of observations.
            # insert data in buffer
            # mask = 0 terminates the recurrent state at episode boundaries;
            # bad_mask is always 1 here (no time-limit distinction).
            masks = []
            bad_masks = []
            for i in range(args.num_agents):
                mask = []
                bad_mask = []
                for done_ in done:
                    if done_[i]:
                        mask.append([0.0])
                        bad_mask.append([1.0])
                    else:
                        mask.append([1.0])
                        bad_mask.append([1.0])
                masks.append(torch.FloatTensor(mask))
                bad_masks.append(torch.FloatTensor(bad_mask))
            if len(envs.observation_space[0]) == 1:
                if args.critic_full_obs:
                    rollout.insert(torch.tensor(obs_critic.reshape(args.n_rollout_threads, -1)),
                                torch.tensor(obs[:,0,:]),
                                recurrent_hidden_states,
                                recurrent_hidden_states_critic,
                                recurrent_c_states,
                                recurrent_c_states_critic,
                                action0,
                                action_log_prob,
                                value,
                                torch.tensor(reward[:, 0].reshape(-1,1)),
                                masks[0],
                                bad_masks[0])
                else:
                    rollout.insert(torch.tensor(obs.reshape(args.n_rollout_threads, -1)),
                                torch.tensor(obs[:,0,:]),
                                recurrent_hidden_states,
                                recurrent_hidden_states_critic,
                                recurrent_c_states,
                                recurrent_c_states_critic,
                                action0,
                                action_log_prob,
                                value,
                                torch.tensor(reward[:, 0].reshape(-1,1)),
                                masks[0],
                                bad_masks[0])
            else:
                raise NotImplementedError
        # Bootstrap value for the state after the last step, then compute returns.
        with torch.no_grad():
            next_value = actor_critic.get_value(rollout.share_obs[-1],
                                                rollout.obs[-1],
                                                rollout.recurrent_hidden_states[-1],
                                                rollout.recurrent_hidden_states_critic[-1],
                                                rollout.recurrent_c_states[-1],
                                                rollout.recurrent_c_states_critic[-1],
                                                rollout.masks[-1]).detach()
        rollout.compute_returns(next_value,
                                args.use_gae,
                                args.gamma,
                                args.gae_lambda,
                                args.use_proper_time_limits)
        # update the network
        value_loss, action_loss, dist_entropy = agent0.update(rollout)
        # Per-environment statistics, logged per opponent policy; note
        # args.env_name already has the "Adaptive" suffix appended above.
        if args.env_name == "StagHuntAdaptive":
            coop_num = []
            defect_num = []
            coopdefect_num = []
            defectcoop_num = []
            rew = []
            for info in infos:
                if 'coop&coop_num' in info.keys():
                    coop_num.append(info['coop&coop_num'])
                if 'defect&defect_num' in info.keys():
                    defect_num.append(info['defect&defect_num'])
                if 'coop&defect_num' in info.keys():
                    coopdefect_num.append(info['coop&defect_num'])
                if 'defect&coop_num' in info.keys():
                    defectcoop_num.append(info['defect&coop_num'])
            for i in range(args.n_rollout_threads):
                rew.append(rollout.rewards[:,i,:].sum().cpu().numpy())
            for i in range(args.n_rollout_threads):
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/coop&coop_num_per_episode',
                                   {'coop&coop_num_per_episode': coop_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/defect&defect_num_per_episode',
                                   {'defect&defect_num_per_episode': defect_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/coop&defect_num_per_episode',
                                   {'coop&defect_num_per_episode': coopdefect_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/defect&coop_num_per_episode',
                                   {'defect&coop_num_per_episode': defectcoop_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/reward',
                                   {'reward': np.mean(np.array(rew[i]))},
                                   all_episode_adaptive[select_opponent[i]])
                all_episode_adaptive[select_opponent[i]] += 1
        elif args.env_name == "StagHuntGWAdaptive":
            collective_return = []
            coop_num = []
            gore1_num = []
            gore2_num = []
            hare1_num = []
            hare2_num = []
            for info in infos:
                if 'collective_return' in info.keys():
                    collective_return.append(info['collective_return'])
                if 'coop&coop_num' in info.keys():
                    coop_num.append(info['coop&coop_num'])
                if 'gore1_num' in info.keys():
                    gore1_num.append(info['gore1_num'])
                if 'gore2_num' in info.keys():
                    gore2_num.append(info['gore2_num'])
                if 'hare1_num' in info.keys():
                    hare1_num.append(info['hare1_num'])
                if 'hare2_num' in info.keys():
                    hare2_num.append(info['hare2_num'])
            for i in range(args.n_rollout_threads):
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/collective_return',
                                   {'collective_return': collective_return[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/coop&coop_num_per_episode',
                                   {'coop&coop_num_per_episode': coop_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/gore1_num_per_episode',
                                   {'gore1_num_per_episode': gore1_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/gore2_num_per_episode',
                                   {'gore2_num_per_episode': gore2_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/hare1_num_per_episode',
                                   {'hare1_num_per_episode': hare1_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                logger.add_scalars('Policy-' + str(select_opponent[i]+1) + '/hare2_num_per_episode',
                                   {'hare2_num_per_episode': hare2_num[i]},
                                   all_episode_adaptive[select_opponent[i]])
                all_episode_adaptive[select_opponent[i]] += 1
        # Reset the envs and re-seed the buffer for the next episode.
        if args.critic_full_obs:
            obs, obs_critic, select_opponent = envs.reset()
        else:
            obs, select_opponent = envs.reset()
        if len(envs.observation_space[0]) == 1:
            if args.critic_full_obs:
                rollout.share_obs[0].copy_(torch.tensor(obs_critic.reshape(args.n_rollout_threads, -1)))
            else:
                rollout.share_obs[0].copy_(torch.tensor(obs.reshape(args.n_rollout_threads, -1)))
            rollout.obs[0].copy_(torch.tensor(obs[:,0,:]))
            rollout.recurrent_hidden_states.zero_()
            rollout.recurrent_hidden_states_critic.zero_()
            rollout.recurrent_c_states.zero_()
            rollout.recurrent_c_states_critic.zero_()
            rollout.masks[0].copy_(torch.ones(args.n_rollout_threads, 1))
            rollout.bad_masks[0].copy_(torch.ones(args.n_rollout_threads, 1))
        else:
            raise NotImplementedError
        rollout.to(device)
        # Checkpoint the adaptive agent periodically and on the final episode.
        if (episode % args.save_interval == 0 or episode == episodes - 1):
            torch.save({
                    'model': actor_critic
                    },
                    str(save_dir) + "/agent0_model.pt")
        # log information
        if episode % args.log_interval == 0:
            total_num_steps = (episode + 1) * args.episode_length * args.n_rollout_threads
            end = time.time()
            print("\n Updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
                  .format(episode,
                          episodes,
                          total_num_steps,
                          args.num_env_steps,
                          int(total_num_steps / (end - start))))
            print("value loss: agent0--" + str(value_loss))
    logger.export_scalars_to_json(str(log_dir / 'summary.json'))
    logger.close()
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
12806158 | #!/usr/bin/env python3
#
# astro_demo_common.py - <NAME> - 2019-07-26
#
# Utility functions shared by Python demo programs.
#
import sys
import re
import astronomy
def ParseArgs(args):
    """Parse the command-line arguments shared by the astronomy demo programs.

    Expects ``args`` == [program, latitude, longitude, optional ISO-8601 time].
    Prints usage and exits with status 1 on a wrong argument count.
    Returns an ``(Observer, Time)`` tuple; the time defaults to "now".
    """
    argc = len(args)
    if argc != 3 and argc != 4:
        print('USAGE: {} latitude longitude [yyyy-mm-ddThh:mm:ssZ]'.format(args[0]))
        sys.exit(1)
    latitude = float(args[1])
    longitude = float(args[2])
    when = astronomy.Time.Parse(args[3]) if argc == 4 else astronomy.Time.Now()
    return (astronomy.Observer(latitude, longitude), when)
| StarcoderdataPython |
4945125 | #textos.py
import dao.mysql_connect as myc
class Textos(object):
    """Model of a user-written narrative text (table ``narrativa``)."""
    # Class-level markers document the expected attribute types.
    id_narrativa = int
    id_usuario = int
    texto_narrativa = str
    tempo_gasto = str
    fase = str
    def __init__(self):
        # Assign to *self* so each instance carries its own sentinel values
        # (the original assigned to locals, leaving instances unset).
        self.id_narrativa = -1
        self.id_usuario = -1
        self.texto_narrativa = ""
        self.tempo_gasto = ""
        self.fase = ""
    def busca_processamento(self, id_pessoa, fase):
        """Return the unprocessed narratives of *id_pessoa*, optionally filtered by *fase*.

        Always returns a non-empty list: a single default Textos is appended
        when no row matches.
        """
        lista = []
        # NOTE(review): values are interpolated straight into the SQL string;
        # prefer parameterized queries if myc.buscar supports them.
        sql = ("""select n.* from pessoa pe inner join narrativa n on pe.cod_pessoa = n.id_usuario
        where n.id_narrativa not in ( select id_texto from processamento)
        and cod_pessoa = """ + str(id_pessoa))
        if fase != "":
            # The fase filter must precede GROUP BY to be valid SQL
            # (the original appended it after the GROUP BY clause).
            sql += """ and fase=""" + fase
        sql += " group by id_usuario,texto_narrativa,tempo_gasto,fase"
        resps = myc.buscar(sql)
        for resp in resps:
            tx = Textos()
            tx.id_narrativa = resp[0]
            tx.id_usuario = resp[1]
            tx.texto_narrativa = resp[2]
            tx.tempo_gasto = resp[3]
            tx.fase = resp[4]
            lista.append(tx)
        if len(lista) == 0:
            lista.append(Textos())
        return lista
3294583 | <reponame>ErikMC10/PyBioMed<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, <NAME>, <NAME> and <NAME>
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
'''
#####################################################################################
This module is used for computing the composition, transition and distribution
descriptors based on the different properties of AADs. The AADs with the same
properties is marked as the same number. You can get 147 descriptors for a given
protein sequence. You can freely use and distribute it. If you hava any problem,
you could contact with us timely!
References:
[1]: <NAME>, <NAME>, <NAME> and <NAME>. Prediction
of protein folding class using global description of amino acid sequence. Proc.Natl.
Acad.Sci.USA, 1995, 92, 8700-8704.
[2]:<NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
Recognition of a Protein Fold in the Context of the SCOP classification. Proteins:
Structure, Function and Genetics,1999,35,401-407.
Authors: <NAME> and <NAME>.
Date: 2016.06.04
Email: <EMAIL>
#####################################################################################
'''
import string, math, copy
# The 20 canonical amino-acid one-letter codes.
AALetter = ["A", "R", "N", "D", "C", "E", "Q", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]
# Each property dict maps a class label ('1'/'2'/'3') to the residues in that class.
_Hydrophobicity = {'1': 'RKEDQN', '2': 'GASTPHY', '3': 'CLVIMFW'}
# '1'stand for Polar; '2'stand for Neutral, '3' stand for Hydrophobicity
_NormalizedVDWV = {'1': 'GASTPD', '2': 'NVEQIL', '3': 'MHKFRYW'}
# '1'stand for (0-2.78); '2'stand for (2.95-4.0), '3' stand for (4.03-8.08)
_Polarity = {'1': 'LIFWCMVY', '2': 'CPNVEQIL', '3': 'KMHFRYW'}
# '1'stand for (4.9-6.2); '2'stand for (8.0-9.2), '3' stand for (10.4-13.0)
_Charge = {'1': 'KR', '2': 'ANCQGHILMFPSTWYV', '3': 'DE'}
# '1'stand for Positive; '2'stand for Neutral, '3' stand for Negative
_SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
_SolventAccessibility = {'1': 'ALFCGIVW', '2': 'RKQEND', '3': 'MPSTHY'}
# '1'stand for Buried; '2'stand for Exposed, '3' stand for Intermediate
_Polarizability = {'1': 'GASDT', '2': 'CPNVEQIL', '3': 'KMHFRYW'}
# '1'stand for (0-0.108); '2'stand for (0.128-0.186), '3' stand for (0.219-0.409)
##You can continually add other properties of AADs to compute descriptors of protein sequence.
# Parallel tuples: property dicts and their descriptor-name prefixes.
_AATProperty = (
    _Hydrophobicity, _NormalizedVDWV, _Polarity, _Charge, _SecondaryStr, _SolventAccessibility, _Polarizability)
_AATPropertyName = (
    '_Hydrophobicity', '_NormalizedVDWV', '_Polarity', '_Charge', '_SecondaryStr', '_SolventAccessibility',
    '_Polarizability')
##################################################################################################
def StringtoNum(ProteinSequence, AAProperty):
    """
    ###############################################################################################
    Transform the protein sequence into the string form such as 32123223132121123.
    Usage:
    result=StringtoNum(protein,AAProperty)
    Input: protein is a pure protein sequence.
    AAProperty is a dict form containing classification of amino acids such as _Polarizability.
    Output: result is a string such as 123321222132111123222
    ###############################################################################################
    """
    # str.replace works on both Python 2 and 3; string.replace() was removed
    # in Python 3. Strings are immutable, so no defensive deepcopy is needed.
    TProteinSequence = ProteinSequence
    for group, residues in AAProperty.items():
        for residue in residues:
            TProteinSequence = TProteinSequence.replace(residue, group)
    return TProteinSequence
def CalculateComposition(ProteinSequence, AAProperty, AAPName):
    """
    ###############################################################################################
    Compute the three composition (C) descriptors for the given property.
    Usage:
    result=CalculateComposition(protein,AAProperty,AAPName)
    Input: protein is a pure protein sequence.
    AAProperty is a dict form containing classification of amino acids such as _Polarizability.
    AAPName is a string used for indicating a AAP name.
    Output: result is a dict form containing composition descriptors based on the given property.
    ###############################################################################################
    """
    TProteinSequence = StringtoNum(ProteinSequence, AAProperty)
    Num = len(TProteinSequence)
    Result = {}
    # Fraction of residues falling in each of the three property classes.
    for group in ('1', '2', '3'):
        Result[AAPName + 'C' + group] = round(float(TProteinSequence.count(group)) / Num, 3)
    return Result
def CalculateTransition(ProteinSequence, AAProperty, AAPName):
    """
    ###############################################################################################
    Compute the three transition (T) descriptors for the given property.
    Usage:
    result=CalculateTransition(protein,AAProperty,AAPName)
    Input:protein is a pure protein sequence.
    AAProperty is a dict form containing classification of amino acids such as _Polarizability.
    AAPName is a string used for indicating a AAP name.
    Output:result is a dict form containing transition descriptors based on the given property.
    ###############################################################################################
    """
    CTD = StringtoNum(ProteinSequence, AAProperty)
    Num = len(CTD)
    Result = {}
    # A transition counts adjacent residues from two different classes,
    # in either order, normalized by the number of adjacent pairs.
    for a, b in (('1', '2'), ('1', '3'), ('2', '3')):
        Result[AAPName + 'T' + a + b] = round(
            float(CTD.count(a + b) + CTD.count(b + a)) / (Num - 1), 3)
    return Result
def CalculateDistribution(ProteinSequence, AAProperty, AAPName):
    """
    ###############################################################################################
    Compute the distribution (D) descriptors for the given property: the relative
    positions (as percentages of sequence length) of the first, 25%, 50%, 75% and
    last occurrence of each property class.
    Usage:
    result=CalculateDistribution(protein,AAProperty,AAPName)
    Input:protein is a pure protein sequence.
    AAProperty is a dict form containing classification of amino acids such as _Polarizability.
    AAPName is a string used for indicating a AAP name.
    Output:result is a dict form containing Distribution descriptors based on the given property.
    ###############################################################################################
    """
    TProteinSequence = StringtoNum(ProteinSequence, AAProperty)
    Result = {}
    Num = len(TProteinSequence)
    for group in ('1', '2', '3'):
        num = TProteinSequence.count(group)
        # Collect the 1-based position of every occurrence of this class.
        # str.find replaces the Python-2-only string.find() of the original.
        cds = []
        search_from = 0
        for _ in range(num):
            search_from = TProteinSequence.find(group, search_from) + 1
            cds.append(search_from)
        if cds == []:
            # Class absent from the sequence: all five quantiles are 0.
            for tag in ('001', '025', '050', '075', '100'):
                Result[AAPName + 'D' + group + tag] = 0
        else:
            Result[AAPName + 'D' + group + '001'] = round(float(cds[0]) / Num * 100, 3)
            Result[AAPName + 'D' + group + '025'] = round(float(cds[int(math.floor(num * 0.25)) - 1]) / Num * 100, 3)
            Result[AAPName + 'D' + group + '050'] = round(float(cds[int(math.floor(num * 0.5)) - 1]) / Num * 100, 3)
            Result[AAPName + 'D' + group + '075'] = round(float(cds[int(math.floor(num * 0.75)) - 1]) / Num * 100, 3)
            Result[AAPName + 'D' + group + '100'] = round(float(cds[-1]) / Num * 100, 3)
    return Result
##################################################################################################
def CalculateCompositionHydrophobicity(ProteinSequence):
    """Composition (C) descriptors of a protein under the Hydrophobicity grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on Hydrophobicity.
    """
    return CalculateComposition(ProteinSequence, _Hydrophobicity, '_Hydrophobicity')
def CalculateCompositionNormalizedVDWV(ProteinSequence):
    """Composition (C) descriptors of a protein under the normalized van der
    Waals volume grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on NormalizedVDWV.
    """
    return CalculateComposition(ProteinSequence, _NormalizedVDWV, '_NormalizedVDWV')
def CalculateCompositionPolarity(ProteinSequence):
    """Composition (C) descriptors of a protein under the Polarity grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on Polarity.
    """
    return CalculateComposition(ProteinSequence, _Polarity, '_Polarity')
def CalculateCompositionCharge(ProteinSequence):
    """Composition (C) descriptors of a protein under the Charge grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on Charge.
    """
    return CalculateComposition(ProteinSequence, _Charge, '_Charge')
def CalculateCompositionSecondaryStr(ProteinSequence):
    """Composition (C) descriptors of a protein under the secondary-structure grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on SecondaryStr.
    """
    return CalculateComposition(ProteinSequence, _SecondaryStr, '_SecondaryStr')
def CalculateCompositionSolventAccessibility(ProteinSequence):
    """Composition (C) descriptors of a protein under the solvent-accessibility grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on SolventAccessibility.
    """
    return CalculateComposition(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
def CalculateCompositionPolarizability(ProteinSequence):
    """Composition (C) descriptors of a protein under the Polarizability grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three composition descriptors based on Polarizability.
    """
    return CalculateComposition(ProteinSequence, _Polarizability, '_Polarizability')
##################################################################################################
##################################################################################################
def CalculateTransitionHydrophobicity(ProteinSequence):
    """Transition (T) descriptors of a protein under the Hydrophobicity grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three transition descriptors based on Hydrophobicity.
    """
    return CalculateTransition(ProteinSequence, _Hydrophobicity, '_Hydrophobicity')
def CalculateTransitionNormalizedVDWV(ProteinSequence):
    """Transition (T) descriptors of a protein under the normalized van der
    Waals volume grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three transition descriptors based on NormalizedVDWV.
    """
    return CalculateTransition(ProteinSequence, _NormalizedVDWV, '_NormalizedVDWV')
def CalculateTransitionPolarity(ProteinSequence):
    """Transition (T) descriptors of a protein under the Polarity grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three transition descriptors based on Polarity.
    """
    return CalculateTransition(ProteinSequence, _Polarity, '_Polarity')
def CalculateTransitionCharge(ProteinSequence):
    """Transition (T) descriptors of a protein under the Charge grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three transition descriptors based on Charge.
    """
    return CalculateTransition(ProteinSequence, _Charge, '_Charge')
def CalculateTransitionSecondaryStr(ProteinSequence):
    """Transition (T) descriptors of a protein under the secondary-structure grouping.

    Input: ProteinSequence is a pure protein sequence.
    Output: dict of the three transition descriptors based on SecondaryStr.
    """
    return CalculateTransition(ProteinSequence, _SecondaryStr, '_SecondaryStr')
def CalculateTransitionSolventAccessibility(ProteinSequence):
    """
    Transition (T) descriptors of a protein based on the solvent-accessibility
    classification of its amino acids.

    Usage: result = CalculateTransitionSolventAccessibility(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_SolventAccessibility' transition descriptor names to values.
    """
    return CalculateTransition(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
def CalculateTransitionPolarizability(ProteinSequence):
    """
    Transition (T) descriptors of a protein based on the Polarizability
    classification of its amino acids.

    Usage: result = CalculateTransitionPolarizability(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_Polarizability' transition descriptor names to values.
    """
    return CalculateTransition(ProteinSequence, _Polarizability, '_Polarizability')
##################################################################################################
##################################################################################################
def CalculateDistributionHydrophobicity(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the Hydrophobicity
    classification of its amino acids.

    Usage: result = CalculateDistributionHydrophobicity(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_Hydrophobicity' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _Hydrophobicity, '_Hydrophobicity')
def CalculateDistributionNormalizedVDWV(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the normalized
    van-der-Waals-volume classification of its amino acids.

    Usage: result = CalculateDistributionNormalizedVDWV(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_NormalizedVDWV' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _NormalizedVDWV, '_NormalizedVDWV')
def CalculateDistributionPolarity(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the Polarity
    classification of its amino acids.

    Usage: result = CalculateDistributionPolarity(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_Polarity' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _Polarity, '_Polarity')
def CalculateDistributionCharge(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the Charge
    classification of its amino acids.

    Usage: result = CalculateDistributionCharge(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_Charge' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _Charge, '_Charge')
def CalculateDistributionSecondaryStr(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the secondary-structure
    classification of its amino acids.

    Usage: result = CalculateDistributionSecondaryStr(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_SecondaryStr' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _SecondaryStr, '_SecondaryStr')
def CalculateDistributionSolventAccessibility(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the solvent-accessibility
    classification of its amino acids.

    Usage: result = CalculateDistributionSolventAccessibility(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_SolventAccessibility' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _SolventAccessibility, '_SolventAccessibility')
def CalculateDistributionPolarizability(ProteinSequence):
    """
    Distribution (D) descriptors of a protein based on the Polarizability
    classification of its amino acids.

    Usage: result = CalculateDistributionPolarizability(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict mapping '_Polarizability' distribution descriptor names to values.
    """
    return CalculateDistribution(ProteinSequence, _Polarizability, '_Polarizability')
##################################################################################################
def CalculateC(ProteinSequence):
    """
    All Composition (C) descriptors based on the seven amino-acid properties.

    Usage: result = CalculateC(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict containing every composition descriptor.
    """
    composition_calculators = (
        CalculateCompositionPolarizability,
        CalculateCompositionSolventAccessibility,
        CalculateCompositionSecondaryStr,
        CalculateCompositionCharge,
        CalculateCompositionPolarity,
        CalculateCompositionNormalizedVDWV,
        CalculateCompositionHydrophobicity,
    )
    result = {}
    for calculator in composition_calculators:
        result.update(calculator(ProteinSequence))
    return result
def CalculateT(ProteinSequence):
    """
    All Transition (T) descriptors based on the seven amino-acid properties.

    Usage: result = CalculateT(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict containing every transition descriptor.
    """
    transition_calculators = (
        CalculateTransitionPolarizability,
        CalculateTransitionSolventAccessibility,
        CalculateTransitionSecondaryStr,
        CalculateTransitionCharge,
        CalculateTransitionPolarity,
        CalculateTransitionNormalizedVDWV,
        CalculateTransitionHydrophobicity,
    )
    result = {}
    for calculator in transition_calculators:
        result.update(calculator(ProteinSequence))
    return result
def CalculateD(ProteinSequence):
    """
    All Distribution (D) descriptors based on the seven amino-acid properties.

    Usage: result = CalculateD(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict containing every distribution descriptor.
    """
    distribution_calculators = (
        CalculateDistributionPolarizability,
        CalculateDistributionSolventAccessibility,
        CalculateDistributionSecondaryStr,
        CalculateDistributionCharge,
        CalculateDistributionPolarity,
        CalculateDistributionNormalizedVDWV,
        CalculateDistributionHydrophobicity,
    )
    result = {}
    for calculator in distribution_calculators:
        result.update(calculator(ProteinSequence))
    return result
def CalculateCTD(ProteinSequence):
    """
    All CTD (Composition / Transition / Distribution) descriptors based on
    the seven amino-acid properties.

    Usage: result = CalculateCTD(protein)
    Input: protein is a pure protein sequence (one-letter amino-acid codes).
    Output: dict containing every CTD descriptor.
    """
    # Delegate to the three aggregate calculators instead of repeating the
    # 21 individual calls; the merged dict is identical.
    result = {}
    result.update(CalculateC(ProteinSequence))
    result.update(CalculateT(ProteinSequence))
    result.update(CalculateD(ProteinSequence))
    return result
##################################################################################################
if __name__ == "__main__":
    # Smoke test on an arbitrary sequence. Use print() as a function: it works
    # on Python 2 (single argument) and the old statement form `print x` is a
    # SyntaxError on Python 3.
    protein = "ADGCGVGEGTGQGPMCNCMCMKWVYADEDAADLESDSFADEDASLESDSFPWSNQRVFCSFADEDAS"
    print(CalculateCTD(protein))
| StarcoderdataPython |
1974783 | <reponame>kul-forbes/nmpc-codegen<filename>tst/python/trailer/trailer_simple_controller.py
import sys
sys.path.insert(0, '../../../src_python')
import nmpccodegen as nmpc
import nmpccodegen.tools as tools
import nmpccodegen.models as models
import nmpccodegen.controller as controller
import nmpccodegen.Cfunctions as cfunctions
import nmpccodegen.example_models as example_models
import ctypes
import numpy as np
import matplotlib.pyplot as plt
import math
## -- GENERATE STATIC FILES --
# Generate the static files and folder structure of the controller first.
location = "../../../test_controller_builds/trailer_simple_controller"
tools.Bootstrapper.bootstrap(location, python_interface_enabled=True)
## -----------------------------------------------------------------
# Continuous-time system equations of the trailer model (L is the hitch length).
(system_equations, number_of_states, number_of_inputs,
 coordinates_indices) = example_models.get_trailer_model(L=0.5)
step_size = 0.1          # sampling period [s]
simulation_time = 5      # total simulated horizon [s]
number_of_steps = math.ceil(simulation_time / step_size)
integrator = "RK44"      # discretization scheme passed to the model
# The input must stay inside the box [-1, 1] x [-1, 1].
constraint_input = cfunctions.IndicatorBoxFunction([-1, -1], [1, 1])
model = models.Model_continious(system_equations, constraint_input, step_size,
                                number_of_states, number_of_inputs,
                                coordinates_indices, integrator)
# Quadratic stage cost around the reference state/input.
Q = np.diag([1, 1, 1])
R = np.eye(model.number_of_inputs, model.number_of_inputs)
reference_state = np.array([2, 2, 0])
reference_input = np.array([0, 0])
stage_cost = controller.Stage_cost_QR(model, Q, R)
# Define the PANOC NMPC controller and generate its C code.
trailer_controller = controller.Nmpc_panoc(location, model, stage_cost)
trailer_controller.horizon = number_of_steps
trailer_controller.step_size = step_size
trailer_controller.integrator_casadi = True
trailer_controller.generate_code()
## -- simulate controller --
# Set up a simulator to test the generated controller.
sim = tools.Simulator(trailer_controller.location)
initial_state = np.array([0., 0., 0.])
state = initial_state
state_history = np.zeros((number_of_states, number_of_steps))
# NOTE(review): the controller is queried only once and the same optimal input
# is re-applied at every step; calling simulate_nmpc with the updated state on
# each iteration would give true closed-loop behaviour -- confirm intent.
result_simulation = sim.simulate_nmpc(state, reference_state, reference_input)
for i in range(1, number_of_steps):
    # Fixed: the second printed component previously used index 0 twice.
    print("The optimal input is: [" + str(result_simulation.optimal_input[0]) + "," + str(result_simulation.optimal_input[1]) + "]")
    state = model.get_next_state_numpy(state, result_simulation.optimal_input)
    state_history[:, i] = np.reshape(state[:], number_of_states)
print(state_history[:, 0:5])
plt.figure(1)
example_models.trailer_print(state_history)
plt.xlim([0, 2.5])
plt.ylim([0, 2.5])
plt.show()
8053328 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
from mo.graph.graph import Graph
from mo.pipeline.common import get_ir_version
from mo.utils import class_registration
def unified_pipeline(argv: argparse.Namespace):
    """
    Run the unified conversion pipeline: build the graph from the parsed
    command-line parameters and apply every registered replacer stage to it.

    :param argv: parsed Model Optimizer command-line parameters.
    :return: the transformed Graph.
    """
    graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv))
    replacer_stages = [
        class_registration.ClassType.LOADER,
        class_registration.ClassType.FRONT_REPLACER,
        class_registration.ClassType.MIDDLE_REPLACER,
        class_registration.ClassType.BACK_REPLACER,
    ]
    class_registration.apply_replacements(graph, replacer_stages)
    return graph
| StarcoderdataPython |
8043748 | <filename>sumatra/recordstore/serialization.py<gh_stars>100-1000
"""
Handles serialization/deserialization of record store contents to/from JSON.
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import unicode_literals
from builtins import str
import json
from datetime import datetime
from sumatra import programs, launch, datastore, versioncontrol, parameters, dependency_finder
from sumatra.records import Record
from ..core import get_registered_components
from sumatra.formatting import record2json, record2dict
def encode_record(record, indent=None):
    """Serialize a Record to a JSON string (thin wrapper around record2json)."""
    return record2json(record, indent)
def encode_project_info(long_name, description):
    """Encode a Sumatra project's name and description as a JSON string,
    omitting any field that is empty or None."""
    fields = (("name", long_name), ("description", description))
    return json.dumps({key: value for key, value in fields if value})
def keys2str(D):
    """
    Return a copy of dictionary `D` whose keys have been converted to
    strings; values are left untouched.
    """
    return {str(key): value for key, value in D.items()}
def decode_project_list(content):
    """Parse a JSON-encoded list of projects into Python objects."""
    return json.loads(content)
def decode_project_data(content):
    """Parse JSON-encoded project metadata into a dictionary."""
    return json.loads(content)
# shouldn't this be called decode_project_info, for symmetry with encode_project_info?
def datestring_to_datetime(s):
    """
    Parse a timestamp string into a datetime, accepting both the
    space-separated form ("%Y-%m-%d %H:%M:%S") and the "T"-separated
    ISO form. Returns None when `s` is None.
    """
    if s is None:
        return None
    try:
        return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
def build_record(data):
    """
    Create a Sumatra Record from a nested dictionary (the decoded JSON
    serialization of a record).

    Handles several historical serialization layouts; the inline comments
    note which Sumatra version introduced or retired each variant.
    """
    # Executable: look up a registered subclass by name, falling back to the
    # generic Executable base class.
    edata = data["executable"]
    cls = get_registered_components(programs.Executable).get(edata["name"], programs.Executable)
    executable = cls(edata["path"], edata["version"], edata.get("options", ""))
    executable.name = edata["name"]
    # Repository: find the first VCS module that defines the serialized type.
    rdata = data["repository"]
    repos_cls = None
    for m in versioncontrol.vcs_list:
        if hasattr(m, rdata["type"]):
            repos_cls = getattr(m, rdata["type"])
            break
    if repos_cls is None:
        repos_cls = versioncontrol.base.Repository
    repository = repos_cls(rdata["url"])
    repository.upstream = rdata.get("upstream", None)
    # Parameters: either a plain dict literal or a named ParameterSet class.
    pdata = data["parameters"]
    if pdata["type"] == "dict":
        # NOTE(review): eval of stored content assumes a trusted record store.
        parameter_set = eval(pdata["content"])
        assert isinstance(parameter_set, dict)
    else:
        parameter_set = getattr(parameters, pdata["type"])(pdata["content"])
    ldata = data["launch_mode"]
    lm_parameters = ldata["parameters"]
    if isinstance(lm_parameters, str):  # prior to 0.3
        lm_parameters = eval(lm_parameters)
    launch_mode = getattr(launch, ldata["type"])(**keys2str(lm_parameters))
    def build_data_store(ddata):
        # Reconstruct a datastore instance from its serialized type/parameters.
        ds_parameters = ddata["parameters"]
        if isinstance(ds_parameters, str):  # prior to 0.3
            ds_parameters = eval(ds_parameters)
        return getattr(datastore, ddata["type"])(**keys2str(ds_parameters))
    data_store = build_data_store(data["datastore"])
    if "input_datastore" in data:  # 0.4 onwards
        input_datastore = build_data_store(data["input_datastore"])
    else:
        input_datastore = datastore.FileSystemDataStore("/")
    input_data = data.get("input_data", [])
    if isinstance(input_data, str):  # 0.3
        input_data = eval(input_data)
    if input_data:
        if isinstance(input_data[0], str):  # versions prior to 0.4
            # Old format stored bare paths; digests were not recorded.
            input_data = [datastore.DataKey(path, digest=datastore.IGNORE_DIGEST, creation=None)
                          for path in input_data]
        else:
            input_data = [datastore.DataKey(keydata["path"], keydata["digest"],
                                            creation=datestring_to_datetime(keydata.get("creation", None)),
                                            **keys2str(keydata["metadata"]))
                          for keydata in input_data]
    record = Record(executable, repository, data["main_file"],
                    data["version"], launch_mode, data_store, parameter_set,
                    input_data, data.get("script_arguments", ""),
                    data["label"], data["reason"], data["diff"],
                    data.get("user", ""), input_datastore=input_datastore,
                    timestamp=datestring_to_datetime(data["timestamp"]))
    # A single (non-iterable) tag is normalized into a one-element tuple.
    tags = data["tags"]
    if not hasattr(tags, "__iter__"):
        tags = (tags,)
    record.tags = set(tags)
    record.output_data = []
    if "output_data" in data:
        for keydata in data["output_data"]:
            data_key = datastore.DataKey(keydata["path"], keydata["digest"],
                                         creation=datestring_to_datetime(keydata.get("creation", None)),
                                         **keys2str(keydata["metadata"]))
            record.output_data.append(data_key)
    elif "data_key" in data:  # (versions prior to 0.4)
        for path in eval(data["data_key"]):
            data_key = datastore.DataKey(path, digest=datastore.IGNORE_DIGEST,
                                         creation=None)
            record.output_data.append(data_key)
    record.duration = data["duration"]
    record.outcome = data["outcome"]
    record.stdout_stderr = data.get("stdout_stderr", "")
    record.platforms = [launch.PlatformInformation(**keys2str(pldata)) for pldata in data["platforms"]]
    record.dependencies = []
    for depdata in data["dependencies"]:
        dep_args = [depdata["name"], depdata["path"], depdata["version"],
                    depdata["diff"]]
        if "source" in depdata:  # 0.5 onwards
            dep_args.append(depdata["source"])
        dep = getattr(dependency_finder, depdata["module"]).Dependency(*dep_args)
        record.dependencies.append(dep)
    record.repeats = data.get("repeats", None)
    return record
def decode_record(content):
    """Create a single Sumatra record from its JSON string representation."""
    return build_record(json.loads(content))
def decode_records(content):
    """Create multiple Sumatra records from a JSON array string."""
    return [build_record(data) for data in json.loads(content)]
| StarcoderdataPython |
4851370 | import copy
import random
from functools import wraps
import numpy as np
from deap.gp import PrimitiveTree, compile, cxOnePoint, mutUniform
from scipy.special import softmax
class MultipleGeneGP():
    """A GP individual composed of several primitive trees ("genes")."""

    def __init__(self, content, gene_num):
        # One PrimitiveTree per gene, each freshly generated by ``content``.
        self.gene_num = gene_num
        self.gene = [PrimitiveTree(content()) for _ in range(gene_num)]

    def random_select(self):
        """Pick one gene uniformly at random."""
        index = random.randint(0, self.gene_num - 1)
        return self.gene[index]

    def weight_select(self):
        """Pick a gene with probability softmax(-mean |coefficient|)."""
        mean_coef = np.abs(self.coef)[:, :-1].mean(axis=0)
        probabilities = softmax(-abs(mean_coef))
        chosen = np.random.choice(np.arange(len(mean_coef)), p=probabilities)
        return self.gene[chosen]

    def deterministic_select(self):
        """Pick the gene whose mean |coefficient| is smallest."""
        mean_coef = np.abs(self.coef)[:, :-1].mean(axis=0)
        return self.gene[np.argmax(-mean_coef)]

    def __len__(self):
        # Total size of the individual = sum of all gene sizes.
        return sum(len(tree) for tree in self.gene)
def multiple_gene_evaluation(compiled_genes, x):
    """Evaluate each compiled gene on the argument tuple `x`; return the outputs in order."""
    return [gene(*x) for gene in compiled_genes]
def multiple_gene_initialization(container, generator, gene_num=5):
    """Build a multi-gene individual: `container` is the individual class
    (e.g. MultipleGeneGP) and `generator` produces one gene's content."""
    return container(generator, gene_num)
def multiple_gene_compile(expr: MultipleGeneGP, pset):
    """Compile every gene of `expr` against the primitive set into a callable."""
    return [compile(gene, pset) for gene in expr.gene]
def cxOnePoint_multiple_gene(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """One-point crossover (in place) between one uniformly chosen gene of each parent."""
    cxOnePoint(ind1.random_select(), ind2.random_select())
    return ind1, ind2
def mutUniform_multiple_gene(individual: MultipleGeneGP, expr, pset):
    """Uniform mutation (in place) of one uniformly chosen gene."""
    mutUniform(individual.random_select(), expr, pset)
    return individual,
def cxOnePoint_multiple_gene_weight(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """One-point crossover (in place) between coefficient-weighted gene choices of each parent."""
    cxOnePoint(ind1.weight_select(), ind2.weight_select())
    return ind1, ind2
def mutUniform_multiple_gene_weight(individual: MultipleGeneGP, expr, pset):
    """Uniform mutation (in place) of a coefficient-weighted gene choice."""
    mutUniform(individual.weight_select(), expr, pset)
    return individual,
def cxOnePoint_multiple_gene_deterministic(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """One-point crossover (in place) between each parent's lowest-weight gene."""
    cxOnePoint(ind1.deterministic_select(), ind2.deterministic_select())
    return ind1, ind2
def mutUniform_multiple_gene_deterministic(individual: MultipleGeneGP, expr, pset):
    """Uniform mutation (in place) of the individual's lowest-weight gene."""
    mutUniform(individual.deterministic_select(), expr, pset)
    return individual,
def staticLimit_multiple_gene(key, max_value):
    """
    Decorator factory enforcing a static limit on every gene of the offspring.

    If any gene of a produced individual exceeds `max_value` (as measured by
    `key`), that individual is replaced by a deep copy of a randomly chosen
    input individual.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            backups = [copy.deepcopy(ind) for ind in args]
            offspring = list(func(*args, **kwargs))
            for position, individual in enumerate(offspring):
                if any(key(gene) > max_value for gene in individual.gene):
                    offspring[position] = random.choice(backups)
            return offspring
        return wrapper
    return decorator
def result_calculation(func, data):
    """
    Evaluate the compiled genes on `data` (rows are samples) and return the
    feature matrix: one column per gene plus a trailing bias column of ones.
    Non-array outputs are replaced by a zero column; size-1 arrays are
    broadcast to a full column.
    """
    outputs = multiple_gene_evaluation(func, data.T)
    n_samples = len(data)
    for index, column in enumerate(outputs):
        if not isinstance(column, np.ndarray):
            outputs[index] = np.full(n_samples, 0)
        elif column.size == 1:
            outputs[index] = np.full(n_samples, column)
    return np.concatenate([np.array(outputs).T, np.ones((n_samples, 1))], axis=1)
| StarcoderdataPython |
11331538 | <gh_stars>1-10
from compas_cloud import Proxy
import time
proxy = Proxy()
# Ask the cloud proxy for a remote handle to numpy.dot; the returned object
# forwards calls to the server.
dot = proxy.function('numpy.dot')
a = [[1, 0], [0, 1]]
b = [['a']]
# Shapes (2,2) and (1,1) are incompatible, so this remote call is expected to fail.
dot(a, b)
"""
This should raise an error:
Exception: ValueError:shapes (2,2) and (1,1) not aligned: 2 (dim 1) != 1 (dim 0)
"""
| StarcoderdataPython |
1601993 | """
Module containing classes, methods and functions related to queries to the ZAMG datahub.
"""
import pandas as pd
class ZAMGdatahubQuery:
    """
    A query to the ZAMG datahub for a rectangular grid box.

    Attributes:
        query: dict describing the request (parameters, bounding box, output format).
        params, lat_min, lat_max, lon_min, lon_max: the individual query fields.
        dataset: upper-cased dataset name ("INCA", "SPARTACUS", or other).
        name: label of the grid box.
        output_filename_head: filename prefix used for downloaded data.
    """

    def __init__(self, dataset, params, gridboxlabel, lat_min, lat_max, lon_min, lon_max, output="netcdf"):
        dataset = dataset.upper()
        if dataset == "INCA":
            self.output_filename_head = "incal-hourly"
        elif dataset == "SPARTACUS":
            self.output_filename_head = "spartacus-daily"
        else:
            print("Specified dataset was not 'SPARTACUS' or 'INCA'. Setting the output filename to 'data'...")
            self.output_filename_head = "data"
        # The query dict itself is identical for every dataset.
        self.query = makeQuery(params, gridboxlabel, lat_min, lat_max, lon_min, lon_max, output=output)
        self.params = params
        self.lat_min = lat_min
        self.lat_max = lat_max
        self.lon_min = lon_min
        self.lon_max = lon_max
        self.dataset = dataset
        self.name = gridboxlabel

    def saveQuery(self, filename=None, DIR=None):
        """Save this query to disk; defaults: current directory and
        "<DATASET>_query_<name>" as file name."""
        target_dir = "." if DIR is None else DIR
        target_name = filename if filename is not None else f"{self.dataset}_query_{self.name}"
        saveQuery(self.query, target_name, DIR=target_dir)
def makeQuery(params, gridboxlabel, lat_min, lat_max, lon_min, lon_max, output="netcdf"):
    """
    Assemble a ZAMG datahub query dict for a grid box.

    `output` selects the download format ("netcdf" or "csv"); any other value
    raises ValueError. The matching file extension is stored alongside.
    """
    query = {
        "params": params,
        "gridboxlabel": gridboxlabel,
        "lat_min": lat_min,
        "lat_max": lat_max,
        "lon_min": lon_min,
        "lon_max": lon_max,
    }
    if output == "netcdf":
        extension = "nc"
    elif output == "csv":
        extension = "csv"
    else:
        raise ValueError("The output can only be 'netcdf' or 'csv'.")
    query["output_format"] = output
    # NOTE: key is spelled "extention" for compatibility with existing readers.
    query["file_extention"] = extension
    return query
def saveQuery(query, filename, DIR="."):
    """
    Write `query` to "<DIR>/<filename>.txt" as a tab-separated two-column
    table (key, value) readable by `loadQuery`.

    Fixed: the `filename` argument was previously ignored and the file was
    written to a hard-coded placeholder name.
    """
    queryTable = pd.DataFrame.from_dict(query, orient="index", columns=["query"])
    path = f"{DIR}/{filename}.txt"
    queryTable.to_csv(path, sep="\t")
    print(f'Query saved to "{path}"')
def loadQuery(file):
    """Read a query table written by `saveQuery` back into a plain dict."""
    table = pd.read_table(file, index_col=0)
    return table.to_dict()["query"]
| StarcoderdataPython |
5007780 |
from account.tests.factories import OrgaoFactory, ProfileFactory
from core.models import AssuntoAdministrativo
from core.tests.factories import AssuntoAdministrativoFactory
from django.contrib.auth.models import Group, Permission
from django.test import Client
# All administrative subjects in the database, flattened to their `assunto`
# field values. NOTE(review): evaluated at import time, so this hits the
# database when the module is imported -- confirm that is intended in tests.
assunto_adm = AssuntoAdministrativo.objects.all()
assunto_adm = [assunto.assunto for assunto in assunto_adm]
# Group names and model permission codenames used by the administrative tests.
GRUPO_SUPERIOR_ADMINISTRATIVO = 'COORDENADORES'
GRUPO_ADMINISTRATIVO = 'USUÁRIOS'
PERMISSOES_ADMINISTRATIVO = [
    'view_administrativo',
    'add_administrativo',
    'change_administrativo'
]
class SetUpTestViewsData:
    """
    Shared fixtures for the administrative view tests: an orgao holding the
    access permission, three users linked to it (usuario, normal_user,
    superior), one administrative subject and the two groups; the superior
    user is added to the coordinators group.
    """
    orgao_permission_required = 'access_ADMINISTRATIVO'

    @classmethod
    def setUpTestData(cls):
        cls.client = Client()
        cls.password = '<PASSWORD>'
        cls.orgao = OrgaoFactory()
        orgao_perm = Permission.objects.get(codename=cls.orgao_permission_required)
        cls.orgao.permissions.add(orgao_perm)
        profile0 = ProfileFactory(orgao_link=cls.orgao)
        cls.usuario = profile0.user
        # Fixed: the redacted `<PASSWORD>` placeholder was a syntax error;
        # every test user shares cls.password.
        cls.usuario.set_password(cls.password)
        cls.usuario.save()
        profile1 = ProfileFactory(orgao_link=cls.orgao)
        cls.normal_user = profile1.user
        cls.normal_user.set_password(cls.password)
        cls.normal_user.save()
        profile2 = ProfileFactory(orgao_link=cls.orgao)
        cls.superior = profile2.user
        cls.superior.set_password(cls.password)
        cls.superior.save()
        cls.assunto_adm = AssuntoAdministrativoFactory()
        cls.group_superior = Group(name=GRUPO_SUPERIOR_ADMINISTRATIVO)
        cls.group_superior.save()
        cls.superior.groups.add(cls.group_superior)
        cls.group_adm = Group(name=GRUPO_ADMINISTRATIVO)
        cls.group_adm.save()
class SetUpTestDataBase:
    """
    Base fixtures: an orgao with the administrative access permission, two
    users linked to it (usuario and normal_user) and the empty coordinators
    group (no member is added here; subclasses do that).
    """

    @classmethod
    def setUpTestData(cls):
        cls.client = Client()
        cls.password = '<PASSWORD>'
        cls.orgao = OrgaoFactory()
        orgao_perm = Permission.objects.get(codename='access_ADMINISTRATIVO')
        cls.orgao.permissions.add(orgao_perm)
        profile0 = ProfileFactory(orgao_link=cls.orgao)
        cls.usuario = profile0.user
        # Fixed: the redacted `<PASSWORD>` placeholder was a syntax error;
        # test users share cls.password.
        cls.usuario.set_password(cls.password)
        cls.usuario.save()
        profile1 = ProfileFactory(orgao_link=cls.orgao)
        cls.normal_user = profile1.user
        cls.normal_user.set_password(cls.password)
        cls.normal_user.save()
        cls.group_superior = Group(name=GRUPO_SUPERIOR_ADMINISTRATIVO)
        cls.group_superior.save()
class SetUpTestViewProcessoData(SetUpTestDataBase):
    """Extends the base fixtures with a superior user in the coordinators group."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        profile2 = ProfileFactory(orgao_link=cls.orgao)
        cls.superior = profile2.user
        # Fixed: `<PASSWORD>` placeholder replaced with the shared test password.
        cls.superior.set_password(cls.password)
        cls.superior.save()
        cls.superior.groups.add(cls.group_superior)
class SetUpTestViewAtoData(SetUpTestDataBase):
    """Extends the base fixtures with a superior user in the coordinators group."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        profile2 = ProfileFactory(orgao_link=cls.orgao)
        cls.superior = profile2.user
        # Fixed: `<PASSWORD>` placeholder replaced with the shared test password.
        cls.superior.set_password(cls.password)
        cls.superior.save()
        cls.superior.groups.add(cls.group_superior)
| StarcoderdataPython |
1779300 | <reponame>psh0502/packtpub-crawler
from logs import *
import requests
class Join(object):
    """
    Sends push notifications about downloaded ebooks through the Join
    (joaoapps) REST API.
    """

    # Base endpoint; query parameters are passed separately so that requests
    # URL-encodes them correctly.
    API_URL = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush'

    def __init__(self, config, packpub_info, upload_info):
        self.__config = config
        self.__packpub_info = packpub_info

    def send(self):
        """Notify the configured devices that a new book was downloaded."""
        params = {
            'apikey': self.__config.get('join', 'join.api_key'),
            'deviceId': self.__config.get('join', 'join.device_ids'),
            'title': "New book downloaded from Packt: " + self.__packpub_info['title'].encode('utf-8'),
            'text': self.__packpub_info['description'].encode('utf-8'),
        }
        # Fixed: values are now passed as `params` so requests URL-encodes
        # them; interpolating them straight into the URL produced malformed
        # requests for titles containing '&', '#' or spaces.
        requests.post(self.API_URL, params=params)
        log_success('[+] notification sent to Join')

    def sendError(self, exception, source):
        """Notify that a download failed, linking to the book's landing page."""
        params = {
            'apikey': self.__config.get('join', 'join.api_key'),
            'deviceId': self.__config.get('join', 'join.device_ids'),
            'title': 'packtpub-crawler {source}: Could not download ebook: {title}'.format(
                source=source, title=self.__packpub_info['title']),
            'text': repr(exception),
            'url': self.__packpub_info['landingPageUrl'],
        }
        requests.post(self.API_URL, params=params)
        log_success('[+] error notification sent to Join')
| StarcoderdataPython |
6451600 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
from botocore.docs.service import ServiceDocumenter
class TestS3Docs(BaseDocsFunctionalTest):
    """Functional checks on the generated S3 client documentation."""

    def test_auto_populates_sse_customer_key_md5(self):
        # SSECustomerKeyMD5 is computed by the client, so the docs must mark
        # it as auto-populated rather than user-supplied.
        self.assert_is_documented_as_autopopulated_param(
            service_name='s3',
            method_name='put_object',
            param_name='SSECustomerKeyMD5')

    def test_auto_populates_copy_source_sse_customer_key_md5(self):
        self.assert_is_documented_as_autopopulated_param(
            service_name='s3',
            method_name='copy_object',
            param_name='CopySourceSSECustomerKeyMD5')

    def test_hides_content_md5_when_impossible_to_provide(self):
        # For these operations ContentMD5 is injected automatically, so its
        # example snippet must not appear in the generated docs.
        modified_methods = ['delete_objects', 'put_bucket_acl',
                            'put_bucket_cors', 'put_bucket_lifecycle',
                            'put_bucket_logging', 'put_bucket_policy',
                            'put_bucket_notification', 'put_bucket_tagging',
                            'put_bucket_replication', 'put_bucket_website',
                            'put_bucket_request_payment', 'put_object_acl',
                            'put_bucket_versioning']
        service_contents = ServiceDocumenter(
            's3', self._session).document_service()
        for method_name in modified_methods:
            method_contents = self.get_method_document_block(
                method_name, service_contents)
            self.assertNotIn('ContentMD5=\'string\'',
                             method_contents.decode('utf-8'))

    def test_copy_source_documented_as_union_type(self):
        # CopySource accepts either a string or a dict, and the example must
        # show both alternatives.
        content = self.get_docstring_for_method('s3', 'copy_object')
        dict_form = (
            "{'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}")
        self.assert_contains_line(
            "CopySource='string' or %s" % dict_form, content)

    def test_copy_source_param_docs_also_modified(self):
        content = self.get_docstring_for_method('s3', 'copy_object')
        param_docs = self.get_parameter_document_block('CopySource', content)
        # We don't want to overspecify the test, so I've picked
        # an arbitrary line from the customized docs.
        self.assert_contains_line(
            "You can also provide this value as a dictionary", param_docs)
| StarcoderdataPython |
6461608 | <reponame>ParkJonghyeon/tor_hs_archive_web
from django.db import models
class Date_info(models.Model):
    # Date stored as free text (max 10 chars, presumably "YYYY-MM-DD" --
    # the format is not enforced here; confirm against the writers).
    date = models.CharField(max_length=10)
| StarcoderdataPython |
190714 | """
**Flask App Configuration Settings**
*Python Version 3.8 and above*
Used to setup environment variables for python flask app
"""
__developer__ = "mobius-crypt"
__email__ = "<EMAIL>"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
import os
import typing
# noinspection PyPackageRequirements
from decouple import config
import datetime
class Config:
    """
    **APP Configuration Settings**

    Holds configuration values for the Flask application as instance
    attributes with safe defaults. NOTE(review): the module imports
    ``decouple.config``, so these are presumably meant to be overridden
    from environment variables -- confirm.
    """
    # TODO - Clean up configuration settings
    def __init__(self) -> None:
        # APP URLS
        self.BASE_URL: str = ""           # public base URL of the deployment
        self.APP_ID: str = ""             # application identifier
        self.SEP: str = "#"               # separator token -- usage not visible here, confirm
        self.IS_PRODUCTION: bool = False  # True when running in production
        self.PROJECT = 'justice-ndou'     # project name
        self.ADMIN_UID: str = ''          # UID of the administrator account
# Module-level shared configuration instance.
# Fixed: stray trailing text ("| StarcoderdataPython |") fused onto this line
# was a syntax error.
config_instance: Config = Config()
12809790 | """
Test visuel d'un générateur et solveur de labyrinthe.
Author: Dalker
Date: 2021-04-10
"""
import logging as log
# import concurrent.futures as cf
import matplotlib.pyplot as plt
import generateur_ab as ab
# import solveur_astar_naif as astar
import solveur_astar_heapq as astar_v2
import solveur_astar_v3 as astar_v3
def test(maze, solver, distance, axes):
    """Run *solver* on *maze* with heuristic *distance*, drawing into *axes*.

    Args:
        maze: labyrinth to solve (printable; handed straight to the solver)
        solver: callable ``solver(maze, distance=..., view=...)`` that
            returns a path or ``None``
        distance: heuristic function forwarded to the solver
        axes: matplotlib axes the solver uses for live visualisation
    """
    log.debug("initial maze:\n%s", maze)
    path = solver(maze, distance=distance, view=axes)
    # if path is not None:
    #     print("A* solution found:")
    #     print("\n".join([
    #         "".join(["*" if (nrow, ncol) in path else val
    #                  for ncol, val in enumerate(row)])
    #         for nrow, row in enumerate(str(maze).split("\n"))]))
    if path is None:
        print("No A* solution found.")
def triple_test():
    """Compare three heuristics for the v2 solver: Manhattan, Euclidean
    and the zero distance (which degrades A* to Dijkstra)."""
    maze = ab.Maze(20, 30, 0.1)
    fig = plt.figure()
    cases = [
        (astar_v2.distance1, 1, "A* with Manhattan distance"),
        (astar_v2.distance2, 2, "A* with euclidean distance"),
        (astar_v2.distance0, 3, "A* with 0 distance (= Dijkstra)"),
    ]
    # Create all subplots first (same order as before), then run the
    # solver once per heuristic on the same maze.
    axes = []
    for _, position, title in cases:
        axis = fig.add_subplot(2, 2, position)
        axis.set_title(title)
        axes.append(axis)
    for (heuristic, _, _), axis in zip(cases, axes):
        test(maze, astar_v2.astar, heuristic, axis)
def astar_vs_dijkstra():
    """Compare the Manhattan heuristic against the null one (v3 solver)."""
    size = 50
    maze = ab.Maze(size, size, 0)
    fig = plt.figure()
    dijkstra_ax = fig.add_subplot(1, 2, 1)
    dijkstra_ax.set_title("A* with null heuristic")
    astar_ax = fig.add_subplot(1, 2, 2)
    astar_ax.set_xlim(0, 2 * size)
    astar_ax.set_ylim(2 * size, 0)
    astar_ax.set_axis_off()
    astar_ax.matshow([[0]])
    astar_ax.set_title("A* with Manhattan heuristic")
    # Running both solvers concurrently was tried but fails with the GUI
    # event loop, so they run sequentially.
    test(maze, astar_v3.astar, astar_v3.null_distance, dijkstra_ax)
    test(maze, astar_v3.astar, astar_v3.manhattan_distance, astar_ax)
if __name__ == "__main__":
log.basicConfig(level=log.INFO)
astar_vs_dijkstra()
plt.show()
| StarcoderdataPython |
12845412 | # coding: utf-8
import datetime
import random
from unittest.mock import Mock
from django.db import reset_queries
import pytest
from src.domain.exchange_rate import CurrencyEntity, CurrencyExchangeRateEntity
from src.usecases.exchange_rate import CurrencyInteractor, CurrencyExchangeRateInteractor
from tests.fixtures import currency, exchange_rate
@pytest.mark.unit
def test_currency_interactor_get(currency):
    """get() must delegate to the repository and hand back its currency."""
    repo = Mock()
    repo.get.return_value = currency

    fetched = CurrencyInteractor(repo).get(currency.code)

    assert repo.get.called
    assert fetched.code == currency.code
    assert fetched.name == currency.name
    assert fetched.symbol == currency.symbol
    assert CurrencyEntity.to_string(fetched) == CurrencyEntity.to_string(currency)
@pytest.mark.unit
def test_currency_interactor_get_availables(currency):
    """get_availables() must surface every currency the repo reports."""
    expected_count = random.randint(1, 10)
    repo = Mock()
    repo.get_availables.return_value = [currency] * expected_count

    available = CurrencyInteractor(repo).get_availables()

    assert repo.get_availables.called
    assert isinstance(available, list)
    assert len(available) == expected_count
    assert all(isinstance(item, CurrencyEntity) for item in available)
@pytest.mark.unit
def test_currency_interactor_save(currency):
    """save() must delegate to the repository and return nothing."""
    repo = Mock()
    repo.save.return_value = None

    assert CurrencyInteractor(repo).save(currency) is None
    assert repo.save.called
@pytest.mark.unit
def test_currency_interactor_bulk_save(currency):
    """bulk_save() must delegate to the repository and return nothing."""
    batch = [currency] * random.randint(1, 10)
    repo = Mock()
    repo.bulk_save.return_value = None

    assert CurrencyInteractor(repo).bulk_save(batch) is None
    assert repo.bulk_save.called
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get(exchange_rate):
    """get() must pass the filter through and return the repo's rate."""
    repo = Mock()
    repo.get.return_value = exchange_rate

    fetched = CurrencyExchangeRateInteractor(repo).get(
        source_currency=exchange_rate.source_currency,
        exchanged_currency=exchange_rate.exchanged_currency,
        valuation_date=exchange_rate.valuation_date,
    )

    assert repo.get.called
    assert fetched.source_currency == exchange_rate.source_currency
    assert fetched.exchanged_currency == exchange_rate.exchanged_currency
    assert fetched.valuation_date == exchange_rate.valuation_date
    assert fetched.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        fetched) == CurrencyExchangeRateEntity.to_string(exchange_rate)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_latest(exchange_rate):
    """get_latest() must return today's rate for the currency pair."""
    repo = Mock()
    repo.get.return_value = exchange_rate

    latest = CurrencyExchangeRateInteractor(repo).get_latest(
        source_currency=exchange_rate.source_currency,
        exchanged_currency=exchange_rate.exchanged_currency,
    )

    assert repo.get.called
    assert latest.source_currency == exchange_rate.source_currency
    assert latest.exchanged_currency == exchange_rate.exchanged_currency
    # get_latest is expected to pin the valuation date to today.
    assert latest.valuation_date == datetime.date.today().strftime('%Y-%m-%d')
    assert latest.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        latest) == CurrencyExchangeRateEntity.to_string(exchange_rate)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_rate_series(exchange_rate):
    """get_rate_series() must return the repo's list of float rates."""
    series_length = random.randint(1, 10)
    repo = Mock()
    repo.get_rate_series.return_value = [
        round(random.uniform(0.8, 1.2), 6) for _ in range(series_length)]

    today = datetime.date.today()
    series = CurrencyExchangeRateInteractor(repo).get_rate_series(
        source_currency=exchange_rate.source_currency,
        exchanged_currency=exchange_rate.exchanged_currency,
        date_from=today - datetime.timedelta(days=series_length),
        date_to=today,
    )

    assert repo.get_rate_series.called
    assert isinstance(series, list)
    assert len(series) == series_length
    assert all(isinstance(rate, float) for rate in series)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_time_series(exchange_rate):
    """get_time_series() must return the repo's list of rate entities."""
    series_length = random.randint(1, 10)
    repo = Mock()
    repo.get_time_series.return_value = [exchange_rate] * series_length

    today = datetime.date.today()
    series = CurrencyExchangeRateInteractor(repo).get_time_series(
        source_currency=exchange_rate.source_currency,
        exchanged_currency=exchange_rate.exchanged_currency,
        date_from=today - datetime.timedelta(days=series_length),
        date_to=today,
    )

    assert repo.get_time_series.called
    assert isinstance(series, list)
    assert len(series) == series_length
    assert all(isinstance(item, CurrencyExchangeRateEntity) for item in series)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_save(exchange_rate):
    """save() must delegate to the repository and return nothing."""
    repo = Mock()
    repo.save.return_value = None

    assert CurrencyExchangeRateInteractor(repo).save(exchange_rate) is None
    assert repo.save.called
@pytest.mark.unit
def test_currency_exchange_rate_interactor_bulk_save(exchange_rate):
    """bulk_save() must delegate to the repository and return nothing."""
    batch = [exchange_rate] * random.randint(1, 10)
    repo = Mock()
    repo.bulk_save.return_value = None

    assert CurrencyExchangeRateInteractor(repo).bulk_save(batch) is None
    assert repo.bulk_save.called
| StarcoderdataPython |
228508 | # Generated by Django 3.1.7 on 2021-02-22 19:13
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adjusts the validators on
    # MembershipFee.year so values must fall within 1975-2022.

    dependencies = [
        ("modeemintternet", "0036_auto_20210222_2111"),
    ]

    operations = [
        migrations.AlterField(
            model_name="membershipfee",
            name="year",
            field=models.PositiveIntegerField(
                primary_key=True,
                serialize=False,
                unique=True,
                validators=[
                    # Hard-coded upper bound: will need bumping in
                    # future years (a new migration each time).
                    django.core.validators.MinValueValidator(1975),
                    django.core.validators.MaxValueValidator(2022),
                ],
                verbose_name="Vuosi",
            ),
        ),
    ]
| StarcoderdataPython |
11251347 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, TEAMPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
# import datetime
# from frappe.utils import today
class OTApproval(Document):
    """Doctype controller that collects Attendance records without an OT
    value so overtime hours can be reviewed and approved in bulk."""

    def _append_attendance_rows(self, att_rows):
        """Append each fetched Attendance row to the ``ot_approval`` child
        table, saving after every append (keeps the original row-by-row
        persistence behaviour), then commit once at the end."""
        for att in att_rows:
            self.append("ot_approval", {
                "attendance_name": att.name,
                "employee_id": att.employee,
                "in_time": att.in_time,
                "out_time": att.out_time,
                "qr_scan_time": att.qr_scan_time,
                "total_working_hours": att.working_hours,
                "extra_hours": att.ot,
                "ot_hours": att.ot,
            }).save(ignore_permissions=True)
        frappe.db.commit()

    def get_default_data(self):
        """Load every Attendance row in [from_date, to_date] with no OT."""
        # Parameterized query -- never interpolate values into SQL strings
        # (the previous '%s'-quoting was vulnerable to SQL injection).
        att_rows = frappe.db.sql(
            """select * from `tabAttendance`
               where attendance_date between %s and %s and ot is null""",
            (self.from_date, self.to_date), as_dict=True)
        self._append_attendance_rows(att_rows)

    def get_data_datewise(self):
        """Load rows for the date range, optionally for one employee.

        Returns:
            str: "ok" on completion (consumed by the client script).
        """
        if self.employee_id:
            att_rows = frappe.db.sql(
                """select * from `tabAttendance`
                   where employee = %s and attendance_date between %s and %s
                   and ot is null""",
                (self.employee_id, self.from_date, self.to_date), as_dict=True)
        else:
            att_rows = frappe.db.sql(
                """select * from `tabAttendance`
                   where attendance_date between %s and %s and ot is null""",
                (self.from_date, self.to_date), as_dict=True)
        self._append_attendance_rows(att_rows)
        return "ok"

    def get_data_id(self):
        """Load rows for a single employee over the date range.

        Returns:
            str: "ok" on completion (consumed by the client script).
        """
        att_rows = frappe.db.sql(
            """select * from `tabAttendance`
               where employee = %s and attendance_date between %s and %s
               and ot is null""",
            (self.employee_id, self.from_date, self.to_date), as_dict=True)
        self._append_attendance_rows(att_rows)
        return "ok"

    def update_attendance(self, row):
        """Write the approved OT back onto the Attendance record and log a
        matching Timesheet entry for the employee."""
        frappe.db.set_value("Attendance", row["attendance_name"], "ot", row["ot_hours"])
        new_ts = frappe.new_doc("Timesheet")
        new_ts.employee = row['employee_id']
        new_ts.append("time_logs", {
            "hours": row['ot_hours'],
            "from_time": row['in_time'],
            "to_time": row['out_time'],
        })
        new_ts.save(ignore_permissions=True)
        frappe.db.commit()
| StarcoderdataPython |
5026455 |
import pymysql
# Connect to the MySQL/MariaDB server (host, then open a cursor through
# which SQL statements are issued).
con = pymysql.connect(host = '192.168.93.128',user = 'biguser',password = '<PASSWORD>', db = 'userDB')
cur = con.cursor()

# Best-effort table creation: ignore the database error raised when the
# table already exists, but do not swallow unrelated failures (the
# previous bare `except:` hid every possible error, including typos).
try :
    sql = "create table userTable2(userID char(10), userName char(5), userAge int);"
    cur.execute(sql)
except pymysql.err.MySQLError :
    pass

# Parameterized batch insert: values are bound by the driver instead of
# being spliced into the SQL text, and one executemany() replaces three
# separate execute() round-trips.
rows = [('AAA', 'aaa', 21), ('BBB', 'bbb', 23), ('CCC', 'ccc', 35)]
cur.executemany("insert into userTable2 values(%s, %s, %s);", rows)

con.commit()
cur.close()
con.close()  # close the database connection
print('ok')
1807791 | """Clean Code in Python - Chapter 6: Descriptors
> A Pythonic Implementation
Tests for src/descriptors_pythonic_{1,2}.py
"""
import unittest
from descriptors_pythonic_1 import Traveler as TravelerNaiveImplementation
from descriptors_pythonic_2 import Traveler as TravelerWithDescriptor
class TestDescriptorTraceability(unittest.TestCase):
    """Checks that both Traveler implementations record every city change.

    Both the naive implementation and the descriptor-based one must keep
    an identical ``cities_visited`` trace, including suppressing
    duplicate consecutive assignments.
    """

    def _test_case(self, traveller_cls):
        """Exercise one Traveler class against the expected trace."""
        # Each *distinct* assignment to current_city must be appended to
        # cities_visited, starting from the initial city.
        alice = traveller_cls("Alice", "Barcelona")
        alice.current_city = "Paris"
        alice.current_city = "Brussels"
        alice.current_city = "Amsterdam"

        self.assertListEqual(
            alice.cities_visited,
            ["Barcelona", "Paris", "Brussels", "Amsterdam"],
        )
        self.assertEqual(alice.current_city, "Amsterdam")

        # Re-assigning the same city must NOT add a duplicate entry.
        alice.current_city = "Amsterdam"
        self.assertListEqual(
            alice.cities_visited,
            ["Barcelona", "Paris", "Brussels", "Amsterdam"],
        )

        # Traces are per-instance: a second traveller starts fresh.
        bob = traveller_cls("Bob", "Rotterdam")
        bob.current_city = "Amsterdam"

        self.assertEqual(bob.current_city, "Amsterdam")
        self.assertListEqual(bob.cities_visited, ["Rotterdam", "Amsterdam"])

    def test_trace_attribute(self):
        """Run the same scenario against both implementations."""
        for test_cls in (
            TravelerNaiveImplementation,
            TravelerWithDescriptor,
        ):
            with self.subTest(case=test_cls):
                self._test_case(test_cls)


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
9743395 | from Resources.default_resource import DefaultResource
from .asignatura_query import AsignaturaQuery
import Utils.messages_constants as mc
class Asignatura(DefaultResource):
    """REST resource exposing the list of asignaturas (course subjects)."""

    def __init__(self):
        # Build the query helper before delegating to the base resource.
        self.query = AsignaturaQuery()
        super().__init__()

    def get(self):
        """Return all asignaturas, or a 500 payload when the lookup fails."""
        try:
            rows = self.query.get_asignaturas()
        except Exception as exc:
            return {'error': str(exc)}, 500
        return {'data': rows}, 200
| StarcoderdataPython |
1844958 | db_connection = "postgresql:///tsintegration.db" | StarcoderdataPython |
1790953 | from Point_t import Point_t
class WPT(Point_t):
    """Waypoint: a located point with a description, a numeric id, and a
    record of which legs it lies on."""

    def __init__(self, coo_tulpe, description, id):
        # Human-readable kind of waypoint: 'waypoint', 'node standard', ...
        self._waypoint = description
        # Numeric identifier of the waypoint.
        self._id = id
        # Base-class init stores the waypoint's position from the tuple.
        Point_t.__init__(self,coo_tulpe)
        # Maps a leg to the distance between this waypoint and that
        # leg's start point.
        self._on_leg = {}

    def is_waypoint(self):
        """Return the waypoint's description string."""
        return self._waypoint

    def set_waypoint(self, des):
        """Set the description string."""
        self._waypoint = des

    def set_point(self, pt):
        """Set the position directly.

        NOTE(review): assumes Point_t stores the position in ``self._pos``
        -- confirm against the Point_t base class.
        """
        self._pos = pt

    def get_id(self):
        """Return the waypoint's numeric id."""
        return self._id

    def get_pos(self):
        """Return the position."""
        return self._pos

    def add_on_leg(self, leg, dist_to_start):
        """Register that this waypoint lies on *leg*, *dist_to_start*
        away from that leg's start point."""
        self._on_leg[leg] = dist_to_start

    def get_on_leg(self):
        """Return the leg-membership dict (leg -> distance to leg start)."""
        return self._on_leg
3218434 | from manmandon.provider import MMDChapterListProvider, MMDChapterProvider
from pathlib import Path
import json
import click
class CopyMangaChapters(MMDChapterListProvider):
    """Scrapes the chapter list from a copymanga comic landing page."""

    # Comic landing-page URLs this provider can handle.
    patterns = [
        r"^https://(www.)?copymanga.com/comic/[a-z]+/?$"
    ]

    def resolve(self, uri):
        """Load *uri* in the browser and return the chapter list.

        The heavy lifting happens in chapters.js, which is executed in
        the page and returns a JSON string parsed here.
        """
        self.driver.get(uri)
        res = self.execute(Path(__file__).parent / "chapters.js")
        return json.loads(res)
class CopyMangaChapter(MMDChapterProvider):
    """Downloads every page image of a single copymanga chapter."""

    # Chapter URLs this provider can handle.
    patterns = [
        r"^https://copymanga.com/comic/[a-z]+/chapter/"
    ]
    scope = []

    def resolve(self, uri):
        """Fetch all page images of the chapter at *uri* to disk.

        Images are saved as 001.webp, 002.webp, ... inside a directory
        named after the page title; files that already exist are skipped,
        so the download can be resumed.
        """
        self.driver.get(uri)
        # images.js returns the image URL list as JSON; entries may be
        # null for pages that failed to load, so filter those out.
        res = self.execute(Path(__file__).parent / "images.js")
        img_urls = json.loads(res)
        img_urls = [u for u in img_urls if u != None]
        directory = self.output_directory / self.driver.title
        directory.mkdir(exist_ok=True)
        with click.progressbar(length=len(img_urls), label=self.driver.title, show_pos=True) as bar:
            bar.update(0)
            for i, img_url in enumerate(img_urls):
                fname = directory / f"{i+1:03d}.webp"
                if not fname.exists():
                    # Navigate to the image and capture the raw response
                    # body from the intercepted network request.
                    self.driver.get(img_url)
                    req = self.driver.wait_for_request(img_url, timeout=60)
                    with open(fname, "wb") as fp:
                        fp.write(req.response.body)
                    # Throttle between downloads to avoid rate limiting.
                    self.sleep(3)
                bar.update(1)
        # Drop captured requests so memory does not grow across chapters.
        del self.driver.requests
# Provider classes exported to the manmandon plugin loader.
providers = [
    CopyMangaChapters,
    CopyMangaChapter
]
| StarcoderdataPython |
1750716 | <gh_stars>0
# Labeling tool for creating the hand tracking dataset for Dranimate
#
#
# Program reads images from a given directory and lets the user draw points on
# the image with their mouse. These selected (x,y) coordinates are then saved
# into a text file in a user specified output directory. The program stores all
# the text files in a directory called labels.
#
# To run in command line:
# python labeler.py --input <InputDir> --output <OutputDir>
# Ex. python labeler.py --input <path/to/images/> --output <path/>
#
# Press 'd' to move on to the next image
# Press 'esc' to quit the program
#
# The data is stored in textfile as the (x,y) coordinates of the fingers in this order
# (x1,y1) => pinky
# (x2,y2) => ring
# (x3,y3) => middle
# (x4,y4) => index
# (x5,y5) => thumb
import cv2
import numpy as np
import glob
import argparse
import os
### Mouse event to save x,y pixel coordinates ###
def savePixelCoordinate(event, x, y, flags, param):
    """OpenCV mouse callback: on left click, draw a dot at (x, y) on the
    module-global image and append the coordinate to the global list."""
    global points
    if event == cv2.EVENT_LBUTTONDOWN:
        # Mark the clicked spot with a filled blue circle (radius 5).
        cv2.circle(img,(x,y),5,(255,0,0),-1)
        points.append((x,y))
        print(points)
### Display the image ###
def displayImage(img):
    """Show *img* in a window and block until the user presses a key.

    Returns:
        bool: True if the user pressed Esc (quit the whole program),
        False if they pressed 'd' (done with this image).
    """
    # Renamed from `quit`, which shadowed the builtin; also dropped the
    # unused `global points` declaration (this function never touches it).
    should_quit = False
    while True:
        # Show the image in a new window.
        cv2.imshow('image', img)
        # Mask the key code to 8 bits so comparisons work across platforms.
        key = cv2.waitKey(1) & 0xFF
        if key == ord('d'):  # 'd': done with this image
            break
        if key == 27:  # Esc: quit the program
            should_quit = True
            break
    # Destroy the window before returning.
    cv2.destroyAllWindows()
    return should_quit
#### MAIN ####
# Parse CLI arguments: --input is the image directory, --output is where
# the labels/ directory will be created.
parser = argparse.ArgumentParser()
parser.add_argument('--input', help='image input directory')
parser.add_argument('--output', help='textfile output directory')
args = parser.parse_args()

# Create output directory
# NOTE(review): both paths are concatenated with '+', so --input and
# --output are assumed to end with a path separator -- confirm.
outDirectory = args.output + "labels/"
if not os.path.exists(outDirectory):
    os.makedirs(outDirectory)

points = [];
for imgPath in glob.glob(args.input + "*.jpg"):
    # Read the image using the path
    img = cv2.imread(imgPath)
    cv2.namedWindow('image')
    # Intialize mouse callback
    cv2.setMouseCallback('image', savePixelCoordinate)
    # Show image in a new window
    done = displayImage(img)
    # Check if we can quit the program
    if done:
        break
    # Save points to text file (one "x y" integer pair per line, named
    # after the image's base name).
    fileName = os.path.basename(imgPath)
    fileName = os.path.splitext(fileName)[0]
    np.savetxt(outDirectory + fileName + '.txt', points, fmt='%i')
    # Reset points for next image
    points = [];

print('bye bye!')
| StarcoderdataPython |
9623971 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:junyili
# datetime:20-1-16 下午6:12
import json
def get_queue(target, server):
    """Build the feed-queue name for *server*, encoding ':' as '__'
    and '.' as '_' so the address is a safe identifier."""
    encoded = server.replace(":", "__").replace(".", "_")
    return target + "_feed_queue_" + encoded
def get_server(target, queue):
    """Invert get_queue(): strip the prefix, then decode '__' back to ':'
    and remaining '_' back to '.'."""
    stripped = queue.replace("{}_feed_queue_".format(target), "")
    return stripped.replace("__", ":").replace("_", ".")
def get_message_type_id_time(message, timestamp):
    """Decode a JSON ``[message_type, id]`` pair and append *timestamp*."""
    decoded = json.loads(message)
    return [decoded[0], decoded[1], timestamp]
def build_message(message_type, id):
    """Serialize (*message_type*, *id*) as a JSON array string.

    Note: the parameter name ``id`` shadows the builtin but is kept
    unchanged for backward compatibility with keyword callers.
    """
    payload = [message_type, id]
    return json.dumps(payload)
class QMQ_util:
    """Thin QMQ client wrapper; fetch/ack are not implemented yet."""

    def __init__(self):
        pass

    def get_message(self, subject):
        # TODO: pull the next message for *subject* from QMQ.
        pass

    def ack_message(self, subject, message):
        # TODO: acknowledge *message* for *subject*.
        pass


# Module-level singleton used by callers.
qmq_util = QMQ_util()
| StarcoderdataPython |
11246936 | <gh_stars>1-10
from lolacli import __version__
def test_version():
    # Guards against releasing with a stale package version string.
    assert __version__=='0.2.4'
3565986 | <reponame>gsq7474741/Quantum<gh_stars>1-10
# Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions and data simulator class of quantum finance.
"""
import fastdtw
import numpy as np
from paddle_quantum.utils import Hamiltonian
__all__ = [
"DataSimulator",
"portfolio_optimization_hamiltonian",
"portfolio_diversification_hamiltonian",
"arbitrage_opportunities_hamiltonian"
]
class DataSimulator:
    """Generates and derives the data needed by the portfolio
    optimization and portfolio diversification problems."""

    def __init__(self, stocks, start=None, end=None):
        """Instantiate a ``DataSimulator``.

        Args:
            stocks (list): names of all investable stocks
            start (datetime): optional first trading day used when
                randomly generating stock data
            end (datetime): optional last trading day used when
                randomly generating stock data
        """
        self._n = len(stocks)
        self._stocks = stocks
        # Always bind the date attributes: the previous code only
        # assigned them inside ``if start and end:``, so calling
        # randomly_generate() without dates raised AttributeError
        # instead of printing the intended message.
        self._start = start
        self._end = end
        self._data = []
        self.asset_return_mean = None
        self.asset_return_cov = None

    def set_data(self, data):
        """Use caller-provided stock data instead of generated data.

        Args:
            data (list): one price series per stock, in ``stocks`` order
        """
        if len(data) == self._n:
            self._data = data
        else:
            print("invalid data, data is still empty.")
            self._data = []

    def randomly_generate(self):
        """Randomly generate stock data between the start and end dates.

        Note:
            Both dates must be ``datetime`` values, e.g.
            ``start = datetime.datetime(2016, 1, 1)``.
        """
        if self._start and self._end:
            num_days = (self._end - self._start).days
            for _ in self._stocks:
                # Random walk around a random base price, floored at 0;
                # once a price reaches 0 the stock stays at 0 forever.
                fluctuation = np.cumsum(np.random.standard_normal(num_days))
                data_i = np.random.randint(1, 101, size=1) + fluctuation
                trimmed_data_i = [max(data_i[j], 0) for j in range(num_days)]
                if 0 in trimmed_data_i:
                    zero_ind = trimmed_data_i.index(0)
                    trimmed_data_i = (trimmed_data_i[:zero_ind]
                                      + [0 for _ in range(num_days - zero_ind)])
                self._data.append(trimmed_data_i)
        else:
            print("Please provide the start time and the end time you want to generate stock data.")

    def _daily_returns(self):
        """Per-stock daily return series; ``nan`` where the price was 0.

        Shared by the mean-vector and covariance computations, which
        previously duplicated this loop.
        """
        returns = []
        for series in self._data:
            returns.append([series[j + 1] / series[j] - 1
                            if series[j] != 0
                            else np.nan for j in range(len(series) - 1)])
        return returns

    def get_asset_return_mean_vector(self):
        """Compute the mean daily return of every investable stock.

        Returns:
            numpy.ndarray: mean return per stock
        """
        self.asset_return_mean = np.mean(self._daily_returns(), axis=1)
        return self.asset_return_mean

    def get_asset_return_covariance_matrix(self):
        """Compute the covariance matrix of the stocks' daily returns.

        Returns:
            numpy.ndarray: covariance matrix between the stocks' returns
        """
        self.asset_return_cov = np.cov(self._daily_returns())
        return self.asset_return_cov

    def get_similarity_matrix(self):
        """Compute the pairwise similarity matrix between stocks.

        Similarity is the reciprocal of the Dynamic Time Warping (DTW)
        distance between two price series; the diagonal is 1.

        Returns:
            numpy.ndarray: the similarity matrix
        """
        self.rho = np.zeros((self._n, self._n))
        for i in range(0, self._n):
            self.rho[i, i] = 1
            for j in range(i + 1, self._n):
                curr_rho, _ = fastdtw.fastdtw(self._data[i], self._data[j])
                curr_rho = 1 / curr_rho
                self.rho[i, j] = curr_rho
                self.rho[j, i] = curr_rho
        return self.rho
def portfolio_optimization_hamiltonian(penalty, mu, sigma, q, budget):
    r"""Build the Hamiltonian of the portfolio optimization problem.

    Args:
        penalty (int): penalty parameter :math:`A`
        mu (list): expected return of each stock
        sigma (list): covariance matrix between the stocks' returns
        q (float): risk appetite of the investment
        budget (int): investment budget, i.e. the number of stocks to buy

    .. math::

        C(x) = q \sum_i \sum_j S_{ji}x_ix_j - \sum_{i}x_i \mu_i + A \left(B - \sum_i x_i\right)^2

    Hint:
        The boolean variable :math:`x_i` is mapped onto the Hamiltonian
        via :math:`x_i \mapsto \frac{I-Z_i}{2}`.

    Returns:
        Hamiltonian: Hamiltonian of the portfolio optimization problem
    """
    n = len(mu)

    # Risk term q * sum_ij sigma_ij x_i x_j, expanded into Pauli-Z terms.
    H_C_list1 = []
    for i in range(n):
        for j in range(n):
            sigma_ij = sigma[i][j]
            H_C_list1.append([sigma_ij / 4, 'I'])
            if i != j:
                H_C_list1.append([sigma_ij / 4, 'Z' + str(i) + ',Z' + str(j)])
            else:
                # Z_i Z_i = I on the diagonal.
                H_C_list1.append([sigma_ij / 4, 'I'])
            H_C_list1.append([- sigma_ij / 4, 'Z' + str(i)])
            H_C_list1.append([- sigma_ij / 4, 'Z' + str((j))])
    H_C_list1 = [[q * c, s] for (c, s) in H_C_list1]

    # Return term - sum_i mu_i x_i.
    H_C_list2 = []
    for i in range(n):
        H_C_list2.append([- mu[i] / 2, 'I'])
        H_C_list2.append([mu[i] / 2, 'Z' + str(i)])

    # Budget constraint A * (budget - sum_i x_i)^2, expanded.
    H_C_list3 = [[budget ** 2, 'I']]
    for i in range(n):
        H_C_list3.append([- 2 * budget / 2, 'I'])
        H_C_list3.append([2 * budget / 2, 'Z' + str(i)])
        H_C_list3.append([2 / 4, 'I'])
        H_C_list3.append([- 2 / 4, 'Z' + str(i)])
        for ii in range(i):
            H_C_list3.append([2 / 4, 'I'])
            H_C_list3.append([2 / 4, 'Z' + str(i) + ',Z' + str(ii)])
            H_C_list3.append([- 2 / 4, 'Z' + str(i)])
            H_C_list3.append([- 2 / 4, 'Z' + str(ii)])
    H_C_list3 = [[penalty * c, s] for (c, s) in H_C_list3]

    H_C_list = H_C_list1 + H_C_list2 + H_C_list3
    po_hamiltonian = Hamiltonian(H_C_list)

    return po_hamiltonian
def portfolio_diversification_hamiltonian(penalty, rho, q):
    r"""Build the Hamiltonian of the portfolio diversification problem.

    Args:
        penalty (int): penalty parameter :math:`A`
        rho (list): similarity matrix between the stocks
        q (int): number of clusters to group the stocks into

    .. math::

        \begin{aligned}
        C_x &= -\sum_{i=1}^{n}\sum_{j=1}^{n}\rho_{ij}x_{ij} + A\left(q- \sum_{j=1}^n y_j \right)^2 + \sum_{i=1}^n A\left(\sum_{j=1}^n 1- x_{ij} \right)^2 \\
        &\quad + \sum_{j=1}^n A\left(x_{jj} - y_j\right)^2 + \sum_{i=1}^n \sum_{j=1}^n A\left(x_{ij}(1 - y_j)\right).\\
        \end{aligned}

    Hint:
        The boolean variable :math:`x_{ij}` is mapped onto the
        Hamiltonian via :math:`x_{ij} \mapsto \frac{I-Z_{ij}}{2}`.
        Qubits 0..n^2-1 encode x_{ij}; qubits n^2..n^2+n-1 encode y_j.

    Returns:
        Hamiltonian: Hamiltonian of the portfolio diversification problem
    """
    n = len(rho)

    # Similarity term: -sum_ij rho_ij x_ij.
    H_C_list1 = []
    for i in range(n):
        for j in range(n):
            rho_ij = - rho[i][j]
            H_C_list1.append([rho_ij / 2, 'I'])
            H_C_list1.append([- rho_ij / 2, 'Z' + str(i * n + j)])

    # Cluster-count constraint A * (q - sum_j y_j)^2, expanded.
    H_C_list2 = [[q ** 2, 'I']]
    for j in range(n):
        H_C_list2.append([- q, 'I'])
        H_C_list2.append([q, 'Z' + str(n ** 2 + j)])
        H_C_list2.append([1 / 2, 'I'])
        H_C_list2.append([- 1 / 2, 'Z' + str(n ** 2 + j)])
        for jj in range(j):
            H_C_list2.append([1 / 2, 'I'])
            H_C_list2.append([1 / 2, 'Z' + str(n ** 2 + j) + ',Z' + str(n ** 2 + jj)])
            H_C_list2.append([- 1 / 2, 'Z' + str(n ** 2 + j)])
            H_C_list2.append([- 1 / 2, 'Z' + str(n ** 2 + jj)])
    H_C_list2 = [[penalty * c, s] for (c, s) in H_C_list2]

    # Row constraint: each stock i is assigned to exactly one cluster.
    H_C_list3 = []
    for i in range(n):
        H_C_list3.append([1, 'I'])
        for j in range(n):
            H_C_list3.append([- 1, 'I'])
            H_C_list3.append([1, 'Z' + str(i * n + j)])
            H_C_list3.append([1 / 2, 'I'])
            H_C_list3.append([- 1 / 2, 'Z' + str(i * n + j)])
            for jj in range(j):
                H_C_list3.append([1 / 2, 'I'])
                H_C_list3.append([- 1 / 2, 'Z' + str(i * n + j)])
                H_C_list3.append([1 / 2, 'Z' + str(i * n + j) + ',Z' + str(i * n + jj)])
                H_C_list3.append([- 1 / 2, 'Z' + str(i * n + jj)])
    H_C_list3 = [[penalty * c, s] for (c, s) in H_C_list3]

    # Consistency constraint A * (x_jj - y_j)^2.
    H_C_list4 = []
    for j in range(n):
        H_C_list4.append([1 / 2, 'I'])
        H_C_list4.append([- 1 / 2, 'Z' + str(j * n + j) + ',Z' + str(n ** 2 + j)])
    H_C_list4 = [[penalty * c, s] for (c, s) in H_C_list4]

    # Coupling constraint A * x_ij (1 - y_j).
    H_C_list5 = []
    for i in range(n):
        for j in range(n):
            H_C_list5.append([1 / 4, 'I'])
            H_C_list5.append([- 1 / 4, 'Z' + str(i * n + j)])
            H_C_list5.append([1 / 4, 'Z' + str(n ** 2 + j)])
            H_C_list5.append([- 1 / 4, 'Z' + str(i * n + j) + ',Z' + str(n ** 2 + j)])
    H_C_list5 = [[penalty * c, s] for (c, s) in H_C_list5]

    H_C_list = H_C_list1 + H_C_list2 + H_C_list3 + H_C_list4 + H_C_list5
    pd_hamiltonian = Hamiltonian(H_C_list)

    return pd_hamiltonian
def arbitrage_opportunities_hamiltonian(g, penalty, n, K):
    r"""Build the Hamiltonian of the best arbitrage opportunity problem.

    Args:
        g (networkx.DiGraph): graph of conversions between currency markets
        penalty (int): penalty parameter (the :math:`A` in the formula;
            this parameter was previously mis-documented as ``A``)
        n (int): number of currencies, i.e. number of vertices in ``g``
        K (int): number of vertices contained in the arbitrage cycle

    .. math::

        C(x) = - P(x) + A\sum_{k=0}^{K-1} \left(1 - \sum_{i=0}^{n-1} x_{i,k}\right)^2 + A\sum_{k=0}^{K-1}\sum_{(i,j)\notin E}x_{i,k}x_{j,k+1}

    Hint:
        The boolean variable :math:`x_{i,k}` is mapped onto the
        Hamiltonian via :math:`x_{i,k} \mapsto \frac{I-Z_{i,k}}{2}`.

    Returns:
        Hamiltonian: Hamiltonian of the best arbitrage opportunity problem
    """
    nodes = list(g.nodes)

    # Profit term P(x): log2 of the conversion weights along the cycle
    # (time step t -> t+1, wrapping around at K).
    H_C_list1 = []
    for (i, c) in enumerate(nodes):
        for (j, cc) in enumerate(nodes):
            if i != j:
                c_ij = np.log2(g[c][cc]['weight'])
                for t in range(K):
                    H_C_list1.append([c_ij / 4, 'I'])
                    H_C_list1.append([c_ij / 4, 'Z' + str(i * n + t) + ',Z' + str((j * n + (t + 1) % K))])
                    H_C_list1.append([- c_ij / 4, 'Z' + str(i * n + t)])
                    H_C_list1.append([- c_ij / 4, 'Z' + str((j * n + (t + 1) % K))])
    H_C_list1 = [[-c, s] for (c, s) in H_C_list1]

    # Constraint: exactly one currency is selected at each time step.
    H_C_list2 = []
    for t in range(K):
        H_C_list2.append([1, 'I'])
        for i in range(n):
            H_C_list2.append([- 2 * 1 / 2, 'I'])
            H_C_list2.append([2 * 1 / 2, 'Z' + str(i * n + t)])
            H_C_list2.append([2 / 4, 'I'])
            H_C_list2.append([- 2 / 4, 'Z' + str(i * n + t)])
            for ii in range(i):
                H_C_list2.append([2 / 4, 'I'])
                H_C_list2.append([2 / 4, 'Z' + str(i * n + t) + ',Z' + str(ii * n + t)])
                H_C_list2.append([- 2 / 4, 'Z' + str(i * n + t)])
                H_C_list2.append([- 2 / 4, 'Z' + str(ii * n + t)])
    H_C_list2 = [[penalty * c, s] for (c, s) in H_C_list2]

    # Penalize consecutive selections along edges absent from the graph.
    H_C_list3 = []
    for t in range(K):
        for (i, c) in enumerate(nodes):
            for (j, cc) in enumerate(nodes):
                if (c, cc) not in g.edges and c != cc:
                    H_C_list3.append([1 / 4, "I"])
                    H_C_list3.append([- 1 / 4, 'Z' + str(i * n + t)])
                    H_C_list3.append([- 1 / 4, 'Z' + str((j * n + (t + 1) % K))])
                    H_C_list3.append([- 1 / 4, 'Z' + str(i * n + t) + ',Z' + str((j * n + (t + 1) % K))])
    H_C_list3 = [[penalty * c, s] for (c, s) in H_C_list3]

    # Per-currency penalty summed over time steps. NOTE(review): this
    # term is not shown in the docstring formula; it appears to enforce
    # that each currency is used exactly once -- confirm against the
    # original derivation.
    H_C_list4 = []
    for i in range(n):
        H_C_list4.append([1, 'I'])
        for t in range(K):
            H_C_list4.append([- 2 * 1 / 2, 'I'])
            H_C_list4.append([2 * 1 / 2, 'Z' + str(i * n + t)])
            H_C_list4.append([2 / 4, 'I'])
            H_C_list4.append([- 2 / 4, 'Z' + str(i * n + t)])
            for tt in range(t):
                H_C_list4.append([2 / 4, 'I'])
                H_C_list4.append([2 / 4, 'Z' + str(i * n + t) + ',Z' + str(i * n + tt)])
                H_C_list4.append([- 2 / 4, 'Z' + str(i * n + t)])
                H_C_list4.append([- 2 / 4, 'Z' + str(i * n + tt)])
    H_C_list4 = [[penalty * c, s] for (c, s) in H_C_list4]

    H_C_list = H_C_list1 + H_C_list2 + H_C_list3 + H_C_list4
    ao_hamiltonian = Hamiltonian(H_C_list)

    return ao_hamiltonian
| StarcoderdataPython |
3404963 | <reponame>detrout/trackhub
from trackhub import upload
import os
import unittest
from trackhub import Hub, GenomesFile, Genome, Track, CompositeTrack, \
TrackDb, ViewTrack, SuperTrack, AggregateTrack
class TestUpload(object):
    """End-to-end checks for trackhub rendering and optional uploading."""

    def setup(self):
        """Build a minimal hub -> genomes file -> genome -> trackdb tree
        with two local tracks (nose-style per-test setup)."""
        self.hub = Hub(
            hub='example_hub',
            short_label='example hub',
            long_label='an example hub for testing',
            email='<EMAIL>')
        self.genomes_file = GenomesFile()
        self.genome = Genome('dm3')
        self.trackdb = TrackDb()
        self.tracks = [
            Track(
                name='track1',
                tracktype='bam',
                local_fn='data/track1.bam'
            ),
            Track(
                name='track2',
                tracktype='bigWig',
                local_fn='data/track2.bigwig',
            ),
        ]

        # Wire the component hierarchy together.
        self.hub.add_genomes_file(self.genomes_file)
        self.genomes_file.add_genome(self.genome)
        self.genome.add_trackdb(self.trackdb)
        self.trackdb.add_tracks(self.tracks)

    @unittest.skipUnless(os.path.exists('data/track1.bam'), 'No test data')
    def test_upload(self):
        """Render the hub and upload it (plus each track) to localhost."""
        # Redirect the remote path so the uploaded copy lands in a
        # separate 'uploaded_version' directory.
        self.hub.remote_fn = os.path.join(
            'uploaded_version',
            self.hub.remote_fn)
        self.hub.render()
        upload.upload_hub(
            'localhost',
            None,
            self.hub,
            symlink=True,
            symlink_dir='staging',
            run_local=True,)
        # Upload every leaf track of the hub individually.
        for t, level in self.hub.leaves(Track):
            upload.upload_track(
                track=t, host='localhost', user=None, run_local=True)

    def test_render(self):
        """Rendered trackdb text must mention the track and its data URL."""
        trackdb = str(self.trackdb)

        # make sure some of the trackdb rendered correctly
        assert 'track track1' in trackdb
        assert 'bigDataUrl track1.bam' in trackdb
| StarcoderdataPython |
3498085 | # encoding: utf-8
"""
circstat.py -- Circular statistics functions
Exported namespace: mean, std, var
Note: all functions take an array of radian-angle values on [0, 2*pi] as input.
Written by <NAME>, 4/17/2007
Center for Theoretical Neuroscience
Copyright (c) 2007 Columbia University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
from numpy import ones, dot, sin, cos, empty, arange, pi
from scipy.stats import histogram
# Package imports
from .radians import xy_to_rad
def mean(theta, w=None):
    """First circular moment

    Input: theta - array of radian angle values
    w - optional weighting if angle values are binned

    Returns: scalar circular mean of theta

    See: http://en.wikipedia.org/wiki/Directional_statistics
    """
    # NOTE(review): legacy Python 2 code (`raise X, msg` syntax below);
    # must stay py2 if left untouched.
    sz = theta.shape[0]
    if w is None:
        w = ones(sz, 'd')  # uniform weights, double precision
    elif w.size != sz:
        raise ValueError, 'weight array size mismatch'
    # Weighted resultant vector (c_bar, s_bar); its angle is the
    # circular mean of theta.
    s_bar = dot(w, sin(theta))
    c_bar = dot(w, cos(theta))
    return xy_to_rad(c_bar, s_bar)
def std(theta):
    """Sample circular deviation

    Input: theta - array of radian angle values

    Returns: circular standard deviation
    """
    # Square root of the minimum-variance circular variance (see var()).
    return (var(theta))**0.5
def var(theta, Nbins=360):
    """Sample circular variance, second moment

    Calculated using the minimum variance method with moving cut points.
    See: Weber RO (1997). J. Appl. Meteorol. 36(10), 1403-1415.

    Input: theta - array of radian angle values on [0, 2*pi]
        Nbins - number of intervals across [0, 2*pi] to minimize over
    Returns: circular variance (minimum linear variance over all cut points)
    """
    # Fixed: scipy.stats.histogram was deprecated and removed from SciPy;
    # numpy.histogram produces the same bin counts for an explicit range.
    # (The only edge difference: values exactly equal to 2*pi fall in the
    # last bin here instead of being dropped as out-of-range points.)
    from numpy import histogram
    N = len(theta)
    delta_t = 2 * pi / Nbins
    # bin upper edges: delta_t, 2*delta_t, ..., 2*pi
    x = arange(delta_t, 2 * pi + delta_t, delta_t)
    n, _edges = histogram(theta, bins=Nbins, range=(0, 2 * pi))
    tbar = empty((Nbins,), 'd')
    S = empty((Nbins,), 'd')
    s2 = empty((Nbins,), 'd')
    # Weber (1997) recursion, equations A1-A6: linear variance for every
    # candidate cut point, computed incrementally.
    tbar[0] = (x * n).sum() / N                                # A1
    S[0] = ((x ** 2) * n).sum() / (N - 1)                      # A2
    s2[0] = S[0] - N * (tbar[0] ** 2) / (N - 1)                # A3
    for k in range(1, Nbins):  # xrange -> range (works on Py2 and Py3)
        tbar[k] = tbar[k - 1] + (2 * pi) * n[k - 1] / N                            # A4
        S[k] = S[k - 1] + (2 * pi) * (2 * pi + 2 * x[k - 1]) * n[k - 1] / (N - 1)  # A5
        s2[k] = S[k] - N * (tbar[k] ** 2) / (N - 1)                                # A6
    return s2.min()
| StarcoderdataPython |
11376428 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/1/13 15:41
# @author : Mo
# @function:
import tensorflow as tf
def serving_input_receiver_fn():
    """Build a ServingInputReceiver for serialized tf.Example protos.

    The receiver expects a batch of serialized examples under the key
    "predictor_inputs" and decodes a fixed-length (25) int64 "words"
    feature from each one.
    """
    serialized = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensors')
    receivers = {"predictor_inputs": serialized}
    spec = {"words": tf.FixedLenFeature([25], tf.int64)}
    parsed_features = tf.parse_example(serialized, spec)
    return tf.estimator.export.ServingInputReceiver(parsed_features, receivers)
def estimator_spec_for_softmax_classification(logits, labels, mode, num_classes=31):
    """Build a tf.estimator.EstimatorSpec for softmax classification.

    Args:
        logits: [batch, num_classes] unscaled class scores.
        labels: integer class ids (ignored in PREDICT mode).
        mode: a tf.estimator.ModeKeys value.
        num_classes: number of output classes.  Default 31 preserves the
            original hard-coded depth (matches the dense layer in
            model_custom); parameterized so the spec can be reused.

    Returns:
        An EstimatorSpec wired for predict, train or eval.
    """
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # export_outputs is required so export_savedmodel() can serve the
        # model (see the IMPORTANT note in the original source).
        outputs = {"pred_output_classes": predicted_classes,
                   'probabilities': tf.nn.softmax(logits)}
        export_outputs = {'predict_output': tf.estimator.export.PredictOutput(outputs)}
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={'class': predicted_classes, 'prob': tf.nn.softmax(logits)},
            export_outputs=export_outputs)
    onehot_labels = tf.one_hot(labels, num_classes, 1, 0)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels=labels, predictions=predicted_classes)}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def model_custom(features, labels, mode):
    """model_fn: embed the "words" id feature and classify with one dense layer."""
    words_column = tf.feature_column.categorical_column_with_identity("words", num_buckets=1000)
    embedded_column = tf.feature_column.embedding_column(words_column, dimension=50)
    embedded_input = tf.feature_column.input_layer(features, feature_columns=[embedded_column])
    logits = tf.layers.dense(embedded_input, 31, activation=None)
    return estimator_spec_for_softmax_classification(logits=logits, labels=labels, mode=mode)
def main_():
    # Training-side sketch: trains the custom estimator and exports a
    # SavedModel.  NOTE(review): features_train_set, labels_train_set and
    # batch_size_param are placeholders that a preprocessing step must
    # define -- this function is not runnable as written.
    # ...
    # preprocess-> features_train_set and labels_train_set
    # ...
    classifier = tf.estimator.Estimator(model_fn = model_custom)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"words": features_train_set}, y=labels_train_set, batch_size=batch_size_param, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)
    full_model_dir = classifier.export_savedmodel(export_dir_base="C:/models/directory_base", serving_input_receiver_fn=serving_input_receiver_fn)
def main_tst():
    # Inference-side sketch: loads the exported SavedModel and runs one
    # prediction.  NOTE(review): full_model_dir and features_test_set are
    # undefined placeholders here (full_model_dir is local to main_) --
    # this function is not runnable as written.
    # ...
    # preprocess-> features_test_set
    # ...
    with tf.Session() as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], full_model_dir)
        predictor = tf.contrib.predictor.from_saved_model(full_model_dir)
        # Serialize the request the same way serving_input_receiver_fn expects it.
        model_input = tf.train.Example(features=tf.train.Features( feature={"words": tf.train.Feature(int64_list=tf.train.Int64List(value=features_test_set)) }))
        model_input = model_input.SerializeToString()
        output_dict = predictor({"predictor_inputs":[model_input]})
        y_predicted = output_dict["pred_output_classes"][0]
| StarcoderdataPython |
11365897 | <filename>crawler/TwitterScrapy/items.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from scrapy import Item, Field
class TwitterItem(Item):
    """Scrapy item holding one scraped tweet and its engagement counts."""
    ID = Field() # tweet id
    url = Field() # tweet url
    datetime = Field() # post time
    text = Field() # text content
    user_id = Field() # user id
    usernameTweet = Field() # username of tweet
    number_retweet = Field() # number of retweets
    number_favorite = Field() # number of favorites
    number_reply = Field() # number of replies
    is_reply = Field() # boolean: whether the tweet is a reply
    is_retweet = Field() # boolean: whether the tweet is just a retweet of another tweet
1815633 | <reponame>imall100/mrec
"""
Brute-force k-nearest neighbour recommenders
intended to provide evaluation baselines.
"""
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from recommender import ItemSimilarityRecommender
class KNNRecommender(ItemSimilarityRecommender):
    """
    Abstract base class for brute-force k-nearest-neighbour recommenders,
    intended as evaluation baselines.

    Subclasses must implement compute_all_similarities().

    Parameters
    ==========
    k : int
        The number of nearest neighbouring items to retain
    """

    def __init__(self, k):
        self.k = k

    def compute_similarities(self, dataset, j):
        """Score item j against every item, keeping only its top-k neighbours."""
        X = dataset.X
        col_j = dataset.fast_get_col(j)
        scores = self.compute_all_similarities(X, col_j)
        scores[j] = 0  # an item must not be its own neighbour
        # indices of the k highest-scoring items, best first
        neighbours = scores.argsort()[-1:-1 - self.k:-1]
        weights = np.zeros(X.shape[1])
        weights[neighbours] = scores[neighbours]
        return weights

    def compute_all_similarities(self, A, a):
        """
        Compute similarity scores between item vector a and every item in A.

        Parameters
        ==========
        A : scipy.sparse.csr_matrix
            Matrix of item vectors.
        a : array_like
            The item vector to be compared against A.

        Returns
        =======
        similarities : numpy.ndarray
            Vector of similarity scores.
        """
        pass
class DotProductKNNRecommender(KNNRecommender):
    """
    k-NN recommender whose item-item similarity is the dot product
    (i.e. the cooccurrence count when the input data is binary).
    """

    def compute_all_similarities(self, A, a):
        scores = A.T.dot(a)
        return scores.toarray().flatten()

    def __str__(self):
        return 'DotProductKNNRecommender(k={0})'.format(self.k)
class CosineKNNRecommender(KNNRecommender):
    """
    k-NN recommender whose item-item similarity is the cosine distance.
    """

    def compute_all_similarities(self, A, a):
        scores = cosine_similarity(A.T, a.T)
        return scores.flatten()

    def __str__(self):
        return 'CosineKNNRecommender(k={0})'.format(self.k)
if __name__ == '__main__':
    # Smoke-test / usage demo for the k-NN recommenders.
    # Fixed: the Py2-only `print x` statements and xrange() made this whole
    # module a SyntaxError under Python 3 even for importers that only need
    # the classes.  The forms below are valid under both Python 2 and 3.
    # NOTE(review): `StringIO` is still the Python 2 module (imported only
    # when run as a script), so the demo itself remains Py2-only.
    import random
    import StringIO

    from mrec import load_fast_sparse_matrix

    random.seed(0)

    print('loading test data...')
    data = """\
%%MatrixMarket matrix coordinate real general
3 5 9
1 1 1
1 2 1
1 3 1
1 4 1
2 2 1
2 3 1
2 5 1
3 3 1
3 4 1
"""
    print(data)
    dataset = load_fast_sparse_matrix('mm',StringIO.StringIO(data))
    num_users,num_items = dataset.shape

    model = CosineKNNRecommender(k=2)

    num_samples = 2

    def output(i,j,val):
        # convert back to 1-indexed
        print('{0}\t{1}\t{2:.3f}'.format(i+1,j+1,val))

    print('computing some item similarities...')
    print('item\tsim\tweight')
    # if we want we can compute these individually without calling fit()
    for i in random.sample(range(num_items),num_samples):
        for j,weight in model.get_similar_items(i,max_similar_items=2,dataset=dataset):
            output(i,j,weight)

    print('learning entire similarity matrix...')
    # more usually we just call train() on the entire dataset
    model = CosineKNNRecommender(k=2)
    model.fit(dataset)
    print('making some recommendations...')
    print('user\trec\tscore')
    for u in random.sample(range(num_users),num_samples):
        for i,score in model.recommend_items(dataset.X,u,max_items=10):
            output(u,i,score)

    print('making batch recommendations...')
    recs = model.batch_recommend_items(dataset.X)
    for u in range(num_users):
        for i,score in recs[u]:
            output(u,i,score)

    print('making range recommendations...')
    for start,end in [(0,2),(2,3)]:
        recs = model.range_recommend_items(dataset.X,start,end)
        for u in range(start,end):
            for i,score in recs[u-start]:
                output(u,i,score)
5065374 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://docs.python.org/3/library/secrets.html
from secrets import choice
import string
def generate_password(length=8) -> str:
    """Return a random alphanumeric password of the requested length.

    The password contains at least one lowercase letter, at least one
    uppercase letter and at least three digits (adapted from the recipe in
    the `secrets` module documentation).

    Raises:
        ValueError: if length < 5 -- the constraints (1 lower + 1 upper +
            3 digits) cannot all be satisfied, and the original retry loop
            would spin forever.
    """
    if length < 5:
        raise ValueError("length must be at least 5 to satisfy the character constraints")
    alphabet = string.ascii_letters + string.digits
    while True:
        password = ''.join(choice(alphabet) for _ in range(length))
        if (any(c.islower() for c in password)
                and any(c.isupper() for c in password)
                and sum(c.isdigit() for c in password) >= 3):
            return password
if __name__ == '__main__':
    # Demo: two default-length passwords, then a few assorted lengths.
    for _ in range(2):
        print(generate_password())
    print()
    for size in (8, 8, 16, 32):
        pw = generate_password(length=size)
        print(f'[{len(pw)}]: {pw}')
| StarcoderdataPython |
def is_palindrome(input_string):
    """Return True if *input_string* is a palindrome, ignoring case and spaces."""
    cleaned = input_string.lower().replace(" ", "")
    return cleaned == cleaned[::-1]
# Demonstration calls with their expected results.
print(is_palindrome("Never Odd or Even")) # Should be True
print(is_palindrome("abc")) # Should be False
print(is_palindrome("malayalam")) # Should be True
| StarcoderdataPython |
3384309 | #! /bin/env python
'''Rerun a collection's stored enrichments
'''
import sys
import argparse
from harvester.post_processing.couchdb_runner import CouchDBJobEnqueue
from harvester.collection_registry_client import Collection
import harvester.post_processing.enrich_existing_couch_doc
def main(args):
    """Queue the stored enrichments for one or more registry collections.

    args: argv-style list.  Requires exactly one of --collection_id or
    --cid_file (a file with one collection id per line); --rq_queue
    optionally overrides the default 'normal-stage' RQ queue.
    """
    parser = argparse.ArgumentParser(
        description='run the enrichments stored for a collection.')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--collection_id',
                       help='Registry id for the collection')
    group.add_argument('--cid_file',
                       help='File with collection ids for running')
    parser.add_argument('--rq_queue',
                        help='Override queue for jobs, normal-stage is default')
    args = parser.parse_args(args)
    Q = 'normal-stage'
    if args.rq_queue:
        Q = args.rq_queue
    enq = CouchDBJobEnqueue(Q)
    timeout = 10000
    cids = []
    if args.collection_id:
        cids = [args.collection_id]
    else:  # cid file
        with open(args.cid_file) as foo:
            lines = foo.readlines()
            cids = [l.strip() for l in lines]
    # Fixed: the Py2-only `print "..."` statement syntax is a SyntaxError
    # under Python 3; the single-argument call form is valid in both.
    print("CIDS:{}".format(cids))
    for cid in cids:
        url_api = ''.join(('https://registry.cdlib.org/api/v1/collection/',
                           cid, '/'))
        coll = Collection(url_api)
        print(coll.id)
        enrichments = coll.enrichments_item
        enq.queue_collection(cid, timeout,
                             harvester.post_processing.enrich_existing_couch_doc.main,
                             enrichments
                             )
if __name__ == '__main__':
    main(sys.argv[1:])
| StarcoderdataPython |
1952581 | import CradlepointAPIClient
import unittest
import os
import json
# Credentials are taken from the environment, never hard-coded.
username = os.environ.get('USERNAME')
# Fixed: this line contained the redaction artifact `<PASSWORD>('PASSWORD')`,
# which is a SyntaxError; reconstructed to mirror the USERNAME lookup above.
password = os.environ.get('PASSWORD')
def run_full_test():
    """Manual end-to-end smoke test: connect, issue one GET, disconnect."""
    banner = '***** Testing: {} '
    client = CradlepointAPIClient.Client(api_version='v2', verify=True)
    print(banner.format('Login').ljust(60, '*'))
    client.connect(x_cp_api_id='', x_cp_api_key='', x_ecm_api_id='', x_ecm_api_key='')
    print(banner.format('GET method').ljust(60, '*'))
    response = client.get(method='/groups', offset=0, limit=1)
    print(json.dumps(response.json(), indent=4))
    print(banner.format('Logout').ljust(60, '*'))
    client.disconnect()
class TestCradlepointAPIWrapper(unittest.TestCase):
    """Integration test for the Cradlepoint API client.

    NOTE(review): this hits the live API; the empty-string API keys are
    placeholders and must be supplied for the test to pass.
    """
    def test_methods_get(self):
        client = CradlepointAPIClient.Client(api_version='v2')
        client.connect(x_cp_api_id='', x_cp_api_key='', x_ecm_api_id='', x_ecm_api_key='')
        # One minimal GET should succeed once authenticated.
        response = client.get(method='/groups', offset=0, limit=1)
        self.assertEqual(response.status_code, 200)
        client.disconnect()
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1667466 | <filename>app/blueprints/admin/organisation.py<gh_stars>0
from datetime import datetime
from flask import Blueprint, render_template, redirect, url_for, flash, abort, request
from flask_login import login_required, current_user
import commonmark
from app import db
from app.decorators import admin_required
from app.models import *
from app.blueprints.admin.views import admin
from wtforms import Flags
from .forms import *
from flask_uploads import UploadSet, IMAGES
from flask_wtf.file import FileAllowed
# Flask-Uploads collections for image files.  NOTE(review): `photos` is not
# referenced anywhere in this module's visible code -- possibly used elsewhere.
images = UploadSet('images', IMAGES)
photos = UploadSet('photos', IMAGES)
@admin.route('/add/organization/', methods=['GET', 'POST'])
@login_required
def create_org():
    """Create the organisation record, or redirect to editing the existing one.

    Only a single organisation (id 1) is supported: if it already exists
    the user is sent straight to its edit page.
    """
    organization_exist = Organisation.query.get(1)
    if organization_exist is not None:
        return redirect(url_for('admin.edit_org', org_id=organization_exist.id))
    form = OrganisationForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            # Logo upload is currently disabled.
            #image_filename = images.save(request.files['logo'])
            #image_url = images.url(image_filename)
            org = Organisation(
                # NOTE(review): the organisation id is set from the current
                # user's id -- confirm this is intended rather than letting
                # the database assign one.
                id=current_user.id,
                #image_filename=image_filename,
                #image_url=image_url,
                org_name=form.org_name.data,
                org_industry=form.org_industry.data,
                org_short_description=form.org_short_description.data,
                org_website=form.org_website.data,
                org_city=form.org_city.data,
                org_state=form.org_state.data,
                org_country=form.org_country.data,
                org_description=form.org_description.data
            )
            db.session.add(org)
            db.session.commit()
            flash('Data added!', 'success')
            # If no organisation with a logo exists yet, continue to the
            # logo upload page; otherwise go back to the dashboard.
            logo = Organisation.query.filter(Organisation.logos).first()
            if logo is None:
                return redirect(url_for('admin.image_upload'))
            return redirect(url_for('admin.frontend_dashboard'))
        else:
            flash('Error! Data was not added.', 'error')
    return render_template('admin/organisations/create_org.html', form=form)
@admin.route('/organization/<int:org_id>/edit', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_org(org_id):
    """Edit the organisation identified by *org_id*; 404 if it does not exist."""
    org = Organisation.query.filter_by(id=org_id).first_or_404()
    form = OrganisationForm(obj=org)
    if request.method == 'POST':
        form.populate_obj(org)
        db.session.add(org)
        db.session.commit()
        flash('Settings successfully edited', 'success')
        return redirect(url_for('admin.frontend_dashboard'))
    return render_template('admin/organisations/edit_org.html', form=form)
@admin.route('/organization', defaults={'page': 1}, methods=['GET'])
@admin.route('/organization/<int:page>', methods=['GET'])
@login_required
@admin_required
def orgs(page):
    """Render the paginated organisation listing (100 per page)."""
    paginated = Organisation.query.paginate(page, per_page=100)
    return render_template('admin/organisations/browse.html', orgs=paginated)
@admin.route('/organization/<int:org_id>/_delete', methods=['POST'])
@login_required
@admin_required
def delete_org(org_id):
    """Delete the organisation with *org_id* and return to the listing.

    Uses first_or_404() -- consistent with edit_org -- so a missing id
    yields a 404 instead of passing None to session.delete(), which
    previously raised an unhandled error.
    """
    org = Organisation.query.filter_by(id=org_id).first_or_404()
    db.session.delete(org)
    db.session.commit()
    flash('Successfully deleted Organisation.', 'success')
    return redirect(url_for('admin.orgs'))
| StarcoderdataPython |
1873773 | from social.backends.oauth import BaseOAuth2
class BattleNetOAuth2(BaseOAuth2):
    """OAuth2 backend for battle.net (EU endpoints)."""
    name = 'battlenet-oauth2'
    ID_KEY = 'accountId'
    REDIRECT_STATE = False
    AUTHORIZATION_URL = 'https://eu.battle.net/oauth/authorize'
    ACCESS_TOKEN_URL = 'https://eu.battle.net/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REVOKE_TOKEN_METHOD = 'GET'
    DEFAULT_SCOPE = ['wow.profile']
    EXTRA_DATA = [
        ('refresh_token', 'refresh_token', True),
        ('expires_in', 'expires'),
        ('token_type', 'token_type', True)
    ]

    def get_characters(self, access_token):
        """
        Fetch the WoW character list from the battle.net API.

        Returns the list of characters, or an empty list if the response
        has no 'characters' entry (e.g. a failed request).
        """
        params = {'access_token': access_token}
        if self.setting('API_LOCALE'):
            params['locale'] = self.setting('API_LOCALE')
        payload = self.get_json(
            'https://eu.api.battle.net/wow/user/characters',
            params=params
        )
        return payload.get('characters') or []

    def get_user_details(self, response):
        """Extract user details from the battle.net account response."""
        return {'battletag': response.get('battletag')}

    def user_data(self, access_token, *args, **kwargs):
        """Load the battletag for the authenticated user from the API."""
        return self.get_json(
            'https://eu.api.battle.net/account/user/battletag',
            params={'access_token': access_token}
        )
| StarcoderdataPython |
265416 | <filename>cf-security-group-update.py<gh_stars>1-10
import os
import boto3
from botocore.vendored import requests
def get_cloudflare_ip_list():
    """Call the CloudFlare API and return its 'result' payload (the IP lists).

    Fixed: `botocore.vendored.requests` was never a public API and has been
    removed from botocore, so the fetch is done with the standard library.

    Raises:
        Exception: if the response carries no 'result' key.
    """
    import json
    from urllib.request import urlopen

    with urlopen('https://api.cloudflare.com/client/v4/ips') as response:
        payload = json.loads(response.read().decode('utf-8'))
    if 'result' in payload:
        return payload['result']
    raise Exception("Cloudflare response error")
def get_aws_security_group(group_id):
    """Return the boto3 SecurityGroup resource for *group_id*.

    Raises:
        Exception: if the returned resource does not match the requested id.
    """
    group = boto3.resource('ec2').SecurityGroup(group_id)
    if group.group_id != group_id:
        raise Exception('Failed to retrieve Security Group')
    return group
def check_ipv4_rule_exists(rules, address, port):
    """Return True if an IPv4 ingress rule for *address* on *port* exists."""
    return any(
        ip_range['CidrIp'] == address and rule['FromPort'] == port
        for rule in rules
        for ip_range in rule['IpRanges']
    )
def add_ipv4_rule(group, address, port):
    """Authorize TCP ingress from *address* on *port* in the security group."""
    group.authorize_ingress(
        IpProtocol="tcp", CidrIp=address, FromPort=port, ToPort=port)
    print("Added %s : %i " % (address, port))
def delete_ipv4_rule(group, address, port):
    """Revoke TCP ingress from *address* on *port* in the security group."""
    group.revoke_ingress(
        IpProtocol="tcp", CidrIp=address, FromPort=port, ToPort=port)
    print("Removed %s : %i " % (address, port))
def check_ipv6_rule_exists(rules, address, port):
    """Return True if an IPv6 ingress rule for *address* on *port* exists."""
    return any(
        ip_range['CidrIpv6'] == address and rule['FromPort'] == port
        for rule in rules
        for ip_range in rule['Ipv6Ranges']
    )
def add_ipv6_rule(group, address, port):
    """Authorize TCP ingress from the IPv6 *address* on *port*."""
    permission = {
        'IpProtocol': "tcp",
        'FromPort': port,
        'ToPort': port,
        'Ipv6Ranges': [{'CidrIpv6': address}],
    }
    group.authorize_ingress(IpPermissions=[permission])
    print("Added %s : %i " % (address, port))
def delete_ipv6_rule(group, address, port):
    """Revoke TCP ingress from the IPv6 *address* on *port*."""
    permission = {
        'IpProtocol': "tcp",
        'FromPort': port,
        'ToPort': port,
        'Ipv6Ranges': [{'CidrIpv6': address}],
    }
    group.revoke_ingress(IpPermissions=[permission])
    print("Removed %s : %i " % (address, port))
def lambda_handler(event, context):
    """AWS Lambda main function: sync the security group with Cloudflare IPs.

    Reads PORTS_LIST (comma-separated ports) and SECURITY_GROUP_ID from the
    environment, adds any missing ingress rules for each Cloudflare
    CIDR/port pair, and removes rules whose CIDR Cloudflare no longer
    publishes.
    """
    # Fixed: under Python 3, map() returns a lazy one-shot iterator -- it is
    # always truthy (so the [80] fallback never fired) and is exhausted after
    # the first inner loop, silently skipping every later CIDR/port pair.
    # Materialize a list; skipping blank entries also makes the fallback
    # reachable instead of raising on int('') for an empty PORTS_LIST.
    ports = [int(p) for p in os.environ['PORTS_LIST'].split(",") if p.strip()]
    if not ports:
        ports = [80]
    security_group = get_aws_security_group(os.environ['SECURITY_GROUP_ID'])
    current_rules = security_group.ip_permissions
    ip_addresses = get_cloudflare_ip_list()
    ## IPv4
    # add new addresses
    for ipv4_cidr in ip_addresses['ipv4_cidrs']:
        for port in ports:
            if not check_ipv4_rule_exists(current_rules, ipv4_cidr, port):
                add_ipv4_rule(security_group, ipv4_cidr, port)
    # remove old addresses
    for port in ports:
        for rule in current_rules:
            # is it necessary/correct to check both From and To?
            if rule['FromPort'] == port and rule['ToPort'] == port:
                for ip_range in rule['IpRanges']:
                    if ip_range['CidrIp'] not in ip_addresses['ipv4_cidrs']:
                        delete_ipv4_rule(security_group, ip_range['CidrIp'], port)
    ## IPv6 -- because of boto3 syntax, this has to be separate
    # add new addresses
    for ipv6_cidr in ip_addresses['ipv6_cidrs']:
        for port in ports:
            if not check_ipv6_rule_exists(current_rules, ipv6_cidr, port):
                add_ipv6_rule(security_group, ipv6_cidr, port)
    # remove old addresses
    for port in ports:
        for rule in current_rules:
            for ip_range in rule['Ipv6Ranges']:
                # Fixed: the port was read from ip_range['FromPort'], but
                # Ipv6Ranges entries only carry CidrIpv6 -- FromPort lives on
                # the rule (matching the IPv4 branch above).
                if ip_range['CidrIpv6'] not in ip_addresses['ipv6_cidrs'] and port == rule['FromPort']:
                    delete_ipv6_rule(security_group, ip_range['CidrIpv6'], port)
| StarcoderdataPython |
4955554 | from __future__ import print_function
import io
import sys
import unittest
import collections
import yaml
from ..modules import cli
from ..modules import helpers
from ..modules import service
from ..modules.aux_services import Postgres, Redis
from ..modules.elastic_stack import ApmServer, Elasticsearch
from ..modules.helpers import parse_version
from ..modules.opbeans import (
OpbeansService, OpbeansDotnet, OpbeansGo, OpbeansJava, OpbeansNode, OpbeansPython,
OpbeansRuby, OpbeansRum, OpbeansLoadGenerator
)
from ..modules.cli import discover_services, LocalSetup
from .service_tests import ServiceTest
# Prefer the stdlib mock (Python 3), fall back to the external `mock`
# package (Python 2); if neither is importable, install a no-op stand-in so
# modules that merely decorate with mock.patch still import cleanly.
try:
    import unittest.mock as mock
except ImportError:
    try:
        import mock
    except ImportError:
        class IgnoreMock(object):
            @staticmethod
            def patch(_):
                # Decorator that swallows the wrapped function entirely.
                return lambda *args: None
        mock = IgnoreMock()
# Py2/Py3 shim: pick the in-memory buffer type whose contents are native
# `str` objects on the running interpreter.
stringIO = io.StringIO if sys.version_info[0] == 3 else io.BytesIO
def opbeans_services():
    """Yield every discovered service class that subclasses OpbeansService."""
    return (svc for svc in discover_services() if issubclass(svc, OpbeansService))
class OpbeansServiceTest(ServiceTest):
def test_opbeans_dotnet(self):
opbeans_go = OpbeansDotnet(version="6.3.10").render()
self.assertEqual(
opbeans_go, yaml.load("""
opbeans-dotnet:
build:
dockerfile: Dockerfile
context: docker/opbeans/dotnet
args:
- DOTNET_AGENT_BRANCH=master
- DOTNET_AGENT_REPO=elastic/apm-agent-dotnet
- DOTNET_AGENT_VERSION=
- OPBEANS_DOTNET_BRANCH=master
- OPBEANS_DOTNET_REPO=elastic/opbeans-dotnet
container_name: localtesting_6.3.10_opbeans-dotnet
ports:
- "127.0.0.1:3004:3000"
environment:
- ELASTIC_APM_SERVICE_NAME=opbeans-dotnet
- ELASTIC_APM_SERVER_URLS=http://apm-server:8200
- ELASTIC_APM_JS_SERVER_URL=http://localhost:8200
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- ELASTIC_APM_FLUSH_INTERVAL=5
- ELASTIC_APM_TRANSACTION_MAX_SPANS=50
- ELASTIC_APM_TRANSACTION_SAMPLE_RATE=1
- ELASTICSEARCH_URL=elasticsearch:9200
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
depends_on:
elasticsearch:
condition: service_healthy
apm-server:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "--write-out", "'HTTP %{http_code}'", "--fail", "--silent", "--output", "/dev/null", "http://opbeans-dotnet:3000/"]
interval: 10s
retries: 36""")
)
def test_opbeans_dotnet_version(self):
opbeans = OpbeansDotnet(opbeans_dotnet_version="1.0").render()["opbeans-dotnet"]
value = [e for e in opbeans["build"]["args"] if e.startswith("DOTNET_AGENT_VERSION")]
self.assertEqual(value, ["DOTNET_AGENT_VERSION=1.0"])
def test_opbeans_dotnet_branch(self):
opbeans = OpbeansDotnet(opbeans_dotnet_branch="1.x").render()["opbeans-dotnet"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_DOTNET_BRANCH")]
self.assertEqual(branch, ["OPBEANS_DOTNET_BRANCH=1.x"])
def test_opbeans_dotnet_repo(self):
opbeans = OpbeansDotnet(opbeans_dotnet_repo="foo/bar").render()["opbeans-dotnet"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_DOTNET_REPO")]
self.assertEqual(branch, ["OPBEANS_DOTNET_REPO=foo/bar"])
def test_opbeans_go(self):
opbeans_go = OpbeansGo(version="6.3.10").render()
self.assertEqual(
opbeans_go, yaml.load("""
opbeans-go:
build:
dockerfile: Dockerfile
context: docker/opbeans/go
args:
- GO_AGENT_BRANCH=master
- GO_AGENT_REPO=elastic/apm-agent-go
- OPBEANS_GO_BRANCH=master
- OPBEANS_GO_REPO=elastic/opbeans-go
container_name: localtesting_6.3.10_opbeans-go
ports:
- "127.0.0.1:3003:3000"
environment:
- ELASTIC_APM_SERVICE_NAME=opbeans-go
- ELASTIC_APM_SERVER_URL=http://apm-server:8200
- ELASTIC_APM_JS_SERVER_URL=http://localhost:8200
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- ELASTIC_APM_FLUSH_INTERVAL=5
- ELASTIC_APM_TRANSACTION_MAX_SPANS=50
- ELASTIC_APM_TRANSACTION_SAMPLE_RATE=1
- ELASTICSEARCH_URL=elasticsearch:9200
- OPBEANS_CACHE=redis://redis:6379
- OPBEANS_PORT=3000
- PGHOST=postgres
- PGPORT=5432
- PGUSER=postgres
- PGPASSWORD=<PASSWORD>
- PGSSLMODE=disable
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
depends_on:
elasticsearch:
condition: service_healthy
postgres:
condition: service_healthy
redis:
condition: service_healthy
apm-server:
condition: service_healthy""") # noqa: 501
)
def test_opbeans_go_branch(self):
opbeans = OpbeansGo(opbeans_go_branch="1.x").render()["opbeans-go"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_GO_BRANCH")]
self.assertEqual(branch, ["OPBEANS_GO_BRANCH=1.x"])
def test_opbeans_go_repo(self):
opbeans = OpbeansGo(opbeans_go_repo="foo/bar").render()["opbeans-go"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_GO_REPO")]
self.assertEqual(branch, ["OPBEANS_GO_REPO=foo/bar"])
def test_opbeans_java(self):
opbeans_java = OpbeansJava(version="6.3.10").render()
self.assertEqual(
opbeans_java, yaml.load("""
opbeans-java:
build:
dockerfile: Dockerfile
context: docker/opbeans/java
args:
- JAVA_AGENT_BRANCH=
- JAVA_AGENT_REPO=elastic/apm-agent-java
- OPBEANS_JAVA_IMAGE=opbeans/opbeans-java
- OPBEANS_JAVA_VERSION=latest
container_name: localtesting_6.3.10_opbeans-java
ports:
- "127.0.0.1:3002:3000"
environment:
- ELASTIC_APM_SERVICE_NAME=opbeans-java
- ELASTIC_APM_APPLICATION_PACKAGES=co.elastic.apm.opbeans
- ELASTIC_APM_SERVER_URL=http://apm-server:8200
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- ELASTIC_APM_FLUSH_INTERVAL=5
- ELASTIC_APM_TRANSACTION_MAX_SPANS=50
- ELASTIC_APM_TRANSACTION_SAMPLE_RATE=1
- ELASTIC_APM_ENABLE_LOG_CORRELATION=true
- DATABASE_URL=jdbc:postgresql://postgres/opbeans?user=postgres&password=<PASSWORD>
- DATABASE_DIALECT=POSTGRESQL
- DATABASE_DRIVER=org.postgresql.Driver
- REDIS_URL=redis://redis:6379
- ELASTICSEARCH_URL=elasticsearch:9200
- OPBEANS_SERVER_PORT=3000
- JAVA_AGENT_VERSION
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
depends_on:
elasticsearch:
condition: service_healthy
postgres:
condition: service_healthy
apm-server:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "--write-out", "'HTTP %{http_code}'", "--fail", "--silent", "--output", "/dev/null", "http://opbeans-java:3000/"]
interval: 10s
retries: 36""") # noqa: 501
)
def test_opbeans_java_image(self):
opbeans = OpbeansJava(opbeans_java_image="foo").render()["opbeans-java"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_JAVA_IMAGE")]
self.assertEqual(branch, ["OPBEANS_JAVA_IMAGE=foo"])
def test_opbeans_java_image(self):
opbeans = OpbeansJava(opbeans_java_version="bar").render()["opbeans-java"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_JAVA_VERSION")]
self.assertEqual(branch, ["OPBEANS_JAVA_VERSION=bar"])
def test_opbeans_node(self):
opbeans_node = OpbeansNode(version="6.2.4").render()
self.assertEqual(
opbeans_node, yaml.load("""
opbeans-node:
build:
dockerfile: Dockerfile
context: docker/opbeans/node
args:
- OPBEANS_NODE_IMAGE=opbeans/opbeans-node
- OPBEANS_NODE_VERSION=latest
container_name: localtesting_6.2.4_opbeans-node
ports:
- "127.0.0.1:3000:3000"
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
environment:
- ELASTIC_APM_SERVER_URL=http://apm-server:8200
- ELASTIC_APM_JS_SERVER_URL=http://localhost:8200
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- ELASTIC_APM_LOG_LEVEL=info
- ELASTIC_APM_SOURCE_LINES_ERROR_APP_FRAMES
- ELASTIC_APM_SOURCE_LINES_SPAN_APP_FRAMES=5
- ELASTIC_APM_SOURCE_LINES_ERROR_LIBRARY_FRAMES
- ELASTIC_APM_SOURCE_LINES_SPAN_LIBRARY_FRAMES
- WORKLOAD_ELASTIC_APM_APP_NAME=workload
- WORKLOAD_ELASTIC_APM_SERVER_URL=http://apm-server:8200
- WORKLOAD_DISABLED=False
- OPBEANS_SERVER_PORT=3000
- OPBEANS_SERVER_HOSTNAME=opbeans-node
- NODE_ENV=production
- PGHOST=postgres
- PGPASSWORD=<PASSWORD>
- PGPORT=5432
- PGUSER=postgres
- REDIS_URL=redis://redis:6379
- NODE_AGENT_BRANCH=
- NODE_AGENT_REPO=
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
depends_on:
redis:
condition: service_healthy
postgres:
condition: service_healthy
apm-server:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-q", "--server-response", "-O", "/dev/null", "http://opbeans-node:3000/"]
interval: 10s
retries: 12
volumes:
- ./docker/opbeans/node/sourcemaps:/sourcemaps""") # noqa: 501
)
def test_opbeans_node_image(self):
opbeans = OpbeansNode(opbeans_node_image="foo").render()["opbeans-node"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_NODE_IMAGE")]
self.assertEqual(branch, ["OPBEANS_NODE_IMAGE=foo"])
def test_opbeans_python_version(self):
opbeans = OpbeansNode(opbeans_node_version="bar").render()["opbeans-node"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_NODE_VERSION")]
self.assertEqual(branch, ["OPBEANS_NODE_VERSION=bar"])
def test_opbeans_node_without_loadgen(self):
opbeans_node = OpbeansNode(no_opbeans_node_loadgen=True).render()["opbeans-node"]
value = [e for e in opbeans_node["environment"] if e.startswith("WORKLOAD_DISABLED")]
self.assertEqual(value, ["WORKLOAD_DISABLED=True"])
def test_opbeans_python(self):
opbeans_python = OpbeansPython(version="6.2.4").render()
self.assertEqual(
opbeans_python, yaml.load("""
opbeans-python:
build:
dockerfile: Dockerfile
context: docker/opbeans/python
args:
- OPBEANS_PYTHON_IMAGE=opbeans/opbeans-python
- OPBEANS_PYTHON_VERSION=latest
container_name: localtesting_6.2.4_opbeans-python
ports:
- "127.0.0.1:8000:3000"
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
environment:
- DATABASE_URL=postgres://postgres:verysecure@postgres/opbeans
- ELASTIC_APM_SERVICE_NAME=opbeans-python
- ELASTIC_APM_SERVER_URL=http://apm-server:8200
- ELASTIC_APM_JS_SERVER_URL=http://localhost:8200
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- ELASTIC_APM_FLUSH_INTERVAL=5
- ELASTIC_APM_TRANSACTION_MAX_SPANS=50
- ELASTIC_APM_TRANSACTION_SAMPLE_RATE=0.5
- ELASTIC_APM_SOURCE_LINES_ERROR_APP_FRAMES
- ELASTIC_APM_SOURCE_LINES_SPAN_APP_FRAMES=5
- ELASTIC_APM_SOURCE_LINES_ERROR_LIBRARY_FRAMES
- ELASTIC_APM_SOURCE_LINES_SPAN_LIBRARY_FRAMES
- REDIS_URL=redis://redis:6379
- ELASTICSEARCH_URL=elasticsearch:9200
- OPBEANS_SERVER_URL=http://opbeans-python:3000
- PYTHON_AGENT_BRANCH=
- PYTHON_AGENT_REPO=
- PYTHON_AGENT_VERSION
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
depends_on:
apm-server:
condition: service_healthy
elasticsearch:
condition: service_healthy
postgres:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "--write-out", "'HTTP %{http_code}'", "--fail", "--silent", "--output", "/dev/null", "http://opbeans-python:3000/"]
interval: 10s
retries: 12
""") # noqa: 501
)
def test_opbeans_python_agent_branch(self):
opbeans_python_6_1 = OpbeansPython(version="6.1", opbeans_python_agent_branch="1.x").render()["opbeans-python"]
branch = [e for e in opbeans_python_6_1["environment"] if e.startswith("PYTHON_AGENT_BRANCH")]
self.assertEqual(branch, ["PYTHON_AGENT_BRANCH=1.x"])
opbeans_python_master = OpbeansPython(
version="7.0.0-alpha1", opbeans_python_agent_branch="2.x").render()["opbeans-python"]
branch = [e for e in opbeans_python_master["environment"] if e.startswith("PYTHON_AGENT_BRANCH")]
self.assertEqual(branch, ["PYTHON_AGENT_BRANCH=2.x"])
def test_opbeans_python_agent_repo(self):
agent_repo_default = OpbeansPython().render()["opbeans-python"]
branch = [e for e in agent_repo_default["environment"] if e.startswith("PYTHON_AGENT_REPO")]
self.assertEqual(branch, ["PYTHON_AGENT_REPO="])
agent_repo_override = OpbeansPython(opbeans_python_agent_repo="myrepo").render()["opbeans-python"]
branch = [e for e in agent_repo_override["environment"] if e.startswith("PYTHON_AGENT_REPO")]
self.assertEqual(branch, ["PYTHON_AGENT_REPO=myrepo"])
def test_opbeans_python_agent_local_repo(self):
agent_repo_default = OpbeansPython().render()["opbeans-python"]
assert "volumes" not in agent_repo_default
agent_repo_override = OpbeansPython(opbeans_python_agent_local_repo=".").render()["opbeans-python"]
assert "volumes" in agent_repo_override, agent_repo_override
def test_opbeans_python_image(self):
opbeans = OpbeansPython(opbeans_python_image="foo").render()["opbeans-python"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_PYTHON_IMAGE")]
self.assertEqual(branch, ["OPBEANS_PYTHON_IMAGE=foo"])
def test_opbeans_python_version(self):
opbeans = OpbeansPython(opbeans_python_version="bar").render()["opbeans-python"]
branch = [e for e in opbeans["build"]["args"] if e.startswith("OPBEANS_PYTHON_VERSION")]
self.assertEqual(branch, ["OPBEANS_PYTHON_VERSION=bar"])
def test_opbeans_ruby(self):
"""Full docker-compose rendering of opbeans-ruby at a pinned stack version.

Compares the rendered service dict against a golden YAML literal; any
change to OpbeansRuby defaults must update this fixture too.
"""
opbeans_ruby = OpbeansRuby(version="6.3.10").render()
# yaml.load here parses a trusted in-source literal only.
self.assertEqual(
opbeans_ruby, yaml.load("""
opbeans-ruby:
build:
dockerfile: Dockerfile
context: docker/opbeans/ruby
args:
- OPBEANS_RUBY_IMAGE=opbeans/opbeans-ruby
- OPBEANS_RUBY_VERSION=latest
container_name: localtesting_6.3.10_opbeans-ruby
ports:
- "127.0.0.1:3001:3000"
environment:
- ELASTIC_APM_SERVER_URL=http://apm-server:8200
- ELASTIC_APM_SERVICE_NAME=opbeans-ruby
- ELASTIC_APM_VERIFY_SERVER_CERT=true
- DATABASE_URL=postgres://postgres:verysecure@postgres/opbeans-ruby
- REDIS_URL=redis://redis:6379
- ELASTICSEARCH_URL=elasticsearch:9200
- OPBEANS_SERVER_URL=http://opbeans-ruby:3000
- RAILS_ENV=production
- RAILS_LOG_TO_STDOUT=1
- PORT=3000
- RUBY_AGENT_BRANCH=
- RUBY_AGENT_REPO=
- RUBY_AGENT_VERSION
- OPBEANS_DT_PROBABILITY=0.50
- ELASTIC_APM_ENVIRONMENT=production
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
depends_on:
redis:
condition: service_healthy
elasticsearch:
condition: service_healthy
postgres:
condition: service_healthy
apm-server:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "-q", "--server-response", "-O", "/dev/null", "http://opbeans-ruby:3000/"]
interval: 10s
retries: 50""") # noqa: 501
)
def test_opbeans_ruby_image(self):
    """The OPBEANS_RUBY_IMAGE build arg should carry the image override."""
    build_args = OpbeansRuby(opbeans_ruby_image="foo").render()["opbeans-ruby"]["build"]["args"]
    matched = [arg for arg in build_args if arg.startswith("OPBEANS_RUBY_IMAGE")]
    self.assertEqual(matched, ["OPBEANS_RUBY_IMAGE=foo"])
def test_opbeans_ruby_version(self):
    """The OPBEANS_RUBY_VERSION build arg should carry the version override."""
    build_args = OpbeansRuby(opbeans_ruby_version="bar").render()["opbeans-ruby"]["build"]["args"]
    matched = [arg for arg in build_args if arg.startswith("OPBEANS_RUBY_VERSION")]
    self.assertEqual(matched, ["OPBEANS_RUBY_VERSION=bar"])
def test_opbeans_rum(self):
"""Full docker-compose rendering of opbeans-rum at a pinned stack version.

Golden-fixture comparison; opbeans-rum runs headless Chrome (hence
SYS_ADMIN and port 9222) against opbeans-node.
"""
opbeans_rum = OpbeansRum(version="6.3.10").render()
self.assertEqual(
opbeans_rum, yaml.load("""
opbeans-rum:
build:
dockerfile: Dockerfile
context: docker/opbeans/rum
container_name: localtesting_6.3.10_opbeans-rum
environment:
- OPBEANS_BASE_URL=http://opbeans-node:3000
- ELASTIC_APM_VERIFY_SERVER_CERT=true
cap_add:
- SYS_ADMIN
ports:
- "127.0.0.1:9222:9222"
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
depends_on:
opbeans-node:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "--write-out", "'HTTP %{http_code}'", "--fail", "--silent", "--output", "/dev/null", "http://localhost:9222/"]
interval: 10s
retries: 12""") # noqa: 501
)
def test_opbeans_elasticsearch_urls(self):
    """Every opbeans service should pass one or many elasticsearch URLs through its environment."""
    single = ["elasticsearch01:9200"]
    double = ["elasticsearch01:9200", "elasticsearch02:9200"]
    targets = (
        (OpbeansDotnet, "opbeans-dotnet"),
        (OpbeansGo, "opbeans-go"),
        (OpbeansJava, "opbeans-java"),
        (OpbeansPython, "opbeans-python"),
        (OpbeansRuby, "opbeans-ruby"),
    )
    for cls, name in targets:
        rendered = cls(opbeans_elasticsearch_urls=single).render()[name]
        self.assertTrue("elasticsearch" in rendered['depends_on'])
        self.assertTrue("ELASTICSEARCH_URL=elasticsearch01:9200" in rendered['environment'])
        rendered = cls(opbeans_elasticsearch_urls=double).render()[name]
        self.assertTrue("elasticsearch" in rendered['depends_on'])
        self.assertTrue("ELASTICSEARCH_URL=elasticsearch01:9200,elasticsearch02:9200" in rendered['environment'])
def test_opbeans_service_environment(self):
    """ELASTIC_APM_ENVIRONMENT defaults to production and honours each service's override option."""
    cases = (
        (OpbeansDotnet, "opbeans-dotnet", "opbeans_dotnet_service_environment"),
        (OpbeansGo, "opbeans-go", "opbeans_go_service_environment"),
        (OpbeansJava, "opbeans-java", "opbeans_java_service_environment"),
        (OpbeansPython, "opbeans-python", "opbeans_python_service_environment"),
        (OpbeansRuby, "opbeans-ruby", "opbeans_ruby_service_environment"),
        (OpbeansNode, "opbeans-node", "opbeans_node_service_environment"),
    )
    for cls, name, option in cases:
        default_env = cls().render()[name]['environment']
        self.assertTrue("ELASTIC_APM_ENVIRONMENT=production" in default_env)
        override_env = cls(**{option: "test"}).render()[name]['environment']
        self.assertTrue("ELASTIC_APM_ENVIRONMENT=test" in override_env)
def test_opbeans_secret_token(self):
    """Every opbeans service should receive ELASTIC_APM_SECRET_TOKEN when one is configured.

    Also guards against opbeans_services() yielding nothing, which would make
    the loop body (and thus the whole test) pass vacuously.

    Bug fix: the original `if cls is None: self.fail(...)` guard could never
    fire — with an empty iterable the loop variable is unbound (NameError),
    and otherwise `cls` is a class object, never None. Count iterations instead.
    """
    tested = 0
    for cls in opbeans_services():
        services = cls(version="6.5.0", apm_server_secret_token="supersecret").render()
        # each render() returns a single-service dict; grab its only value
        opbeans_service = list(services.values())[0]
        secret_token = [e for e in opbeans_service["environment"] if e.startswith("ELASTIC_APM_SECRET_TOKEN=")]
        self.assertEqual(["ELASTIC_APM_SECRET_TOKEN=supersecret"], secret_token, cls.__name__)
        tested += 1
    if tested == 0:
        self.fail("no opbeans services tested")
def test_opbeans_loadgen(self):
"""Load generator should target only the enabled opbeans services, honouring per-service opt-out and RPM settings.

opbeans-node is enabled but excluded from load generation via
no_opbeans_node_loadgen, so only python and ruby appear in OPBEANS_URLS/RPMS.
"""
opbeans_load_gen = OpbeansLoadGenerator(
version="6.3.1",
enable_opbeans_python=True,
enable_opbeans_ruby=True,
enable_opbeans_node=True,
no_opbeans_node_loadgen=True,
opbeans_python_loadgen_rpm=50,
opbeans_ruby_loadgen_rpm=10,
).render()
assert opbeans_load_gen == yaml.load("""
opbeans-load-generator:
image: opbeans/opbeans-loadgen:latest
container_name: localtesting_6.3.1_opbeans-load-generator
depends_on:
opbeans-python: {condition: service_healthy}
opbeans-ruby: {condition: service_healthy}
environment:
- 'OPBEANS_URLS=opbeans-python:http://opbeans-python:3000,opbeans-ruby:http://opbeans-ruby:3000'
- 'OPBEANS_RPMS=opbeans-python:50,opbeans-ruby:10'
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}""")
class PostgresServiceTest(ServiceTest):
"""Golden-fixture test for the Postgres backing service."""
def test_postgres(self):
"""Full docker-compose rendering of postgres at a pinned stack version."""
postgres = Postgres(version="6.2.4").render()
# NOTE(review): "<PASSWORD>" below looks like a redaction placeholder,
# not the real expected literal — confirm against the Postgres service defaults.
self.assertEqual(
postgres, yaml.load("""
postgres:
image: postgres:10
container_name: localtesting_6.2.4_postgres
environment:
- POSTGRES_DB=opbeans
- POSTGRES_PASSWORD=<PASSWORD>
ports:
- 5432:5432
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
volumes:
- ./docker/opbeans/sql:/docker-entrypoint-initdb.d
- pgdata:/var/lib/postgresql/data
healthcheck:
interval: 10s
test: ["CMD", "pg_isready", "-h", "postgres", "-U", "postgres"]""")
)
class RedisServiceTest(ServiceTest):
"""Golden-fixture test for the Redis backing service."""
def test_redis(self):
"""Full docker-compose rendering of redis at a pinned stack version (persistence disabled via --save '')."""
redis = Redis(version="6.2.4").render()
self.assertEqual(
redis, yaml.load("""
redis:
image: redis:4
container_name: localtesting_6.2.4_redis
command: "--save ''"
ports:
- 6379:6379
logging:
driver: 'json-file'
options:
max-size: '2m'
max-file: '5'
healthcheck:
interval: 10s
test: ["CMD", "redis-cli", "ping"]""")
)
#
# Local setup tests
#
class LocalTest(unittest.TestCase):
"""End-to-end tests of the `start` CLI: render a compose file in-memory and inspect it."""
maxDiff = None  # show full diffs for the large compose dict comparisons
# Base argv shared by every test: write compose to stdout ("-") so tests
# can capture it, and skip apm-server self-instrumentation.
common_setup_args = ["start", "--docker-compose-path", "-", "--no-apm-server-self-instrument"]
def test_service_registry(self):
    """discover_services() should find ApmServer among the registered services."""
    self.assertIn(ApmServer, discover_services())
def test_start_6_2_default(self):
"""Default `start 6.2` compose output: apm-server + elasticsearch (platinum image) + kibana (x-pack image).

Pre-6.3 stacks use the -platinum / -x-pack image flavours and the
apm-server.frontend.* (not rum.*) settings.
"""
docker_compose_yml = stringIO()
image_cache_dir = "/foo"
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'6.2': '6.2.10'}):
setup = LocalSetup(argv=self.common_setup_args +
["6.2", "--image-cache-dir", image_cache_dir, "--no-xpack-secure"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
want = yaml.load("""
version: '2.1'
services:
apm-server:
cap_add: [CHOWN, DAC_OVERRIDE, SETGID, SETUID]
cap_drop: [ALL]
command: [apm-server, -e, --httpprof, ':6060', -E, apm-server.frontend.enabled=true, -E, apm-server.frontend.rate_limit=100000,
-E, 'apm-server.host=0.0.0.0:8200', -E, apm-server.read_timeout=1m, -E, apm-server.shutdown_timeout=2m,
-E, apm-server.write_timeout=1m, -E, logging.json=true, -E, logging.metrics.enabled=false,
-E, 'setup.kibana.host=kibana:5601', -E, setup.template.settings.index.number_of_replicas=0,
-E, setup.template.settings.index.number_of_shards=1, -E, setup.template.settings.index.refresh_interval=1ms,
-E, xpack.monitoring.elasticsearch=true, -E, xpack.monitoring.enabled=true, -E, setup.dashboards.enabled=true,
-E, 'output.elasticsearch.hosts=["elasticsearch:9200"]', -E, output.elasticsearch.enabled=true]
container_name: localtesting_6.2.10_apm-server
depends_on:
elasticsearch: {condition: service_healthy}
kibana: {condition: service_healthy}
healthcheck:
interval: 10s
retries: 12
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://localhost:8200/healthcheck']
image: docker.elastic.co/apm/apm-server:6.2.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.2.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:8200:8200', '127.0.0.1:6060:6060']
elasticsearch:
container_name: localtesting_6.2.10_elasticsearch
environment: [bootstrap.memory_lock=true, cluster.name=docker-cluster, cluster.routing.allocation.disk.threshold_enabled=false, discovery.type=single-node, path.repo=/usr/share/elasticsearch/data/backups, 'ES_JAVA_OPTS=-Xms1g -Xmx1g', path.data=/usr/share/elasticsearch/data/6.2.10, xpack.security.enabled=false, xpack.license.self_generated.type=trial]
healthcheck:
interval: '20'
retries: 10
test: [CMD-SHELL, 'curl -s http://localhost:9200/_cluster/health | grep -vq ''"status":"red"''']
image: docker.elastic.co/elasticsearch/elasticsearch-platinum:6.2.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.2.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:9200:9200']
ulimits:
memlock: {hard: -1, soft: -1}
volumes: ['esdata:/usr/share/elasticsearch/data']
kibana:
container_name: localtesting_6.2.10_kibana
depends_on:
elasticsearch: {condition: service_healthy}
environment: {ELASTICSEARCH_URL: 'elasticsearch:9200', SERVER_NAME: kibana.example.org, XPACK_MONITORING_ENABLED: 'true'}
healthcheck:
interval: 10s
retries: 20
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://kibana:5601/api/status']
image: docker.elastic.co/kibana/kibana-x-pack:6.2.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.2.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:5601:5601']
networks:
default: {name: apm-integration-testing}
volumes:
esdata: {driver: local}
pgdata: {driver: local}
""") # noqa: 501
self.assertDictEqual(got, want)
def test_start_6_3_default(self):
"""Default `start 6.3` compose output: standard (non-platinum) images and xpack.monitoring.collection enabled."""
docker_compose_yml = stringIO()
image_cache_dir = "/foo"
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'6.3': '6.3.10'}):
setup = LocalSetup(argv=self.common_setup_args +
["6.3", "--image-cache-dir", image_cache_dir, "--no-xpack-secure"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
want = yaml.load("""
version: '2.1'
services:
apm-server:
cap_add: [CHOWN, DAC_OVERRIDE, SETGID, SETUID]
cap_drop: [ALL]
command: [apm-server, -e, --httpprof, ':6060', -E, apm-server.frontend.enabled=true, -E, apm-server.frontend.rate_limit=100000,
-E, 'apm-server.host=0.0.0.0:8200', -E, apm-server.read_timeout=1m, -E, apm-server.shutdown_timeout=2m,
-E, apm-server.write_timeout=1m, -E, logging.json=true, -E, logging.metrics.enabled=false,
-E, 'setup.kibana.host=kibana:5601', -E, setup.template.settings.index.number_of_replicas=0,
-E, setup.template.settings.index.number_of_shards=1, -E, setup.template.settings.index.refresh_interval=1ms,
-E, xpack.monitoring.elasticsearch=true, -E, xpack.monitoring.enabled=true, -E, setup.dashboards.enabled=true,
-E, 'output.elasticsearch.hosts=["elasticsearch:9200"]', -E, output.elasticsearch.enabled=true ]
container_name: localtesting_6.3.10_apm-server
depends_on:
elasticsearch: {condition: service_healthy}
kibana: {condition: service_healthy}
healthcheck:
interval: 10s
retries: 12
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://localhost:8200/healthcheck']
image: docker.elastic.co/apm/apm-server:6.3.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.3.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:8200:8200', '127.0.0.1:6060:6060']
elasticsearch:
container_name: localtesting_6.3.10_elasticsearch
environment: [bootstrap.memory_lock=true, cluster.name=docker-cluster, cluster.routing.allocation.disk.threshold_enabled=false, discovery.type=single-node, path.repo=/usr/share/elasticsearch/data/backups, 'ES_JAVA_OPTS=-Xms1g -Xmx1g', path.data=/usr/share/elasticsearch/data/6.3.10, xpack.security.enabled=false, xpack.license.self_generated.type=trial, xpack.monitoring.collection.enabled=true]
healthcheck:
interval: '20'
retries: 10
test: [CMD-SHELL, 'curl -s http://localhost:9200/_cluster/health | grep -vq ''"status":"red"''']
image: docker.elastic.co/elasticsearch/elasticsearch:6.3.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.3.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:9200:9200']
ulimits:
memlock: {hard: -1, soft: -1}
volumes: ['esdata:/usr/share/elasticsearch/data']
kibana:
container_name: localtesting_6.3.10_kibana
depends_on:
elasticsearch: {condition: service_healthy}
environment: {ELASTICSEARCH_URL: 'elasticsearch:9200', SERVER_NAME: kibana.example.org, XPACK_MONITORING_ENABLED: 'true', XPACK_XPACK_MAIN_TELEMETRY_ENABLED: 'false'}
healthcheck:
interval: 10s
retries: 20
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://kibana:5601/api/status']
image: docker.elastic.co/kibana/kibana:6.3.10-SNAPSHOT
labels: [co.elastic.apm.stack-version=6.3.10]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:5601:5601']
networks:
default: {name: apm-integration-testing}
volumes:
esdata: {driver: local}
pgdata: {driver: local}
""") # noqa: 501
self.assertDictEqual(got, want)
def test_start_master_default(self):
"""Default `start master` compose output with xpack security enabled (users, roles, kibana credentials).

NOTE(review): the "<PASSWORD>" tokens in the fixture look like redaction
placeholders rather than real expected literals — confirm against the
ApmServer/Kibana service defaults.
"""
docker_compose_yml = stringIO()
image_cache_dir = "/foo"
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
setup = LocalSetup(argv=self.common_setup_args + ["master", "--image-cache-dir", image_cache_dir])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
want = yaml.load("""
version: '2.1'
services:
apm-server:
cap_add: [CHOWN, DAC_OVERRIDE, SETGID, SETUID]
cap_drop: [ALL]
command: [apm-server, -e, --httpprof, ':6060', -E, apm-server.rum.enabled=true, -E, apm-server.rum.event_rate.limit=1000,
-E, 'apm-server.host=0.0.0.0:8200', -E, apm-server.read_timeout=1m, -E, apm-server.shutdown_timeout=2m,
-E, apm-server.write_timeout=1m, -E, logging.json=true, -E, logging.metrics.enabled=false,
-E, 'setup.kibana.host=kibana:5601', -E, setup.template.settings.index.number_of_replicas=0,
-E, setup.template.settings.index.number_of_shards=1, -E, setup.template.settings.index.refresh_interval=1ms,
-E, monitoring.elasticsearch=true, -E, monitoring.enabled=true,
-E, apm-server.kibana.enabled=true, -E, 'apm-server.kibana.host=kibana:5601', -E, apm-server.agent.config.cache.expiration=30s,
-E, apm-server.kibana.username=apm_server_user, -E, apm-server.kibana.password=<PASSWORD>,
-E, 'output.elasticsearch.hosts=["elasticsearch:9200"]',
-E, output.elasticsearch.username=apm_server_user, -E, output.elasticsearch.password=<PASSWORD>,
-E, output.elasticsearch.enabled=true,
-E, "output.elasticsearch.pipelines=[{pipeline: 'apm'}]", -E, 'apm-server.register.ingest.pipeline.enabled=true'
]
container_name: localtesting_8.0.0_apm-server
depends_on:
elasticsearch: {condition: service_healthy}
kibana: {condition: service_healthy}
healthcheck:
interval: 10s
retries: 12
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://localhost:8200/']
image: docker.elastic.co/apm/apm-server:8.0.0-SNAPSHOT
labels: [co.elastic.apm.stack-version=8.0.0]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:8200:8200', '127.0.0.1:6060:6060']
elasticsearch:
container_name: localtesting_8.0.0_elasticsearch
environment: [
bootstrap.memory_lock=true,
cluster.name=docker-cluster,
cluster.routing.allocation.disk.threshold_enabled=false,
discovery.type=single-node,
path.repo=/usr/share/elasticsearch/data/backups,
'ES_JAVA_OPTS=-XX:UseAVX=2 -Xms1g -Xmx1g',
path.data=/usr/share/elasticsearch/data/8.0.0,
indices.id_field_data.enabled=true,
xpack.security.authc.anonymous.roles=remote_monitoring_collector,
xpack.security.authc.realms.file.file1.order=0,
xpack.security.authc.realms.native.native1.order=1,
xpack.security.authc.token.enabled=true,
xpack.security.authc.api_key.enabled=true,
xpack.security.enabled=true,
xpack.license.self_generated.type=trial,
xpack.monitoring.collection.enabled=true
]
healthcheck:
interval: '20'
retries: 10
test: [CMD-SHELL, 'curl -s http://localhost:9200/_cluster/health | grep -vq ''"status":"red"''']
image: docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT
labels: [co.elastic.apm.stack-version=8.0.0]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:9200:9200']
ulimits:
memlock: {hard: -1, soft: -1}
volumes: [
'esdata:/usr/share/elasticsearch/data',
'./docker/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml',
'./docker/elasticsearch/users:/usr/share/elasticsearch/config/users',
'./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles'
]
kibana:
container_name: localtesting_8.0.0_kibana
depends_on:
elasticsearch: {condition: service_healthy}
environment: {
ELASTICSEARCH_PASSWORD: <PASSWORD>,
ELASTICSEARCH_URL: 'elasticsearch:9200',
ELASTICSEARCH_USERNAME: kibana_system_user,
SERVER_NAME: kibana.example.org,
STATUS_ALLOWANONYMOUS: 'true',
XPACK_MONITORING_ENABLED: 'true',
XPACK_XPACK_MAIN_TELEMETRY_ENABLED: 'false'
}
healthcheck:
interval: 10s
retries: 20
test: [CMD, curl, --write-out, '''HTTP %{http_code}''', --fail, --silent, --output, /dev/null, 'http://kibana:5601/api/status']
image: docker.elastic.co/kibana/kibana:8.0.0-SNAPSHOT
labels: [co.elastic.apm.stack-version=8.0.0]
logging:
driver: json-file
options: {max-file: '5', max-size: 2m}
ports: ['127.0.0.1:5601:5601']
networks:
default: {name: apm-integration-testing}
volumes:
esdata: {driver: local}
pgdata: {driver: local}
""") # noqa: 501
self.assertDictEqual(got, want)
@mock.patch(cli.__name__ + ".load_images")
def test_start_6_x_xpack_secure(self, _ignore_load_images):
"""On 6.x with xpack security, credentials flow apm-server/kibana -> elasticsearch and auditing can be enabled.

6.x uses the legacy realm config keys (xpack.security.authc.realms.<name>.type),
unlike 7.0+ which nests by realm type — compare test_start_7_0_xpack_secure.
"""
docker_compose_yml = stringIO()
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'6.6': '6.6.10'}):
setup = LocalSetup(argv=self.common_setup_args + ["6.6", "--elasticsearch-xpack-audit"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
# apm-server should use user/pass -> es
apm_server_cmd = got["services"]["apm-server"]["command"]
self.assertTrue(any(cmd.startswith("output.elasticsearch.password=") for cmd in apm_server_cmd), apm_server_cmd)
self.assertTrue(any(cmd.startswith("output.elasticsearch.username=") for cmd in apm_server_cmd), apm_server_cmd)
self.assertFalse(any(cmd == "setup.dashboards.enabled=true" for cmd in apm_server_cmd), apm_server_cmd)
# elasticsearch configuration
es_env = got["services"]["elasticsearch"]["environment"]
## auditing enabled
self.assertIn("xpack.security.audit.enabled=true", es_env)
## allow anonymous healthcheck
self.assertIn("xpack.security.authc.anonymous.roles=remote_monitoring_collector", es_env)
## file based realm
self.assertIn("xpack.security.authc.realms.file1.type=file", es_env)
## native realm
self.assertIn("xpack.security.authc.realms.native1.type=native", es_env)
# kibana should use user/pass -> es
kibana_env = got["services"]["kibana"]["environment"]
self.assertIn("ELASTICSEARCH_PASSWORD", kibana_env)
self.assertIn("ELASTICSEARCH_USERNAME", kibana_env)
## allow anonymous healthcheck
self.assertIn("STATUS_ALLOWANONYMOUS", kibana_env)
@mock.patch(cli.__name__ + ".load_images")
def test_start_7_0_xpack_secure(self, _ignore_load_images):
"""On master (xpack security default-on), credentials flow to elasticsearch; auditing stays off by default.

Uses the 7.0+ realm config keys (xpack.security.authc.realms.<type>.<name>.order).
"""
docker_compose_yml = stringIO()
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
setup = LocalSetup(argv=self.common_setup_args + ["master"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
# apm-server should use user/pass -> es
apm_server_cmd = got["services"]["apm-server"]["command"]
self.assertTrue(any(cmd.startswith("output.elasticsearch.password=") for cmd in apm_server_cmd), apm_server_cmd)
self.assertTrue(any(cmd.startswith("output.elasticsearch.username=") for cmd in apm_server_cmd), apm_server_cmd)
# elasticsearch configuration
es_env = got["services"]["elasticsearch"]["environment"]
# auditing disabled by default
self.assertNotIn("xpack.security.audit.enabled=true", es_env)
# allow anonymous healthcheck
self.assertIn("xpack.security.authc.anonymous.roles=remote_monitoring_collector", es_env)
# file based realm
self.assertIn("xpack.security.authc.realms.file.file1.order=0", es_env)
# native realm
self.assertIn("xpack.security.authc.realms.native.native1.order=1", es_env)
# kibana should use user/pass -> es
kibana_env = got["services"]["kibana"]["environment"]
self.assertIn("ELASTICSEARCH_PASSWORD", kibana_env)
self.assertIn("ELASTICSEARCH_USERNAME", kibana_env)
# allow anonymous healthcheck
self.assertIn("STATUS_ALLOWANONYMOUS", kibana_env)
@mock.patch(cli.__name__ + ".load_images")
def test_start_no_elasticesarch(self, _ignore_load_images):
"""--no-elasticsearch removes the service and apm-server's dependency on it.

NOTE: "elasticesarch" in the method name is a typo; kept as-is since CI/test
selection may reference the existing name.
"""
docker_compose_yml = stringIO()
with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
setup = LocalSetup(argv=self.common_setup_args + ["master", "--no-elasticsearch"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
services = got["services"]
self.assertNotIn("elasticsearch", services)
self.assertNotIn("elasticsearch", services["apm-server"]["depends_on"])
@mock.patch(cli.__name__ + ".load_images")
def test_start_all(self, _ignore_load_images):
    """`start master --all` should bring up every known service."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--all"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    expected = {
        "apm-server", "elasticsearch", "kibana",
        "filebeat", "heartbeat", "metricbeat", "packetbeat",
        "opbeans-dotnet", "opbeans-go", "opbeans-java",
        "opbeans-load-generator", "opbeans-node", "opbeans-python",
        "opbeans-ruby", "opbeans-rum",
        "postgres", "redis",
    }
    self.assertSetEqual(set(yaml.load(docker_compose_yml)["services"]), expected)
@mock.patch(cli.__name__ + ".load_images")
def test_start_one_opbeans(self, _ignore_load_images):
    """Enabling one opbeans service should pull in its backing services and the load generator."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--with-opbeans-python"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    for required in ("redis", "postgres", "opbeans-load-generator"):
        self.assertIn(required, services)
@mock.patch(cli.__name__ + ".load_images")
def test_start_one_opbeans_without_loadgen(self, _ignore_load_images):
    """The per-service --no-opbeans-python-loadgen flag should suppress the load generator."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--with-opbeans-python",
                                                          "--no-opbeans-python-loadgen"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    for required in ("redis", "postgres"):
        self.assertIn(required, services)
    self.assertNotIn("opbeans-load-generator", services)
@mock.patch(cli.__name__ + ".load_images")
def test_start_one_opbeans_without_loadgen_global_arg(self, _ignore_load_images):
    """The global --no-opbeans-load-generator flag should suppress the load generator."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--with-opbeans-python",
                                                          "--no-opbeans-load-generator"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    for required in ("redis", "postgres"):
        self.assertIn(required, services)
    self.assertNotIn("opbeans-load-generator", services)
@mock.patch(cli.__name__ + ".load_images")
def test_start_opbeans_2nd(self, _ignore_load_images):
    """Second instances (01 suffix) of every opbeans service can be enabled together."""
    languages = ("dotnet", "node", "java", "go", "python", "ruby")
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        flags = ["--with-opbeans-{}01".format(lang) for lang in languages]
        setup = LocalSetup(argv=self.common_setup_args + ["master"] + flags)
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    for lang in languages:
        self.assertIn("opbeans-{}01".format(lang), services)
@mock.patch(cli.__name__ + ".load_images")
def test_start_all_opbeans_no_apm_server(self, _ignore_load_images):
    """With --no-apm-server, no opbeans service may depend on apm-server; backing deps stay intact."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--all-opbeans", "--no-apm-server"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    got = yaml.load(docker_compose_yml)
    expected_deps = (
        ("opbeans-node", {"postgres", "redis"}),
        ("opbeans-python", {"elasticsearch", "postgres", "redis"}),
        ("opbeans-ruby", {"elasticsearch", "postgres", "redis"}),
    )
    for service_name, deps in expected_deps:
        self.assertSetEqual(deps, set(got["services"][service_name]["depends_on"].keys()))
    for name, svc in got["services"].items():
        self.assertNotIn("apm-server", svc.get("depends_on", {}), "{} depends on apm-server".format(name))
@mock.patch(cli.__name__ + ".load_images")
def test_start_unsupported_version_pre_6_3(self, _ignore_load_images):
    """Unsupported pre-6.3 release versions should fall back to -platinum / -x-pack image flavours."""
    version = "1.2.3"
    self.assertNotIn(version, LocalSetup.SUPPORTED_VERSIONS)
    docker_compose_yml = stringIO()
    setup = LocalSetup(argv=self.common_setup_args + [version, "--release"])
    setup.set_docker_compose_path(docker_compose_yml)
    setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    self.assertEqual(
        "docker.elastic.co/elasticsearch/elasticsearch-platinum:{}".format(version),
        services["elasticsearch"]["image"])
    self.assertEqual(
        "docker.elastic.co/kibana/kibana-x-pack:{}".format(version),
        services["kibana"]["image"])
@mock.patch(cli.__name__ + ".load_images")
def test_start_unsupported_version(self, _ignore_load_images):
    """Unsupported 6.3+ versions should default to -SNAPSHOT image tags."""
    version = "6.9.5"
    self.assertNotIn(version, LocalSetup.SUPPORTED_VERSIONS)
    docker_compose_yml = stringIO()
    setup = LocalSetup(argv=self.common_setup_args + [version])
    setup.set_docker_compose_path(docker_compose_yml)
    setup()
    docker_compose_yml.seek(0)
    services = yaml.load(docker_compose_yml)["services"]
    self.assertEqual(
        "docker.elastic.co/elasticsearch/elasticsearch:{}-SNAPSHOT".format(version),
        services["elasticsearch"]["image"])
    self.assertEqual(
        "docker.elastic.co/kibana/kibana:{}-SNAPSHOT".format(version),
        services["kibana"]["image"])
@mock.patch(service.__name__ + ".resolve_bc")
@mock.patch(cli.__name__ + ".load_images")
def test_start_bc(self, mock_load_images, mock_resolve_bc):
"""--bc builds should use release-style image tags and preload only the docker packages of enabled services.

The fixture includes a logstash-docker project and a beats metricbeat
package; both appear in the load_images call because --with-logstash and
--with-metricbeat are passed.
"""
mock_resolve_bc.return_value = {
"projects": {
"elasticsearch": {
"packages": {
"elasticsearch-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../elasticsearch-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
"kibana": {
"packages": {
"kibana-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../kibana-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
"apm-server": {
"packages": {
"apm-server-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../apm-server-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
"beats": {
"packages": {
"metricbeat-6.9.5-linux-amd64-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../metricbeat-6.9.5-docker-image.tar.gz",
"type": "docker",
},
}
},
"logstash-docker": {
"packages": {
"logstash-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../logstash-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
},
}
docker_compose_yml = stringIO()
image_cache_dir = "/foo"
version = "6.9.5"
bc = "abcd1234"
self.assertNotIn(version, LocalSetup.SUPPORTED_VERSIONS)
setup = LocalSetup(argv=self.common_setup_args + [
version, "--bc", bc, "--image-cache-dir", image_cache_dir, "--with-logstash", "--with-metricbeat"])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
services = got["services"]
# bc builds use plain (non-SNAPSHOT) image tags
self.assertEqual(
"docker.elastic.co/elasticsearch/elasticsearch:{}".format(version),
services["elasticsearch"]["image"]
)
self.assertEqual("docker.elastic.co/kibana/kibana:{}".format(version), services["kibana"]["image"])
mock_load_images.assert_called_once_with(
{
"https://staging.elastic.co/.../elasticsearch-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../logstash-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../kibana-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../apm-server-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../metricbeat-6.9.5-docker-image.tar.gz",
},
image_cache_dir)
@mock.patch(service.__name__ + ".resolve_bc")
@mock.patch(cli.__name__ + ".load_images")
def test_start_bc_oss(self, mock_load_images, mock_resolve_bc):
"""--oss --bc builds should select the -oss image flavour and preload the matching -oss docker packages."""
mock_resolve_bc.return_value = {
"projects": {
"elasticsearch": {
"packages": {
"elasticsearch-oss-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../elasticsearch-oss-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
"kibana": {
"packages": {
"kibana-oss-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../kibana-oss-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
"apm-server": {
"packages": {
"apm-server-oss-6.9.5-docker-image.tar.gz": {
"url": "https://staging.elastic.co/.../apm-server-oss-6.9.5-docker-image.tar.gz",
"type": "docker"
},
},
},
},
}
docker_compose_yml = stringIO()
image_cache_dir = "/foo"
version = "6.9.5"
bc = "abcd1234"
self.assertNotIn(version, LocalSetup.SUPPORTED_VERSIONS)
setup = LocalSetup(argv=self.common_setup_args + [
version, "--oss", "--bc", bc, "--image-cache-dir", image_cache_dir])
setup.set_docker_compose_path(docker_compose_yml)
setup()
docker_compose_yml.seek(0)
got = yaml.load(docker_compose_yml)
services = got["services"]
self.assertEqual(
"docker.elastic.co/elasticsearch/elasticsearch-oss:{}".format(version),
services["elasticsearch"]["image"]
)
self.assertEqual("docker.elastic.co/kibana/kibana-oss:{}".format(version), services["kibana"]["image"])
mock_load_images.assert_called_once_with(
{
"https://staging.elastic.co/.../elasticsearch-oss-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../kibana-oss-6.9.5-docker-image.tar.gz",
"https://staging.elastic.co/.../apm-server-oss-6.9.5-docker-image.tar.gz",
},
image_cache_dir)
@mock.patch(service.__name__ + ".resolve_bc")
@mock.patch(cli.__name__ + ".load_images")
def test_start_bc_with_release(self, mock_load_images, mock_resolve_bc):
    """A released apm-server version is used (and not downloaded) while ES/Kibana come from the BC."""
    mock_resolve_bc.return_value = {
        "projects": {
            "elasticsearch": {
                "packages": {
                    "elasticsearch-6.9.5-docker-image.tar.gz": {
                        "url": "https://staging.elastic.co/.../elasticsearch-6.9.5-docker-image.tar.gz",
                        "type": "docker"
                    },
                },
            },
            "kibana": {
                "packages": {
                    "kibana-6.9.5-docker-image.tar.gz": {
                        "url": "https://staging.elastic.co/.../kibana-6.9.5-docker-image.tar.gz",
                        "type": "docker"
                    },
                },
            },
            "apm-server": {
                "packages": {
                    "apm-server-6.9.5-docker-image.tar.gz": {
                        "url": "https://staging.elastic.co/.../apm-server-6.9.5-docker-image.tar.gz",
                        "type": "docker"
                    },
                },
            },
        },
    }
    docker_compose_yml = stringIO()
    image_cache_dir = "/foo"
    version = "6.9.5"
    apm_server_version = "6.2.4"
    bc = "abcd1234"
    self.assertNotIn(version, LocalSetup.SUPPORTED_VERSIONS)
    setup = LocalSetup(
        argv=self.common_setup_args + [version, "--bc", bc, "--image-cache-dir", image_cache_dir,
                                       "--apm-server-version", apm_server_version, "--apm-server-release"])
    setup.set_docker_compose_path(docker_compose_yml)
    setup()
    docker_compose_yml.seek(0)
    # safe_load: the generated compose file is plain YAML; yaml.load without an
    # explicit Loader is deprecated (PyYAML >= 5.1) and unsafe by default.
    got = yaml.safe_load(docker_compose_yml)
    services = got["services"]
    self.assertEqual(
        "docker.elastic.co/apm/apm-server:{}".format(apm_server_version),
        services["apm-server"]["image"]
    )
    # apm-server package must NOT be in the download set: it is a release image.
    mock_load_images.assert_called_once_with(
        {
            "https://staging.elastic.co/.../elasticsearch-6.9.5-docker-image.tar.gz",
            "https://staging.elastic.co/.../kibana-6.9.5-docker-image.tar.gz",
        },
        image_cache_dir)
@mock.patch(service.__name__ + ".resolve_bc")
def test_docker_download_image_url(self, mock_resolve_bc):
    """image_download_url() resolves the right staging package for default vs OSS flavors."""
    mock_resolve_bc.return_value = {
        "projects": {
            "elasticsearch": {
                "commit_hash": "abc1234",
                "commit_url": "https://github.com/elastic/elasticsearch/commits/abc1234",
                "packages": {
                    "elasticsearch-6.3.10-docker-image.tar.gz": {
                        "url": "https://staging.elastic.co/.../elasticsearch-6.3.10-docker-image.tar.gz",
                        "type": "docker"
                    },
                    "elasticsearch-oss-6.3.10-docker-image.tar.gz": {
                        "url": "https://staging.elastic.co/.../elasticsearch-oss-6.3.10-docker-image.tar.gz",
                        "type": "docker"
                    }
                }
            }
        }
    }
    Case = collections.namedtuple("Case", ("service", "expected", "args"))
    common_args = (("image_cache_dir", ".images"),)
    cases = [
        # post-6.3
        Case(Elasticsearch,
             "https://staging.elastic.co/.../elasticsearch-6.3.10-docker-image.tar.gz",
             dict(bc="be84d930", version="6.3.10")),
        Case(Elasticsearch,
             "https://staging.elastic.co/.../elasticsearch-oss-6.3.10-docker-image.tar.gz",
             dict(bc="be84d930", oss=True, version="6.3.10")),
    ]
    for case in cases:
        args = dict(common_args)
        if case.args:
            args.update(case.args)
        # named `svc`, not `service`, to avoid shadowing the imported
        # `service` module referenced in this method's decorator
        svc = case.service(**args)
        got = svc.image_download_url()
        self.assertEqual(case.expected, got)
@mock.patch(cli.__name__ + ".load_images")
def test_apm_server_tls(self, _ignore_load_images):
    """--apm-server-enable-tls turns on SSL flags and switches client URLs to https."""
    docker_compose_yml = stringIO()
    with mock.patch.dict(LocalSetup.SUPPORTED_VERSIONS, {'master': '8.0.0'}):
        setup = LocalSetup(argv=self.common_setup_args + ["master", "--with-opbeans-python",
                                                          "--apm-server-enable-tls"])
        setup.set_docker_compose_path(docker_compose_yml)
        setup()
    docker_compose_yml.seek(0)
    # safe_load: the generated compose file is plain YAML; yaml.load without an
    # explicit Loader is deprecated (PyYAML >= 5.1) and unsafe by default.
    got = yaml.safe_load(docker_compose_yml)
    services = set(got["services"])
    self.assertIn("apm-server", services)
    self.assertIn("opbeans-python", services)
    apm_server = got["services"]["apm-server"]
    self.assertIn("apm-server.ssl.enabled=true", apm_server["command"])
    self.assertIn("apm-server.ssl.key=/usr/share/apm-server/config/certs/tls.key", apm_server["command"])
    self.assertIn("apm-server.ssl.certificate=/usr/share/apm-server/config/certs/tls.crt", apm_server["command"])
    self.assertIn("https://localhost:8200/", apm_server["healthcheck"]["test"])
    opbeans_python = got["services"]["opbeans-python"]
    self.assertIn("ELASTIC_APM_SERVER_URL=https://apm-server:8200", opbeans_python["environment"])
    self.assertIn("ELASTIC_APM_JS_SERVER_URL=https://apm-server:8200", opbeans_python["environment"])
def test_parse(self):
    """parse_version() keeps the numeric dotted components and drops suffixes."""
    expectations = {
        "6.3": [6, 3],
        "6.3.0": [6, 3, 0],
        "6.3.1": [6, 3, 1],
        "6.3.10": [6, 3, 10],
        "6.3.10-alpha1": [6, 3, 10],
    }
    for raw, expected in expectations.items():
        self.assertEqual(expected, parse_version(raw))
| StarcoderdataPython |
8045238 | #!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import shutil
import tempfile
import hashlib
from st2common.util.monkey_patch import use_select_poll_workaround
# Apply the select/poll monkey patch before the imports below pull in
# modules that depend on it (see st2common.util.monkey_patch).
use_select_poll_workaround()
from lockfile import LockFile
from lockfile import LockTimeout
from git.repo import Repo
from gitdb.exc import BadName
from st2common.services import packs as pack_service
from st2tests.base import BaseActionTestCase
import st2common.util.pack_management
from st2common.util.pack_management import eval_repo_url
from pack_mgmt.download import DownloadGitRepoAction
# Static pack-index fixture served by the mocked pack_service.fetch_pack_index
# (see the class decorator below). "test3" additionally carries a
# stackstorm_version constraint used by the version-check tests.
PACK_INDEX = {
    "test": {
        "version": "0.4.0",
        "name": "test",
        "repo_url": "https://github.com/StackStorm-Exchange/stackstorm-test",
        "author": "st2-dev",
        "keywords": ["some", "search", "another", "terms"],
        "email": "<EMAIL>",
        "description": "st2 pack to test package management pipeline"
    },
    "test2": {
        "version": "0.5.0",
        "name": "test2",
        "repo_url": "https://github.com/StackStorm-Exchange/stackstorm-test2",
        "author": "stanley",
        "keywords": ["some", "special", "terms"],
        "email": "<EMAIL>",
        "description": "another st2 pack to test package management pipeline"
    },
    "test3": {
        "version": "0.5.0",
        "stackstorm_version": ">=1.6.0, <2.2.0",
        "name": "test3",
        "repo_url": "https://github.com/StackStorm-Exchange/stackstorm-test3",
        "author": "stanley",
        "keywords": ["some", "special", "terms"],
        "email": "<EMAIL>",
        "description": "another st2 pack to test package management pipeline"
    }
}
@mock.patch.object(pack_service, 'fetch_pack_index', mock.MagicMock(return_value=(PACK_INDEX, {})))
class DownloadGitRepoActionTestCase(BaseActionTestCase):
    """Tests for DownloadGitRepoAction.

    The pack index is mocked at class level to return the static PACK_INDEX
    fixture, and git clones are replaced in setUp() by copying a local test
    fixture, so no network access or real git repository is required.
    """

    action_cls = DownloadGitRepoAction

    def setUp(self):
        """Stub Repo.clone_from and os.path.expanduser; create temp work dirs."""
        super(DownloadGitRepoActionTestCase, self).setUp()
        clone_from = mock.patch.object(Repo, 'clone_from')
        self.addCleanup(clone_from.stop)
        self.clone_from = clone_from.start()
        expand_user = mock.patch.object(os.path, 'expanduser',
                                        mock.MagicMock(return_value=tempfile.mkdtemp()))
        self.addCleanup(expand_user.stop)
        self.expand_user = expand_user.start()
        self.repo_base = tempfile.mkdtemp()
        self.repo_instance = mock.MagicMock()
        type(self.repo_instance).active_branch = mock.Mock()

        def side_effect(url, to_path, **kwargs):
            # Since we have no way to pass pack name here, we would have to derive it from repo url
            fixture_name = url.split('/')[-1]
            fixture_path = os.path.join(self._get_base_pack_path(), 'tests/fixtures', fixture_name)
            shutil.copytree(fixture_path, to_path)
            return self.repo_instance

        self.clone_from.side_effect = side_effect

    def tearDown(self):
        """Remove the temp repo base and the mocked home directory."""
        shutil.rmtree(self.repo_base)
        shutil.rmtree(self.expand_user())

    def test_run_pack_download(self):
        """A single pack is cloned into an md5-named temp dir and installed."""
        action = self.get_action_instance()
        result = action.run(packs=['test'], abs_repo_base=self.repo_base)
        # The clone target directory name is the md5 of the repo URL.
        temp_dir = hashlib.md5(PACK_INDEX['test']['repo_url'].encode()).hexdigest()
        self.assertEqual(result, {'test': 'Success.'})
        self.clone_from.assert_called_once_with(PACK_INDEX['test']['repo_url'],
                                                os.path.join(os.path.expanduser('~'), temp_dir))
        self.assertTrue(os.path.isfile(os.path.join(self.repo_base, 'test/pack.yaml')))
        self.repo_instance.git.checkout.assert_called()
        self.repo_instance.git.branch.assert_called()
        self.repo_instance.git.checkout.assert_called()

    def test_run_pack_download_existing_pack(self):
        """Downloading a pack that is already installed still succeeds."""
        action = self.get_action_instance()
        action.run(packs=['test'], abs_repo_base=self.repo_base)
        self.assertTrue(os.path.isfile(os.path.join(self.repo_base, 'test/pack.yaml')))
        result = action.run(packs=['test'], abs_repo_base=self.repo_base)
        self.assertEqual(result, {'test': 'Success.'})

    def test_run_pack_download_multiple_packs(self):
        """Each requested pack gets its own clone call and install directory."""
        action = self.get_action_instance()
        result = action.run(packs=['test', 'test2'], abs_repo_base=self.repo_base)
        temp_dirs = [
            hashlib.md5(PACK_INDEX['test']['repo_url'].encode()).hexdigest(),
            hashlib.md5(PACK_INDEX['test2']['repo_url'].encode()).hexdigest()
        ]
        self.assertEqual(result, {'test': 'Success.', 'test2': 'Success.'})
        self.clone_from.assert_any_call(PACK_INDEX['test']['repo_url'],
                                        os.path.join(os.path.expanduser('~'), temp_dirs[0]))
        self.clone_from.assert_any_call(PACK_INDEX['test2']['repo_url'],
                                        os.path.join(os.path.expanduser('~'), temp_dirs[1]))
        self.assertEqual(self.clone_from.call_count, 2)
        self.assertTrue(os.path.isfile(os.path.join(self.repo_base, 'test/pack.yaml')))
        self.assertTrue(os.path.isfile(os.path.join(self.repo_base, 'test2/pack.yaml')))

    @mock.patch.object(Repo, 'clone_from')
    def test_run_pack_download_error(self, clone_from):
        """A clone failure propagates out of action.run()."""
        clone_from.side_effect = Exception('Something went terribly wrong during the clone')
        action = self.get_action_instance()
        self.assertRaises(Exception, action.run, packs=['test'], abs_repo_base=self.repo_base)

    def test_run_pack_download_no_tag(self):
        """Requesting a version with no matching git ref raises ValueError."""
        self.repo_instance.commit.side_effect = BadName
        action = self.get_action_instance()
        self.assertRaises(ValueError, action.run, packs=['test=1.2.3'],
                          abs_repo_base=self.repo_base)

    def test_run_pack_lock_is_already_acquired(self):
        """A held lock file makes the download time out with LockTimeout."""
        action = self.get_action_instance()
        temp_dir = hashlib.md5(PACK_INDEX['test']['repo_url'].encode()).hexdigest()
        original_acquire = LockFile.acquire

        def mock_acquire(self, timeout=None):
            # Shrink the timeout so the test fails fast instead of blocking.
            original_acquire(self, timeout=0.1)

        LockFile.acquire = mock_acquire
        try:
            lock_file = LockFile('/tmp/%s' % (temp_dir))
            # Acquire a lock (file) so acquire inside download will fail
            with open(lock_file.lock_file, 'w') as fp:
                fp.write('')
            expected_msg = 'Timeout waiting to acquire lock for'
            self.assertRaisesRegexp(LockTimeout, expected_msg, action.run, packs=['test'],
                                    abs_repo_base=self.repo_base)
        finally:
            os.unlink(lock_file.lock_file)
            LockFile.acquire = original_acquire

    def test_run_pack_lock_is_already_acquired_force_flag(self):
        # Lock is already acquired but force is true so it should be deleted and released
        action = self.get_action_instance()
        temp_dir = hashlib.md5(PACK_INDEX['test']['repo_url'].encode()).hexdigest()
        original_acquire = LockFile.acquire

        def mock_acquire(self, timeout=None):
            original_acquire(self, timeout=0.1)

        LockFile.acquire = mock_acquire
        try:
            lock_file = LockFile('/tmp/%s' % (temp_dir))
            # Acquire a lock (file) so acquire inside download will fail
            with open(lock_file.lock_file, 'w') as fp:
                fp.write('')
            result = action.run(packs=['test'], abs_repo_base=self.repo_base, force=True)
        finally:
            LockFile.acquire = original_acquire
        self.assertEqual(result, {'test': 'Success.'})

    def test_run_pack_download_v_tag(self):
        """A version spec resolves against a 'v'-prefixed git tag."""
        def side_effect(ref):
            # Only refs that look like v-tags resolve; everything else is unknown.
            if ref[0] != 'v':
                raise BadName()
            return mock.MagicMock(hexsha='abcdef')

        self.repo_instance.commit.side_effect = side_effect
        self.repo_instance.git = mock.MagicMock(
            branch=(lambda *args: 'master'),
            checkout=(lambda *args: True)
        )
        action = self.get_action_instance()
        result = action.run(packs=['test=1.2.3'], abs_repo_base=self.repo_base)
        self.assertEqual(result, {'test': 'Success.'})

    @mock.patch.object(st2common.util.pack_management, 'get_valid_versions_for_repo',
                       mock.Mock(return_value=['1.0.0', '2.0.0']))
    def test_run_pack_download_invalid_version(self):
        """An unknown version lists the valid versions in the error message."""
        self.repo_instance.commit.side_effect = lambda ref: None
        action = self.get_action_instance()
        expected_msg = ('is not a valid version, hash, tag or branch.*?'
                        'Available versions are: 1.0.0, 2.0.0.')
        self.assertRaisesRegexp(ValueError, expected_msg, action.run,
                                packs=['test=2.2.3'], abs_repo_base=self.repo_base)

    def test_download_pack_stackstorm_version_identifier_check(self):
        """The pack's stackstorm_version range is enforced unless force=True."""
        action = self.get_action_instance()

        # Version is satisfied
        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '2.0.0'
        result = action.run(packs=['test3'], abs_repo_base=self.repo_base)
        self.assertEqual(result['test3'], 'Success.')

        # Pack requires a version which is not satisfied by current StackStorm version
        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '2.2.0'
        expected_msg = ('Pack "test3" requires StackStorm ">=1.6.0, <2.2.0", but '
                        'current version is "2.2.0"')
        self.assertRaisesRegexp(ValueError, expected_msg, action.run, packs=['test3'],
                                abs_repo_base=self.repo_base)

        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '2.3.0'
        expected_msg = ('Pack "test3" requires StackStorm ">=1.6.0, <2.2.0", but '
                        'current version is "2.3.0"')
        self.assertRaisesRegexp(ValueError, expected_msg, action.run, packs=['test3'],
                                abs_repo_base=self.repo_base)

        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '1.5.9'
        expected_msg = ('Pack "test3" requires StackStorm ">=1.6.0, <2.2.0", but '
                        'current version is "1.5.9"')
        self.assertRaisesRegexp(ValueError, expected_msg, action.run, packs=['test3'],
                                abs_repo_base=self.repo_base)

        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '1.5.0'
        expected_msg = ('Pack "test3" requires StackStorm ">=1.6.0, <2.2.0", but '
                        'current version is "1.5.0"')
        self.assertRaisesRegexp(ValueError, expected_msg, action.run, packs=['test3'],
                                abs_repo_base=self.repo_base)

        # Version is not met, but force=true parameter is provided
        st2common.util.pack_management.CURRENT_STACKSTORM_VERSION = '1.5.0'
        result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=True)
        self.assertEqual(result['test3'], 'Success.')

    def test_download_pack_python_version_check(self):
        """The pack's python_versions metadata is checked against the running interpreter."""
        action = self.get_action_instance()

        # No python_versions attribute specified in the metadata file
        with mock.patch('st2common.util.pack_management.get_pack_metadata') as \
                mock_get_pack_metadata:
            mock_get_pack_metadata.return_value = {
                'name': 'test3',
                'stackstorm_version': '',
                'python_versions': []
            }

            st2common.util.pack_management.six.PY2 = True
            st2common.util.pack_management.six.PY3 = False
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '2.7.11'

            result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=False)
            self.assertEqual(result['test3'], 'Success.')

        # Pack works with Python 2.x installation is running 2.7
        with mock.patch('st2common.util.pack_management.get_pack_metadata') as \
                mock_get_pack_metadata:
            mock_get_pack_metadata.return_value = {
                'name': 'test3',
                'stackstorm_version': '',
                'python_versions': ['2']
            }

            st2common.util.pack_management.six.PY2 = True
            st2common.util.pack_management.six.PY3 = False
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '2.7.5'

            result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=False)
            self.assertEqual(result['test3'], 'Success.')

            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '2.7.12'

            result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=False)
            self.assertEqual(result['test3'], 'Success.')

        # Pack works with Python 2.x installation is running 3.5
        with mock.patch('st2common.util.pack_management.get_pack_metadata') as \
                mock_get_pack_metadata:
            mock_get_pack_metadata.return_value = {
                'name': 'test3',
                'stackstorm_version': '',
                'python_versions': ['2']
            }

            st2common.util.pack_management.six.PY2 = False
            st2common.util.pack_management.six.PY3 = True
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '3.5.2'

            expected_msg = (r'Pack "test3" requires Python 2.x, but current Python version is '
                            '"3.5.2"')
            self.assertRaisesRegexp(ValueError, expected_msg, action.run,
                                    packs=['test3'], abs_repo_base=self.repo_base, force=False)

        # Pack works with Python 3.x installation is running 2.7
        with mock.patch('st2common.util.pack_management.get_pack_metadata') as \
                mock_get_pack_metadata:
            mock_get_pack_metadata.return_value = {
                'name': 'test3',
                'stackstorm_version': '',
                'python_versions': ['3']
            }

            st2common.util.pack_management.six.PY2 = True
            st2common.util.pack_management.six.PY3 = False
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '2.7.2'

            expected_msg = (r'Pack "test3" requires Python 3.x, but current Python version is '
                            '"2.7.2"')
            self.assertRaisesRegexp(ValueError, expected_msg, action.run,
                                    packs=['test3'], abs_repo_base=self.repo_base, force=False)

        # Pack works with Python 2.x and 3.x installation is running 2.7 and 3.6.1
        with mock.patch('st2common.util.pack_management.get_pack_metadata') as \
                mock_get_pack_metadata:
            mock_get_pack_metadata.return_value = {
                'name': 'test3',
                'stackstorm_version': '',
                'python_versions': ['2', '3']
            }

            st2common.util.pack_management.six.PY2 = True
            st2common.util.pack_management.six.PY3 = False
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '2.7.5'

            result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=False)
            self.assertEqual(result['test3'], 'Success.')

            st2common.util.pack_management.six.PY2 = False
            st2common.util.pack_management.six.PY3 = True
            st2common.util.pack_management.CURRENT_PYTHON_VERSION = '3.6.1'

            result = action.run(packs=['test3'], abs_repo_base=self.repo_base, force=False)
            self.assertEqual(result['test3'], 'Success.')

    def test_resolve_urls(self):
        """eval_repo_url() normalizes shorthand and passes full URLs through."""
        url = eval_repo_url(
            "https://github.com/StackStorm-Exchange/stackstorm-test")
        self.assertEqual(url, "https://github.com/StackStorm-Exchange/stackstorm-test")

        url = eval_repo_url(
            "https://github.com/StackStorm-Exchange/stackstorm-test.git")
        self.assertEqual(url, "https://github.com/StackStorm-Exchange/stackstorm-test.git")

        url = eval_repo_url("StackStorm-Exchange/stackstorm-test")
        self.assertEqual(url, "https://github.com/StackStorm-Exchange/stackstorm-test")

        url = eval_repo_url("git://StackStorm-Exchange/stackstorm-test")
        self.assertEqual(url, "git://StackStorm-Exchange/stackstorm-test")

        url = eval_repo_url("git://StackStorm-Exchange/stackstorm-test.git")
        self.assertEqual(url, "git://StackStorm-Exchange/stackstorm-test.git")

        url = eval_repo_url("<EMAIL>:foo/bar.git")
        self.assertEqual(url, "<EMAIL>:<EMAIL>/bar.git")

        url = eval_repo_url("file:///home/vagrant/stackstorm-test")
        self.assertEqual(url, "file:///home/vagrant/stackstorm-test")

        url = eval_repo_url("file://localhost/home/vagrant/stackstorm-test")
        self.assertEqual(url, "file://localhost/home/vagrant/stackstorm-test")

        url = eval_repo_url('ssh://<user@host>/AutomationStackStorm')
        self.assertEqual(url, 'ssh://<user@host>/AutomationStackStorm')

        url = eval_repo_url('ssh://joe@local/AutomationStackStorm')
        self.assertEqual(url, 'ssh://joe@local/AutomationStackStorm')

    def test_run_pack_download_edge_cases(self):
        """
        Edge cases to test:

        default branch is master, ref is pack version
        default branch is master, ref is branch name
        default branch is master, ref is default branch name
        default branch is not master, ref is pack version
        default branch is not master, ref is branch name
        default branch is not master, ref is default branch name
        """

        def side_effect(ref):
            if ref[0] != 'v':
                raise BadName()
            return mock.MagicMock(hexsha='abcdeF')

        self.repo_instance.commit.side_effect = side_effect
        edge_cases = [
            ('master', '1.2.3'),
            ('master', 'some-branch'),
            ('master', 'default-branch'),
            ('master', None),
            ('default-branch', '1.2.3'),
            ('default-branch', 'some-branch'),
            ('default-branch', 'default-branch'),
            ('default-branch', None)
        ]

        for default_branch, ref in edge_cases:
            self.repo_instance.git = mock.MagicMock(
                branch=(lambda *args: default_branch),
                checkout=(lambda *args: True)
            )

            # Set default branch
            self.repo_instance.active_branch.name = default_branch
            self.repo_instance.active_branch.object = 'aBcdef'
            self.repo_instance.head.commit = 'aBcdef'

            # Fake gitref object
            gitref = mock.MagicMock(hexsha='abcDef')

            # Fool _get_gitref into working when its ref == our ref
            def fake_commit(arg_ref):
                if arg_ref == ref:
                    return gitref
                else:
                    raise BadName()
            self.repo_instance.commit = fake_commit
            self.repo_instance.active_branch.object = gitref

            action = self.get_action_instance()

            if ref:
                packs = ['test=%s' % (ref)]
            else:
                packs = ['test']

            result = action.run(packs=packs, abs_repo_base=self.repo_base)
            self.assertEqual(result, {'test': 'Success.'})

    def test_run_pack_dowload_local_git_repo_detached_head_state(self):
        """A local repo in detached-HEAD state installs without any checkout calls."""
        action = self.get_action_instance()

        type(self.repo_instance).active_branch = \
            mock.PropertyMock(side_effect=TypeError('detached head'))

        result = action.run(packs=['file:///stackstorm-test'], abs_repo_base=self.repo_base)
        self.assertEqual(result, {'test': 'Success.'})

        # Verify function has bailed out early
        self.repo_instance.git.checkout.assert_not_called()
        self.repo_instance.git.branch.assert_not_called()
        self.repo_instance.git.checkout.assert_not_called()
| StarcoderdataPython |
48266 | <reponame>Fiware/ops.Maintenance-calendar
from flask import json
from maintenance_calendar.parser.json.json_parser import JSONParser
class JSONNodeCollectionParser(JSONParser):
    """JSON parser for node collections; currently inherits all behavior from JSONParser."""

    def __init__(self):
        # Delegate all initialization to the base JSON parser.
        super(JSONNodeCollectionParser, self).__init__()
4850877 | <gh_stars>0
import sys
import os

# Demo: print the script arguments next to a few os.path.join results
# (an empty first component is simply dropped by join).
root_join = os.path.join("", "/")
index_join = os.path.join('', 'index.html')
param_join = os.path.join('', 'param')
nested_join = os.path.join('', 'param/bar')
print(sys.argv, root_join, index_join, param_join, nested_join)
1910338 | # importing the necessory library
import numpy as np
import pandas as pd
# Read the bounding box of a STEP model from its vertex points.
def dimension(file):
    """Parse a STEP (ISO 10303-21) file and return its bounding-box size.

    The file is scanned line by line: CARTESIAN_POINT entities provide
    coordinates, VERTEX_POINT entities select which points are model
    vertices, and LENGTH_UNIT provides the unit name.

    :param file: path to the STEP file.
    :return: tuple ``(length, width, height, length_unit)`` — the X/Y/Z
        extents of the vertex cloud rounded to 2 decimals, plus the unit
        name (e.g. ``"Milli Metre"``; empty string if none is declared).
    """
    data = []           # (entity_id, x, y, z) for every CARTESIAN_POINT line
    v_info = []         # (vertex_id, cartesian_point_id) for every VERTEX_POINT line
    vertices_data = []
    length_unit = ""    # fallback when the file declares no LENGTH_UNIT

    # Context manager closes the file handle (previously leaked).
    with open(file, 'r') as f:
        content = f.readlines()

    for line in content:
        if "CARTESIAN_POINT" in line:
            d = line.replace(",", "").split(" ")
            # token 0 is the entity id ("#nn"); tokens 7-9 are X, Y, Z
            data.append((d[0], d[7], d[8], d[9]))
        elif "LENGTH_UNIT" in line:
            d = line.replace(",", "").split(" ")
            # tokens 11-12 hold the SI unit keywords, e.g. ".MILLI." ".METRE."
            length_unit = (d[11] + " " + d[12]).replace(".", "").title()
        elif "VERTEX_POINT " in line:
            dt = line.replace(",", "").split(" ")
            # token 5 references the CARTESIAN_POINT entity of this vertex
            v_info.append((dt[0], dt[5]))

    # Index the point table by entity id so vertex references resolve directly.
    df = pd.DataFrame(data, columns=['Line_no', 'x', 'y', 'z'])
    df = df.set_index("Line_no")
    for _, point_id in v_info:
        vertices_data.append((df.at[point_id, 'x'],
                              df.at[point_id, 'y'],
                              df.at[point_id, 'z']))

    # Coordinates were collected as strings; convert once for the min/max math.
    vertices_data = np.array(vertices_data).astype(float)
    x_min = np.amin(vertices_data[:, 0])
    y_min = np.amin(vertices_data[:, 1])
    z_min = np.amin(vertices_data[:, 2])
    x_max = np.amax(vertices_data[:, 0])
    y_max = np.amax(vertices_data[:, 1])
    z_max = np.amax(vertices_data[:, 2])

    # Bounding-box extents along each axis.
    length = round(x_max - x_min, 2)
    width = round(y_max - y_min, 2)
    height = round(z_max - z_min, 2)
    return length, width, height, length_unit
5007762 | <filename>data_utils/process_manifest.py
#!/usr/bin/env python
# coding=utf-8
""" Data pre-process """
import json
from collections import Counter
def get_path_trans(manifest_path="data/aishell/manifest.train"):
    '''Collect wav paths, transcripts and durations from a manifest file.

    Each manifest line is a JSON object with at least the keys
    ``audio_filepath``, ``text`` and ``duration``.

    :param manifest_path: path to manifest file
    :type manifest_path: str
    :return: tuple ``(path_to_wav, transcript, duration)`` of parallel lists
    '''
    path_to_wav = []
    transcript = []
    duration = []
    # Context manager closes the file handle (previously leaked); iterating
    # the file avoids loading all lines into memory at once.
    with open(manifest_path, "r") as f:
        for line in f:
            man_dict = json.loads(line)
            path_to_wav.append(man_dict["audio_filepath"])
            transcript.append(man_dict["text"])
            duration.append(man_dict["duration"])
    return path_to_wav, transcript, duration
def create_dict(vocab):
    '''Build the sorted word list and word-to-index map from a vocab file.

    :param vocab: path to vocab.txt (one word per line; duplicates allowed)
    :type vocab: str
    :return: tuple ``(word_size, words, word_num_map)`` — vocabulary size,
        sorted unique words, and dict mapping each word to its index.
    '''
    # Context manager closes the file handle (previously leaked).
    with open(vocab, 'r') as f:
        total_words = [word.strip() for word in f]
    # sorted(set(...)) gives the same result as sorting a Counter's keys.
    words = sorted(set(total_words))
    word_size = len(words)
    word_num_map = dict(zip(words, range(word_size)))
    # print() call works on both Python 2 and 3 (was a Py2-only statement).
    print("word_size: %d" % word_size)
    return word_size, words, word_num_map
if __name__ == "__main__":
    # Smoke-test both parsers against sample data files.
    get_path_trans("../data/aishell/manifest.test")
    create_dict("../data/aishell/vocab.txt")  # fixed: was misspelled `creat_dict` (NameError)
| StarcoderdataPython |
3208936 | """Module to download METAR's and TAF's from Ogimet.com"""
import re
from calendar import monthrange
from datetime import datetime
from bs4 import BeautifulSoup
from requests import get
from .console import console
from .logger import logger
from .sanitize import sanitize_metar, sanitize_taf
# Captured once at import time; used for the default request year.
TODAY = datetime.now()
# Exact marker line Ogimet returns when the slow-query quota is exhausted.
OGIMET_LIMIT_MESAGE = "#Sorry, Your quota limit for slow queries rate has been reached"


class OgimetQuotaLimitError(Exception):
    """Raised when Ogimet.com reports its slow-query quota has been reached.

    Ogimet answers back-to-back requests with the quota message stored in
    ``OGIMET_LIMIT_MESAGE``; after that you must wait roughly five minutes
    before METAR data can be fetched again. This exception signals that the
    marker (or an equivalent failure line) was detected in the response.
    """

    def __init__(self, message=OGIMET_LIMIT_MESAGE):
        super().__init__(f"{message}. Wait a few minutes to execute a new request. :)")
def _join_line_separated_metars(metar_list: list, icao_code: str):
    """Merge wrapped METAR lines into complete, sanitized reports.

    Ogimet wraps long METARs over several text lines; a report only ends at
    a line containing ``=``. Leading runs of two or more spaces are
    collapsed to a single space before the pieces are joined.

    Args:
        metar_list (list): raw report lines as scraped from Ogimet.
        icao_code (str): station identifier forwarded to ``sanitize_metar``.

    Returns:
        list: one sanitized METAR string per complete report.
    """
    complete_reports = []
    pending = ""
    for raw_line in metar_list:
        pending += re.sub(r"^\s{2,}", " ", raw_line)
        if "=" not in raw_line:
            # Report continues on the next line; keep accumulating.
            continue
        complete_reports.append(sanitize_metar(pending, icao_code))
        pending = ""
    return complete_reports
def download_data_from_ogimet(icao_code: str, month: int, year=TODAY.year):
    """Scrape one month of METAR and TAF reports for a station from Ogimet.

    :param icao_code: ICAO station identifier (e.g. ``"MROC"``).
    :param month: month number 1-12 to request.
    :param year: year to request; defaults to the year at import time
        (``TODAY`` is evaluated once when the module loads).
    :return: tuple ``(metars, tafs)`` of report strings.
    :raises OgimetQuotaLimitError: when Ogimet reports its quota limit or a
        "Fallo de consulta" failure (note: currently swallowed by the broad
        ``except`` below).
    """
    metars = []
    tafs = []
    month_range = monthrange(year=year, month=month)
    # Ogimet expects a zero-padded two-digit month in the query string.
    if month >= 10:
        month = f"{month}"
    else:
        month = f"0{month}"
    # Request the whole month: day 01 00:00 through the last day 23:59.
    url = f"https://www.ogimet.com/display_metars2.php?lugar={icao_code.lower()}&tipo=ALL&ord=DIR&nil=SI&fmt=txt&ano={year}&mes={month}&day=01&hora=00&anof={year}&mesf={month}&dayf={month_range[1]}&horaf=23&minf=59&enviar=Ver"
    try:
        res = get(url)
        html_soup = BeautifulSoup(res.text, "html.parser")
        data = html_soup.text.split("\n")
        # print(data)
        # print(f'DATA: {data[:50]}')
        # print(f'DATA: {data[-50:-1]}')
        if OGIMET_LIMIT_MESAGE in data:
            raise OgimetQuotaLimitError()
        elif "Fallo de consulta" in data[-1]:
            raise OgimetQuotaLimitError(message=data[-1])
        else:
            # Extract the METAR's from data
            # NOTE(review): offset 32 assumes a fixed page header length in
            # Ogimet's text output — confirm against a live response.
            for line in data[32:]:
                if line == "":
                    break
                metars.append(line)
            # Extract the TAF's from data
            # NOTE(review): the +6 skip presumably jumps the section header
            # between the METAR and TAF listings — confirm.
            for line in data[32 + len(metars) + 6 :]:
                line = sanitize_taf(line, icao_code)
                tafs.append(line.strip())
            # Rensemble METAR's separated in several lines
            metars = _join_line_separated_metars(metars, icao_code)
            return metars, tafs
    except Exception as error:
        # NOTE(review): this broad except also swallows OgimetQuotaLimitError
        # raised above and then terminates the interpreter via exit().
        logger.error("Some error ocurred: {}".format(error))
        exit()
if __name__ == "__main__":
    # Ad-hoc manual check: fetch January data for station MROC.
    download_data_from_ogimet("mroc", 1)
| StarcoderdataPython |
3300944 | <reponame>lubosz/universe
#!/usr/bin/python2
#coding: UTF-8
import sys, os, operator, subprocess
def shell(command):
    """Run *command* through the shell and return its output as a list of lines.

    Returns an empty list when the command fails (non-zero exit) or
    produces no output. stderr is folded into the captured output.

    NOTE(review): written for Python 2 (see the `python2` shebang) — under
    Python 3 check_output returns bytes, so split("\\n") would need a
    decode step. The command string is passed to the shell unquoted, so
    only trusted input should reach this function.
    """
    output = ""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError:
        #print(colors['red']+command + " has no output"+ colors['clear'])
        pass
    if output:
        fileList = output.strip().split("\n")
    else:
        fileList = []
    return fileList
# Collect every distinct "obj:" line from the current valgrind suppression
# file, then emit a generic Memcheck:Leak suppression block for each one,
# named after the library's base name.
foo = shell('grep "obj:" valgrind/suppressions/current.supp | sort | uniq')
for bar in foo:
    name = bar.strip()
    # Reduce "obj:/usr/lib/libfoo.so.1.2" to just "libfoo".
    name = name.replace("obj:/lib/","")
    name = name.replace("obj:/usr/lib/","")
    name = name.replace(".so","")
    names = name.split(".")
    name = names[0]
    # Backslash continuations keep the literal on one logical line; the
    # embedded \n's produce the multi-line valgrind suppression format.
    suppression ="{\n\
   %s\n\
   Memcheck:Leak\n\
   ...\n\
   %s\n\
   ...\n\
   }" % (name, bar.strip())
    print suppression
| StarcoderdataPython |
6442756 | import datetime
from dateutil.relativedelta import relativedelta
import pytz
from pytz import timezone
import urllib2
from bs4 import BeautifulSoup
import numpy as np
from random import randint
# Makes a matrix with 1 in ij if ij in same group and 0 otherwise
def CnxtnMtrx(X, A):
    """Accumulate cluster co-membership counts into the connection matrix.

    ``X[i, j]`` is incremented by 1 whenever ``A[i] == A[j]`` (items i and j
    fall in the same cluster). ``X`` is updated in place and also returned.

    :param X: square ``len(A) x len(A)`` numpy array accumulator
    :param A: sequence of cluster labels, one per item
    :return: the updated matrix ``X``
    """
    labels = np.asarray(A)
    # Vectorized pairwise-equality add replaces the former O(n^2) Python
    # double loop with a single broadcast comparison done in C.
    X += labels[:, None] == labels[None, :]
    return X
def Cluster(Tickers,start,end,k):
    """Cluster tickers into k groups and return their co-membership matrix.

    Runs k-means many times, keeps the most frequent clustering, and turns
    it into a connection matrix via CnxtnMtrx.

    NOTE(review): `Data` and `kmeansIterations` are project-local modules;
    the exact shapes of `L_cluster`, `C` and `L_clust_rep` come from
    kmeansIterations.ClustIter and are assumed here — confirm against that
    module. `C` is presumably a per-clustering frequency count.
    """
    runs = 3000
    import Data
    # Volatility/returns features for the requested tickers and date range.
    VolReturns = Data.Data(Tickers,start,end)
    ##########################################################
    # Run kmeans clustering algorithm a number of times and return most frequent result
    import kmeansIterations
    L_cluster,C,L_clust_rep = kmeansIterations.ClustIter(VolReturns,Tickers,k,runs)
    # Pick the clustering that occurred most often across the runs.
    FinalClust = L_cluster[C.index(max(C))]
    FinalClust_rep = L_clust_rep[C.index(max(C))]
    Cnxtn = CnxtnMtrx(np.zeros((len(FinalClust_rep),len(FinalClust_rep))),FinalClust_rep)
    return Cnxtn
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.