id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
121597 | <reponame>aiiaaiiaaiia/Project<gh_stars>0
import os
from flask import Flask, flash, request, redirect, url_for, render_template, jsonify, Response
from werkzeug.utils import secure_filename
from flask import send_from_directory
from rt_atrt_lib import RT_ATRT
import time
import dw_vdo_url
import threading
# Directory (under the app's static folder) where uploaded videos are saved.
UPLOAD_FOLDER = 'static/uploadedvideo'
# UPLOAD_FOLDER = os.path.dirname('./uploadedvideo')
# Video file extensions accepted by the uploader (checked in allowed_file()).
ALLOWED_EXTENSIONS = {'avi', 'wmv', 'mpeg', 'mov', 'divx', 'dat', 'flv', 'mp4', 'avchd', 'webm', 'mkv'}
# Serve ./static at the URL root and look up Jinja templates in ./template.
app = Flask(__name__, static_url_path='', static_folder='static', template_folder='template')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True if *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/vdo_info',methods = ['GET'])
def vdo_info_handle():
    """Return the current video metadata as JSON for the front-end.

    Exposes the title, caption position, selected OCR languages and the
    translation target chosen on the upload/processing pages.
    """
    # These module-level globals are populated by upload_file() /
    # set_process_param(); hitting this route before an upload raises
    # NameError (HTTP 500) -- pre-existing behavior, not changed here.
    global title
    global position
    global language
    global trans_language
    res = {'title': title , 'position': position, 'language':language, 'trans_language': trans_language}
    return jsonify(res)
@app.route('/upload_vdo', methods=['GET', 'POST'])
def upload_file():
    """Upload page: on POST, save the chosen video into UPLOAD_FOLDER and
    redirect to /vdo_uploaded; on GET (or an invalid POST), render the form.

    Also resets the module-level processing state (title, position,
    language, trans_language) shared with the other route handlers.
    """
    global file_name
    global title
    global inputlang
    global position
    global language
    global trans_language
    # inputlang = ''
    title = ''
    # Sentinel *string* 'None' (not None): templates below concatenate it
    # into a static-file path, so it must be a str.
    file_name = 'None'
    position = ''
    language = []
    trans_language = ''
    # text = importpythonmodule.displaytext.generate_text()
    # if request.method == "GET":
    #     autodetect = 'autodetect'
    #     return redirect(request.url)
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            # flash('No file part')
            pass
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            # flash('No selected file')
            pass
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # Save under a sanitized name, but keep the raw client-supplied
            # name in the shared globals.  NOTE(review): later routes build
            # paths from the raw name, which differs from the saved file
            # whenever secure_filename() rewrites it -- confirm.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            file_name = file.filename
            title = file_name
            # return redirect(url_for('uploaded_file', filename=filename))
            return redirect('/vdo_uploaded')
    return render_template('upload.html', processed_video = url_for('static', filename="uploadedvideo/"+file_name))
@app.route('/vdo_uploaded', methods=['GET', 'POST'])
def set_process_param():
    """Show the uploaded video and, on POST, start subtitle processing.

    POST reads the submitted form (text position, OCR languages,
    translation target), stores the choices in module-level globals,
    launches RT_ATRT.run_process on a background thread and renders the
    progress page.  GET simply renders the uploaded video.
    """
    global file_name
    global output_path
    global vdo_name
    global vdo_output
    global position
    global language
    global trans_language
    if request.method == 'POST':
        # variable
        print("get pose from JS")
        print(request.form["text_position"])
        # flat=False keeps every form key mapped to a *list* of values.
        dict_form = request.form.to_dict(flat=False)
        # NOTE(review): split('.')[0] plus the hard-coded ".mp4" assumes the
        # uploaded file is <name>.mp4; other allowed extensions would break
        # this path -- confirm against the uploader.
        vdo_name = file_name.split('.')[0]
        video = "static/uploadedvideo/"+ vdo_name + ".mp4"
        position = ''
        language = []
        trans_language = ''
        output_path = "static/process_output/"
        vdo_output = output_path + "processed_" + vdo_name + ".mp4"
        # Checkbox names double as values: collect the OCR languages and the
        # (single) translation target from the submitted form keys.
        for element in dict_form:
            print(element)
            if element in ['english','chinese','french','thai','italian','japanese','korean','german','spanish','auto detect']:
                language.append(element)
            elif element in ['trans_thai']:
                trans_language = 'thai'
            elif element in ['trans_eng']:
                trans_language = 'english'
        position = dict_form['text_position'][0]
        print('video : ', video)
        print('position : ', position)
        print('language : ', language)
        print('trans_language : ', trans_language)
        print('output : ', output_path)
        ## Process Here
        # test_python.run_process(1,output_path)
        rt_atrt_process = RT_ATRT(video, position, language, trans_language, output_path, frame_similarity_threshold = 0.99)
        # Run the long video-processing job off the request thread so the
        # response can return immediately; /progress polls its progress file.
        run_thread = threading.Thread(target=rt_atrt_process.run_process, name="rt_atrt_process", args=[])
        run_thread.start()
        ##
        return render_template('progress_bar.html', processed_video = url_for('static', filename="uploadedvideo/"+file_name),
                               processed_text = url_for('static', filename = "empty.txt"))
    return render_template('render_vdo.html', processed_video = url_for('static', filename="uploadedvideo/"+file_name),
                           processed_text = url_for('static', filename = "empty.txt"))
@app.route('/finish_process')
def finish_process():
    """Render the processed video and its extracted-text file.

    The front-end navigates here once /progress reports completion; the
    file names follow the convention set up in set_process_param().
    """
    global output_path
    global vdo_name
    return render_template('render_vdo.html', processed_video = url_for('static', filename = "process_output/" + "processed_" + vdo_name + ".mp4"),
                           processed_text = url_for('static', filename = "process_output/" + vdo_name + "_text.txt"))
@app.route('/url_vdo', methods=['GET', 'POST'])
def process_url():
    """Accept a video URL instead of a file upload.

    On POST, download the video via dw_vdo_url and hand off to
    /vdo_uploaded; on GET, render the URL-entry form.
    """
    global file_name
    global title
    global inputlang
    file_name = 'None'
    title = 'None'
    if request.method == 'POST':
        # variable
        url = request.form['yt_url']
        # video_link_url() downloads the clip and returns (title, base name).
        title, file_name = dw_vdo_url.video_link_url(url)
        file_name = file_name + '.mp4'
        return redirect('/vdo_uploaded')
    return render_template('link.html')
@app.route('/progress')
def progress():
    """Server-Sent Events endpoint streaming the processing progress.

    Polls the "<vdo_name>_progress.txt" file written by the RT_ATRT worker
    thread once per second and emits its integer percentage as an SSE
    ``data:`` frame until the value exceeds 100.

    NOTE(review): if the worker never writes a value above 100, the
    generator only stops when the client closes the EventSource connection
    -- pre-existing behavior, kept as-is.
    """
    global output_path
    global vdo_name
    def generate():
        x = 0
        while x <= 100:
            # 'with' guarantees the handle is closed even if int() raises on
            # a partial/empty read (the original leaked the handle there).
            with open(output_path + vdo_name + "_progress.txt", 'r') as progress_file:
                x = int(progress_file.read())
            yield "data:" + str(x) + "\n\n"
            time.sleep(1)
    return Response(generate(), mimetype= 'text/event-stream')
if __name__=="__main__":
    app.run(host='localhost', port=5000, debug=True)
1706099 | <reponame>kmarcini/Project-Euler-Python
###########################
#
# #693 Finite Sequence Generator - Project Euler
# https://projecteuler.net/problem=693
#
# Code by <NAME>
#
###########################
| StarcoderdataPython |
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.
    Args:
       ints(list): list of integers containing one or more integers
    Non-list input and the empty list both yield (None, None).
    """
    # Reject anything that is not a list up front.
    if not isinstance(ints, list) or not ints:
        return None, None
    # Delegate the scan to the C-level builtins: one pass each,
    # identical results to the original manual loop.
    return min(ints), max(ints)
# Example Test Case of Ten Integers
import random
# Test case 1: random int array
l = [i for i in range(0, 10)]  # a list containing 0 - 9
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
# Should print "Pass" as the result should be (0, 9)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Test case 2: empty array
print(f"Test case 2 - empty array")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
# Test case 3: array with single item
print(f"Test case 3 - array with single item")
# Should print "Pass" as the result should be (1, 1)
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
# Test case 4: non array input
print(f"Test case 4 - non array input")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
| StarcoderdataPython |
#!/usr/bin/env python3
# imports go here
import pandas as pd
# NOTE(review): plotly.plotly is the legacy online-mode API (pre plotly 4);
# py.plot() uploads the chart to the user's plot.ly account -- confirm env.
import plotly.plotly as py
from plotly.graph_objs import *
#
# Free Coding session for 2015-05-24
# Written by <NAME>
#
# Project the installed cost-per-watt of solar over 30 years and plot it:
# linear decline to an assumed 2025 price, then growth at inflation.
years = list(range(2015, 2045))
YEARS_TO_SIMULATE = 30
INFLATION_RATE = 0.02
KWH_PRICE = 0.10 # 2015 dollars (defined but unused below)
INSTALL_COST_PER_WATT = 3.00 # in 2015
INSTALL_COST_PER_WATT_2025 = 2.00
cost_per_watt = [INSTALL_COST_PER_WATT]
for i in range(YEARS_TO_SIMULATE):
    if i < 10:
        # linear interpolation from the 2015 price down to the 2025 price
        cost = ((INSTALL_COST_PER_WATT_2025 - INSTALL_COST_PER_WATT) / 10) * i + INSTALL_COST_PER_WATT
    else:
        # beyond 2025, grow the most recent price with inflation
        # (cost_per_watt[i] is the last element appended so far)
        cost = cost_per_watt[i] * (1+INFLATION_RATE)
    cost_per_watt.append(cost)
# cost_per_watt holds 31 entries vs 30 years; zip() silently drops the extra.
data = zip(years, cost_per_watt)
df = pd.DataFrame(data=list(data), columns=['Year', 'Cost Per Watt'])
line = Scatter(x=df['Year'], y=df['Cost Per Watt'])
data = Data([line])  # rebinds `data` from the zip above to the plotly trace list
layout = Layout(
    title='Cost per Watt',
    xaxis=XAxis(
        title='Year',
        showgrid=False,
        zeroline=False
    ),
    yaxis=YAxis(
        title='Cost Per Watt',
        showline=False
    )
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='Cost Per watt')
| StarcoderdataPython |
3293611 | <reponame>twguest/FELPy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FELPY
@author: twguest
Created on Tue Mar 15 21:46:07 2022
__version__ = "1.0.1"
__email__ = "<EMAIL>"
"""
def get_fwhm():
    """Placeholder for a future full-width-at-half-maximum helper; returns None."""
    return None
if __name__ == '__main__':
    pass
1640413 | # Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""
CoreState.json sample structure:
{
"coreSequence": {
"number": 3,
"action": "<Assessment/Deployment>",
"completed": "<true/false>",
"lastHeartbeat": "<timestamp-in-UTC>",
"processIds": ["", ...]
}
}
"""
import collections
from extension.src.Constants import Constants
class CoreStateHandler(object):
    """ Responsible for managing CoreState.json file """

    def __init__(self, dir_path, json_file_handler):
        self.dir_path = dir_path
        self.file = Constants.CORE_STATE_FILE
        self.json_file_handler = json_file_handler
        self.core_state_fields = Constants.CoreStateFields

    def read_file(self):
        """ Fetches config from CoreState.json. Returns None if no content/file found """
        fields = self.core_state_fields
        content = self.json_file_handler.get_json_file_content(
            self.file, self.dir_path, raise_if_not_found=False)
        # Named tuple mirroring the "coreSequence" structure documented above.
        CoreStateValues = collections.namedtuple(
            fields.parent_key,
            [fields.number, fields.action, fields.completed,
             fields.last_heartbeat, fields.process_ids])
        if content is None:
            return None
        read = self.json_file_handler.get_json_config_value_safely
        return CoreStateValues(
            read(content, fields.number, fields.parent_key),
            read(content, fields.action, fields.parent_key),
            read(content, fields.completed, fields.parent_key),
            read(content, fields.last_heartbeat, fields.parent_key),
            read(content, fields.process_ids, fields.parent_key))
| StarcoderdataPython |
3202134 | <filename>editorcontainer/editor/editor.py
"""
editor
======
Contains the :py:class:`.Editor`, that is, the graphical part
of the application to write/edit files.
"""
import os
from pygments import highlight
from pygments import lexers
from pygments.lexers import get_lexer_for_mimetype
from pygments import styles
from pygments.util import ClassNotFound
from pygments.formatters import BBCodeFormatter
from kivy.uix.codeinput import CodeInput
from kivy.uix.floatlayout import FloatLayout
from kivy.extras.highlight import KivyLexer
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.properties import StringProperty
from kivy.core.window import Window
from kivy.base import EventLoop
class Editor(CodeInput):
    """Inherits from :py:class:`kivy.uix.codeinput.CodeInput`.
    It's a :py:class:`.kivy.uix.widget.Widget` that is adapted to highlight its
    contents.
    """
    last_click = ''
    """Stores the last click pressed.
    This is, stores a value like 'left', 'right', 'scrollup'...
    """
    background_color_default_te = [1,1,1,1]
    """Default background color for the editor.
    It's set when the 'default TE' style is selected from the :py:class:`footer.footer.Footer`
    and when the application is opened in default.
    """
    _path = StringProperty(None)
    """Path to the file (without the file's name) that this :py:class:`.Editor` has open."""
    _name = StringProperty(None)
    """Name of the file (and tab) for this :py:class:`.Editor`"""
    style_name_not_bound = 'default TE'
    """Stores the style name but without being bound.
    Because it's not bound, it can store any name. Like 'default TE'
    """

    def __init__(self, **kwargs):
        super(Editor, self).__init__(**kwargs)
        # Markers delimiting a text span; set here but not used in this class.
        self.text_from = 0
        self.text_to = 0

    # Let's override this method to brute force the invisible text for the moment.
    def paste(self):
        ''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
        into the :class:`~kivy.uix.textinput.TextInput` at current cursor
        position.
        .. versionadded:: 1.8.0
        '''
        super(Editor, self).paste()
        # HACK: re-assign the tail of the text to force a refresh after paste.
        # NOTE(review): the slice [0:l-2] drops the second-to-last character
        # before re-appending the last one, so one character is lost on every
        # paste into non-empty text -- confirm whether that is intended.
        if len(self.text) > 0:
            l = len(self.text)
            c = self.text[l-1]
            self.text = self.text[0:l-2]
            self.text = self.text + c

    def change_style(self, style = None):
        """Change the style of the editor.
        The style includes the background_color, the cursor color and the text
        (keywords, variable names...). It means that changes the highlighting style.
        :param style: Name of the style to which to change.
        """
        if style is not None:
            if style == 'default TE':
                # 'default TE' is this app's pseudo-style: pygments 'default'
                # plus the app's own background color.
                self.style_name = 'default'
                self.style_name_not_bound = 'default TE'
                self.background_color = self.background_color_default_te
            elif style == 'default' and self.style_name == 'default':
                # Bounce through another style so the bound trait fires an
                # update even though the final name is unchanged.
                self.style_name = 'algol'
                self.style_name = 'default'
                self.style_name_not_bound = 'default'
            else:
                try:
                    self.style_name = style
                    self.style_name_not_bound = style
                except ClassNotFound as err:
                    print(err, '{}: unknown style'.format(style))
        if self.style:
            # Pick a cursor color that contrasts with the style's background.
            background_c = get_color_from_hex(self.style.background_color)
            color_sum = sum(background_c[0:3])
            if color_sum >= 0.5:
                self.cursor_color = [0, 0, 0, 1]
            else:
                self.cursor_color = [1, 1, 1, 1]
        self._trigger_refresh_text()

    def text_changed(self, *args):
        """Manage event when :py:attr:`.Editor.text` changes.
        Changes the content of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button_string`.
        When that attribute is changed the text of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button`
        is automatically updated.
        This means this method is used to indicate the stated of the tab (unsaved, saved). The mark is an
        asterisk (*).
        :param \*args: Default arguments. Not used.
        """
        self.tab.close_button_string = '*\nx'
        self.tab.saved = False

    def save_tab(self, all_tabs=False):
        """Save a tab.
        Writes the contents of this :py:class:`.Editor` to the file indicated by
        :py:attr:`._path` and :py:attr:`._name`.
        :param all_tabs: Boolean that indicates wheter just this :py:attr:`.Editor` 's tab \
        is being saved (:py:obj:`False`) or all the tabs open in the application are being \
        saved (:py:obj:`True`). When all_tabs is :py:obj:`False`, if the contents of this \
        :py:class:`.Editor` haven't been saved then a filechooser is shown.
        """
        if self._name is not None:
            try:
                complete_path = os.path.join(self._path, self._name)
                with open(complete_path,'w+') as file:
                    file.write(self.text)
                self.tab.close_button_string = 'x'
                self.tab.saved = True
            except PermissionError as err:
                # NOTE(review): `path` is undefined here (should presumably be
                # `complete_path`); this handler raises NameError if triggered.
                print(err, "You don't have the required access rights"
                      " to write to: {0}".format(path), sep = '\n')
            except IsADirectoryError as err:
                print(err, "Cannot save file as directory", sep = '\n')
        elif not all_tabs:
            # Never saved before: delegate to the File menu's Save As dialog.
            file_menu = self.editor_container.parent.menu_bar.file_menu
            file_menu.save_as()

    # Let's override this method to be able to use the right
    # click menu.
    def cancel_selection(self):
        '''Cancel current selection (if any).
        '''
        self._selection_from = self._selection_to = self.cursor_index()
        self.selection_text = u''
        self._selection = False
        self._selection_finished = True
        self._selection_touch = None
        #self._trigger_update_graphics()

    # Let's override this method, too, to be able to use the right
    # click menu.
    def on_cursor(self, instance, value):
        """Manage event when this editor's cursor changes."""
        # Update all the graphics -- but not for right clicks or scroll
        # events, so the context menu / scrolling is not disturbed.
        if self.last_click not in ['right', 'scrolldown', 'scrollup']:
            self._trigger_update_graphics()

    def change_lexer(self, mimetype = None):
        """Change the lexer of this :py:class:`.Editor`.
        The lexer is what takes care of recognizing the keywords, variable names, etc.
        :param mimetype: The mimetype for which a lexer should be found. The lexer is \
        changed to that found with this mimetype.
        :returns: The name of the lexer finally selected.
        """
        if mimetype is not None:
            try:
                # If the mimetype is 'text/plain' and the extension
                # of the file is '.kv', then a kivylexer should be used.
                if mimetype == 'text/plain' and os.path.splitext(self._name)[1] == '.kv':
                    self.lexer = KivyLexer()
                else:
                    self.lexer = get_lexer_for_mimetype(mimetype)
            except ClassNotFound as err:
                print(err, 'Unsopported type {}'.format(mimetype), sep='\n')
                self.lexer = lexers.TextLexer()
            finally:
                # NOTE(review): returning from `finally` is a smell -- it also
                # replaces any in-flight exception with this return value.
                return self.lexer.name
        elif self._name is not None:
            # If the mimetype is 'text/plain' and the extension
            # of the file is '.kv', then a kivylexer should be used.
            if os.path.splitext(self._name)[1] == '.kv':
                self.lexer = KivyLexer()
            else:
                self.lexer = lexers.TextLexer()
        else:
            self.lexer = lexers.TextLexer()
        return self.lexer.name

    def propagate_editor_container(self, editor_container):
        """Propagate the :py:class:`~editorcontainer.editorcontainer.EditorContainer`
        to this :py:class:`.Editor`.
        :param editor_container: Should be a reference to :py:attr:`azaharTEA.Container.editor_container`, \
        the :py:class:`~editorcontainer.editorcontainer.EditorContainer` of the application.
        """
        self.editor_container = editor_container
| StarcoderdataPython |
1657054 | # -*- coding: utf-8 -*-
from flask import current_app
from lxml import etree
namespaces = {'m': 'http://www.loc.gov/MARC21/slim',
'o': 'http://www.openarchives.org/OAI/2.0/'}
def get_control_field(xml, field_tag):
    """Return the text content of MARC controlfield(s) with the given tag."""
    xpath = './o:controlfield[@tag="{}"]/text()'.format(field_tag)
    return xml.xpath(xpath, namespaces=namespaces)
def get_subfield(xml, field_tag, subfield_code):
    """Return the text content of MARC datafield subfield(s) for tag/code."""
    xpath = ('./o:datafield[@tag="{tag}"]/o:subfield[@code="{code}"]/text()'
             .format(tag=field_tag, code=subfield_code))
    return xml.xpath(xpath, namespaces=namespaces)
def test_record_by_id(app_client, test_record):
    """End-to-end check of the OAI-PMH GetRecord MARC21 serialization.

    Fetches the fixture record through /oai2d and asserts that every mapped
    field (control number, author, title, DOI, journal, license, copyright,
    abstract, dates, publisher and attached files) round-trips correctly.
    """
    assert 'control_number' in test_record
    control_number = test_record['control_number']
    assert 'id' in test_record.get('_oai', {})
    identifier = test_record['_oai']['id']
    response = app_client.get('/oai2d?verb=GetRecord&metadataPrefix=marc21&identifier=%s' % identifier)
    # Extract the MARC <record> element embedded in the OAI <metadata> wrapper.
    xml = etree.fromstring(response.data).find('.//o:metadata/o:record', namespaces=namespaces)
    # Control fields: record id and last-modified timestamp.
    assert get_control_field(xml, '001') == [control_number]
    assert get_control_field(xml, '005') == ['20190221143842.0']
    # Author (100) and title (245).
    assert get_subfield(xml, '100', 'a') == ['Salmhofer, Manfred']
    assert get_subfield(xml, '100', 'u') == [u'Institut für Theoretische Physik, Universität Heidelberg, '
                                             u'Philosophenweg 19, 69120 Heidelberg, Germany']
    assert get_subfield(xml, '245', 'a') == [u'Renormalization in condensed matter: Fermionic systems – '
                                             u'from mathematics to materials']
    assert get_subfield(xml, '245', '9') == ['Elsevier']
    # DOI (024) and host journal (773).
    assert get_subfield(xml, '024', 'a') == ['10.1016/j.nuclphysb.2018.07.004']
    assert get_subfield(xml, '024', '2') == ['DOI']
    assert get_subfield(xml, '773', 'y') == ['2018']
    assert get_subfield(xml, '773', 'p') == ['Nuclear Physics B']
    # License (540) and copyright (542).
    assert get_subfield(xml, '540', 'a') == ['CC-BY-3.0']
    assert get_subfield(xml, '540', 'u') == ['http://creativecommons.org/licenses/by/3.0/']
    assert get_subfield(xml, '542', 'd') == ['The Author']
    assert get_subfield(xml, '542', 'f') == ['The Author']
    # Abstract (520) and publication info (260).
    assert get_subfield(xml, '520', 'a') == ['Renormalization plays an important role in the theoretically and mathemat'
                                             'ically careful analysis of models in condensed-matter physics. I review s'
                                             'elected results about correlated-fermion systems, ranging from mathematic'
                                             'al theorems to applications in models relevant for materials science, suc'
                                             'h as the prediction of equilibrium phases of systems with competing order'
                                             'ing tendencies, and quantum criticality.']
    assert get_subfield(xml, '520', '9') == ['Elsevier']
    assert get_subfield(xml, '260', 'c') == ['2018-07-04']
    assert get_subfield(xml, '260', 'b') == ['Elsevier']
    # Attached files (856): one XML and one PDF served from this instance.
    files = get_subfield(xml, '856', 'u')
    assert len(files) == 2
    file_xml, file_pdf = files
    servername = current_app.config.get('SERVER_NAME')
    assert file_xml.startswith('http://%s/api/files/' % servername)
    assert file_xml.endswith('/10.1016/j.nuclphysb.2018.07.004.xml')
    assert file_pdf.startswith('http://%s/api/files/' % servername)
    assert file_pdf.endswith('/10.1016/j.nuclphysb.2018.07.004.pdf')
    assert get_subfield(xml, '856', 'y') == ['xml', 'pdf']
assert get_subfield(xml, '856', 'y') == ['xml', 'pdf']
def test_not_found(app_client):
    """A GetRecord request for an unknown identifier must yield HTTP 422."""
    url = ('/oai2d?verb=GetRecord&metadataPrefix=marc21'
           '&identifier=NOT_EXISTING_IDENTIFIER')
    resp = app_client.get(url)
    assert resp.status_code == 422
| StarcoderdataPython |
1716250 | from typing import List, NoReturn
from discord.ext import commands
from zenbot.models import PermissionLevel, Mute
from zenbot.helpers import has_cache, check_perms
from .command import ZenCommand, ZenCommandParameter
class MuteCommand(commands.Cog, ZenCommand):
    """Discord cog implementing the ``mute`` moderation command."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command("mute")
    @has_cache()
    async def mute(
        self,
        ctx: commands.Context,
        user_id: str,
        duration: str = "10m",
        *,
        reason: str = "Unreasoned",
    ):
        """Record a mute for *user_id* on the current server.

        Checks the invoker's permissions, then appends a Mute entry to the
        target member's record in the bot's data manager.
        """
        if not await check_perms(ctx, self.name):
            # TODO: embed error
            await ctx.send("you dont have perms :P")
            return False
        # TODO: make sure theres a user in the server with that id
        # NOTE(review): .members.get(user_id) returns None for an unknown id,
        # which would raise AttributeError on .mutes below -- confirm callers.
        mute = Mute(duration, ctx.author.id, reason)
        self.bot.data_manager.servers.get(ctx.guild.id).members.get(
            user_id
        ).mutes.append(mute)

    @property
    def name(self) -> str:
        return "mute"

    @property
    def description(self) -> str:
        return "Mute a user from chatting in any channels for a specified duration"

    @property
    def category(self) -> str:
        return "Moderation"

    @property
    def parameters(self) -> List[ZenCommandParameter]:
        # Metadata consumed by the help system: name, description, defaults.
        return [
            ZenCommandParameter(
                "user", "The ID of the user to be muted", required=True
            ),
            ZenCommandParameter("duration", "The duration of the mute", default="10m"),
            ZenCommandParameter(
                "reason", "The reason for the mute", default="Unreasoned"
            ),
        ]

    @property
    def perm_str(self) -> str:
        return "cmd.mute"

    @property
    def perm_level(self) -> PermissionLevel:
        return PermissionLevel.MODERATOR

    @property
    def example(self) -> str:
        return "{prefix}mute 842786340097097798 1d spam"
def setup(bot: commands.Bot) -> NoReturn:
    """Extension entry point used by discord.py's loader to register the cog."""
    cog = MuteCommand(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
4821155 | <gh_stars>0
"""
BSD 3-Clause License
Copyright (c) 2021, Netskope OSS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""Chronicle CLient."""
import requests
import json
from netskope.common.utils import add_user_agent
class ChronicleClient:
    """Thin REST client that pushes UDM events to the Chronicle ingestion API."""

    def __init__(self, configuration: dict, logger):
        """Initialize.

        :param configuration: plugin configuration; must contain "base_url"
            and "api_key".
        :param logger: logger used to report request failures.
        """
        self.configuration = configuration
        self.logger = logger

    def _api_request(self, transformed_data):
        """Call the API for data Ingestion.
        :transformed_data : The transformed data to be ingested.

        Raises a generic Exception describing the failure for known HTTP
        error classes, and re-raises any requests-level error after logging.
        """
        try:
            base_url = self.configuration["base_url"]
            url = f"{base_url.strip().strip('/')}/v1/udmevents"
            data = {"events": transformed_data}
            payload = json.dumps(data)
            headers = {"Content-Type": "application/json"}
            # NOTE(review): no timeout= is passed, so a stalled connection
            # can hang this call indefinitely -- consider adding one.
            response = requests.request(
                "POST",
                url,
                params={"key": self.configuration["api_key"].strip()},
                headers=add_user_agent(headers),
                data=payload,
            )
            status_code = response.status_code
            response_body = response.text
            # Map the documented Chronicle error classes to exceptions.
            # NOTE(review): other non-2xx codes (e.g. 401) fall through
            # silently and the success path is not validated -- confirm.
            if status_code >= 500:
                raise Exception(
                    "Server Error : Status Code: {}. Response: {}".format(
                        status_code, response_body
                    )
                )
            elif status_code == 429:
                raise Exception(
                    f"Either out of resource quota or reaching rate limiting."
                    f" Status Code: {status_code}. Response: {response_body}"
                )
            elif status_code in [400, 404]:
                raise Exception(
                    "Client specified an invalid argument . Status code: {}. Response: {}".format(
                        status_code, response_body
                    )
                )
            elif status_code == 499:
                raise Exception(
                    "Request Cancelled by the client : Status code: {}. Response: {}".format(
                        status_code, response_body
                    )
                )
            elif status_code == 403:
                raise Exception(
                    "Invalid Authorization. Status code: {}. Response: {}".format(
                        status_code, response_body
                    )
                )
        except requests.exceptions.HTTPError as err:
            self.logger.error(
                "Chronicle: HTTP error occurred: {}.".format(err)
            )
            raise
        except requests.exceptions.ConnectionError as err:
            self.logger.error(
                "Chronicle: Connection error occurred: {}.".format(err)
            )
            raise
        except requests.exceptions.Timeout as err:
            self.logger.error("Chronicle: Request timed out: {}.".format(err))
            raise
        except requests.exceptions.RequestException as err:
            self.logger.error(
                f"Chronicle: An error occurred while making REST API call to"
                f" Chronicle: {err}."
            )
            raise
        except Exception as err:
            self.logger.error(
                f"Chronicle: An error occurred while processing the "
                f"API response: {err}."
            )
            raise
| StarcoderdataPython |
4817956 | <reponame>jonathanrocher/pybleau<filename>pybleau/app/plotting/plot_style.py
from traits.api import Any, Bool, Button, Dict, Enum, Float, HasStrictTraits, \
Int, List, Property, Range, Trait, Tuple
from traitsui.api import EnumEditor, HGroup, Item, OKCancelButtons, \
RangeEditor, VGroup, View
from enable.api import ColorTrait, LineStyle
from enable.markers import MarkerNameDict, marker_names
from kiva.trait_defs.kiva_font_trait import font_families
from ..utils.chaco_colors import ALL_CHACO_PALETTES, ALL_MPL_PALETTES
DEFAULT_AXIS_LABEL_FONT_SIZE = 18
DEFAULT_TITLE_FONT_SIZE = 18
DEFAULT_TITLE_FONT = "modern"
DEFAULT_MARKER_SIZE = 6
DEFAULT_LINE_WIDTH = 1.3
DEFAULT_NUM_BINS = 10
DEFAULT_COLOR = "blue"
SPECIFIC_CONFIG_CONTROL_LABEL = "Specific controls"
DEFAULT_DIVERG_PALETTE = "hsv"
DEFAULT_CONTIN_PALETTE = "cool"
IGNORE_DATA_DUPLICATES = "ignore"
class BasePlotStyle(HasStrictTraits):
""" Styling parameters for building Chaco renderers.
These objects are designed to be Used by PlotFactories to generate a plot,
but also embedded
"""
#: Color of the renderer (ignore if more than 1)
color = ColorTrait(DEFAULT_COLOR)
#: Name of the palette to pick colors from in z direction
color_palette = Enum(values="_all_palettes")
#: List of available color palettes
_all_palettes = List(ALL_MPL_PALETTES)
#: Transparency of the renderer
alpha = Range(value=1., low=0., high=1.)
#: View elements for users to control these parameters
general_view_elements = Property(List)
#: Font used to draw the plot and axis titles
title_font_name = Enum(DEFAULT_TITLE_FONT, values="_all_fonts")
#: List of all available fonts
_all_fonts = List
#: Font size used to draw the plot title
title_font_size = Int(DEFAULT_TITLE_FONT_SIZE)
#: Font size used to draw the x axis title
x_title_font_size = Int(DEFAULT_AXIS_LABEL_FONT_SIZE)
#: Font size used to draw the y axis title
y_title_font_size = Int(DEFAULT_AXIS_LABEL_FONT_SIZE)
#: Font size used to draw the z axis title
z_title_font_size = Int(DEFAULT_AXIS_LABEL_FONT_SIZE)
#: Angle to rotate the X axis labels (string x values only)
x_axis_label_rotation = Int
#: Angle to rotate the Y axis labels (string x values only)
y_axis_label_rotation = Int
#: Whether to force display of all values along X axis or allow decimation
# (ONLY USED with string labels)
show_all_x_ticks = Bool
#: Low value of the x-axis range
x_axis_range_low = Float(-1)
#: High value of the x-axis range
x_axis_range_high = Float(-1)
#: Low value of the y-axis range
y_axis_range_low = Float(-1)
#: High value of the y-axis range
y_axis_range_high = Float(-1)
#: Automatic low value of the x-axis range for plot full view
auto_x_axis_range_low = Float(-1)
#: High value of the x-axis range
auto_x_axis_range_high = Float(-1)
#: Low value of the y-axis range
auto_y_axis_range_low = Float(-1)
#: High value of the y-axis range
auto_y_axis_range_high = Float(-1)
#: Button to reset the x-axis range to automatic values
reset_x_axis_range = Button("Reset")
#: Button to reset lthe y-axis range to automatic values
reset_y_axis_range = Button("Reset")
#: Linear or log scale for the independent variable?
index_scale = Enum("linear", "log")
#: Linear or log scale for the dependent variable?
value_scale = Enum("linear", "log")
#: List of attribute names to export to dictionary
dict_keys = List
#: View klass. Override to customize the views, for example their icon
view_klass = Any(default_value=View)
#: Keywords passed to create the view
view_kw = Dict
def __all_fonts_default(self):
return sorted(list(font_families.keys()))
def to_dict(self):
return {key: getattr(self, key) for key in self.dict_keys}
def _dict_keys_default(self):
return ["color", "color_palette", "alpha", "title_font_size",
"x_title_font_size", "y_title_font_size", "z_title_font_size",
"x_axis_label_rotation", "y_axis_label_rotation",
"title_font_name", "index_scale", "value_scale",
"x_axis_range_low", "x_axis_range_high", "y_axis_range_low",
"y_axis_range_high", "show_all_x_ticks"]
def _get_general_view_elements(self):
elemens = (
VGroup(
VGroup(
VGroup(
HGroup(
Item("x_axis_range_low", label="X-axis range"),
Item("x_axis_range_high", show_label=False),
Item("reset_x_axis_range", show_label=False)
),
HGroup(
Item("y_axis_range_low", label="Y-axis range"),
Item("y_axis_range_high", show_label=False),
Item("reset_y_axis_range", show_label=False)
),
show_border=True, label="Range controls"
),
HGroup(
Item("index_scale", label="X-axis scale"),
Item("value_scale", label="Y-axis scale"),
show_border=True, label="Scaling controls"
),
show_border=True, label="Axis controls"
),
VGroup(
HGroup(
Item('color', label="Color", style="custom"),
Item("color_palette"),
),
Item('alpha', label="Transparency"),
show_border=True, label="Color controls"
),
VGroup(
Item('title_font_name'),
HGroup(
Item('title_font_size',
editor=RangeEditor(low=9, high=32)),
Item('x_title_font_size',
editor=RangeEditor(low=9, high=32)),
Item('y_title_font_size',
editor=RangeEditor(low=9, high=32)),
Item('z_title_font_size',
editor=RangeEditor(low=9, high=32)),
),
show_border=True, label="Title font controls",
),
VGroup(
HGroup(
Item('x_axis_label_rotation',
editor=RangeEditor(low=0, high=360)),
Item('show_all_x_ticks',
label="Force all ticks/labels"),
),
Item('y_axis_label_rotation',
editor=RangeEditor(low=0, high=360)),
show_border=True, label="Axis label controls (str only)",
)
),
)
return elemens
def traits_view(self):
view = self.view_klass(*self.general_view_elements, **self.view_kw)
return view
def _reset_x_axis_range_changed(self):
self.x_axis_range_low = self.auto_x_axis_range_low
self.x_axis_range_high = self.auto_x_axis_range_high
def _reset_y_axis_range_changed(self):
self.y_axis_range_low = self.auto_y_axis_range_low
self.y_axis_range_high = self.auto_y_axis_range_high
def initialize_axis_ranges(self, plot, transform=None):
    """Copy the current axis ranges of the provided Plot (or renderer).

    Both the user-facing ``*_axis_range_*`` traits and their ``auto_*``
    backups are set, so the "reset range" buttons can restore them.

    Parameters
    ----------
    plot : Plot
        Object whose ``x_axis``/``y_axis`` mapper ranges are read.
    transform : callable or int, optional
        Callable applied to every bound, or an int number of digits to
        round each bound to (avoids polluting the UI with non-sensical
        digits). Defaults to the identity.
    """
    if transform is None:
        def transform(value):
            return value
    elif isinstance(transform, int):
        ndigits = transform

        def transform(value):
            return round(value, ndigits)

    # Same assignment order as before: x low, auto low, x high, auto
    # high, then the same for y.
    for axis in ("x", "y"):
        rng = getattr(plot, axis + "_axis").mapper.range
        for bound in ("low", "high"):
            val = transform(getattr(rng, bound))
            setattr(self, "%s_axis_range_%s" % (axis, bound), val)
            setattr(self, "auto_%s_axis_range_%s" % (axis, bound), val)
def _view_kw_default(self):
    """Default keyword arguments passed to the view factory."""
    kw = {}
    kw["resizable"] = True
    kw["buttons"] = OKCancelButtons
    kw["title"] = "Plot Styling"
    return kw
class ScatterPlotStyle(BasePlotStyle):
    """ Styling object for customizing scatter plots.
    """
    #: The type of marker to use
    marker = Trait("circle", MarkerNameDict,
                   editor=EnumEditor(values=marker_names))
    #: The size of the marker
    marker_size = Int(DEFAULT_MARKER_SIZE)
    def traits_view(self):
        """Scatter-specific controls followed by the shared style groups."""
        view = self.view_klass(
            VGroup(
                VGroup(
                    Item('marker', label="Marker"),
                    Item('marker_size',
                         editor=RangeEditor(low=1, high=20)),
                    show_border=True, label=SPECIFIC_CONFIG_CONTROL_LABEL
                ),
                *self.general_view_elements
            ),
            ** self.view_kw
        )
        return view
    def _dict_keys_default(self):
        """Exported style keys: the general keys plus the marker traits."""
        general_items = super(ScatterPlotStyle, self)._dict_keys_default()
        return general_items + ["marker", "marker_size"]
class BarPlotStyle(BasePlotStyle):
    """ Styling object for customizing bar plots.
    """
    #: Width of each bar. Leave as 0 to have it computed programmatically.
    bar_width = Float
    #: How to handle multiple bars from hue dim? Next to each other or stacked?
    # Stacked bars aren't working right in current Chaco
    bar_style = Enum(["group"]) # , "stack"
    #: How to handle multiple values contributing to a single bar?
    data_duplicate = Enum(["mean", IGNORE_DATA_DUPLICATES])
    #: Whether to display error bars when multiple values contribute to a bar
    show_error_bars = Bool
    #: Whether to force display of all values along X axis or allow decimation
    # (ONLY USED with string labels)
    show_all_x_ticks = Bool(True)
    def traits_view(self):
        """Bar-specific controls followed by the shared style groups."""
        # Error bars only make sense when duplicates are aggregated.
        allow_errors = "data_duplicate != '{}'".format(IGNORE_DATA_DUPLICATES)
        view = self.view_klass(
            VGroup(
                VGroup(
                    HGroup(
                        Item('bar_width'),
                        Item('bar_style', tooltip="When multiple bars, display"
                             " side by side or stacked?")
                    ),
                    HGroup(
                        Item('data_duplicate'),
                        Item("show_error_bars", label="Show error bars?",
                             enabled_when=allow_errors)
                    ),
                    show_border=True, label=SPECIFIC_CONFIG_CONTROL_LABEL
                ),
                *self.general_view_elements
            ),
            **self.view_kw
        )
        return view
    def _dict_keys_default(self):
        """Exported style keys: the general keys plus the bar traits.

        NOTE(review): ``show_all_x_ticks`` is declared above but not
        exported here — confirm whether that omission is intentional.
        """
        general_items = super(BarPlotStyle, self)._dict_keys_default()
        return general_items + ["bar_width", "bar_style", "show_error_bars",
                                "data_duplicate"]
class LinePlotStyle(BasePlotStyle):
    """ Styling object for customizing line plots.
    """
    #: Width of the rendered line, in points.
    line_width = Float(DEFAULT_LINE_WIDTH)
    #: Dash pattern of the rendered line.
    line_style = LineStyle("solid")
    def traits_view(self):
        """Line-specific controls followed by the shared style groups."""
        view = self.view_klass(
            VGroup(
                VGroup(
                    Item('line_width'),
                    Item('line_style', style="custom"),
                    show_border=True, label=SPECIFIC_CONFIG_CONTROL_LABEL
                ),
                *self.general_view_elements
            ),
            **self.view_kw
        )
        return view
    def _dict_keys_default(self):
        """Exported style keys: the general keys plus the line traits."""
        general_items = super(LinePlotStyle, self)._dict_keys_default()
        return general_items + ["line_width", "line_style"]
class HistogramPlotStyle(BasePlotStyle):
    """ Styling object for customizing histogram plots.
    """
    #: Number of bins: the bar width computed from that and the data range
    num_bins = Int(DEFAULT_NUM_BINS)
    #: bin start and end to use. Leave empty to use the data's min and max.
    bin_limits = Tuple
    #: Factor to apply to the default bar width. Set to 1 for bars to touch.
    bar_width_factor = Float(1.0)
    # Extra parameters not needed in the view ---------------------------------
    #: Meaning of the parameter above: data space or screen space?
    # Export but don't expose in the UI to make sure it is the data space since
    # the bar width computation makes that assumption.
    bar_width_type = Enum("data", "screen")
    def _dict_keys_default(self):
        """Exported style keys: the general keys plus the histogram traits."""
        general_items = super(HistogramPlotStyle, self)._dict_keys_default()
        return general_items + ["num_bins", "bin_limits", "bar_width_factor",
                                "bar_width_type"]
    def traits_view(self):
        """Histogram-specific controls followed by the shared style groups."""
        view = self.view_klass(
            VGroup(
                HGroup(
                    Item('num_bins', label="Number of bins"),
                    Item('bar_width_factor',
                         editor=RangeEditor(low=0.1, high=1.)),
                    show_border=True, label=SPECIFIC_CONFIG_CONTROL_LABEL
                ),
                *self.general_view_elements
            ),
            ** self.view_kw
        )
        return view
class HeatmapPlotStyle(BasePlotStyle):
    """ Styling object for customizing heatmap plots.
    """
    #: Name of the continuous color palette used to render the map.
    colormap_str = Enum(DEFAULT_CONTIN_PALETTE, values="_colormap_list")
    #: Available palette names backing the colormap_str enum.
    _colormap_list = List
    # Low/high bounds mapped to the ends of the colorbar.
    colorbar_low = Float
    colorbar_high = Float(1.0)
    # Pixel interpolation scheme used when drawing the image.
    interpolation = Enum("nearest", "bilinear", "bicubic")
    # Optional contour overlay and its rendering parameters.
    add_contours = Bool(False)
    contour_levels = Int(5)
    contour_styles = Enum("solid", "dash")
    contour_alpha = Float(0.9)
    contour_widths = Float(0.85)
    def _dict_keys_default(self):
        """Exported style keys: the general keys plus the heatmap traits."""
        general_items = super(HeatmapPlotStyle, self)._dict_keys_default()
        return general_items + ["colormap_str", "colorbar_low",
                                "colorbar_high", "interpolation",
                                "add_contours", "contour_levels",
                                "contour_styles", "contour_alpha",
                                "contour_widths"]
    def traits_view(self):
        """Heatmap-specific controls followed by the shared style groups."""
        view = self.view_klass(
            VGroup(
                VGroup(
                    HGroup(
                        Item("interpolation"),
                    ),
                    HGroup(
                        Item("add_contours"),
                        Item("contour_levels", label="Num. contours",
                             enabled_when="add_contours"),
                        Item("contour_styles", label="Contour line type",
                             enabled_when="add_contours"),
                        Item("contour_alpha",
                             editor=RangeEditor(low=0., high=1.),
                             label="Contour transparency",
                             enabled_when="add_contours"),
                        Item("contour_widths",
                             editor=RangeEditor(low=0.1, high=4.),
                             label="Contour widths",
                             enabled_when="add_contours"),
                        show_border=True,
                    ),
                    HGroup(
                        Item('colormap_str'),
                        Item('colorbar_low'),
                        Item('colorbar_high'),
                        show_border=True,
                    ),
                    show_border=True, label=SPECIFIC_CONFIG_CONTROL_LABEL
                ),
                *self.general_view_elements
            ),
            ** self.view_kw
        )
        return view
    def __colormap_list_default(self):
        """All Chaco palettes are offered by default."""
        return ALL_CHACO_PALETTES
| StarcoderdataPython |
136764 | <gh_stars>0
#!/usr/bin/env python
import numpy as np
from scipy.stats import pearsonr
import pylab as pl
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
# Use core/Type-1 fonts and TeX text rendering so the PDF output embeds
# cleanly in publications.
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
# NOTE(review): FONTSIZE appears unused in this script.
FONTSIZE = 20
# Only result rows computed with this kernel are kept by the parsers below.
KERNEL = 'rbf'
def intuit_metric(rank_file):
    """Guess the evaluation metric from a results file name."""
    # 'bacc' must be tested before 'auc'/'acc' since it contains 'acc'.
    for marker, metric in (('bacc', 'Balanced Accuracy'), ('auc', 'AUC')):
        if marker in rank_file:
            return metric
    return 'Accuracy'
def parse_oracle_results(parts):
    """Extract (dataset, stat) from an oracle results row, or None."""
    dataset, _, _, k, stat = parts
    if k != KERNEL or not dataset.endswith('+oracle'):
        return None
    # Strip the '+oracle' suffix from the dataset name.
    return dataset[:-len('+oracle')], float(stat)
def parse_nsk_results(parts):
    """Extract (dataset, stat) from an NSK results row, or None."""
    dataset, _, _, k, normalization, stat = parts
    if k == KERNEL and normalization == 'averaging':
        return dataset, float(stat)
    return None
def parse_twolevel_results(parts):
    """Extract (dataset, stat) from a two-level results row, or None."""
    dataset, _, _, k, second_level, stat = parts
    if k == second_level == KERNEL:
        return dataset, float(stat)
    return None
def main(nsk_results, twolevel_results, oracle_results, outputfile=None):
    """Correlate oracle error with the relative NSK -> two-level gain.

    Reads three CSV result files, intersects the datasets present in
    all of them, prints the Pearson correlation and p-value, and
    scatter-plots the points (to screen, or into *outputfile* as a PDF
    when given).
    """
    nsk_stats = dict()
    twolevel_stats = dict()
    oracle_stats = dict()
    for stats_file, stats_dict, parser in zip(
            (nsk_results, twolevel_results, oracle_results),
            (nsk_stats, twolevel_stats, oracle_stats),
            (parse_nsk_results, parse_twolevel_results, parse_oracle_results)):
        with open(stats_file, 'r') as f:
            for line in f:
                parts = line.strip().split(',')
                results = parser(parts)
                if results is None:
                    continue
                dset, stat = results
                stats_dict[dset] = stat
    good_datasets = sorted(set(nsk_stats)
                           & set(twolevel_stats)
                           & set(oracle_stats))
    # Single-argument function-call form of print runs under both
    # Python 2 and Python 3 (the original bare print statements were a
    # SyntaxError on Python 3).
    print('%d datasets.' % len(good_datasets))
    # X: oracle error; Y: relative improvement of two-level over NSK.
    X = [(1 - oracle_stats[d]) for d in good_datasets]
    Y = [(-(nsk_stats[d] - twolevel_stats[d])/nsk_stats[d]) for d in good_datasets]
    r, pval = pearsonr(X, Y)
    print('R: %f' % r)
    print('PVAL: %f' % pval)
    fig = pl.figure(figsize=(5, 5))
    ax = fig.add_subplot(111)
    ax.plot(X, Y, 'ko')
    if outputfile is None:
        pl.show()
    else:
        pdf = PdfPages(outputfile)
        pdf.savefig(fig, bbox_inches='tight')
        pdf.close()
if __name__ == '__main__':
    from optparse import OptionParser, OptionGroup
    parser = OptionParser(usage="Usage: %prog nsk-stats twolevel-stats oracle-stats [outputfile]")
    options, args = parser.parse_args()
    # No options are defined today, so this dict is effectively empty;
    # positionals map to the three stats files plus an optional PDF path.
    options = dict(options.__dict__)
    if len(args) < 3:
        parser.print_help()
        exit()
    main(*args, **options)
| StarcoderdataPython |
3280211 | from django.core.exceptions import ImproperlyConfigured
# Want to get everything from the 'normal' models package.
from django.db.models import * # NOQA
from django.utils.version import get_docs_version
from django.contrib.gis.geos import HAS_GEOS
# Fail fast at import time: everything exported by this GIS models module
# requires the GEOS geometry library to be installed.
if not HAS_GEOS:
    raise ImproperlyConfigured(
        "GEOS is required and has not been detected. Are you sure it is installed? "
        "See also https://docs.djangoproject.com/en/%s/ref/contrib/gis/install/geolibs/" % get_docs_version())
# Geographic aggregate functions
from django.contrib.gis.db.models.aggregates import * # NOQA
# The GeoManager
from django.contrib.gis.db.models.manager import GeoManager # NOQA
# The geographic-enabled fields.
from django.contrib.gis.db.models.fields import ( # NOQA
GeometryField, PointField, LineStringField, PolygonField,
MultiPointField, MultiLineStringField, MultiPolygonField,
GeometryCollectionField)
| StarcoderdataPython |
1605699 | <gh_stars>0
from __future__ import absolute_import, unicode_literals

import functools
import warnings

from modeltranslation import settings as mt_settings
from modeltranslation.utils import build_localized_fieldname
def get_lang_obj(lang_code, cls, field_name, *args, **kwargs):
    """
    Instantiate *cls*, passing the language-localized field name
    (e.g. ``title_en``) as the first positional argument.
    """
    localized_name = build_localized_fieldname(field_name, lang_code)
    return cls(localized_name, *args, **kwargs)
def obj_per_lang(cls, field_name, *args, **kwargs):
    """
    Return one instance of *cls* per language, each constructed with the
    localized field name as its first argument. The default language,
    when present, always comes first.
    """
    langs = kwargs.pop('languages', mt_settings.AVAILABLE_LANGUAGES)
    default = mt_settings.DEFAULT_LANGUAGE
    ordered = []
    if default in langs:
        ordered.append(default)
    ordered.extend(code for code in langs if code != default)
    return [
        get_lang_obj(code, cls, field_name, *args, **kwargs)
        for code in ordered
    ]
def page_slug_is_available(slug, lang_code, parent_page, page=None):
    """
    Return True when *slug* is not already used, in language
    *lang_code*, by any sibling of *page* under *parent_page*.
    A page with no parent is always considered available.
    """
    if parent_page is None:
        return True
    siblings = parent_page.get_children()
    if page:
        # Exclude the page being edited from its own sibling check.
        siblings = siblings.not_page(page)
    lookup = {build_localized_fieldname('slug', lang_code): slug}
    return not siblings.filter(**lookup).exists()
def deprecated(obj):
    """Decorate a class or a function with a deprecation warning."""
    wrapper = _deprecated_cls if isinstance(obj, type) else _deprecated_func
    return wrapper(obj)
def _deprecated_func(f, warn_cls=DeprecationWarning):
def _deprecated(*args, **kwargs):
message = "Method '%s' is deprecated and will be " \
"removed in the next version of wagtail-translation" \
% f.__name__
warnings.warn(message, warn_cls, stacklevel=2)
return f(*args, **kwargs)
return _deprecated
def _deprecated_cls(cls, warn_cls=DeprecationWarning):
class Deprecated(cls):
def __init__(self, *args, **kwargs):
message = "Class '%s' is deprecated and will be " \
"removed in the next version of wagtail-translation" \
% cls.__name__
warnings.warn(message, warn_cls, stacklevel=2)
super(Deprecated, self).__init__(*args, **kwargs)
return Deprecated
| StarcoderdataPython |
55638 | #!/usr/bin/env python
# Copyright (C) 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:n
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This tool has a couple of helpful macros to process Wasm files from the wasm.json.
from generateWasm import *
import optparse
import sys
import re
# NOTE(review): sys.argv[0:] (not [1:]) means args[0] is the script name;
# the args[0]/args[1]/args[2] indexing below depends on how this script is
# invoked by the build — confirm before changing.
parser = optparse.OptionParser(usage="usage: %prog <wasm.json> <WasmOps.h>")
(options, args) = parser.parse_args(sys.argv[0:])
if len(args) != 3:
    parser.error(parser.usage)
wasm = Wasm(args[0], args[1])
opcodes = wasm.opcodes
wasmB3IRGeneratorHFile = open(args[2], "w")
# Token patterns for the tiny "b3op" DSL embedded in wasm.json.
opcodeRegex = re.compile('([a-zA-Z0-9]+)')
argumentRegex = re.compile('(\@[0-9]+)')
decimalRegex = re.compile('([-]?[0-9]+)')
whitespaceRegex = re.compile('\s+')
commaRegex = re.compile('(,)')
oparenRegex = re.compile('(\()')
cparenRegex = re.compile('(\))')
class Source:
    """A string plus a cursor, consumed incrementally by the lexer."""
    def __init__(self, contents, offset=0):
        # Full text being lexed, and the index of the next unread char.
        self.contents = contents
        self.offset = offset
def read(regex, source):
    """Match *regex* at the cursor; consume and return it, else None."""
    match = regex.match(source.contents, source.offset)
    if match is None:
        return None
    # Advance the cursor past the consumed text.
    source.offset = match.end()
    return match.group()
def lex(source):
    """Split *source* into tokens: opcode names, @N arguments, ints, parens.

    Whitespace and commas are consumed but produce no token; integer
    literals are converted to int. Raises on unrecognizable input.
    """
    result = []
    while source.offset != len(source.contents):
        read(whitespaceRegex, source)
        opcode = read(opcodeRegex, source)
        if opcode:
            result.append(opcode)
            continue
        argument = read(argumentRegex, source)
        if argument:
            result.append(argument)
            continue
        number = read(decimalRegex, source)
        if number:
            result.append(int(number))
            continue
        oparen = read(oparenRegex, source)
        if oparen:
            result.append(oparen)
            continue
        cparen = read(cparenRegex, source)
        if cparen:
            result.append(cparen)
            continue
        comma = read(commaRegex, source)
        if comma:
            # Skip commas
            continue
        # Bug fix: result can contain ints (number tokens), which
        # str.join() rejects with a TypeError; stringify each token
        # before joining them into the diagnostic message.
        raise Exception("Lexing Error: could not lex token from: " + source.contents + " at offset: " + str(source.offset) + " (" + source.contents[source.offset:] + "). With tokens: [" + ", ".join(str(t) for t in result) + "]")
    return result
class CodeGenerator:
    """Recursive-descent generator turning lexed b3op tokens into C++.

    Informal grammar:
        expr := ('i32'|'i64') '(' const ')' | '@N' | opcode '(' expr* ')'
    """
    def __init__(self, tokens):
        self.tokens = tokens
        self.index = 0
        # Accumulates one generated C++ statement per entry.
        self.code = []
    def advance(self):
        """Move past the current token."""
        self.index += 1
    def token(self):
        """The token currently being looked at."""
        return self.tokens[self.index]
    def parseError(self, string):
        raise Exception("Parse error " + string)
    def consume(self, string):
        """Require the current token to equal *string* and advance."""
        if self.token() != string:
            self.parseError("Expected " + string + " but got " + self.token())
        self.advance()
    def generateParameters(self):
        """Parse a parenthesized argument list; return the temps' names."""
        self.advance()
        params = []
        tokens = self.tokens
        while self.index < len(tokens):
            if self.token() == ")":
                self.advance()
                return params
            params.append(self.generateOpcode())
        self.parseError("Parsing arguments fell off end")
    def generateOpcode(self):
        """Parse one expression, emit its C++, and return its temp name."""
        result = None
        if self.token() == "i32" or self.token() == "i64":
            # Integer constant: i32(N) / i64(N).
            type = "Int32"
            if self.token() == "i64":
                type = "Int64"
            self.advance()
            self.consume("(")
            self.code.append(generateConstCode(self.index, self.token(), type))
            result = temp(self.index)
            self.advance()
            self.consume(")")
        elif argumentRegex.match(self.token()):
            # @N refers to wasm operand N, bound to argN by the caller.
            result = "arg" + self.token()[1:]
            self.advance()
        else:
            # Nested B3 opcode application.
            op = self.token()
            index = self.index
            self.advance()
            params = self.generateParameters()
            self.code.append(generateB3OpCode(index, op, params))
            result = temp(index)
        return result
    def generate(self, wasmOp):
        """Emit the full C++ body; a single bare opcode takes a fast path."""
        if len(self.tokens) == 1:
            # Bare opcode: apply it directly to the wasm arguments.
            params = ["arg" + str(param) for param in range(len(wasmOp["parameter"]))]
            return " result = m_currentBlock->appendNew<Value>(m_proc, B3::" + self.token() + ", origin(), " + ", ".join(params) + ")"
        result = self.generateOpcode()
        self.code.append("result = " + result)
        return " " + " \n".join(self.code)
def temp(index):
    """Name of the C++ temporary generated for token position *index*."""
    return "temp{}".format(index)
def generateB3OpCode(index, op, params):
    """Emit a C++ line creating B3 opcode *op* over *params* as tempN."""
    args = ", ".join(params)
    return ("Value* temp" + str(index)
            + " = m_currentBlock->appendNew<Value>(m_proc, B3::"
            + op + ", origin(), " + args + ");")
def generateConstCode(index, value, type):
    """Emit a C++ line materializing a constant of *type* as tempN."""
    return "Value* temp{} = constant({}, {});".format(index, type, value)
def generateB3Code(wasmOp, source):
    """Lex the b3op DSL in *source* and emit its C++ body for *wasmOp*."""
    generator = CodeGenerator(lex(Source(source)))
    return generator.generate(wasmOp)
def generateSimpleCode(op):
    """Emit the specialized addOp<> C++ template for one wasm opcode."""
    opcode = op["opcode"]
    b3op = opcode["b3op"]
    # One ExpressionType argument per wasm parameter, plus the out-param.
    args = ["ExpressionType arg" + str(param) for param in range(len(opcode["parameter"]))]
    args.append("ExpressionType& result")
    return """
    template<> auto B3IRGenerator::addOp<OpType::""" + wasm.toCpp(op["name"]) + ">(" + ", ".join(args) + """) -> PartialResult
    {
    """ + generateB3Code(opcode, b3op) + """;
    return { };
    }
    """
# Generate one addOp<> specialization per simple unary/binary wasm opcode.
definitions = [generateSimpleCode(op) for op in wasm.opcodeIterator(lambda op: isSimple(op) and (isBinary(op) or isUnary(op)))]
contents = wasm.header + """
#pragma once
#if ENABLE(WEBASSEMBLY)
namespace JSC { namespace Wasm {
""" + "".join(definitions) + """
} } // namespace JSC::Wasm
#endif // ENABLE(WEBASSEMBLY)
"""
# Write the assembled header and close the output file.
wasmB3IRGeneratorHFile.write(contents)
wasmB3IRGeneratorHFile.close()
| StarcoderdataPython |
77864 | <filename>shenjieting/Study/Study_request.py<gh_stars>1-10
import urllib.request
import urllib.parse
'''
urllib中包括了四个模块,包括:urllib.request,urllib.error,urllib.parse,urllib.robotparser
urllib.request可以用来发送request和获取request的结果
urllib.error包含了urllib.request产生的异常
urllib.parse用来解析和处理URL
urllib.robotparse用来解析页面的robots.txt文件
使用urllib.request发送请求
urllib.request.urlopen()基本使用
urllib.request 模块提供了最基本的构造 HTTP 请求的方法,利用它可以模拟浏览器的一个请求发起过程,同时它还带有处理 authenticaton (授权验证), redirections (重定向), cookies (浏览器Cookies)以及其它内容。好,那么首先我们来感受一下它的强大之处,我们百度为例,我们来把这个网页抓下来。
'''
r = urllib.request.urlopen("http://www.baidu.com")
print(r.read().decode("utf-8"))
print(type(r))
#通过输出结果可以发现它是一个 HTTPResposne 类型的对象,
# 它主要包含的方法有 read() 、 readinto() 、getheader(name) 、 getheaders() 、 fileno() 等函数和 msg 、 version 、 status 、 reason 、 debuglevel 、 closed 等属性。
# 得到这个对象之后,赋值为 response ,然后就可以用 response 调用这些方法和属性,得到返回结果的一系列信息。
# 例如 response.read() 就可以得到返回的网页内容, response.status 就可以得到返回结果的状态码,如200代表请求成功,404代表网页未找到等。
print("*"*30)
print(r.getheaders())
'''
urllib.request.urlopen()详解
利用以上最基本的 urlopen() 方法,我们可以完成最基本的简单网页的 GET 请求抓取。
如果我们想给链接传递一些参数该怎么实现呢?我们首先看一下 urlopen() 函数的API。
urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)
'''
print("*"*30)
data = bytes(urllib.parse.urlencode({'word': 'hello'}), encoding= 'utf8')
res = urllib.request.urlopen("http://www.baidu.com",data=data)
print(res.read())
'''
timeout参数
timeout 参数可以设置超时时间,单位为秒,意思就是如果请求超出了设置的这个时间还没有得到响应,就会抛出异常,如果不指定,就会使用全局默认时间。它支持 HTTP 、 HTTPS 、 FTP 请求。
'''
print("*"*30)
respon=urllib.request.urlopen("http://httpbin.org/get",timeout=1)
print(respon.read())
| StarcoderdataPython |
39369 | <filename>project-euler/solutions/015.py
#!/usr/bin/env python
'''
015.py: https://projecteuler.net/problem=15
Lattice paths
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''
import os
import pytest
import time
def find_num_of_paths(w, h):
    """Count the monotone lattice paths through a w x h grid.

    Paths start at the top-left corner, may only move right or down,
    and end at the bottom-right corner; the count equals C(w + h, w).
    Returns 0 for non-positive dimensions.
    """
    if w <= 0 or h <= 0:
        return 0
    # paths[n][m] = number of paths reaching lattice point (n, m).
    # Bug fix: the first index runs to w and the second to h, so the
    # table must be (w+1) rows by (h+1) columns; the original allocated
    # the transpose and raised IndexError whenever w > h.
    paths = [[1] * (h + 1) for _ in range(w + 1)]
    for n in range(w + 1):
        for m in range(h + 1):
            if m == 0 and n == 0:
                pass
            elif n - 1 < 0:
                # Top edge: only reachable from the left.
                paths[n][m] = paths[n][m - 1]
            elif m - 1 < 0:
                # Left edge: only reachable from above.
                paths[n][m] = paths[n - 1][m]
            else:
                paths[n][m] = paths[n - 1][m] + paths[n][m - 1]
    return paths[w][h]
def test_find_num_of_paths():
    '''Test'''
    for (w, h), expected in (((1, 1), 2), ((2, 2), 6), ((0, 0), 0)):
        assert expected == find_num_of_paths(w, h)
def main():
    '''Main runner, delegates to solution.'''
    # 20x20 grid from the problem statement; the answer is C(40, 20).
    print(find_num_of_paths(20, 20))
if __name__ == '__main__':
    start_time = time.time()
    main()
    # Report the wall-clock runtime of the solution.
    print("--- %s seconds ---" % (time.time() - start_time))
| StarcoderdataPython |
76359 | <reponame>davesanjay/reproducible-computational-workflows<filename>2-conda/hello.py
# Function-call form of print runs under both Python 2 and Python 3
# (the original bare print statement was a SyntaxError on Python 3).
print("Hello Basel!")
| StarcoderdataPython |
# HELPFUL CONSTANTS
# sin(36 deg)/sin(72 deg) = 1/(2*cos(36 deg)) — presumably the short/long
# edge-length ratio of the Penrose kite/dart construction below (the
# inverse golden ratio, ~0.618); TODO confirm against the tile geometry.
aToB = sin(PI/5) / sin(2*PI/5)
# DRAWING
def drawEdge(lxy):
    """Draw the polyline connecting consecutive points of lxy."""
    for (x1, y1), (x2, y2) in zip(lxy, lxy[1:]):
        line(x1, y1, x2, y2)
def drawEditPoints(lxy, r):
    """Mark the interior points of lxy with radius-r dots (no outline)."""
    noStroke()
    # Endpoints are skipped; only the editable interior points get dots.
    for px, py in lxy[1:-1]:
        ellipse(px, py, r, r)
def drawEditPointsPlus(lxy,ptTypes,r):
    # Stub: not implemented yet. Presumably meant to render edit points
    # differently per entry in ptTypes — TODO implement.
    pass
# TRANSFORMATIONS
def rotatePoint(x, y, theta):
    """Rotate the point (x, y) about the origin by theta radians."""
    c, s = cos(theta), sin(theta)
    return (x * c - y * s, x * s + y * c)
def rotateEdge(lxy, theta):
    """Rotate every point of lxy about the origin by theta radians."""
    rotated = []
    for px, py in lxy:
        rotated.append(rotatePoint(px, py, theta))
    return rotated
def translateEdge(lxy, dx, dy):
    """Shift every point of lxy by the vector (dx, dy)."""
    shifted = []
    for px, py in lxy:
        shifted.append((px + dx, py + dy))
    return shifted
# SHAPE CONSTRUCTION
def kitePoints(axy,bxy):
    """Assemble a closed kite-tile outline from two edge templates.

    NOTE(review): axy/bxy appear to be the two Penrose edge shapes —
    confirm the exact edge roles against the calling sketch.
    """
    # Four edges: rotated/translated copies of the templates, joined
    # head-to-tail; reversed() flips the traversal direction of an edge.
    e1 = rotateEdge(axy,-PI/5)
    e2 = list(reversed(translateEdge(rotateEdge(bxy,2*PI/5),100,0)))
    e3 = translateEdge(rotateEdge(bxy,-2*PI/5),100,0)
    e4 = list(reversed(rotateEdge(axy,PI/5)))
    # Drop each edge's final point so shared endpoints aren't duplicated.
    return e1[:-1] + e2[:-1] + e3[:-1] + e4[:-1]
def dartPoints(axy,bxy):
    """Assemble a closed dart-tile outline from two edge templates.

    NOTE(review): the pre-translation by -100 / +100*aToB appears to
    re-anchor the edge templates for the dart construction — confirm
    against the kitePoints geometry.
    """
    axy2 = list(reversed(translateEdge(axy,-100,0)))
    bxy2 = list(reversed(translateEdge(bxy,100*aToB,0)))
    # Four edges joined head-to-tail, as in kitePoints.
    e1 = rotateEdge(axy2,PI/5)
    e2 = list(reversed(translateEdge(rotateEdge(bxy2,-3*PI/5),-100*aToB,0)))
    e3 = translateEdge(rotateEdge(bxy2,3*PI/5),-100*aToB,0)
    e4 = list(reversed(rotateEdge(axy2,-PI/5)))
    # Drop each edge's final point so shared endpoints aren't duplicated.
    return e1[:-1] + e2[:-1] + e3[:-1] + e4[:-1]
| StarcoderdataPython |
28214 | import setuptools
import os
# Read the long description for PyPI from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="supportr",
    version="0.1",
    author="<NAME> and <NAME>",
    author_email="<EMAIL>",
    description="Supportr",
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=3.6",
    # Bug fix: "csv" was listed here, but csv is part of the Python
    # standard library and has no PyPI distribution, so dependency
    # resolution would fail at install time.
    # NOTE(review): "sklearn" is the deprecated dummy name for
    # scikit-learn — consider switching to "scikit-learn".
    install_requires=["numpy", "datrie", "nltk", "gensim",
                      "pandas", "spacy", "sklearn"],
    url="https://github.com/davidjurgens/support",
    include_package_data=True,
    packages=setuptools.find_packages()
)
| StarcoderdataPython |
3234023 | from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap,curvedsky as cs
import numpy as np
import os,sys
from tilec import utils as tutils
import healpy as hp
#Port of healpix module coord_v_convert.f90 to python by JLS
#Feb 28, 2017
from numpy import sin,cos
from numpy import arctan2 as atan2
from numpy import sqrt
# Output directory for the healpix maps and figures written below.
froot = "/scratch/r/rbond/msyriac/data/depot/hpymap/"
# Degrees-to-radians conversion factor.
DTOR = np.pi/180.0
def angdist(v1, v2):
    """Angle in radians between 3-vectors: atan2(|v1 x v2|, v1 . v2)."""
    dotp = np.dot(v1, v2)
    crossv = np.cross(v1, v2)
    crossmag = sqrt(np.dot(crossv, crossv))
    return atan2(crossmag, dotp)
def py_coordsys2euler_zyz(iepoch, oepoch, isys, osys):
    """Return ZYZ Euler angles (psi, theta, phi) of the rotation taking
    coordinate system *isys* at epoch *iepoch* to *osys* at *oepoch*.

    The angles are recovered from the images of the three basis vectors
    under the conversion.
    """
    v1 = np.asarray([1.0, 0.0, 0.0])
    v2 = np.asarray([0.0, 1.0, 0.0])
    v3 = np.asarray([0.0, 0.0, 1.0])
    v1p = py_xcc_v_convert(v1, iepoch, oepoch, isys, osys)
    v2p = py_xcc_v_convert(v2, iepoch, oepoch, isys, osys)
    v3p = py_xcc_v_convert(v3, iepoch, oepoch, isys, osys)
    # Bug fix: each vector is normalized by its own magnitude. The
    # original divided v2p and v3p by |v1p| (a copy-paste slip that was
    # numerically masked because rotations preserve vector norms).
    v1p = v1p / sqrt(np.dot(v1p, v1p))
    v2p = v2p / sqrt(np.dot(v2p, v2p))
    v3p = v3p / sqrt(np.dot(v3p, v3p))
    theta = angdist(v3, v3p)
    psi = atan2(v2p[2], -v1p[2])
    phi = atan2(v3p[1], v3p[0])
    return psi, theta, phi
def py_xcc_v_convert(ivector,iepoch,oepoch,isys,osys):
    """Convert *ivector* between coordinate systems and epochs.

    Systems are identified by their first letter, case-insensitive:
    'q'/'c' equatorial (celestial), 'e' ecliptic, 'g' galactic. The
    conversion pivots through ecliptic coordinates, precessing between
    epochs when they differ.
    """
    isys=isys.lower()
    osys=osys.lower()
    isys=isys[0]
    osys=osys[0]
    # 'c' (celestial) is an alias for 'q' (equatorial).
    if (isys=='c'):
        isys='q'
    if (osys=='c'):
        osys='q'
    # Step 1: input system -> ecliptic.
    if (isys=='q'):
        ivector=py_xcc_dp_q_to_e(ivector,iepoch)
    if (isys=='g'):
        ivector=py_xcc_dp_g_to_e(ivector,iepoch)
    # Step 2: precess within the ecliptic frame if the epochs differ.
    if (iepoch!=oepoch):
        ivector=py_xcc_dp_precess(ivector,iepoch,oepoch)
    # Step 3: ecliptic -> output system.
    if (osys=='q'):
        ivector=py_xcc_dp_e_to_q(ivector,oepoch)
    if (osys=='g'):
        ivector=py_xcc_dp_e_to_g(ivector,oepoch)
    return ivector
def py_xcc_dp_e_to_q(ivector, epoch):
    """Rotate *ivector* (numpy array) from ecliptic to equatorial
    coordinates at *epoch*: a rotation about the x-axis by the mean
    obliquity of the ecliptic.
    """
    century = (epoch - 1900.e0) / 100.e0
    # Mean obliquity (degrees) as a polynomial in centuries since 1900.
    obliquity = 23.452294e0 - 0.0130125e0*century - 1.63889e-6*century**2 + 5.02778e-7*century**3
    dc = cos(DTOR * obliquity)
    ds = sin(DTOR * obliquity)
    out = np.zeros(ivector.shape)
    out[0] = ivector[0]
    out[1] = dc*ivector[1] - ds*ivector[2]
    out[2] = dc*ivector[2] + ds*ivector[1]
    return out
def py_xcc_dp_q_to_e(ivector,epoch):
    """Rotate *ivector* from equatorial to ecliptic coordinates at *epoch*.

    NOTE(review): this definition is shadowed by a second, working
    py_xcc_dp_q_to_e later in this module, so it is dead code; it is
    repaired here for reference rather than removed.
    """
    hvector=np.zeros(ivector.shape)
    T = (epoch - 1900.e0) / 100.e0
    epsilon = 23.452294e0 - 0.0130125e0*T - 1.63889e-6*T**2 + 5.02778e-7*T**3
    dc = cos(DTOR * epsilon)
    ds = sin(DTOR * epsilon)
    # Bug fix: the original read "ivector(1)", which *calls* the array
    # and raises TypeError; the first component is ivector[0].
    hvector[0] = ivector[0]
    hvector[1] = dc*ivector[1] + ds*ivector[2]
    hvector[2] = dc*ivector[2] - ds*ivector[1]
    return hvector
def py_xcc_dp_e_to_g(ivector,epoch):
    """Rotate *ivector* from ecliptic to galactic coordinates at *epoch*."""
    # Row-major rotation matrix from ecliptic (J2000) to galactic.
    T=np.asarray([-0.054882486e0, -0.993821033e0, -0.096476249e0, 0.494116468e0, -0.110993846e0, 0.862281440e0, -0.867661702e0, -0.000346354e0, 0.497154957e0])
    T=np.reshape(T,[3,3])
    #T=T.transpose()
    # The matrix is defined at J2000, so precess there first if needed.
    if (epoch != 2000.0):
        ivector=py_xcc_dp_precess(ivector,epoch,2000.0)
    return np.dot(T,ivector)
def py_xcc_dp_g_to_e(ivector,epoch):
    """Rotate *ivector* from galactic to ecliptic coordinates at *epoch*."""
    # Transpose of the ecliptic(J2000)->galactic matrix used in e_to_g.
    T=np.asarray([-0.054882486e0, -0.993821033e0, -0.096476249e0, 0.494116468e0, -0.110993846e0, 0.862281440e0, -0.867661702e0, -0.000346354e0, 0.497154957e0])
    T=np.reshape(T,[3,3])
    T=T.transpose()
    hvector=np.dot(T,ivector)
    if (epoch != 2000.0):
        # Precess from J2000 to the requested epoch.
        return py_xcc_dp_precess(hvector,2000.0,epoch)
    # Cleanup: removed an unreachable `assert(1==0)` that followed the
    # original if/else — both branches returned, so it never executed.
    return hvector
def py_xcc_dp_q_to_e(ivector,epoch):
    """Rotate *ivector* from equatorial to ecliptic coordinates at *epoch*.

    NOTE(review): this redefinition shadows an earlier, broken
    py_xcc_dp_q_to_e in this module; this is the version actually used.
    """
    # Set-up:
    T = (epoch - 1900.0) / 100.0
    # Mean obliquity (degrees) as a polynomial in centuries since 1900.
    epsilon = 23.452294 - 0.0130125*T - 1.63889e-6*T**2 + 5.02778e-7*T**3
    hvector=np.zeros(ivector.shape)
    # Conversion: rotation about the x-axis by -epsilon.
    dc = cos(DTOR * epsilon)
    ds = sin(DTOR * epsilon)
    hvector[0] = ivector[0]
    hvector[1] = dc*ivector[1] + ds*ivector[2]
    hvector[2] = dc*ivector[2] - ds*ivector[1]
    return hvector
def py_xcc_dp_precess(ivector,iepoch,oepoch):
    """Precess *ivector* (ecliptic coordinates) from *iepoch* to *oepoch*.

    Implemented as three successive rotations (Z, then X, then Z) using
    mean precession rates; angles are in degrees below.
    """
    # Centuries since 1900 at the midpoint of the two epochs.
    Tm = ((oepoch+iepoch)/2.0 - 1900.0) / 100.0
    # General precession in longitude and change in obliquity (degrees).
    gp_long = (oepoch-iepoch) * (50.2564+0.0222*Tm) / 3600.0
    dE = (oepoch-iepoch) * (0.4711-0.0007*Tm) / 3600.0
    obl_long = 180.0 - (173.0 + (57.060+54.770*Tm)/60.0)+ gp_long/2.0
    dL = gp_long - obl_long
    tvector=np.zeros(ivector.shape)
    # Z-axis rotation by OBL_LONG:
    dco = cos(DTOR * obl_long)
    dso = sin(DTOR * obl_long)
    tvector[0] = ivector[0]*dco - ivector[1]*dso
    tvector[1] = ivector[0]*dso + ivector[1]*dco
    tvector[2] = ivector[2]
    # X-axis rotation by dE:
    dce = cos(DTOR * dE)
    dse = sin(DTOR * dE)
    temp = tvector[1]*dce - tvector[2]*dse
    tvector[2] = tvector[1]*dse + tvector[2]*dce
    tvector[1] = temp
    # Z-axis rotation by GP_LONG - OBL_LONG:
    dcl = cos(DTOR * dL)
    dsl = sin(DTOR * dL)
    temp = tvector[0]*dcl - tvector[1]*dsl
    tvector[1] = tvector[0]*dsl + tvector[1]*dcl
    tvector[0] = temp
    return tvector
def rotate_alm(alm,iepoch,oepoch,isys,osys):
    """Rotate spherical-harmonic coefficients *alm* in place between
    coordinate systems/epochs; also returns the (mutated) alm.
    """
    # py_coordsys2euler_zyz returns (psi, theta, phi); the unpack below
    # swaps the names, so hp.rotate_alm(alm, phi, theta, psi) actually
    # receives the values in (psi, theta, phi) order.
    # NOTE(review): the net argument order looks correct — confirm
    # against healpy.rotate_alm's Euler convention before renaming.
    phi,theta,psi=py_coordsys2euler_zyz(iepoch,oepoch,isys,osys)
    hp.rotate_alm(alm,phi,theta,psi)
    return alm
# Build full-sky, Galactic-coordinate Compton-y maps by combining the
# ACT+Planck tilec solutions for the deep56 and boss regions.
solution = 'comptony'
tdir = "/scratch/r/rbond/msyriac/data/depot/tilec/"
dcomb = 'joint'
nside = 2048
lmax = 3*nside
#for deproject in [None,'cib']:
for deproject in ['cib']:
    hmap = 0
    hmask = 0
    for region in ['deep56','boss']:
        # Read the component-separated map and its analysis mask (CAR).
        imap = enmap.read_map(tutils.get_generic_fname(tdir,region,solution,deproject=deproject,data_comb=dcomb,version="v1.1.1",sim_index=None))
        mask = enmap.read_map(tutils.get_generic_fname(tdir,region,solution,deproject=deproject,data_comb=dcomb,version="v1.1.1",sim_index=None,mask=True))
        malm = cs.map2alm(mask,lmax=lmax)
        ialm = cs.map2alm(imap,lmax=lmax)
        # Rotate from equatorial (J2000, 'C') to Galactic ('G') in
        # harmonic space, then resample onto a healpix grid.
        malm = malm.astype(np.complex128,copy=False)
        malm = rotate_alm(malm,2000.0,2000.0,'C','G')
        ialm = ialm.astype(np.complex128,copy=False)
        ialm = rotate_alm(ialm,2000.0,2000.0,'C','G')
        imask = maps.binary_mask(hp.alm2map(malm,nside))
        # Accumulate the masked map and mask coverage across regions.
        hmap = hmap + hp.alm2map(ialm,nside)*imask
        hmask = hmask + imask
    # Quick-look figures plus the FITS products.
    io.mollview(hmap,f'{froot}test_rot_{deproject}.png')
    io.mollview(hmask,f'{froot}test_rot_mask_{deproject}.png')
    hp.write_map(f'{froot}test_rot_{deproject}.fits',hmap,overwrite=True)
    hp.write_map(f'{froot}test_rot_mask_{deproject}.fits',hmask,overwrite=True)
| StarcoderdataPython |
3276111 | import jieba
import simjb
import torbjorn as tbn
def get_data(name):
    """Load the raw training lines for corpus *name* (icwb2 dataset)."""
    path = "./icwb2-data/training/{}_training.utf8".format(name)
    with open(path, "r") as handle:
        return handle.readlines()
@tbn.run_time
def calc(datas, data_name, tool_name, tool):
    """Score *tool*'s word segmentation of *datas* against the gold standard.

    Each line of *datas* is a space-separated gold segmentation. A
    predicted word counts as correct when its (start, end) character
    span exactly matches a gold span. Prints totals and word accuracy.
    """
    all_num = 0
    true_num = 0
    false_num = 0
    for data in datas:
        sentence_list = data.strip().split(" ")
        predict_list = list(tool.cut("".join(sentence_list)))
        # Compare by character spans (the shared span-building code was
        # duplicated in the original; it now lives in _word_spans). A
        # set makes each membership test O(1) instead of a list scan.
        sentence_pos_list = _word_spans(sentence_list)
        predict_pos_set = set(_word_spans(predict_list))
        true_word_num = 0
        false_word_num = 0
        for pos in sentence_pos_list:
            if pos in predict_pos_set:
                true_word_num += 1
            else:
                false_word_num += 1
        all_num += len(sentence_pos_list)
        true_num += true_word_num
        false_num += false_word_num
    print(f"\n## {data_name} | {tool_name}")
    print(f">> all: {all_num}, true: {true_num}, false: {false_num}, acc: {true_num / all_num}")


def _word_spans(words):
    """Return the (start, end) character span of each word, in order."""
    spans = []
    start = 0
    for word in words:
        end = start + len(word)
        spans.append((start, end))
        start = end
    return spans
print(f">> all: {all_num}, true: {true_num}, false: {false_num}, acc: {true_num / all_num}")
def run():
    """Benchmark every segmentation tool on every corpus."""
    tools = {"jieba": jieba, "simjb": simjb}
    for data_name in ("pku", "msr"):
        datas = get_data(data_name)
        for tool_name, tool in tools.items():
            calc(datas, data_name, tool_name, tool)
if __name__ == '__main__':
    # Entry point: run the full jieba-vs-simjb comparison.
    run()
| StarcoderdataPython |
3210139 | <gh_stars>1-10
# (c) 2018-2021, <NAME> @ ETH Zurich
# Computer-assisted Applications in Medicine (CAiM) Group, Prof. <NAME>
import os, itertools
from data_read.imarisfiles import ImarisFiles
from config import system
import argparse
import numpy as np
import urllib
import zipfile
import logging
import pandas as pd
import glob
logging.getLogger(__name__)
def shift_list(seq, n):
n = n % len(seq)
return seq if n == 0 else seq[n:] + seq[:n]
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def input_dataset(dataset):
if len(dataset) == 1:
ds_train, ds_val, ds_test = dataset, None, None
if len(dataset) == 2:
ds_train, ds_val = dataset
ds_test = None
if len(dataset) == 3:
ds_train, ds_val, ds_test = dataset
return ds_train, ds_val, ds_test
def check_channels(ifile, channels):
ext = os.path.splitext(ifile)[1]
if ext == '.ims':
imFile = ImarisFiles(ifile)
lchannels = set([x.lower() for x in channels])
fchannels = set([x.lower() for x in imFile.channelNames])
lspots = set([x.lower() for x in system.spots_GT])
return len(fchannels.union(lspots).intersection(lchannels)) >= len(channels)
else:
Warning('extension not recognized, channels are not checked')
return True
def input_filenames(filenames, fext=None, do_recursive=False):
if fext is None:
fext = ['.ims']
if not isinstance(filenames, list):
filenames = [filenames]
l_trainpath = []
for ifile in filenames:
if os.path.isdir(filenames):
if do_recursive:
trainpath_aux = [os.path.join(dp, f) for dp, dn, filenames in os.walk(ifile) for f in filenames if
os.path.splitext(f)[1] in fext]
else:
trainpath_aux = [os.path.join(ifile, x) for x in os.listdir(ifile) if
os.path.splitext(x)[1] in fext]
else:
trainpath_aux = [ifile]
for x in trainpath_aux:
l_trainpath.append(x)
def input_files_format(in_file, channels=None, do_recursive=False, fext=None):
if fext is None:
fext = ['.ims']
elif not isinstance(fext, list):
fext = [fext]
if in_file is None:
return in_file
elif not isinstance(in_file, list):
in_file = [in_file]
l_trainpath = []
for ifile in in_file:
if os.path.isdir(ifile):
if do_recursive:
trainpath_aux = [os.path.join(dp, f) for dp, dn, filenames in os.walk(ifile) for f in filenames if
os.path.splitext(f)[1] in fext]
else:
trainpath_aux = [os.path.join(ifile, x) for x in os.listdir(ifile) if
os.path.splitext(x)[1] in fext]
else:
trainpath_aux = [ifile]
for x in trainpath_aux:
l_trainpath.append(x)
if not channels is None:
l_trainpath = [x for x in l_trainpath if check_channels(x, channels)]
return l_trainpath
def download_url_zip(data_url, download_dir, authentify=None):
    """Download a zip archive, extract it next to itself, and delete the zip.

    Parameters
    ----------
    data_url : str
        Direct URL of the ``.zip`` file to fetch.
    download_dir : str
        Directory the archive is downloaded to and extracted into.
    authentify : dict, optional
        HTTP basic-auth credentials with keys ``root_url``, ``username`` and
        ``password``.  NOTE: this installs a process-global opener via
        ``urllib.request.install_opener``, which affects later urllib calls.
    """
    # Login if needed
    if authentify is not None:
        password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        password_mgr.add_password(None, authentify["root_url"], authentify["username"], authentify["password"])
        handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
        opener = urllib.request.build_opener(handler)
        # Priming request so the following urlretrieve reuses the credentials.
        opener.open(authentify["root_url"])
        urllib.request.install_opener(opener)
    logging.info("Downloading: {:s}".format(data_url))
    # Download file; ``fdir`` is the local file *path* returned by urlretrieve.
    fname = data_url.split('/')[-1]
    download_dir = os.path.join(download_dir, fname)
    fdir, _ = urllib.request.urlretrieve(data_url, download_dir)
    # Unzip file into the directory containing the archive
    with zipfile.ZipFile(fdir, 'r') as zip_ref:
        zip_ref.extractall(os.path.split(zip_ref.filename)[0])
    # Delete zip
    os.remove(fdir)
def invert_listdict(orig_dict):
    """Invert a ``{key: [values]}`` mapping into ``{value: key}``.

    When a value appears under several keys, the key encountered last in
    iteration order wins (same behaviour as the original loop).
    """
    return {value: key for key, values in orig_dict.items() for value in values}
def aggregate_metrics(save_dir, fname='metrics.csv', read_dir=None):
    """Collect per-fold metrics CSVs into one combined table.

    Scans the numeric sub-directories (cross-validation folds) of either
    *save_dir* or *read_dir*, reads each fold's metrics file, and writes the
    combined table under *save_dir*.

    Parameters
    ----------
    save_dir : str
        Directory whose numbered sub-folders hold per-fold metrics and where
        the aggregated CSV is written.
    fname : str, optional
        Metrics file name inside each fold directory.
    read_dir : str, optional
        When given, folds are read from here instead and the output name is
        derived from the channel combination encoded in the directory name.

    Returns
    -------
    bool
        True when at least one fold provided metrics (or a ``notrain.txt``
        marker), False otherwise.
    """
    cmetrics = pd.DataFrame()
    has_metrics = False
    if read_dir:
        # Reading "boundmax" results: the output file name encodes the
        # 0-based channel combination parsed from the directory name.
        dir_aux = read_dir
        fname_aux = 'metrics.csv'
        chcomb = "".join([str(int(x) - 1) for x in
                          os.path.split(read_dir)[1].replace("ch", "").replace("_l2", "").replace("_l4", "")])
        save_metrics = os.path.join(
            save_dir, 'metrics_ch' + chcomb + '.csv')
    else:
        dir_aux = save_dir
        fname_aux = fname
        save_metrics = os.path.join(save_dir, fname)
    # Loop variable renamed from ``dir`` (it shadowed the builtin).
    for subdir in os.listdir(dir_aux):
        metrics_file = os.path.join(dir_aux, subdir, fname_aux)
        if os.path.isdir(os.path.join(dir_aux, subdir)) and subdir.isdigit():
            if os.path.isfile(metrics_file):
                has_metrics = True
                pmetrics = pd.read_csv(metrics_file,
                                       sep=',',
                                       header=0,
                                       index_col=0
                                       ).transpose()
                cmetrics['model_cv' + subdir] = pmetrics['model']
            elif 'notrain.txt' in os.listdir(os.path.join(dir_aux, subdir)):
                # Marker file: the fold was intentionally skipped but still
                # counts as handled.
                has_metrics = True
    cmetrics.to_csv(save_metrics)
    return has_metrics
def aggregate_metrics_chdel(save_dir):
    """Aggregate per-fold metrics for channel-deletion runs.

    Scans the numeric sub-directories of *save_dir*, merges each fold's
    ``metrics.csv`` and writes the combined table to
    ``save_dir/metrics.csv``.

    Returns
    -------
    bool
        True when at least one fold contributed metrics.
    """
    cmetrics = pd.DataFrame()
    has_metrics = False
    # Loop variable renamed from ``dir`` (it shadowed the builtin).
    for subdir in os.listdir(save_dir):
        metrics_file = os.path.join(save_dir, subdir, 'metrics.csv')
        if os.path.isdir(os.path.join(save_dir, subdir)) and subdir.isdigit() and os.path.isfile(metrics_file):
            has_metrics = True
            pmetrics = pd.read_csv(metrics_file,
                                   sep=',',
                                   header=0,
                                   index_col=0
                                   ).transpose()
            cmetrics['model_cv' + subdir] = pmetrics['model']
    cmetrics.to_csv(os.path.join(save_dir, 'metrics.csv'))
    return has_metrics
def aggregate_metrics_sample(save_dir, chdel=False):
    """Merge the per-sample metrics files of fold ``0`` into one CSV.

    Looks for ``metrics_sample*.csv`` inside ``save_dir/0``; when at least
    one is found the combined table is written to
    ``save_dir/metrics_samples.csv``.  *chdel* is accepted for interface
    compatibility and unused.

    Returns
    -------
    bool
        True when any per-sample metrics file was found.
    """
    combined = pd.DataFrame()
    fold_dir = os.path.join(save_dir, '0')
    sample_files = glob.glob(os.path.join(fold_dir, 'metrics_sample*.csv'))
    for sample_file in sample_files:
        per_sample = pd.read_csv(sample_file, sep=',', header=0, index_col=0).transpose()
        sample_name = os.path.splitext(os.path.basename(sample_file))[0].replace("metrics_", "")
        combined[sample_name] = per_sample['model']
    if sample_files:
        combined.to_csv(os.path.join(save_dir, 'metrics_samples.csv'))
    return bool(sample_files)
def get_weights(class_counts, log_weight=True):
    """Compute inverse-frequency class weights.

    Each class gets ``total / (n_classes * count)``; when *log_weight* is
    True the weights are smoothed through ``log(e + w)``.
    """
    counts = np.asarray(class_counts)
    weights = counts.sum() / (counts.size * counts)
    return np.log(np.e + weights) if log_weight else weights
def sort_markers(lmarkers='12345', length_first=True):
    """Enumerate marker combinations of length 1..5 as sorted strings.

    Parameters
    ----------
    lmarkers : str
        Characters naming the individual markers; position defines order.
    length_first : bool
        Must be True: combinations are returned shortest first (singletons,
        pairs, triples, ...).  The original ``False`` branch was dead code
        that referenced undefined loop variables and crashed with
        ``UnboundLocalError``; it now fails loudly instead.

    Returns
    -------
    list of str
        Each combination joined and character-sorted, e.g. ``'13'``.
    """
    import itertools  # local import: the file-level import block is elsewhere

    if not length_first:
        # BUG: the original implementation of this branch could never run
        # (undefined names, unbound result list); make the failure explicit.
        raise NotImplementedError('length_first=False is not supported')
    combos = []
    # The five hand-unrolled nested loops of the original are exactly
    # combinations of size 1..5 in lexicographic index order.
    for size in range(1, 6):
        for idx in itertools.combinations(range(len(lmarkers)), size):
            combos.append("".join(lmarkers[i] for i in idx))
    # Character-sort each combination (matters when lmarkers is unordered).
    return ["".join(sorted(c)) for c in combos]
def rename_channels(names_old, lmarkers):
    """Canonicalise channel-combination names against *lmarkers* and sort them.

    Marker characters are first mapped onto '1'..'N' (through intermediate
    ``chr`` codepoints so overlapping source/target digits cannot clobber
    each other), the combinations are sorted marker-major with longer
    combinations first within each group, and finally mapped back to the
    original marker characters.

    Parameters
    ----------
    names_old : list of str
        Combination names, possibly prefixed with ``'ch'``.
    lmarkers : sequence
        Marker identifiers; position defines the canonical order.
        NOTE(review): ``chr(int(k))`` below assumes every marker identifier
        is a numeric string — confirm with the callers.

    Returns
    -------
    list of str
        Sorted combination strings without the ``'ch'`` prefix.
    """
    mrename = {str(k): str(v) for k, v in zip(lmarkers, range(1, len(lmarkers) + 1))}
    # Two-phase substitution: marker -> chr(v) -> str(v) avoids collisions
    # when a marker character equals another marker's target digit.
    for k, v in mrename.items():
        names_old = [x.replace(k, chr(int(v))) for x in names_old]
    for v in mrename.values():
        names_old = [x.replace(chr(int(v)), str(v)) for x in names_old]
    names_aux = ['m' + "".join(sorted(x.replace('ch', ''))) for x in names_old]
    names_new_sorted = []
    # Group by lowest marker index; within a group longer combos come first.
    for n in range(1, len(lmarkers) + 1):
        names_new_sorted += sorted([x for x in names_aux if str(n) in x], key=lambda x: (-len(x), x), reverse=False)
        names_aux = [x for x in names_aux if str(n) not in x]
    names_sorted = [x.replace("m", "") for x in names_new_sorted]
    # Inverse two-phase substitution: digit -> chr(k) -> marker character.
    for k, v in mrename.items():
        names_sorted = [x.replace(v, chr(int(k))) for x in names_sorted]
    for k in mrename.keys():
        names_sorted = [x.replace(chr(int(k)), k) for x in names_sorted]
    names_sorted = ["".join(sorted(x.replace('ch', ''))) for x in names_sorted]
    return names_sorted
def marker_combinations(nmarkers):
    """Return every non-empty subset of marker indices ``0..nmarkers-1``.

    PERFORMANCE FIX: the original enumerated all ``nmarkers ** nmarkers``
    tuples from ``itertools.product`` and deduplicated them through ``set``
    (and the resulting list order was therefore hash-dependent); generating
    the combinations directly is equivalent, deterministic, and only
    O(2**n).
    """
    return [combo
            for size in range(1, nmarkers + 1)
            for combo in itertools.combinations(range(nmarkers), size)]
| StarcoderdataPython |
3262127 | <reponame>vincent-l-j/micropython-stubber
import pytest
#
from libcst.codemod import CodemodTest
from stubber.codemod.commands.noop import NOOPCommand
################################################################################
# Define a few codeblock for testing of the libcst parser
################################################################################
basic = """
# Basic
foo = ""
class Class:
pass
def foo(a: Class, **kwargs: str) -> Class:
t= Class() # This is a comment
bar = ""
return t
bar = Class()
foo(bar, baz="bla")
"""
positional = """
# Positional /
def positional(a, b, /, c:int=None, d=None) -> int:
print( f"a={a}, b={b} ,c={c} ,d={d}")
return c
"""
keywords = """
# Keywords *
def keywords(a, b, *, e=42, f=False):
print(f"a={a}, b={b} ,e={e} ,f={f}")
"""
both = """
# both keywords and positional
def both(a, b, /, c:int=None, d=None, *, e=42, f=False):
print(f"a={a}, b={b} ,e={e} ,f={f}")
"""
################################################################################
# libCtS testing is based on unittest, use this class but call it from pytest
################################################################################
class TestNOOPCodemod(CodemodTest):
    """libCST ``CodemodTest`` harness for the no-op codemod.

    libCST's test support is unittest-based; the pytest matrix below
    instantiates this class and drives it manually instead of letting
    unittest collect it.
    """

    TRANSFORM = NOOPCommand

    def tst_noop_36(self, before, after, version) -> None:
        "Python 3.6 and newer"
        # Named ``tst_`` (not ``test_``) on purpose so unittest/pytest do
        # not collect it directly; invoked from the pytest wrapper below.
        self.assertCodemod(before, after, python_version=version)
################################################################################
# test the support for the Micropython annotation syntax
# Pytest test matrix
################################################################################
@pytest.mark.parametrize(
    "before, after",
    [
        (basic, basic),
        (keywords, keywords),
        (positional, positional),
        (both, both),
    ],
    ids=lambda t: str(t).split()[1],  # use 1st comment as test ID
)
@pytest.mark.parametrize(
    "version",
    [("3.8"), ("3.7"), ("3.5")],
)
def test_LibCST_noop_codemod(version, before, after) -> None:
    """Run the no-op codemod across a (snippet x python-version) matrix.

    Wraps the unittest-style ``CodemodTest`` in pytest for simpler matrix
    testing.
    """
    # Known failures: the parser cannot handle these constructs when told to
    # target an older grammar, so skip rather than fail.
    if "keywords" in before and version < "3.7":
        pytest.skip("STAR param not supported on older python < 3.7")
    if "positional" in before and version < "3.8":
        pytest.skip("SLASH param not supported on older python < 3.8")
    Sot = TestNOOPCodemod()
    # NOTE(review): ``after`` is unused and ``before`` is passed twice —
    # consistent with a no-op codemod (output == input), but confirm intent.
    Sot.tst_noop_36(before, before, version)
| StarcoderdataPython |
1716812 | import re
import unittest
from django import forms
from django.apps import AppConfig
from django.utils.functional import curry
class PyDataConfig(AppConfig):
    """Django app config that adapts core AMY for PyData conferences.

    All customisation happens in :meth:`ready`, which monkey-patches models,
    forms, views and even test cases of the ``workshops`` app at startup, so
    the PyData-specific behaviour needs no schema migrations.
    """

    name = 'pydata'
    label = 'PyData'
    verbose_name = 'AMY for PyData conferences'

    def ready(self):
        """Apply all PyData monkey-patches once the app registry is loaded."""
        from . import checks
        from workshops.forms import PersonForm, TaskForm, SponsorshipForm
        from workshops.models import Person, Task, Organization, Sponsorship
        from workshops.test.base import TestBase
        from workshops.test.test_sponsorship import TestSponsorshipViews
        from workshops.views import EventCreate, PersonCreate

        # Add fixtures within pydata app to testing database
        TestBase.fixtures = [
            'workshops_organization.json', 'workshops_role.json']

        # Test for adding sponsor w/o amount should fail
        TestSponsorshipViews.test_add_sponsor_minimal = unittest.expectedFailure(
            TestSponsorshipViews.test_add_sponsor_minimal)

        # Sponsorship levels as (minimum amount, label) pairs; these become
        # the choices of the ``amount`` field below.
        Sponsorship.LEVELS = (
            (0, 'Founding'),
            (15000, 'Diamond'),
            (8000, 'Platinum'),
            (5000, 'Gold'),
            (3000, 'Silver'),
            (1500, 'Supporting'),
            (1, 'Community'),
        )

        # Add choices to `amount` field
        # Django migration system complains about missing migrations
        amount_field = Sponsorship._meta.get_field('amount')
        amount_field.choices = Sponsorship.LEVELS

        # Add method `get_amount_display` to Sponsorship to return the level
        # NOTE(review): ``curry`` was removed in Django 3.0 — switch to
        # ``functools.partialmethod`` when upgrading.
        setattr(
            Sponsorship,
            'get_amount_display',
            curry(Sponsorship._get_FIELD_display, field=amount_field)
        )

        # Override the `__str__` method to display level instead of amount
        def __str__(self):
            return '{}: {}'.format(self.organization, self.get_amount_display())
        Sponsorship.add_to_class('__str__', __str__)

        # Add a regex to obtain URL of conference and `pk` of sponsor instance
        Sponsorship.PROFILE_REGEX = re.compile(r'^(?P<url>.+?(?=/sponsors))/sponsors/(?P<id>\d+)/?')  # noqa

        # Add "Import from URL" button to SponsorshipForm
        class Media:
            js = ('import_sponsor.js', )
        SponsorshipForm.Media = Media

        # Add a dropdown to the `amount` field on SponsorshipForm
        SponsorshipForm.base_fields['amount'] = forms.ChoiceField(
            choices=Sponsorship.LEVELS,
        )

        # Add a regex to obtain URL of conference and `pk` of presentation
        Task.PRESENTATION_REGEX = re.compile(r'^(?P<url>.+?(?=/schedule))/schedule/presentation/(?P<id>\d+)/?')  # noqa

        # Add "Import from URL" button to TaskForm
        class Media:
            js = ('import_task.js', )
        TaskForm.Media = Media

        # Add a regex to obtain URL of conference and `pk` of speaker
        Person.PROFILE_REGEX = re.compile(r'^(?P<url>.+?(?=/speaker))/speaker/profile/(?P<id>[^/]+)/?')  # noqa

        # Add "Import from URL" button to PersonForm on PersonCreate view
        PersonCreate.template_name = 'pydata/person_create_form.html'

        class Media:
            js = ('import_person.js', )
        PersonForm.Media = Media

        # Prepopulate fields on EventCreate view
        def get_initial(self):
            numfocus = Organization.objects.get(fullname='NumFOCUS')
            return {
                'administrator': numfocus,
                'assigned_to': self.request.user,
            }
        EventCreate.get_initial = get_initial
| StarcoderdataPython |
1607596 | # 唐诗生成
import collections
import os
import sys
import time
import numpy as np
import tensorflow as tf
# 这里引入可能出错
from models.model import rnn_model
# 句子预处理 产生batch函数
from dataset.fiction import process_poems, generate_batch
import heapq
# 后面那个是说明
# Command-line flags; the trailing string of each definition is its help text.
tf.flags.DEFINE_integer('batch_size', 64, 'batch size.')
tf.flags.DEFINE_float('learning_rate', 0.01, 'learning rate.')
# set this to 'main.py' relative path
tf.flags.DEFINE_string('checkpoints_dir', os.path.abspath('./checkpoints/zhetian/'), 'checkpoints save path.')
tf.flags.DEFINE_string('file_path', os.path.abspath('./dataset/data/zhetian.txt'), 'file name of poems.')
tf.flags.DEFINE_string('model_prefix', 'poems', 'model save prefix.')
tf.flags.DEFINE_integer('epochs', 50, 'train how many epochs.')
tf.flags.DEFINE_string('write', '', 'wtf.')
tf.flags.DEFINE_string('train', '', 'wtf.')
tf.flags.DEFINE_string('no-train', '', 'wtf.')
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
# Sentinel characters marking the start / end of every training sample.
start_token = 'G'
end_token = 'E'
'''
运行训练,核心
'''
def run_training():
    """Train the LSTM text model and checkpoint it periodically.

    Reads the corpus from ``FLAGS.file_path``, resumes from the latest
    checkpoint in ``FLAGS.checkpoints_dir`` when one exists, and saves the
    parameters every 6 epochs (or on Ctrl-C).
    """
    # Checkpoint save path (parent dir and dir are created on demand).
    print('its_not_ok:', FLAGS.checkpoints_dir)
    if not os.path.exists(os.path.dirname(FLAGS.checkpoints_dir)):
        os.mkdir(os.path.dirname(FLAGS.checkpoints_dir))
    if not os.path.exists(FLAGS.checkpoints_dir):
        os.mkdir(FLAGS.checkpoints_dir)
    # Preprocess the corpus: poems encoded as int vectors, the char<->int
    # mapping, and the vocabulary.
    poems_vector, word_to_int, vocabularies = process_poems(FLAGS.file_path)
    # Build input/output batches from the encoded corpus.
    batches_inputs, batches_outputs = generate_batch(FLAGS.batch_size, poems_vector, word_to_int)
    # Input / target placeholders (batch-major, variable sequence length).
    input_data = tf.placeholder(tf.int32, [FLAGS.batch_size, None])
    output_targets = tf.placeholder(tf.int32, [FLAGS.batch_size, None])
    end_points = rnn_model(model='lstm', input_data=input_data, output_data=output_targets, vocab_size=len(
        vocabularies), run_size=128, num_layers=2, batch_size=FLAGS.batch_size, learning_rate=FLAGS.learning_rate)
    # Saver for checkpointing all graph variables.
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        start_epoch = 0
        checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoints_dir)
        if checkpoint:
            saver.restore(sess, checkpoint)
            print("[INFO] restore from the checkpoint {0}".format(checkpoint))
            # Checkpoint names end in the epoch number; resume from there.
            start_epoch += int(checkpoint.split('-')[-1])
        print('[INFO] start training...', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        try:
            for epoch in range(start_epoch, FLAGS.epochs):
                n = 0
                n_chunk = len(poems_vector) // FLAGS.batch_size
                for batch in range(n_chunk):
                    loss, _, _ = sess.run([
                        end_points['total_loss'],
                        end_points['last_state'],
                        end_points['train_op']
                    ], feed_dict={input_data: batches_inputs[n], output_targets: batches_outputs[n]})
                    n += 1
                    print('[INFO] Epoch: %d, batch: %d, training loss: %.6f' % (epoch, batch, loss))
                if epoch % 6 == 0:
                    # NOTE(review): periodic checkpoints go to ./zhetian_model/
                    # while resume reads FLAGS.checkpoints_dir — confirm the
                    # two locations are meant to differ.
                    saver.save(sess, './zhetian_model/', global_step=epoch)
        except KeyboardInterrupt:
            print('[INFO] Interrupt manually, try saving checkpoint for now ..')
            saver.save(sess, os.path.join(FLAGS.checkpoints_dir, FLAGS.model_prefix), global_step=epoch)
            print('[INFO] Last epoch were saved, next time will start from epoch {}.'.format(epoch))
def to_word(predict, vocabs):
    """Sample one word from an (unnormalised) probability vector.

    Draws an index proportional to the weights in *predict* via inverse-CDF
    sampling and returns the corresponding entry of *vocabs*.

    Parameters
    ----------
    predict : array-like of float
        Per-word weights; need not sum to 1.
    vocabs : sequence
        Vocabulary aligned with *predict*.
    """
    t = np.cumsum(predict)
    s = np.sum(predict)
    sample = int(np.searchsorted(t, np.random.rand(1) * s))
    # BUG FIX: ``np.searchsorted`` can return ``len(t)`` (== len(vocabs));
    # the original off-by-one check used ``>`` instead of ``>=`` and let
    # that value through, causing an IndexError.  Clamp to the last index.
    if sample >= len(vocabs):
        sample = len(vocabs) - 1
    return vocabs[sample]
def gen_poem(begin_word):
    """Generate one passage, optionally seeded with *begin_word*.

    Restores trained parameters, then repeatedly samples the next character
    from the model (feeding the LSTM state back in) until the end token
    ``E`` is produced.

    Parameters
    ----------
    begin_word : str
        First character of the passage; falsy values let the model pick.

    Returns
    -------
    str
        The generated text (without start/end tokens).
    """
    batch_size = 1
    print('[INFO] loading corpus from %s' % FLAGS.file_path)
    poems_vector, word_int_map, vocabularies = process_poems(FLAGS.file_path)
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    end_points = rnn_model(model='lstm', input_data=input_data, output_data=None, vocab_size=len(vocabularies), run_size=128, num_layers=2, batch_size=64, learning_rate=FLAGS.learning_rate)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        checkpoint = tf.train.latest_checkpoint('./zhetian_model/')
        # NOTE(review): ``checkpoint`` is computed but a fixed snapshot
        # ('-48') is restored instead — confirm the hard-coding is wanted.
        saver.restore(sess, './zhetian_model/-48')
        # Begin decoding from the start token 'G'.
        x = np.array([list(map(word_int_map.get, start_token))])
        [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']], feed_dict={input_data: x})
        if begin_word:
            word = begin_word
        else:
            word = to_word(predict, vocabularies)
        poem = ''
        while word != end_token:
            print('running')
            poem += word
            x = np.zeros((1, 1))
            x[0, 0] = word_int_map[word]
            # Feed the previous LSTM state back in so generation is stateful.
            [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                             feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
    return poem
def pretty_print_poem(poem):
    """Print the generated text one sentence per line.

    The text is split on the Chinese full stop + space separator; only
    non-empty sentences longer than 10 characters are shown.
    """
    for sentence in poem.split('。 '):
        if sentence and len(sentence) > 10:
            print(sentence)
def main(is_train):
    """Entry point: train the model or generate text interactively.

    Parameters
    ----------
    is_train : truthy
        Truthy to run training, falsy to prompt for a seed character and
        print a generated passage.
    """
    print('zhetian.main:', is_train)
    if is_train:
        print('[INFO] train zhetian fiction...')
        run_training()
    else:
        print('[INFO] write zhetian fiction...')
        begin_word = input('输入起始字:')
        poem2 = gen_poem(begin_word)
        pretty_print_poem(poem2)


if __name__ == '__main__':
    # NOTE(review): ``tf.app.run()`` invokes ``main(argv)`` with the argv
    # list, which is always truthy here, so direct execution always trains —
    # confirm that is the intended behaviour.
    tf.app.run()
128245 | import cv2
# reading the image
# Read the source image (Windows-style relative path).
image = cv2.imread(filename=r'.\img\Resized.jpg')
# NOTE(review): cv2.imread returns None when the file is missing — the
# putText calls below would then raise; confirm the path exists at runtime.
cv2.putText(image,"Bhanu",org=(100,300),fontFace=cv2.FONT_ITALIC,fontScale=4,color=(3,5,23),thickness=2,lineType=3)
cv2.putText(image,"Deepak",org=(00,100),fontFace=cv2.FONT_ITALIC,fontScale=4,color=(3,5,23),thickness=2,lineType=3)
# displaying image
# window name
# image matrix - image variable
cv2.imshow('My pic', image)
# Save the annotated copy next to the source.
cv2.imwrite(r'.\img\text.jpg',image)
# Block until a key is pressed, then tear all windows down.
cv2.waitKeyEx(0)
cv2.destroyAllWindows()
117147 | '''Copyright 2017, Deepak
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from datetime import date, datetime
from functools import partial
import re
class DateDeux(date):
    """``datetime.date`` subclass with convenience helpers.

    Adds ISO parsing, month/year boundaries, friendly day/month names, a
    small date-format mini-language, calendar generators and integer +/-
    arithmetic.
    """

    def pydate(self):
        """Return a plain ``datetime.date`` copy of this value."""
        return date(self.year, self.month, self.day)

    @classmethod
    def fromisodate(self, isodatestr):
        """Parse ``'YYYY-MM-DD'``; return None when the input is malformed."""
        try:
            return DateDeux(*(map(int, isodatestr.split('-'))))
        except (ValueError, TypeError, AttributeError):
            # Narrowed from a bare ``except``: still returns None on bad
            # input but no longer swallows unrelated failures.
            return None

    @classmethod
    def frompydate(self, pydate_object):
        """Build a DateDeux from any object with year/month/day attributes."""
        return DateDeux(pydate_object.year, pydate_object.month, pydate_object.day)

    def monthstart(self):
        """First day of this date's month."""
        return DateDeux(self.year, self.month, 1)

    def monthend(self):
        """Last day of this date's month.

        BUG FIX: the original built ``DateDeux(year, month + 1, 1)``, which
        raised ``ValueError`` for December (month 13); December is now
        handled explicitly.
        """
        if self.month == 12:
            return DateDeux(self.year, 12, 31)
        return DateDeux.fromordinal(DateDeux(self.year, self.month + 1, 1).toordinal() - 1)

    def yearend(self):
        """December 31st of this date's year."""
        return DateDeux(self.year, 12, 31)

    def yearstart(self):
        """January 1st of this date's year."""
        return DateDeux(self.year, 1, 1)

    def dayname(self):
        """Full weekday name, e.g. ``'Monday'`` (weekday() is Monday-based)."""
        return ['Monday', 'Tuesday', 'Wednesday',
                'Thursday', 'Friday', 'Saturday', 'Sunday'][self.weekday()]

    def dayname_short(self):
        """Three-letter weekday name, e.g. ``'Mon'``."""
        return self.dayname()[:3]

    def monthname(self):
        """Full month name; index 0 is a placeholder so month numbers map 1:1."""
        return ['', 'January', 'February', 'March', 'April', 'May', 'June',
                'July', 'August', 'September', 'October',
                'November', 'December'][self.month]

    def monthname_short(self):
        """Three-letter month name, e.g. ``'Jan'``."""
        return self.monthname()[:3]

    def dateformat(self, format):
        """Format using a simple pattern language.

        The pattern must contain one run each of ``y``, ``m`` and ``d``
        characters, e.g. ``'dd-mmm-yyyy'``.  Exactly three ``m`` characters
        render the short month name; any other run renders zero-padded
        digits, truncated from the left when the run is shorter than the
        number (so ``'yy'`` gives the two last year digits).
        """
        def _format_as_int(number, length):
            if length < len(str(number)):
                # Keep only the least-significant digits.
                return str(number)[-length:]
            format = "%%0%dd" % length
            return format % number

        def _format_month(*args):
            return self.monthname_short()

        def _re_match(matchstring, regex):
            return re.findall(regex, matchstring)[0]

        matches = list(map(partial(_re_match, format), ['y+', 'm+', 'd+']))
        result = format[:]
        result = result.replace(matches[0], _format_as_int(self.year, len(matches[0])))
        result = result.replace(matches[2], _format_as_int(self.day, len(matches[2])))
        _month_func = _format_month if len(matches[1]) == 3 else _format_as_int
        result = result.replace(matches[1], _month_func(self.month, len(matches[1])))
        return result

    def yearcalendar(self):
        """Generator over every date of this date's year."""
        _start = DateDeux(self.year, 1, 1)
        _end = DateDeux(self.year, 12, 31)
        diff = _end.toordinal() - _start.toordinal() + 1
        return (_start + x for x in range(0, diff))

    def monthcalendar(self):
        """Generator over every date of this date's month."""
        _start = self.monthstart()
        _end = self.monthend()
        diff = _end.toordinal() - _start.toordinal() + 1
        return (_start + x for x in range(0, diff))

    def __add__(self, numdays):
        """date + int -> date shifted forward by ``numdays`` days."""
        return DateDeux.fromordinal(self.toordinal() + numdays)

    def __sub__(self, numdays):
        """date - date -> int day difference; date - int -> shifted date."""
        try:
            return self.toordinal() - numdays.toordinal()
        except AttributeError:
            return DateDeux.fromordinal(self.toordinal() - numdays)

    def __iter__(self):
        """Iterate as ``(year, month, day)`` so ``tuple(d)`` works."""
        return iter((self.year, self.month, self.day))
| StarcoderdataPython |
3267937 | <filename>backend/src/baserow/config/settings/test.py
from .base import *  # noqa: F403, F401

# Test-only overrides of the user-file storage settings.
USER_FILES_DIRECTORY = 'user_files'
USER_THUMBNAILS_DIRECTORY = 'thumbnails'
# A single tiny thumbnail size keeps image processing cheap during tests.
USER_THUMBNAILS = {'tiny': [21, 21]}
1768045 | <gh_stars>0
from django import forms
from django.utils.translation import gettext as _
class ProfileForm(forms.Form):
    """User profile form: contact details plus an optional password change.

    The three password fields are all optional; filling in any one of them
    triggers full password-change validation in :meth:`clean`.
    """

    email = forms.EmailField(label=_("Email address"), max_length=150)
    first_name = forms.CharField(label=_("First name"), max_length=150, required=False)
    last_name = forms.CharField(label=_("Last name"), max_length=150, required=False)
    birth_date = forms.DateField(
        label=_("Date of birth"), required=False, widget=forms.DateInput()
    )
    old_password = forms.CharField(
        label=_("Current password"),
        max_length=128,
        required=False,
        widget=forms.PasswordInput(),
    )
    new_password_1 = forms.CharField(
        label=_("New password"),
        max_length=128,
        required=False,
        widget=forms.PasswordInput(),
    )
    new_password_2 = forms.CharField(
        label=_("Repeat password"),
        max_length=128,
        required=False,
        widget=forms.PasswordInput(),
    )

    def __init__(self, *args, **kwargs):
        """Pop the mandatory ``user`` kwarg and pre-fill the profile fields."""
        self.user = kwargs.pop("user")
        super().__init__(*args, **kwargs)
        if self.user:
            self.initial.update(
                {
                    "email": self.user.email,
                    "first_name": self.user.first_name,
                    "last_name": self.user.last_name,
                    "birth_date": self.user.birth_date,
                }
            )

    def clean(self):
        """Validate the optional password change as an all-or-nothing group."""
        cleaned_data = super().clean()
        old_pw = cleaned_data.get("old_password")
        new_pw_1 = cleaned_data.get("new_password_1")
        new_pw_2 = cleaned_data.get("new_password_2")
        # Any filled password field means the user attempted a change, so
        # all three fields must then be present and consistent.
        if any([old_pw, new_pw_1, new_pw_2]):
            if not old_pw:
                self.add_error("old_password", _("The current password is required"))
            elif not self.user.check_password(old_pw):
                self.add_error("old_password", _("Wrong password"))

            if not new_pw_1 or not new_pw_2:
                if not new_pw_1:
                    self.add_error("new_password_1", _("Type in the new password"))
                if not new_pw_2:
                    self.add_error("new_password_2", _("Verify your new password"))
            elif new_pw_1 != new_pw_2:
                # Attach the mismatch error to both fields for display.
                self.add_error(
                    "new_password_1", _("The passwords typed in are not equal")
                )
                self.add_error(
                    "new_password_2", _("The passwords typed in are not equal")
                )
| StarcoderdataPython |
48662 | # Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
def parse_requirements():
    """Read ``requirements.txt`` from the current working directory.

    Returns
    -------
    list of str
        One entry per line; a trailing newline yields a final empty string,
        matching the original behaviour.
    """
    # ``with`` guarantees the handle is closed even if reading fails; the
    # original leaked the file handle on error.
    with open('requirements.txt', 'r') as fap:
        raw_req = fap.read()
    return raw_req.split('\n')
# Package metadata for the warlock distribution; runtime dependencies are
# taken verbatim from requirements.txt via parse_requirements() above.
setuptools.setup(
    name='warlock',
    version='1.3.0',
    description='Python object model built on JSON schema and JSON patch.',
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/bcwaldon/warlock',
    packages=['warlock'],
    install_requires=parse_requirements(),
)
| StarcoderdataPython |
1744918 | <reponame>MonwarAdeeb/HackerRank-Solutions<gh_stars>0
# Enter your code here. Read input from STDIN. Print output to STDOUT
import math
def bi_dist(x, n, p):
    """Binomial pmf: probability of exactly *x* successes in *n* trials.

    Parameters
    ----------
    x : int
        Number of successes.
    n : int
        Number of independent trials.
    p : float
        Success probability of a single trial.
    """
    n_choose_x = math.factorial(n) / (math.factorial(x) * math.factorial(n - x))
    success_part = p ** x
    failure_part = (1 - p) ** (n - x)
    return n_choose_x * success_part * failure_part
# Success probability from the odds ratio 1.09 : 1 (p = 1.09 / 2.09);
# accumulate the upper tail P(X >= 3) for X ~ Binomial(6, p).
b, p, n = 0, 1.09/2.09, 6
for i in range(3,7):
    b += bi_dist(i, n, p)
# HackerRank expects the answer rounded to 3 decimal places.
print("%.3f" %b)
117227 | <reponame>c1c4/five-best_restaurants<filename>setup.py
from setuptools import setup, find_packages
NAME = 'best-matched-restaurants'
VERSION = '1.0.0'
REQUIRES = ['flask']
setup(
name=NAME,
version=VERSION,
description=NAME,
author_email='<EMAIL>',
url='',
keywords=['Swagger', NAME],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['swagger/swagger.yaml']},
include_package_data=True,
entry_points={
'console_scripts': ['swagger_server=swagger_server.__main__:main']
},
long_description="""
Serviço complementar ao MS de Juridico que é responsavel pelos processos de recuperação judicial ou falência
"""
)
| StarcoderdataPython |
1723677 | import pandas as pd
import numpy as np
def cr_uplift(control, variant, round=None):
    """
    Compute the absolute difference and relative uplift between two rates.

    Parameters
    ----------
    control : float
        Proportion in the control group (must be non-zero).
    variant : float
        Proportion in the variant group.
    round : int, optional
        When an int, results are rounded to this number of decimals;
        otherwise no rounding is applied.  (Parameter name kept for
        backward compatibility even though it shadows the builtin.)

    Returns
    -------
    dict
        ``'diff'``: signed difference ``variant - control``;
        ``'upli'``: relative uplift in percent.
    """
    abs_change = variant - control
    # Typo fix: local was previously named ``pct_chnage``.
    pct_change = 100 * abs_change / control
    # ``type(...) is int`` (not isinstance) preserved deliberately: a bool
    # ``round`` falls through to the unrounded branch, as before.
    if type(round) is int:
        return {
            "diff": np.round(abs_change, decimals=round),
            "upli": np.round(pct_change, decimals=round)
        }
    else:
        return {
            "diff": abs_change,
            "upli": pct_change
        }
def generate_experiment(seed=42, N=10000, control_cr=None, variant_cr=None):
    """
    Simulate a single A/B experiment and summarise it per group.

    Parameters
    ----------
    seed : int
        Seed for the random-number generator.
    N : int
        Number of observations per group.
    control_cr : float
        Success probability for the control group, in (0, 1).
    variant_cr : float
        Success probability for the variant group, in (0, 1).

    Returns
    -------
    pd.DataFrame
        Rows ``Control``/``Variant`` with columns ``Converted``,
        ``Visited`` and ``CR_pct``.
    """
    np.random.seed(seed)
    # Draw the two groups in a fixed order so results match for a given seed.
    outcomes = {}
    for group, rate in (("Control", control_cr), ("Variant", variant_cr)):
        outcomes[group] = np.random.choice([0, 1], p=[1 - rate, rate], size=N)
    summary = pd.DataFrame(
        {
            "Converted": [outcomes["Control"].sum(), outcomes["Variant"].sum()],
            "Visited": [N, N],
        },
        index=["Control", "Variant"],
    )
    summary["CR_pct"] = 100 * summary.Converted / summary.Visited
    return summary
def manual_z_score(data):
    """Two-proportion pooled z statistic for a Control/Variant summary frame.

    *data* must have the two-row layout produced by ``generate_experiment``
    (rows Control, Variant; columns Converted, Visited).
    """
    rate_control, rate_variant = data.Converted / data.Visited
    n_control, n_variant = data.Visited
    # Pooled conversion rate across both groups.
    pooled = (n_control * rate_control + n_variant * rate_variant) / (n_control + n_variant)
    pooled_var = pooled * (1 - pooled)
    std_err = np.sqrt(pooled_var / n_control + pooled_var / n_variant)
    return (rate_control - rate_variant) / std_err
| StarcoderdataPython |
46720 | from .example_model import get_chain_model,get_trailer_model
from .trailer_printer import trailer_print,draw_rectangular_obstacle,draw_rectangular_obstacle_around_center | StarcoderdataPython |
1781829 | import time
from torch.utils.data import DataLoader
from dxtorchutils.utils.metrics import accuracy
import numpy as np
from dxtorchutils.utils.utils import state_logger
from dxtorchutils.utils.info_logger import Logger
import random
import torch
class TrainVessel:
def __init__(
self,
dataloader: DataLoader,
model: torch.nn.Module,
model_paras_path: str = None,
epochs: int = 20,
opt: torch.optim = None,
criteria: torch.nn.Module = None,
eval_num: int = 100,
):
"""
训练器
:param dataloader: 传入的需要训练的dataloader
:param model: 需要训练的模型
:param model_paras_path: 训练好的参数的地址,默认为空即重新开始训练
:param opt: 优化器,默认用SGD
:param criteria: 损失函数,默认用交叉熵
:param epochs: 训练循环次数,默认20
:param eval_num: 每五个epoch,随机取eval_num个来测试已训练的模型,这是选取数量,默认10个
:return:
"""
self.model = model
self.is_gpu = False
self.is_multi_gpu = False
self.is_tensorboard = False
self.dataloader = dataloader
self.epochs = epochs
self.eval_num = eval_num
self.eval_metric_func = accuracy
self.eval_metric_name = "accuracy"
self.logger = None
self.model_save_path = None
self.time_start = None
self.iteration = None
self.loss_all = None
self.eval_res_all = None
self.log_every_epoch = 1
if model_paras_path is not None:
self.model.load_state_dict(torch.load(model_paras_path))
if opt is None:
self.opt = torch.optim.SGD(model.parameters(), 5e-4, 0.9)
if criteria is None:
self.criteria = torch.nn.CrossEntropyLoss()
def train(self):
state_logger("Model and Dataset Loaded, Start to Train!")
if self.logger is None and self.is_tensorboard is True:
self.logger = Logger("logger/{}-{}".format(self.model.__class__.__name__.lower(), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
self.time_start = time.time()
if self.is_gpu:
self.model = self.model.cuda()
self.model.train()
self.iteration = 0
for epoch in range(self.epochs):
for data, targets in self.dataloader:
self.train_mini_batch(data, targets)
if (epoch + 1) % self.log_every_epoch == 0 or epoch == 0:
self.model.eval()
with torch.no_grad():
self.loss_all = []
self.eval_res_all = []
rand = random.randint(0, len(self.dataloader.dataset) - self.eval_num - 1)
for index in range(rand, rand + self.eval_num):
data, target = self.dataloader.dataset.__getitem__(index)
if self.is_gpu:
data = data.cuda()
target = target.cuda()
data = torch.unsqueeze(data, 0)
target = torch.unsqueeze(target, 0)
self.eval_through_training(data, target)
eval_res = np.mean(np.array(self.eval_res_all))
loss = np.mean(np.array(self.loss_all))
print("Epoch: {:04}/{:04} | Loss: {:.5} | {}: {:.5}"
.format(epoch + 1, self.epochs, loss, self.eval_metric_name, eval_res))
if self.is_tensorboard:
self.logger.log_metric(eval_res, self.eval_metric_name, loss, epoch)
if self.model_save_path is not None:
if self.is_multi_gpu:
torch.save(self.model.module.state_dict(), self.model_save_path)
else:
torch.save(self.model.state_dict(), self.model_save_path)
else:
if self.is_multi_gpu:
torch.save(self.model.module.state_dict(), self.model.__class__.__name__.lower() + ".pth")
else:
torch.save(self.model.state_dict(), self.model.__class__.__name__.lower() + ".pth")
self.model.train()
torch.cuda.empty_cache()
if self.is_tensorboard:
input_data, _ = next(iter(self.dataloader))
if self.is_gpu:
input_data = input_data.cuda()
self.logger.add_graph(self.model, input_data)
self.logger.close()
state_logger("Training Completed!")
def train_mini_batch(self, data, targets):
self.opt.zero_grad()
if self.is_gpu:
data = data.cuda()
targets = targets.cuda()
output = self.model(data)
if isinstance(output, tuple):
loss = self.criteria(output[0], targets)
for i in range(1, len(output)):
loss += 0.3 * self.criteria(output[i], targets)
elif isinstance(output, list):
loss = self.criteria(output[0], targets)
for i in range(1, len(output)):
loss += self.criteria(output[i], targets)
else:
loss = self.criteria(output, targets)
loss.backward()
self.opt.step()
time_end = time.time()
if self.is_tensorboard:
self.logger.log_training(loss, self.opt.defaults["lr"], time_end - self.time_start, self.iteration)
self.iteration += 1
def eval_through_training(self, data, targets):
output = self.model(data)
loss = self.criteria(output, targets)
if self.is_gpu:
prediction = np.reshape(torch.max(output, 1)[1].cpu().data.numpy(), -1)
targets = np.reshape(targets.cpu().data.numpy(), -1)
loss_num = loss.cpu()
else:
prediction = np.reshape(torch.max(output, 1)[1].data.numpy(), -1)
targets = np.reshape(targets.data.numpy(), -1)
loss_num = loss
self.eval_res_all.append(self.eval_metric_func(targets, prediction))
self.loss_all.append(loss_num)
def replace_eval_metric(self, metric_name, metric_func):
self.eval_metric_name = metric_name
self.eval_metric_func = metric_func
def gpu(self):
self.is_gpu = True
def cpu(self):
self.is_gpu = False
def load_model_para(self, model_paras_path: str):
self.model.load_state_dict(torch.load(model_paras_path))
    def enable_tensorboard(self, path=None):
        """Enable TensorBoard logging; create a Logger at *path* when given.

        NOTE(review): with path=None the logger is presumably created
        elsewhere (see tensorboard_log_model) -- confirm before relying on it.
        """
        self.is_tensorboard = True
        if path is not None:
            self.logger = Logger(path)
    def save_model_to(self, path):
        """Set the file path used when saving model parameters during training."""
        self.model_save_path = path
    def multi_gpu(self, device_ids):
        """Enable DataParallel training across *device_ids* (implies GPU mode)."""
        self.is_multi_gpu = True
        self.is_gpu = True
        # Wrapped model exposes real weights via self.model.module (see save paths above).
        self.model = torch.nn.DataParallel(self.model, device_ids=device_ids)
def tensorboard_log_model(self):
if self.logger is None:
self.logger = Logger("logger/{}-{}".format(self.model.__class__.__name__, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
if self.is_gpu:
self.model = self.model.cuda()
input_data, _ = next(iter(self.dataloader))
if self.is_gpu:
input_data = input_data.cuda()
self.logger.add_graph(self.model, input_data)
self.logger.close()
| StarcoderdataPython |
66851 | <reponame>cmu-catalyst/FlexFlow<gh_stars>0
from flexflow.core import *
from flexflow.keras.datasets import cifar10
from flexflow.torch.model import PyTorchModel
import os
import numpy as np
#from accuracy import ModelAccuracy
from PIL import Image
def top_level_task():
    """Train a RegNetX-32GF (loaded from a serialized FF graph) on CIFAR-10.

    Builds the FlexFlow model from "regnetX32gf.ff", resizes the CIFAR-10
    images to 229x229, trains for the configured number of epochs and
    prints elapsed time and throughput.
    """
    ffconfig = FFConfig()
    alexnetconfig = NetConfig()
    print(alexnetconfig.dataset_path)
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    # Batch-major NCHW float input tensor.
    input_tensor = ffmodel.create_tensor([ffconfig.batch_size, 3, 229, 229], DataType.DT_FLOAT)
    output_tensors = PyTorchModel.file_to_ff("regnetX32gf.ff", ffmodel, [input_tensor])
    ffmodel.softmax(output_tensors[0])

    ffmodel.optimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
    label = ffmodel.label_tensor

    # Load CIFAR-10 and upscale each 32x32 CHW image to 229x229.
    num_samples = 10000
    (x_train, y_train), (_x_test, _y_test) = cifar10.load_data(num_samples)
    full_input_np = np.zeros((num_samples, 3, 229, 229), dtype=np.float32)
    for idx in range(num_samples):
        hwc = x_train[idx, :, :, :].transpose(1, 2, 0)
        resized = Image.fromarray(hwc).resize((229, 229), Image.NEAREST)
        full_input_np[idx, :, :, :] = np.array(resized, dtype=np.float32).transpose(2, 0, 1)
    full_input_np /= 255

    full_label_np = y_train.astype('int32')

    dataloader_input = ffmodel.create_data_loader(input_tensor, full_input_np)
    dataloader_label = ffmodel.create_data_loader(label, full_label_np)
    num_samples = dataloader_input.num_samples
    assert dataloader_input.num_samples == dataloader_label.num_samples

    ffmodel.init_layers()

    epochs = ffconfig.epochs
    ts_start = ffconfig.get_current_time()
    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time))
# perf_metrics = ffmodel.get_perf_metrics()
# accuracy = perf_metrics.get_accuracy()
# if accuracy < ModelAccuracy.CIFAR10_ALEXNET.value:
# assert 0, 'Check Accuracy'
# Script entry point: announce the benchmark, then launch the FlexFlow task.
if __name__ == "__main__":
    print("regnetX32gf torch")
    top_level_task()
| StarcoderdataPython |
3200589 |
PATH_ROOT='C:/Users/<NAME>/Desktop/ICoDSA 2020/SENN/'
print('==================== Importing Packages ====================')
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import pandas as pd
import re
import json
import math
import string
import numpy as np
from bs4 import BeautifulSoup
import gensim
import contractions
import inflect
import string
import stanfordnlp
stanfordnlp.download('en')
import nltk
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
from nltk import pos_tag
from nltk.corpus import wordnet as wn
from nltk.tokenize import wordpunct_tokenize,TweetTokenizer
from nltk import word_tokenize, pos_tag, ne_chunk
from nltk.stem.porter import *
from nltk.stem import WordNetLemmatizer
from scipy import sparse as sp
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.impute import KNNImputer
from textblob import TextBlob
from sklearn.preprocessing import StandardScaler,MinMaxScaler
#------------------------------------------------------------------------------------------
print("==================== Importing Supporting Files ====================")
#RF N-Grams Dictionary
rf_1_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_1_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_2_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_2_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_3_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_3_gram_df.csv', engine='python',index_col='Unnamed: 0')
rf_4_gram_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'rf_4_gram_df.csv', engine='python',index_col='Unnamed: 0')
#PMI Dictionary
PMI_df = pd.read_csv(PATH_ROOT+'Supporting_Files/'+'PMI_df.csv', engine='python',index_col='Unnamed: 0')
#Abbreviation & Slang Dict
abb_df = pd.read_table(PATH_ROOT+'Supporting_Files/'+'emnlp_dict.txt', sep='\s+', names=('Abbreviation', 'Normal'))
abb=pd.Series(abb_df['Normal'])
abb.index=abb_df['Abbreviation']
abb_dict=dict(abb)
#AFINN Sentiment Lexicon
AFINN_df = pd.read_table(PATH_ROOT+'Supporting_Files/'+'AFINN/'+'AFINN-111.txt', names=('Word', 'Sentiment'))
AFINN=pd.Series(AFINN_df['Sentiment'])
AFINN=((AFINN-AFINN.min())/(AFINN.max()-AFINN.min()))*(1-(-1))+(-1) #Rescaling in [-1,1]
AFINN.index=AFINN_df['Word']
AFINN_dict=dict(AFINN)
#Bing-Liu Sentiment Lexicon
pos = pd.read_table(PATH_ROOT+'Supporting_Files/'+'Bing-Liu-opinion-lexicon-English/'+'positive-words.txt',names='P')
neg=pd.read_table(PATH_ROOT+'Supporting_Files/'+'Bing-Liu-opinion-lexicon-English/'+'negative-words.txt',names='N',encoding='latin-1')
BingLiu_dict={'pos':pos['P'].tolist(),'neg':neg['N'].tolist()}
#General Enquirer Sentiment Lexicon
General_Inquirer_df=pd.read_csv(PATH_ROOT+'Supporting_Files/'+'General Inquirer Lexicon/'+'inquirerbasic.csv',index_col='Entry')
General_Inquirer_df=General_Inquirer_df[['Positiv','Negativ']]
General_Inquirer_dict={'pos':General_Inquirer_df[pd.isnull(General_Inquirer_df['Positiv'])==False]['Positiv'].index.tolist(),
'neg':General_Inquirer_df[pd.isnull(General_Inquirer_df['Negativ'])==False]['Positiv'].index.tolist()}
#NRC Hashtag Sentiment Lexicon
hs=pd.read_table(PATH_ROOT+'Supporting_Files/'+'NRC-Sentiment-Emotion-Lexicons/AutomaticallyGeneratedLexicons/NRC-Hashtag-Sentiment-Lexicon-v1.0/'+'HS-unigrams.txt',names=('Hashtag','PMI(w, pos) -PMI(w, neg)','n_pos','n_neg'),encoding='latin-1')
hs=hs[pd.isnull(hs.Hashtag)==False]
hs['PMI(w, pos) -PMI(w, neg)']=((hs['PMI(w, pos) -PMI(w, neg)']-hs['PMI(w, pos) -PMI(w, neg)'].min())/(hs['PMI(w, pos) -PMI(w, neg)'].max()-hs['PMI(w, pos) -PMI(w, neg)'].min()))*(1-(-1))+(-1) #Rescaling in [-1,1]
nrc=hs['PMI(w, pos) -PMI(w, neg)']
nrc.index=hs['Hashtag']
NRC_hashtag_dict=dict(nrc)
#Sentiwordnet Sentiment Lexicon
sentiwordnet=pd.read_table(PATH_ROOT+'Supporting_Files/'+'SentiWordNet/'+'SentiWordNet_3.0.0.txt',names=('POS','ID','PosScore','NegScore','SynsetTerms','Gloss'),encoding='latin-1')
sentiwordnet=sentiwordnet[pd.isnull(sentiwordnet.POS)==False]
sentiwordnet['score']=sentiwordnet['PosScore']-sentiwordnet['NegScore']
#------------------------------------------------------------------------------------------
print("==================== Importing Data ====================")
df_stocktwits_full_BA = pd.read_csv(PATH_ROOT+'Dataset/'+'df_stocktwits_full_BA.csv', engine='python')
df_stocktwits_full_BA=df_stocktwits_full_BA[~pd.isnull(df_stocktwits_full_BA.id)]
#------------------------------------------------------------------------------------------
def tokenize(sentence):
    """Split *sentence* into tokens.

    Uses nltk's gap-based regexp tokenizer: boundaries are whitespace or
    one of . , ; followed by a non-digit, so decimal numbers like "3.5"
    are kept as single tokens.
    """
    boundary = r"\s|[\.,;]\D"
    return nltk.regexp_tokenize(sentence, pattern=boundary, gaps=True)
def clean_data(concat_df):
    """Clean the raw 'text' column of *concat_df* into a 'clean_text' column.

    Per message: replace URLs with '_url', strip tickers/mentions/retweet
    markers/emoji, expand slang and contractions, normalise elongated words
    and ordinal numbers, then drop rows whose cleaned text is 3 characters
    or shorter.  Returns the cleaned dataframe with the index reset.
    """
    print("==================== Cleaning Data ====================")
    #Inspired by:
    # https://www.dotnetperls.com/punctuation-python
    # https://github.com/tthustla/twitter_sentiment_analysis_part1/blob/master/Capstone_part2.ipynb
    # https://github.com/Deffro/text-preprocessing-techniques/blob/master/techniques.py
    #Word ordinal encoding: maps 'first' -> '1st', ..., up to 1999
    p = inflect.engine()
    word_to_number_mapping = {}
    for i in range(1, 2000):
        word_form = p.number_to_words(i) # 1 -> 'one'
        ordinal_word = p.ordinal(word_form) # 'one' -> 'first'
        ordinal_number = p.ordinal(i) # 1 -> '1st'
        word_to_number_mapping[ordinal_word] = ordinal_number # 'first': '1st'
    def elongated_word(word):
        """
        Replaces an elongated word with its basic form, unless the word exists in the lexicon
        """
        repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
        repl = r'\1\2\3'
        if (len(word)>2 and word[0] != '$'):#if not Stock Market symbol
            if wn.synsets(word):
                return word
            # Collapse one repeated letter and recurse until stable.
            repl_word = repeat_regexp.sub(repl, word)
            if repl_word != word:
                return elongated_word(repl_word)
            else:
                return repl_word
        else:
            return word
    def isfloat(value):
        '''
        Check if value is float or not
        '''
        try:
            float(value)
            return True
        except ValueError:
            return False
    def deEmojify(inputString):
        '''
        Remove Emoji
        '''
        return inputString.encode('ascii', 'ignore').decode('ascii')
    def sentences_cleaner(sentence):
        '''
        clean input sentence
        '''
        try:
            mention_pat= r'@[A-Za-z0-9_]+'
            mention_2_pat=r'@[A-Za-z0-9_]+:\s'
            retweet_pat=r'^RT +'
            dollars_pat=r'\$ +'
            http_pat = r'https?://[^ ]+'
            www_pat = r'www.[^ ]+'
            apos_pat=r'"+|"$|"+"$'
            ticker_pat=r'\$[A-Za]+ '
            #Transform any url into '_url'
            sentence = re.sub(http_pat, '_url', sentence)
            sentence = re.sub(www_pat, '_url', sentence)
            #Delete Ticker
            sentence = re.sub(ticker_pat,"", sentence)
            #HTML decoding remove BOM
            soup = BeautifulSoup(sentence, 'lxml')
            souped = soup.get_text()
            try:
                bom_removed = souped.decode("utf-8-sig").replace(u"\ufffd", "?")
            except:
                # souped is already str (Python 3) -- decode raises, keep as-is
                bom_removed = souped
            #Delete Emoji
            stripped=deEmojify(bom_removed)
            #Delete mention
            stripped = re.sub(mention_2_pat,"", stripped)
            stripped = re.sub(mention_pat,"", stripped)
            #Delete retweet
            stripped=re.sub(retweet_pat,"",stripped)
            #Transform abbreviation & slang word into normal words based on abb_dict corpus
            abbreviation_handled=' '.join(pd.Series(stripped.split()).apply(lambda x: abb_dict[x] if x in abb_dict.keys() else x).to_list())
            #Transform contracted words into normal words
            contraction_handled =contractions.fix(abbreviation_handled)
            #Join the stock symbol
            dollars_handled=re.sub(dollars_pat,'$',contraction_handled)
            #Transform elongated words into normal words
            elongated_handled=' '.join(pd.Series(dollars_handled.split()).apply(lambda x: elongated_word(x[:-1])+x[-1] if (x[-1] in string.punctuation and not isfloat(x)) else elongated_word(x) if not isfloat(x) else x))
            #Transform ordinal number
            ordinal_handled=' '.join(pd.Series(elongated_handled.split()).apply(lambda x: word_to_number_mapping[x.lower()] if x.lower() in word_to_number_mapping.keys() else x))
            #Remove unnecesary apostrophes
            apos_handled=re.sub(apos_pat,'',ordinal_handled)
            #Split Last Word Punctuation
            wordpunct=wordpunct_tokenize(apos_handled)
            if (len(wordpunct[-1])>1 and wordpunct[-1][-1] in string.punctuation and wordpunct[-2] not in string.punctuation) or (wordpunct[-1] in string.punctuation and wordpunct[-2] not in string.punctuation):
                words =tokenize(apos_handled)
                words[-1]=wordpunct[-2]
                words.append(wordpunct[-1])
            else:
                words =tokenize(apos_handled)
            return (" ".join(words)).strip()
        except:
            # Best-effort cleaning: any failure returns the sentence unchanged.
            return sentence
    concat_df['clean_text']=concat_df['text'].apply(lambda x: sentences_cleaner(x))
    #Remove rows with len(clean_text) <= 3
    concat_df['text_length']=concat_df['clean_text'].apply(lambda x: len(x))
    concat_df=concat_df[concat_df.text_length>3]
    concat_df=concat_df.reset_index(drop=True)
    concat_df=concat_df.drop(columns=['text_length'])
    return(concat_df)
#dictionary that contains pos tags and their explanations
# 'CC': 'coordinating conjunction','CD': 'cardinal digit','DT': 'determiner',
# 'EX': 'existential there (like: \"there is\" ... think of it like \"there exists\")',
# 'FW': 'foreign word','IN': 'preposition/subordinating conjunction','
# JJ': 'adjective \'big\'','JJR': 'adjective, comparative \'bigger\'',
# 'JJS': 'adjective, superlative \'biggest\'', 'LS': 'list marker 1)', 'MD': 'modal could, will',
# 'NN': 'noun, singular \'desk\'', 'NNS': 'noun plural \'desks\'',
#'NNP': 'proper noun, singular \'Harrison\'','NNPS': 'proper noun, plural \'Americans\'',
# 'PDT': 'predeterminer \'all the kids\'','POS': 'possessive ending parent\'s',
# 'PRP': 'personal pronoun I, he, she','PRP$': 'possessive pronoun my, his, hers',
# 'RB': 'adverb very, silently,', 'RBR': 'adverb, comparative better',
# 'RBS': 'adverb, superlative best','RP': 'particle give up', 'TO': 'to go \'to\' the store.',
# 'UH': 'interjection errrrrrrrm','VB': 'verb, base form take','VBD': 'verb, past tense took',
# 'VBG': 'verb, gerund/present participle taking','VBN': 'verb, past participle taken',
# 'VBP': 'verb, sing. present, non-3d take','VBZ': 'verb, 3rd person sing. present takes',
# 'WDT': 'wh-determiner which','WP': 'wh-pronoun who, what','WP$': 'possessive wh-pronoun whose',
# 'WRB': 'wh-abverb where, when','QF' : 'quantifier, bahut, thoda, kam (Hindi)',
# 'VM' : 'main verb','PSP' : 'postposition, common in indian langs','DEM' : 'demonstrative, common in indian langs'
#Extract Parts of Speech as BOW
def extract_pos(doc):
    """Return a bag-of-words count of verb POS tags in a parsed *doc*.

    *doc* is expected to expose .sentences -> .words -> .pos (stanfordnlp
    Document shape -- TODO confirm).  Tags outside the verb set are ignored;
    on any error the counts accumulated so far are returned.
    """
    counts = {'VB': 0, 'VBD': 0, 'VBG': 0, 'VBN': 0, 'VBP': 0, 'VBZ': 0, 'VM': 0}
    try:
        for sentence in doc.sentences:
            for word in sentence.words:
                if word.pos in counts:
                    counts[word.pos] += 1
    except:
        pass
    return counts
def n_grams_handled(sentence):
    """Normalise *sentence* before n-gram extraction.

    Drops tokens shorter than two characters, removes hashtags, cashtags
    and digit-containing tokens, then tokenises and strips bare
    punctuation.  On any error the input is returned unchanged.
    """
    try:
        tokenizer = TweetTokenizer()
        cashtag_pat = r'\$[^\s]+'
        hashtag_pat = r'#([^\s]+)'
        word_number_pat = r'\w*\d\w*'
        # Drop tokens shorter than two characters.
        no_short = ' '.join(w for w in sentence.split() if len(w) >= 2)
        # Strip hashtags, then cashtags, then digit-containing tokens.
        no_hash = re.sub(hashtag_pat, "", no_short)
        no_cash = re.sub(cashtag_pat, "", no_hash)
        no_nums = re.sub(word_number_pat, "", no_cash)
        # Tokenise, drop bare punctuation, collapse whitespace.
        kept = [t for t in tokenizer.tokenize(no_nums) if t not in string.punctuation]
        return (" ".join(kept)).strip()
    except:
        # Best-effort: fall back to the raw sentence (preserves original behaviour).
        return sentence
def rf_ngram(dict_source,df,gram):
    '''
    Average relevance-frequency (rf) n-gram weight per message.

    dict_source: DataFrame indexed by n-gram with per-class counts
                 ('pos'/'neg' columns).
    df: DataFrame with a 'clean_text' column.
    gram: n-gram order, 1..4.
    Returns a Series with the mean rf weight over the message's matched
    n-grams (NaN when nothing matches or the text is not a string).

    Fix: pd.concat was called with a positional axis argument
    (pd.concat([...], 1)), which was removed in pandas 2.0 -- now axis=1.
    '''
    def sentence_sparse(sentence,gram,rf_ngram,sparse_rf_ngram):
        # Build a 1 x vocab sparse row accumulating the rf weight of every
        # n-gram of *sentence* found in the dictionary.
        #Initiate Linked List Sparse Matrix
        zero_sparse=sp.lil_matrix( (1,len(rf_ngram)), dtype=float)
        #Assign Value of rf_ngram to each word in sentence
        splitted_text=tokenize(n_grams_handled(sentence))
        #Unigram
        if gram==1:
            for word in splitted_text:
                if word in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(word)]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(word)]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #Bigram
        elif gram==2:
            bigram=lambda x: splitted_text[x]+' '+splitted_text[x+1]
            it_2_gram=range(len(splitted_text)-1)
            for i in it_2_gram:
                if bigram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(bigram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(bigram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #Trigram
        elif gram==3:
            trigram=lambda x: splitted_text[x]+' '+splitted_text[x+1]+' '+splitted_text[x+2]
            it_3_gram=range(len(splitted_text)-2)
            for i in it_3_gram:
                if trigram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(trigram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(trigram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        #4grams
        elif gram==4:
            fourgram=lambda x: splitted_text[x]+' '+splitted_text[x+1]+' '+splitted_text[x+2]+' '+splitted_text[x+3]
            it_4_gram=range(len(splitted_text)-3)
            for i in it_4_gram:
                if fourgram(i) in rf_ngram.index:
                    zero_sparse[0,rf_ngram.index.get_loc(fourgram(i))]+=sparse_rf_ngram[0,rf_ngram.index.get_loc(fourgram(i))]
            #Convert LinkedList Sparse Matrix into CSR Sparse Matrix
            sparse=zero_sparse.tocsr()
        return(sparse)
    BOW_df= dict_source
    #Calculate rf weight for each n-gram: max(log(2 + pos/max(1,neg)), log(2 + neg/max(1,pos)))
    series_1=pd.Series([1 for x in range(len(BOW_df))])
    series_1.index=BOW_df.index
    series_2=pd.Series([2 for x in range(len(BOW_df))])
    series_2.index=BOW_df.index
    frac_1=np.log(series_2+(BOW_df['pos']/pd.concat([series_1,BOW_df['neg']],axis=1).max(axis=1)))
    frac_2=np.log(series_2+(BOW_df['neg']/pd.concat([series_1,BOW_df['pos']],axis=1).max(axis=1)))
    rf_ngram_series= pd.concat([frac_1,frac_2],axis=1).max(axis=1)
    sparse_rf_ngram=sp.csr_matrix(rf_ngram_series)
    def rf_ngram_calculate(x):
        # Mean of the non-zero rf weights matched in the message (NaN if none).
        lst=[i for i in sentence_sparse(x,gram,rf_ngram_series,sparse_rf_ngram).toarray()[0].tolist() if i!=0]
        if type(x)!=str:
            return(np.nan)
        else:
            if len(lst)>0:
                return(np.mean(lst))
            else:
                return(np.nan)
    rf_ngram_avg_list=df['clean_text'].apply(lambda x: rf_ngram_calculate(x))
    return(rf_ngram_avg_list)
def PMI(dict_source,df):
    '''
    create PMI variable

    dict_source: DataFrame indexed by token with 'pos'/'neutral'/'neg'
    count columns.  Returns a Series with the mean per-token PMI
    polarity (PMI_pos - PMI_neg) of each message in df['clean_text'],
    NaN when no token matches or the text is not a string.
    '''
    BOW_df= dict_source
    N=len(BOW_df) #Number of unique tokens in the corpus
    pos_N=len(BOW_df[BOW_df.pos!=0]) #Number of unique positive tokens in the corpus
    neg_N=len(BOW_df[BOW_df.neg!=0]) #Number of unique negative tokens in the corpus
    total=BOW_df.sum().sum() #Number of tokens in the corpus
    pos_total=BOW_df.sum()['pos'] #Number of tokens in the positive corpus
    neg_total=BOW_df.sum()['neg'] #Number of tokens in the negative corpus
    PMI_df=pd.DataFrame(columns=['freq_word','freq_word_pos','freq_word_neg'])
    PMI_df['freq_word']=pd.Series(BOW_df.index).apply(lambda x: (BOW_df.loc[x,'pos']+BOW_df.loc[x,'neutral']+BOW_df.loc[x,'neg'])/total)
    PMI_df['freq_word_pos']=pd.Series(BOW_df.index).apply(lambda x: BOW_df.loc[x,'pos']/pos_total) #Freq of word w in positive text
    PMI_df['freq_word_neg']=pd.Series(BOW_df.index).apply(lambda x: BOW_df.loc[x,'neg']/neg_total) #Freq of word w in negative text
    PMI_df.index=BOW_df.index
    #Calculate PMI for each word (the +1 inside log2 avoids log(0))
    PMI_df['PMI_pos']=np.log2(1+((PMI_df['freq_word_pos']*N)/(PMI_df['freq_word']*pos_N)))
    PMI_df['PMI_neg']=np.log2(1+((PMI_df['freq_word_neg']*N)/(PMI_df['freq_word']*neg_N)))
    PMI_df['PMI']=PMI_df['PMI_pos']-PMI_df['PMI_neg']
    def PMI_calculate(x):
        # Mean PMI over the message's tokens found in the dictionary.
        lst=[PMI_df.loc[i,'PMI'] for i in tokenize(n_grams_handled(x)) if i in PMI_df.index]
        if type(x)!=str:
            return(np.nan)
        else:
            if len(lst)>0:
                return(np.mean(lst))
            else:
                return(np.nan)
    PMI_avg_list=df['clean_text'].apply(lambda x: PMI_calculate(x))
    return(PMI_avg_list)
def countAllCaps(text):
    """Count runs of two or more consecutive uppercase letters in *text*."""
    return sum(1 for _ in re.finditer("[A-Z]{2,}", text))
def countHashtag(text):
    """Count hashtags ('#' followed by non-whitespace) in *text*."""
    return sum(1 for _ in re.finditer(r'#([^\s]+)', text))
def is_ordinal_numbers(sentences):
    """Return 1 if *sentences* contains an ordinal-number token ('1st',
    '22nd', '3rd', '4th', ...), else 0.

    Fix: the original called isfloat(), which in this file is only defined
    *inside* clean_data() and is not visible at module scope (NameError at
    runtime unless it is redefined later in the file) -- a local copy of
    the helper is used instead.
    """
    def _isfloat(value):
        # Local duplicate of clean_data's nested isfloat helper.
        try:
            float(value)
            return True
        except ValueError:
            return False
    for word in tokenize(sentences):
        if word[-2:] in ('st', 'nd', 'rd', 'th') and _isfloat(word[:-2]):
            return 1
    return 0
def countMultiExclamationMarks(sentences):
    """Count runs of two or more consecutive '!' in *sentences*."""
    return sum(1 for _ in re.finditer(r"(\!)\1+", sentences))
def countMultiQuestionMarks(sentences):
    """Count runs of two or more consecutive '?' in *sentences*."""
    return sum(1 for _ in re.finditer(r"(\?)\1+", sentences))
def sentence_synset(sentence):
    '''
    return the wordnet synset of each word in the sentence

    Each token is POS-tagged, lemmatised for that POS, and the first
    WordNet synset of the lemma is collected; tokens with no WordNet
    POS mapping or no synset are skipped.
    '''
    def penn_to_wn(tag):
        # Map a Penn Treebank tag prefix to the matching WordNet POS constant.
        if tag.startswith('J'):
            return wn.ADJ
        elif tag.startswith('N'):
            return wn.NOUN
        elif tag.startswith('R'):
            return wn.ADV
        elif tag.startswith('V'):
            return wn.VERB
        return None
    tagged = pos_tag(tokenize(sentence))
    synsets_list = []
    lemmatzr = WordNetLemmatizer()
    for token in tagged:
        wn_tag = penn_to_wn(token[1])
        if not wn_tag:
            continue
        lemma = lemmatzr.lemmatize(token[0], pos=wn_tag)
        try:
            # First (most common) synset only; IndexError when the lemma
            # has no synset for this POS -- silently skipped.
            synsets_list.append(wn.synsets(lemma, pos=wn_tag)[0])
        except:
            None
    return synsets_list
def min_multiple_list(S):
    '''
    Minimum pooling: element-wise minimum across all entries of S.

    Fix: the original looped over range(len(S)-1) comparing against S[i],
    which compared S[0] with itself and never looked at the last element,
    so the final entry was silently excluded from the pooling.
    '''
    minim = S[0]
    for item in S[1:]:
        minim = np.minimum(minim, item)
    return(minim)
def max_multiple_list(S):
    '''
    Maximum pooling: element-wise maximum across all entries of S.

    Fix: the original looped over range(len(S)-1) comparing against S[i],
    which compared S[0] with itself and never looked at the last element,
    so the final entry was silently excluded from the pooling.
    '''
    maxim = S[0]
    for item in S[1:]:
        maxim = np.maximum(maxim, item)
    return(maxim)
def rescaling(df,columns,scale_type='Standard'):
    '''
    Function for Feature Scaling.

    Scales the listed *columns* of *df* ('standard' z-score or 'minmax'
    to [0, 1]) and returns a frame with the scaled columns re-attached;
    all other columns are untouched.

    Fixes: df.drop(columns, 1) used the positional axis argument removed
    in pandas 2.0; an unknown scale_type previously fell through to an
    UnboundLocalError on `scaler` -- now raises ValueError explicitly.
    '''
    scale_type=scale_type.lower()
    scaled_X=df.drop(columns=columns)
    X=df[columns]
    if scale_type=='minmax':
        scaler=MinMaxScaler(feature_range=(0,1))
    elif scale_type=='standard':
        scaler=StandardScaler()
    else:
        raise ValueError("scale_type must be 'standard' or 'minmax', got %r" % scale_type)
    scaled_column=scaler.fit_transform(X)
    scaled_column=pd.DataFrame(scaled_column,columns=columns)
    for column in columns:
        scaled_X[column]=scaled_column[column].tolist()
    return(scaled_X)
def feature_engineering_split(df):
    """Add token-level features to *df* (expects a 'clean_text' column).

    Builds: lemmatised/stemmed 'base_text', POS-tag counts, binary flags
    for numeric patterns (+num, -num%, dates, fractions, ...), option
    keywords (call/put next to signed percentages), bull/bear flags,
    punctuation/caps/hashtag counts, and sentiment scores from the AFINN,
    Bing-Liu, General Inquirer, NRC Hashtag and SentiWordNet lexicons.
    """
    print("==================== Feature Engineering by Splitting ====================")
    #List of POS-Tag
    #pos_key = ['CC', 'CD','DT','EX','FW','JJ','JJR','JJS','LS','MD', 'NN','NNS','NNP','NNPS','PDT'
    #          ,'POS','PRP','PRP$','RB', 'RBR','RBS','RP','TO','UH','VB','VBD','VBG','VBN','VBP',
    #          'VBZ','VM','WDT','WP','WP$','WRB','QF','PSP','DEM']
    # NOTE(review): '<KEY>' looks like a redaction/anonymisation artifact --
    # extract_pos() only returns verb-tag keys, so 'POS_<KEY>' below would
    # raise KeyError at runtime; confirm the intended tag list (see comment above).
    pos_key = ['<KEY>']
    #Initiate pipeline for POS-Tagging
    nlp = stanfordnlp.Pipeline(processors = "tokenize,pos")
    #Inititate class for Lemmatization
    lemmatizer = WordNetLemmatizer()
    #Initiate class for Stemming
    stemmer = PorterStemmer()
    #Lemmatization+Stemming (cashtags starting with '$' are kept verbatim)
    df['base_text']=df['clean_text'].apply(lambda x: ' '.join(pd.Series(tokenize(x)).apply(lambda wrd: stemmer.stem(lemmatizer.lemmatize(wrd)) if wrd[0]!='$' else wrd).to_list()) if type(x)==str else np.nan)
    print('Done Base Text')
    #Create POS-Tag features
    for tag in pos_key:
        df['POS_'+tag]=df['clean_text'].apply(lambda x: extract_pos(nlp(x))[tag] if type(x)==str else np.nan)
    print('Done POS Tag')
    #Binary Feature '+num'
    df["'+num"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\+\d+\s|\+\d+[!,.;:?/]|\+\d+$',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature '-num'
    df["'-num"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\-\d+\s|\-\d+[!,.;:?/]|\-\d+$',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature 'num%'
    df["num%"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r' \d.\d*%+|^\d.\d*%+|[!,.;:?/]\d.\d*%+| \d*%+|^\d*%+|[!,.;:?/]\d*%+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature '+num%'
    df["'+num%"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\+\d.\d*%+|\+\d*%+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature '-num%'
    df["'-num%'"]=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\-\d.\d*%+|\-\d*%+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Features '$num'
    df['$num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\$\d*%+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature mixed number and word
    df['word_num']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and len(re.findall(r'\w*\d\w*',x))>0) else 0 if (type(x)==str) else np.nan)
    #Binary Feature ordinal number
    # NOTE(review): is_ordinal_numbers() calls isfloat, which is not visible at
    # module scope in this portion of the file -- confirm it is defined later,
    # otherwise this raises NameError at runtime.
    df['ordinal_num']=df['clean_text'].apply(lambda x: is_ordinal_numbers(x) if type(x)==str else np.nan)
    #Binary Feature 'num-num'
    df['num-num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature 'num-num%'
    df['num-num%']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d%+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature 'num-num-num'
    df['num-num-num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*-\d-\d+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature 'num/num'
    df['num/num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*/\d+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature 'num/num/num'
    df['num/num/num']=df['clean_text'].apply(lambda x: 1 if ((type(x)==str) and len(re.findall(r'\d*/\d/\d+',x))>0) else 0 if type(x)==str else np.nan)
    #Binary Feature only numbers(no symbol and characters)
    # NOTE(review): also depends on isfloat -- same scope concern as above.
    df['only_number']=df['clean_text'].apply(lambda x:1 if (type(x)==str and any(isfloat(wrd) for wrd in tokenize(x))) else 0 if type(x)==str else np.nan)
    print('Done Keyword+num')
    # Index positions of '+num%' / '-num%' tokens, used to look at the preceding word.
    f_plus=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if len(re.findall(r'\+\d.\d*%+|\+\d*%+',wrd))>0 else 0)
    g_plus=lambda y: f_plus(y)[f_plus(y)==1].index.tolist()
    f_min=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if len(re.findall(r'\-\d.\d*%+|\-\d*%+',wrd))>0 else 0)
    g_min=lambda y: f_min(y)[f_min(y)==1].index.tolist()
    #Binary Feature 'call'(or 'calls' or 'called') before '+num%'
    df['call_+num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='call' or tokenize(z)[i-1]=='calls' or tokenize(z)[i-1]=='called') for i in g_plus(z))) else 0 if type(z)==str else np.nan)
    #Binary Feature 'call'(or 'calls' or 'called') before '-num%'
    df['call_-num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='call' or tokenize(z)[i-1]=='calls' or tokenize(z)[i-1]=='called') for i in g_min(z))) else 0 if type(z)==str else np.nan)
    #Binary Feature 'put'(or 'puts') before '+num%'
    df['put_+num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='put' or tokenize(z)[i-1]=='puts') for i in g_plus(z))) else 0 if type(z)==str else np.nan)
    #Binary Feature 'put'(or 'puts') before '-num%'
    df['put_-num%']=df['clean_text'].apply(lambda z: 1 if (type(z)==str and any((tokenize(z)[i-1]=='put' or tokenize(z)[i-1]=='puts') for i in g_min(z))) else 0 if type(z)==str else np.nan)
    #Binary Feature 'Bull' or 'Bullish'
    # NOTE(review): ('bull' or 'bullish') evaluates to 'bull' only -- 'bullish'
    # is never checked; confirm whether both keywords were intended.
    df['bull']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and ('bull' or 'bullish') in x.split()) else 0 if type(x)==str else np.nan)
    #Binary Feature 'Bear' or 'Bearish'
    # NOTE(review): same short-circuit issue -- only 'bear' is checked.
    df['bear']=df['clean_text'].apply(lambda x: 1 if (type(x)==str and ('bear' or 'bearish') in x.split()) else 0 if type(x)==str else np.nan)
    print('Done Specific Keyword')
    tk=TweetTokenizer()
    #Calculate the number of '!'
    df['number_of_!']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('!') if type(x)==str else np.nan)
    #Calculate the number of '?'
    df['number_of_?']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('?') if type(x)==str else np.nan)
    #Calculate the number of '$'
    df['number_of_$']=df['clean_text'].apply(lambda x: tk.tokenize(x).count('$') if type(x)==str else np.nan)
    #Calculate the number of continuous '!'
    df['continous_!']=df['clean_text'].apply(lambda x: countMultiExclamationMarks(x) if type(x)==str else np.nan)
    #Calculate the number of continuous '?'
    df['continous_?']=df['clean_text'].apply(lambda x: countMultiQuestionMarks(x) if type(x)==str else np.nan)
    print('Done Punctation Count')
    #Calculate the number of Caps word (cashtags excluded before counting)
    df['caps_word']=df['clean_text'].apply(lambda x: countAllCaps(' '.join([i for i in x.split() if i[0]!='$'])) if type(x)==str else np.nan)
    print('Done Caps words')
    #Calculate the number of Hashtags
    df['hashtags']=df['clean_text'].apply(lambda x: countHashtag(x) if type(x)==str else np.nan)
    print('Done Hashtags')
    #AFINN Sentiment Lexicon: per-token scores in [-1,1] after earlier rescaling
    affin_sent_score=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: AFINN_dict[wrd.lower()] if wrd.lower() in AFINN_dict.keys() else 0)
    affin_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if (wrd.lower() in AFINN_dict.keys() and AFINN_dict[wrd.lower()]>0) else
                                                            -1 if (wrd.lower() in AFINN_dict.keys() and AFINN_dict[wrd.lower()]<0) else 0)
    #Sum Score
    df['AFINN_sum_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).sum() if type(x)==str else np.nan)
    #Max Score
    df['AFINN_max_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).max() if type(x)==str else np.nan)
    #Min Score
    df['AFINN_min_score']=df['clean_text'].apply(lambda x: affin_sent_score(x).min() if type(x)==str else np.nan)
    #Ratio of Positive Words
    df['AFINN_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in affin_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
    #Ratio of Negatiive Words
    df['AFINN_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in affin_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
    print('Done AFIIN Lexicon')
    #BingLiu Sentiment Lexicon
    bingliu_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if wrd.lower() in BingLiu_dict['pos'] else -1 if wrd.lower() in BingLiu_dict['neg'] else 0)
    #Ratio of Positive Words
    df['BingLiu_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in bingliu_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
    #Ratio of Negative Words
    df['BingLiu_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in bingliu_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
    print('Done BingLiu Lexicon')
    #General Inquirer Sentiment Lexicon
    general_inquirer_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if wrd.lower() in General_Inquirer_dict['pos'] else -1 if wrd.lower() in General_Inquirer_dict['neg'] else 0)
    #Ratio of Positive Words
    df['General_Inquirer_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in general_inquirer_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
    #Ratio of Negative Words
    df['General_Inquirer_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in general_inquirer_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
    print('Done General Inquirer Lexicon')
    #NRC Hashtag Sentiment Lexicon
    nrc_hashtag_sent_score=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: NRC_hashtag_dict[wrd.lower()] if wrd.lower() in NRC_hashtag_dict.keys() else 0)
    nrc_hashtag_sent_binary=lambda x: pd.Series(tokenize(x)).apply(lambda wrd: 1 if (wrd.lower() in NRC_hashtag_dict.keys() and NRC_hashtag_dict[wrd.lower()]>0) else
                                                            -1 if (wrd.lower() in NRC_hashtag_dict.keys() and NRC_hashtag_dict[wrd.lower()]<0) else 0)
    #Sum Score
    df['NRC_Hashtag_sum_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).sum() if type(x)==str else np.nan)
    #Max Score
    df['NRC_Hashtag_max_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).max() if type(x)==str else np.nan)
    #Min Score
    df['NRC_Hashtag_min_score']=df['clean_text'].apply(lambda x: nrc_hashtag_sent_score(x).min() if type(x)==str else np.nan)
    #Ratio of Positive Words
    df['NRC_Hashtag_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in nrc_hashtag_sent_binary(x) if i==1)/len(tokenize(x)) if type(x)==str else np.nan)
    #Ratio of Negatiive Words
    df['NRC_Hashtag_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in nrc_hashtag_sent_binary(x) if i==-1)/len(tokenize(x)) if type(x)==str else np.nan)
    print('Done NRC Hashtag Sentiment Lexicon')
    #SentiWordNet Sentiment Lexicon: map tokens to first synsets, then look up
    # each synset's offset in the SentiWordNet table.
    sentiwordnet_list=sentiwordnet.ID.tolist()
    sent_to_synset=lambda x: pd.Series(sentence_synset(x))
    synset_to_offset=lambda x: int(str(x.offset()).zfill(8))
    get_value=lambda x: sentiwordnet[sentiwordnet.ID==synset_to_offset(x)]['score'].values[0]
    score_offset_check=lambda x: get_value(x) if (synset_to_offset(x) in sentiwordnet_list) else 0
    binary_offset_check=lambda x: 1 if (synset_to_offset(x) in sentiwordnet_list and get_value(x)>0) else -1 if (synset_to_offset(x) in sentiwordnet_list and get_value(x)<0) else 0
    sentiwordnet_score=lambda sent: sent_to_synset(sent).apply(lambda z: score_offset_check(z))
    sentiwordnet_binary=lambda sent: sent_to_synset(sent).apply(lambda z: binary_offset_check(z))
    #Sum Score
    df['SentiWordNet_sum_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).sum() if type(x)==str else np.nan)
    #Max Score
    df['SentiWordNet_max_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).max() if type(x)==str else np.nan)
    #Min Score
    df['SentiWordNet_min_score']=df['clean_text'].apply(lambda x: sentiwordnet_score(x).min() if type(x)==str else np.nan)
    #Ratio of Positive Words
    df['SentiWordNet_pos_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in sentiwordnet_binary(x) if i==1)/len(sent_to_synset(x)) if (type(x)==str and len(sent_to_synset(x))>0) else 0 if type(x)==str else np.nan)
    #Ratio of Negatiive Words
    df['SentiWordNet_neg_ratio']=df['clean_text'].apply(lambda x: sum(1 for i in sentiwordnet_binary(x) if i==-1)/len(sent_to_synset(x)) if (type(x)==str and len(sent_to_synset(x))>0) else 0 if type(x)==str else np.nan)
    print('Done SentiWordNet Lexicon')
    return(df)
def feature_engineering(df):
    '''Second-pass feature engineering on the cleaned StockTwits frame.

    Adds, per n-gram size 1..4, the average non-zero TF-IDF weight and the
    average relative-frequency (RF) score, plus a PMI score, then imputes
    missing values in the engineered columns with a KNN imputer.

    Args:
        df: DataFrame containing at least the columns 'id', 'created_at',
            'body', 'clean_text' and 'base_text'.

    Returns:
        The same DataFrame with the engineered columns added.
    '''
    print("==================== Feature Engineering ====================")
    # Treat NaN entries as empty documents for the vectorizer.
    # (Hoisted out of the loop: it does not depend on the n-gram size.)
    nan_checker = lambda x: x if type(x) == str else ''
    # Module-level RF reference tables keyed by n-gram size; replaces the
    # original if/elif chain.
    rf_gram_dfs = {1: rf_1_gram_df, 2: rf_2_gram_df,
                   3: rf_3_gram_df, 4: rf_4_gram_df}
    for grams in [1, 2, 3, 4]:
        # TF-IDF restricted to exactly this n-gram size, unnormalized.
        # NOTE: the original also built a CountVectorizer here that was
        # never used; it has been removed.
        tfidf_vectorizer = TfidfVectorizer(norm=None, ngram_range=(grams, grams))
        docs = df['clean_text'].apply(lambda x: n_grams_handled(x))
        tfidf_matrix = tfidf_vectorizer.fit_transform(
            docs.apply(lambda x: nan_checker(x)).to_list())
        # Average of the non-zero TF-IDF weights per document.
        df['Avg_TFIDF_' + str(grams) + '-grams'] = [
            np.mean([x for x in tfidf_matrix[i].toarray()[0].tolist() if x != 0])
            for i in range(len(df))
        ]
        # Average relative-frequency score for this n-gram size.
        df['Avg_rf_' + str(grams) + '-grams'] = rf_ngram(rf_gram_dfs[grams], df, grams)
    print('Done n-grams')
    # Pointwise mutual information score.
    df['PMI_score'] = PMI(PMI_df, df)
    # Impute missing values over the numeric feature columns only.
    imputer = KNNImputer(n_neighbors=3)
    non_feature_cols = ['id', 'created_at', 'body', 'clean_text', 'base_text']
    df_imputed = pd.DataFrame(imputer.fit_transform(df.drop(columns=non_feature_cols)))
    df_imputed.columns = df.drop(columns=non_feature_cols).columns
    # Copy back only the columns that may contain imputed values.
    for col in ['Avg_rf_1-grams', 'Avg_rf_2-grams', 'Avg_rf_3-grams',
                'Avg_rf_4-grams', 'PMI_score']:
        df[col] = df_imputed[col]
    return df
def parallelize_dataframe(df, func, n_split):
    '''
    Apply ``func`` to ``n_split`` roughly equal slices of ``df`` and
    concatenate the results.

    NOTE(review): despite the name this runs sequentially; it appears to
    exist to bound the peak memory used by ``func`` by processing the
    frame in chunks - confirm.

    Args:
        df: the DataFrame to process.
        func: a callable taking a DataFrame and returning a DataFrame.
        n_split: number of chunks to split ``df`` into.

    Returns:
        A single DataFrame of the processed chunks with a fresh 0..n-1
        index.
    '''
    # Copy each slice so func never mutates a view of the original frame.
    # (The original code copied every slice *except* the first one.)
    # A single concat also avoids the quadratic repeated-concat loop.
    processed = [func(chunk.copy()) for chunk in np.array_split(df, n_split)]
    return pd.concat(processed, ignore_index=True)
def main():
    '''Full pipeline: clean the raw StockTwits frame, run both
    feature-engineering passes in chunks, and persist the result to CSV.'''
    clean_df=clean_data(df_stocktwits_full_BA)
    # NOTE(review): the chunk counts (2, then 3) look hand-tuned for memory
    # use on this dataset - confirm before changing.
    draft_engineered_df=parallelize_dataframe(clean_df, feature_engineering_split,n_split=2)
    engineered_df=parallelize_dataframe(draft_engineered_df, feature_engineering,n_split=3)
    engineered_df.to_csv(PATH_ROOT+'Dataset/'+'df_stocktwits_prepared.csv')
if __name__ == '__main__':
    # Removed a stray dataset artifact that was fused onto this line.
    main()
128902 | <reponame>chengweilun/api_client
# Copyright 2015 Fortinet, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import eventlet
try:
import httplib
except ImportError:
import http.client as httplib
import socket
from oslo_log import log as logging
from oslo_serialization import jsonutils
from ._i18n import _, _LI, _LW
from . import constants as const
from . import request
LOG = logging.getLogger(__name__)
DEFAULT_USER_AGENT = const.DEFAULT_HTTP_HEADERS['User-Agent']
DEFAULT_HTTP_TIMEOUT = const.DEFAULT_HTTP_TIMEOUT
# Cap retries at 2.
# NOTE(review): the condition tests request.DEFAULT_RETRIES but the kept
# value is const.DEFAULT_RETRIES - mixing the two modules looks
# unintentional; confirm which constant is authoritative.
DEFAULT_RETRIES = const.DEFAULT_RETRIES if request.DEFAULT_RETRIES < 2 else 2
DEFAULT_REDIRECTS = const.DEFAULT_REDIRECTS
class EventletApiRequest(request.ApiRequest):
    '''Eventlet-based ApiRequest class.
    This class will form the basis for eventlet-based ApiRequest classes
    '''
    # Maximum number of green threads present in the system at one time.
    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE
    # Pool of green threads. One green thread is allocated per incoming
    # request. Incoming requests will block when the pool is empty.
    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)
    # A unique id is assigned to each incoming request. When the current
    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID
    # The request id for the next incoming request.
    CURRENT_REQUEST_ID = 0
    def __init__(self, client_obj, url, method="GET", body=None,
                 headers=None,
                 retries=DEFAULT_RETRIES,
                 auto_login=True,
                 redirects=DEFAULT_REDIRECTS,
                 http_timeout=DEFAULT_HTTP_TIMEOUT, client_conn=None,
                 singlethread=False):
        '''Constructor.

        :param client_obj: API client that issues this request.
        :param url: request path.
        :param method: HTTP verb, defaults to "GET".
        :param body: optional request body; falsy bodies are stored as None.
        :param headers: optional dict of HTTP headers; a default User-Agent
            is added when none is supplied.
        :param retries: number of attempts made in _handle_request.
        :param auto_login: stored for use by the base class.
        :param redirects: maximum redirects to follow.
        :param http_timeout: per-attempt timeout; the overall request
            timeout is http_timeout * retries.
        :param client_conn: optional pre-established connection.
        :param singlethread: when True, run synchronously in the calling
            thread instead of spawning a green thread.
        '''
        self._api_client = client_obj
        self._url = url
        self._method = method
        self._body = body if body else None
        self._headers = headers or {}
        # Overall deadline spans all retry attempts.
        self._request_timeout = http_timeout * retries
        self._retries = retries
        self._auto_login = auto_login
        self._redirects = redirects
        self._http_timeout = http_timeout
        self._client_conn = client_conn
        self._abort = False
        self._request_error = None
        self._singlethread = singlethread
        if "User-Agent" not in self._headers:
            self._headers["User-Agent"] = DEFAULT_USER_AGENT
        self._green_thread = None
        # Retrieve and store this instance's unique request id.
        self._request_id = EventletApiRequest.CURRENT_REQUEST_ID
        # Update the class variable that tracks request id.
        # Request IDs wrap around at MAXIMUM_REQUEST_ID
        next_request_id = self._request_id + 1
        next_request_id %= self.MAXIMUM_REQUEST_ID
        EventletApiRequest.CURRENT_REQUEST_ID = next_request_id
    @classmethod
    def _spawn(cls, func, *args, **kwargs):
        '''Allocate a green thread from the class pool.'''
        return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)
    def spawn(self, func, *args, **kwargs):
        '''Spawn a new green thread with the supplied function and args.'''
        return self.__class__._spawn(func, *args, **kwargs)
    def join(self):
        '''Wait for instance green thread to complete.

        In single-thread mode the request is executed inline instead.
        NOTE(review): when no green thread exists this *returns* an
        Exception instance rather than raising it - callers apparently
        inspect the return value; confirm before changing.
        '''
        if self._singlethread:
            return self._run()
        if self._green_thread is not None:
            return self._green_thread.wait()
        return Exception(_('Joining an invalid green thread'))
    def start(self):
        '''Start request processing.'''
        # In single-thread mode nothing is started here; the work happens
        # inline when join() is called.
        if not self._singlethread:
            self._green_thread = self.spawn(self._run)
    def _run(self):
        '''Method executed within green thread.'''
        if self._request_timeout:
            # No timeout exception escapes the with block.
            with eventlet.timeout.Timeout(self._request_timeout, False):
                return self._handle_request()
            # Reached only when the timeout fired (per eventlet's
            # Timeout(..., exception=False) semantics the block is exited
            # silently); otherwise the return above leaves the method.
            LOG.info(_LI('[%d] Request timeout.'), self._rid())
            self._request_error = Exception(_('Request timeout'))
            return None
        else:
            return self._handle_request()
    def _handle_request(self):
        '''First level request handling.

        Retries up to self._retries times, tolerating a limited number of
        BadStatusLine/socket errors and retrying on auth (400/401/403) and
        service-unavailable (503) statuses. Returns the HTTPResponse on
        success, else None with the failure kept in self._request_error.
        '''
        attempt = 0
        timeout = 0
        badstatus = 0
        response = None
        while response is None and attempt <= self._retries:
            # Back off before retrying; timeout is 0 on the first pass.
            eventlet.greenthread.sleep(timeout)
            attempt += 1
            req = None
            try:
                req = self._issue_request()
            except (httplib.BadStatusLine, socket.error) as e:
                # Transient transport errors do not consume an attempt,
                # up to DEFAULT_RETRIES occurrences.
                if badstatus <= DEFAULT_RETRIES:
                    badstatus += 1
                    attempt -= 1
                    msg = ("# request {method} {url} {body} error {e}"
                           ).format(method=self._method, url=self._url,
                                    body=self._body, e=e)
                    LOG.debug(msg)
                    continue
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                timeout = 0
                if attempt <= self._retries and not self._abort:
                    # currently there is a bug in fortios, it return 401 and
                    # 400 when a cookie is invalid, the change is to tolerant
                    # the bug to handle return 400 situation.
                    # when fortios fix the bug, here should use
                    # 'req.status in (401, 403)' instead
                    if req.status in (400, 401, 403):
                        continue
                    elif req.status == 503:
                        timeout = 0.5
                        continue
                    # else fall through to return the error code
                LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
                          ": %(status)s",
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': req.status})
                self._request_error = None
                response = req
            else:
                LOG.info(_LI('[%(rid)d] Error while handling request: '
                             '%(req)s'),
                         {'rid': self._rid(), 'req': req})
                self._request_error = req
                response = None
        return response
class LoginRequestEventlet(EventletApiRequest):
    '''Process a login request.

    Sends the client's rendered login message with HTTP basic-auth
    credentials and exposes the session cookie of a successful login.
    '''
    def __init__(self, client_obj, user, password, client_conn=None,
                 headers=None):
        if headers is None:
            headers = {}
        message = client_obj.render(client_obj.login_msg())
        body = message.get('body', None)
        # base64 encode "user:password" for HTTP basic auth.
        # Fixed: base64.encodestring() required bytes under Python 3 and
        # was removed entirely in Python 3.9; b64encode works on both
        # Python 2 and 3 and never inserts newlines, so no
        # replace('\n', '') is needed.
        auth = base64.b64encode(
            ('%s:%s' % (user, password)).encode('utf-8')).decode('ascii')
        headers.update({'Authorization': "Basic %s" % auth})
        super(LoginRequestEventlet, self).__init__(
            client_obj, message['path'], message['method'], body, headers,
            auto_login=True, client_conn=client_conn)
    def session_cookie(self):
        '''Return the Set-Cookie header of a successful login, else None.'''
        if self.successful():
            return self.value.getheader("Set-Cookie")
        return None
class GetApiProvidersRequestEventlet(EventletApiRequest):
    '''Get a list of API providers.'''
    def __init__(self, client_obj):
        # Provider information is served from the root path.
        url = "/"
        super(GetApiProvidersRequestEventlet, self).__init__(
            client_obj, url, "GET", auto_login=True)
    def api_providers(self):
        """Parse api_providers from response.
        Returns: api_providers in [(host, port, is_ssl), ...] format,
        or None when the request failed or the body could not be parsed.
        """
        def _provider_from_listen_addr(addr):
            # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
            parts = addr.split(':')
            return (parts[1], int(parts[2]), parts[0] == 'pssl')
        try:
            if self.successful():
                ret = []
                body = jsonutils.loads(self.value.body)
                # Collect the listen address of every node role that is an
                # API provider.
                for node in body.get('results', []):
                    for role in node.get('roles', []):
                        if role.get('role') == 'api_provider':
                            addr = role.get('listen_addr')
                            if addr:
                                ret.append(_provider_from_listen_addr(addr))
                return ret
        except Exception as e:
            # Malformed bodies are logged and reported as None rather than
            # propagated to the caller.
            LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
                        {'rid': self._rid(), 'e': e})
            # intentionally fall through
        return None
class GenericRequestEventlet(EventletApiRequest):
    '''Handle a generic request.'''
    def __init__(self, client_obj, method, url, body, content_type, user_agent,
                 auto_login=False,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT,
                 retries=request.DEFAULT_RETRIES,
                 redirects=request.DEFAULT_REDIRECTS, singlethread=False):
        # Content-Type and User-Agent are mandatory here; everything else
        # comes from the base-class defaults.
        headers = {'Content-Type': content_type, 'User-Agent': user_agent}
        super(GenericRequestEventlet, self).__init__(
            client_obj, url, method, body, headers,
            retries=retries,
            auto_login=auto_login, redirects=redirects,
            http_timeout=http_timeout, singlethread=singlethread)
    def session_cookie(self):
        '''Return the Set-Cookie header of a successful response, else None.'''
        if self.successful():
            return self.value.getheader("Set-Cookie")
        return None
class TokenRequestEventlet(EventletApiRequest):
    '''get Token Request'''
    def __init__(self, client_obj, client_id, client_secret, client_conn=None,
                 headers=None):
        # NOTE(review): the client_id/client_secret parameters are ignored;
        # the credentials are read from client_obj instead. Confirm whether
        # the parameters can be dropped or should be passed through.
        if headers is None:
            headers = {}
        message = client_obj.render(client_obj.login_msg(),
                                    client_id=client_obj.client_id,
                                    client_secret=client_obj.client_secret)
        super(TokenRequestEventlet, self).__init__(
            client_obj, message['path'], message['method'], message['body'],
            headers,auto_login=True, client_conn=client_conn)
# Register this implementation with the abstract ApiRequest base.
request.ApiRequest.register(EventletApiRequest)
1776732 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import data.data_collection
from data.data_collection import *
from data.test_constants import *
import data.utils
class DataCollectionTest(unittest.TestCase):
    """
    Class that tests the functions in the data_collection.py.
    All GitHub API helpers are mocked in setUp, so no network access
    happens during these tests.
    """
    def get_commit_info_side_effect(self, repo_name, commit_ref, auth):
        """
        Take a commit ref and return the mock commit info.
        Args:
            repo_name: dummy parameter for mock function.
            commit_ref: a str of commit ref.
            auth: dummy parameter for mock function
        Returns:
            A dict of mock commit info.
        """
        # Shape mirrors the GitHub commit API: {'sha': ..., 'files': [...]}.
        commit_info = {'sha': commit_ref, 'files': []}
        for file in COMMIT_INFO[commit_ref]:
            commit_info['files'].append({'filename': file})
        return commit_info
    def get_check_runs_side_effect(self, repo_name, commit_ref, auth):
        """
        Take a commit ref and return the mock check runs.
        Args:
            repo_name: dummy parameter for mock function.
            commit_ref: a str of commit ref.
            auth: dummy parameter for mock function.
        Returns:
            A dict of mock check runs.
        """
        # Commits with no recorded results yield an empty dict.
        if not CHECK_RUN_RESULTS[commit_ref]:
            return {}
        check_run_results = {}
        check_run_results['total_count'] = \
            CHECK_RUN_RESULTS[commit_ref]['total_count']
        check_run_results['check_runs'] = []
        for status in CHECK_RUN_RESULTS[commit_ref]['check_runs']:
            check_run_results['check_runs'].append({'conclusion': status})
        return check_run_results
    def setUp(self):
        """
        Set up the mock functions for testing DataCollector.
        """
        self.data_collector = DataCollector('mock',
                                            '2010-01-01T00:00:00Z',
                                            '2010-01-05T00:00:00Z')
        # Replace every GitHub API helper with canned data from
        # test_constants so DataCollector never hits the network.
        data.data_collection.get_pull_request_info = \
            mock.Mock(return_value=PULL_REQUEST_INFO)
        data.data_collection.get_pull_request_review_comments = \
            mock.Mock(return_value=PULL_REQUEST_REVIEW_COMMENTS)
        data.data_collection.get_pull_request_issue_comments = \
            mock.Mock(return_value=PULL_REQUEST_ISSUE_COMMENTS)
        data.data_collection.get_pull_request_reviews = \
            mock.Mock(return_value=PULL_REQUEST_REVIEWS)
        data.data_collection.get_commit_info = \
            mock.Mock(side_effect=self.get_commit_info_side_effect)
        data.data_collection.get_commit_check_runs = \
            mock.Mock(side_effect=self.get_check_runs_side_effect)
        data.data_collection.get_pull_request_files = \
            mock.Mock(return_value=PULL_REQUEST_FILES)
    def test_set_page(self):
        """
        Test the set_page() function on edge cases where the input page is less
        than one.
        """
        self.data_collector.set_page(1)
        with self.assertRaises(ValueError):
            self.data_collector.set_page(0)
        with self.assertRaises(ValueError):
            self.data_collector.set_page(-100)
    def test_get_pull_request_review_time(self):
        """
        Test the logic of computing the pull request review time.
        """
        created_time, closed_time, review_time = self.data_collector.\
            _get_pull_request_review_time(PULL_REQUEST_INFO2)
        self.assertEqual(created_time, '2018-09-16T00:20:58Z')
        self.assertEqual(closed_time, '2020-05-18T05:21:24Z')
        # Review time is the closed/created difference in epoch seconds.
        expected_review_time = to_timestamp('2020-05-18T05:21:24Z') \
            - to_timestamp('2018-09-16T00:20:58Z')
        self.assertEqual(review_time, expected_review_time)
    def test_get_reverted_pull_request_info(self):
        """
        Test the logic of computing reverted pull request number and
        the pull request revert time.
        """
        reverted_pull_request_number, pull_request_revert_time \
            = self.data_collector._get_reverted_pull_request_info(
                PULL_REQUEST_INFO3)
        expected_revert_time = to_timestamp('2020-05-18T05:21:24Z') \
            - to_timestamp('2018-09-16T00:20:58Z')
        self.assertEqual(reverted_pull_request_number, 1029)
        self.assertEqual(pull_request_revert_time, expected_revert_time)
    def test_get_reverted_pull_request_info_with_no_reverted_number(self):
        # When no reverted PR is referenced both values fall back to 0.
        reverted_pull_request_number, pull_request_revert_time \
            = self.data_collector._get_reverted_pull_request_info(
                PULL_REQUEST_INFO4)
        self.assertEqual(reverted_pull_request_number, 0)
        self.assertEqual(pull_request_revert_time, 0)
    def test_get_review_comments_body(self):
        """
        Test the logic of getting file review comments message.
        """
        results = self.data_collector._get_review_comments_body(10)
        self.assertEqual(results,
                         [('file1', 'This file looks good to me'),
                          ('file2', 'I wont approve this change')])
    def test_get_issue_comments_body(self):
        """
        Test the logic of getting the issue comments message.
        """
        results = self.data_collector._get_issue_comments_body(100)
        self.assertEqual(results,
                         ['This CL could break', 'Please add documentations'])
    def test_get_approved_reviewers(self):
        """
        Test the logic of getting the approved reviewers usernames from
        a list of reviewers.
        """
        results = self.data_collector._get_approved_reviewers(50)
        self.assertEqual(sorted(results), sorted(['', 'oitgnq']))
    def test_get_file_versions(self):
        """
        Test the logic of getting the number of file versions in a pull request.
        """
        results = self.data_collector._get_file_versions(COMMITS)
        self.assertEqual(results, {'file1': 2, 'file2': 2, 'file3': 1})
    def test_get_check_runs(self):
        """
        Test the logic of computing check run results from the raw check run
        history.
        """
        results = self.data_collector._get_check_run_results(COMMITS)
        self.assertEqual(results, ['failed', 'passed', 'none'])
    def test_get_file_changes(self):
        """
        Test the logic of computing the line of additions, deletions and
        changes of each file in the pull request.
        """
        results, total_changes = self.data_collector._get_file_changes(20)
        self.assertEqual(results,
                         [('file1', 10, 50, 60),
                          ('file2', 20, 10, 30),
                          ('file3', 100, 50, 150)])
        self.assertEqual(total_changes, 240)
if __name__ == '__main__':
    unittest.main()
# Removed a stray dataset artifact line that followed the guard.
3228076 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``is_blocked`` flag (default False) to FoiRequest."""
    # Must run after the migration that added the "reference" field.
    dependencies = [
        ("foirequest", "0003_foirequest_reference"),
    ]
    operations = [
        migrations.AddField(
            model_name="foirequest",
            name="is_blocked",
            field=models.BooleanField(default=False, verbose_name="Blocked"),
        ),
    ]
| StarcoderdataPython |
115673 | # Flask configurations
import os
import sys
# Make the sibling AI package importable regardless of the working directory.
sys.path.append(os.path.dirname(__file__)+'/AI')
# Quiet TensorFlow's C++ logging (level 3 suppresses all but fatal messages).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
| StarcoderdataPython |
1613959 | <reponame>upstart-swiss/demisto-sdk<filename>demisto_sdk/commands/common/tests/update_id_set_test.py
import json
import logging
import os
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import demisto_sdk.commands.common.update_id_set as uis
from demisto_sdk.commands.common.constants import (DEFAULT_JOB_FROM_VERSION,
JOBS_DIR, FileType)
from demisto_sdk.commands.common.legacy_git_tools import git_path
from demisto_sdk.commands.common.update_id_set import (
find_duplicates, get_classifier_data, get_dashboard_data,
get_fields_by_script_argument,
get_filters_and_transformers_from_complex_value,
get_filters_and_transformers_from_playbook, get_general_data,
get_generic_field_data, get_generic_module_data, get_generic_type_data,
get_incident_fields_by_playbook_input, get_incident_type_data,
get_indicator_type_data, get_layout_data, get_layoutscontainer_data,
get_mapper_data, get_pack_metadata_data, get_playbook_data,
get_report_data, get_script_data, get_values_for_keys_recursively,
get_widget_data, has_duplicate, merge_id_sets, process_general_items,
process_incident_fields, process_integration, process_jobs, process_script,
re_create_id_set, should_skip_item_by_mp)
from TestSuite.utils import IsEqualFunctions
TESTS_DIR = f'{git_path()}/demisto_sdk/tests'
class TestPacksMetadata:
    # Fixture metadata dicts, one per support level; only the 'support' and
    # 'author' fields differ between them.
    METADATA_WITH_XSOAR_SUPPORT = {
        'name': 'Pack1',
        'support': 'xsoar',
        'currentVersion': '1.0.0',
        'author': 'Cortex XSOAR',
        'tags': ['Alerts'],
        'useCases': ['Case Management'],
        'categories': ['Endpoint'],
        'marketplaces': ['xsoar', 'marketplacev2'],
    }
    METADATA_WITH_PARTNER_SUPPORT = {
        'name': 'Pack1',
        'support': 'partner',
        'currentVersion': '1.0.0',
        'author': '<NAME>',
        'tags': ['Alerts'],
        'useCases': ['Case Management'],
        'categories': ['Endpoint'],
        'marketplaces': ['xsoar', 'marketplacev2'],
    }
    METADATA_WITH_COMMUNITY_SUPPORT = {
        'name': 'Pack1',
        'support': 'community',
        'currentVersion': '1.0.0',
        'author': 'Someone',
        'tags': ['Alerts'],
        'useCases': ['Case Management'],
        'categories': ['Endpoint'],
        'marketplaces': ['xsoar', 'marketplacev2'],
    }
    # (metadata, expected author, expected certification) triples.
    # NOTE(review): the partner fixture's author was anonymized to '<NAME>'
    # but the expected value below is 'Some Partner' - these cannot match;
    # confirm the intended fixture value.
    TEST_PACK = [
        (METADATA_WITH_XSOAR_SUPPORT, 'Cortex XSOAR', 'certified'),
        (METADATA_WITH_PARTNER_SUPPORT, 'Some Partner', 'certified'),
        (METADATA_WITH_COMMUNITY_SUPPORT, 'Someone', ''),
    ]
    @staticmethod
    @pytest.mark.parametrize('metadata_file_content, author, certification', TEST_PACK)
    def test_process_metadata(mocker, repo, metadata_file_content, author, certification):
        """
        Given
        - A pack_metadata file for Pack1
        When
        - parsing pack metadata files
        Then
        - parsing all the data from file successfully
        """
        mocker.patch.object(uis, 'get_pack_name', return_value='Pack1')
        mocker.patch.object(uis, 'get_mp_types_from_metadata_by_item', return_value=['xsoar', 'marketplacev2'])
        pack = repo.create_pack("Pack1")
        pack.pack_metadata.write_json(metadata_file_content)
        res = get_pack_metadata_data(pack.pack_metadata.path, print_logs=False)
        result = res.get('Pack1')
        assert 'name' in result.keys()
        assert result.get('name') == 'Pack1'
        assert result.get('current_version') == '1.0.0'
        assert result.get('author') == author
        assert result.get('certification') == certification
        assert result.get('tags') == ['Alerts']
        assert result.get('use_cases') == ['Case Management']
        assert result.get('categories') == ['Endpoint']
        assert result.get('marketplaces') == ['xsoar', 'marketplacev2']
    @staticmethod
    def test_process_metadata__marketplace_mismatch(mocker):
        """
        Given
        - Pack metadata
        When
        - parsing pack files when there is a mismatch between the item's marketplaces and the current run marketplace.
        Then
        - return empty dict
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=True)
        res = get_pack_metadata_data('', print_logs=False)
        assert res == {}
    @staticmethod
    @pytest.mark.parametrize('print_logs', [True, False])
    def test_process_packs_success(mocker, capsys, repo, print_logs):
        """
        Given
        - A pack metadata file path.
        - Whether to print information to log.
        When
        - Parsing pack metadata files.
        Then
        - Verify output to logs.
        """
        mocker.patch.object(uis, 'get_pack_name', return_value='Pack1')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        pack = repo.create_pack("Pack1")
        pack.pack_metadata.write_json({
            'name': 'Pack',
            'currentVersion': '1.0.0',
            'author': '<NAME>',
            'support': 'xsoar',
            'tags': ['Alerts'],
            'useCases': ['Case Management'],
            'categories': ['Endpoint'],
            'marketplaces': ['xsoar', 'marketplacev2']
        })
        pack_metadata_path = pack.pack_metadata.path
        res = get_pack_metadata_data(pack_metadata_path, print_logs)
        captured = capsys.readouterr()
        assert res['Pack1']['name'] == 'Pack'
        assert res['Pack1']['current_version'] == '1.0.0'
        assert res['Pack1']['author'] == 'Cortex XSOAR'
        assert res['Pack1']['tags'] == ['Alerts']
        assert res['Pack1']['use_cases'] == ['Case Management']
        assert res['Pack1']['categories'] == ['Endpoint']
        assert res['Pack1']['certification'] == 'certified'
        assert res['Pack1']['marketplaces'] == ['xsoar', 'marketplacev2']
        # The "adding ... to id_set" log line must appear iff print_logs.
        assert (f'adding {pack_metadata_path} to id_set' in captured.out) == print_logs
    @staticmethod
    def test_process_packs_exception_thrown(capsys, mocker):
        """
        Given
        - A pack metadata file path.
        When
        - Parsing pack metadata files.
        Then
        - Handle the exceptions gracefully.
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        with pytest.raises(FileNotFoundError):
            get_pack_metadata_data('Pack_Path', True)
        captured = capsys.readouterr()
        assert 'Failed to process Pack_Path, Error:' in captured.out
class TestDuplicates:
    # Each entry: (list of id_set items, entity name, expected duplicate?).
    # NOTE(review): MOCKED_DATA is not referenced by the methods visible in
    # this class - possibly consumed elsewhere or dead; confirm.
    MOCKED_DATA = [
        (
            [
                {
                    "BluecatAddressManager": {
                        "name": "BluecatAddressManager",
                        "file_path": "Integrations/BluecatAddressManager/BluecatAddressManager.yml",
                        "fromversion": "5.0.0"
                    }
                },
                {
                    "BluecatAddressManager": {
                        "name": "BluecatAddressManager",
                        "file_path": "Integrations/BluecatAddressManager/BluecatAddressManager.yml",
                        "fromversion": "5.0.0"
                    }
                }
            ], 'BluecatAddressManager', True
        ),
        (
            [
                {
                    "BluecatAddressManager": {
                        "name": "BluecatAddressManager",
                        "file_path": "Integrations/BluecatAddressManager/BluecatAddressManager.yml",
                        "fromversion": "5.0.0"
                    }
                },
                {
                    "BluecatAddressManager": {
                        "name": "BluecatAddressManager",
                        "file_path": "Integrations/BluecatAddressManager/BluecatAddressManager.yml",
                        "fromversion": "3.1.0",
                        "toversion": "4.0.0"
                    }
                }
            ], 'BluecatAddressManager', False
        ),
        (
            [
                {
                    'Test3': {
                        'name': 'Test3',
                        'file_path': 'A',
                        'fromversion': '3.0.0',
                        'toversion': '3.6.0',
                    }
                },
                {
                    'Test3': {
                        'name': 'Test3',
                        'file_path': 'B',
                        'fromversion': '3.5.0',
                        'toversion': '4.5.0',
                    }
                },
                {
                    'Test3': {
                        'name': 'Test3',
                        'file_path': 'C',
                        'fromversion': '3.5.2',
                        'toversion': '3.5.4',
                    }
                },
                {
                    'Test3': {
                        'name': 'Test3',
                        'file_path': 'D',
                        'fromversion': '4.5.0',
                    },
                },
            ], 'Test3', True
        ),
    ]
    @staticmethod
    @pytest.mark.parametrize('first_pack, second_pack, first_source, second_source, expected', [
        ('pack1', 'pack2', ('github.com', 'demisto', 'repo1'), ('github.com', 'demisto', 'repo2'), True),
        ('pack1', 'pack2', ('github.com', 'demisto', 'repo1'), ('github.com', 'demisto', 'repo1'), True),
        ('pack1', 'pack1', ('github.com', 'demisto', 'repo1'), ('github.com', 'demisto', 'repo2'), True),
        ('pack1', 'pack1', ('github.com', 'demisto', 'repo1'), ('github.com', 'demisto', 'repo1'), False),
        ('pack1', 'pack1', ('github.com', 'demisto', 'repo1'), ('code.pan.run', 'xsoar', 'repo1'), False),
        ('pack1', 'pack1', ('github.com', 'demisto', 'repo1'), ('code.pan.run', 'xsoar', 'repo2'), True)
    ])
    def test_has_duplicate(first_pack, second_pack, first_source, second_source, expected):
        """
        Given
        - id_set.json with two duplicate layouts of the same type (details), their versions also overrides.
        They are considered duplicates because they have the same name (typeID), their versions override, and they
        are the same kind (details).
        If the pack and source is the same - there are duplicates, otherwise they are not
        When
        - checking for duplicate
        Then
        - Ensure duplicates found depending on the pack and source
        """
        id_set = {
            'Layouts': []
        }
        id_set['Layouts'].append({
            'urlRep': {
                'typeID': 'urlRep',
                'fromVersion': '5.0.0',
                'kind': 'Details',
                'path': 'Layouts/layout-details-urlrep.json',
                'pack': first_pack,
                'source': first_source
            }
        })
        id_set['Layouts'].append({
            'urlRep': {
                'typeID': 'urlRep',
                'kind': 'Details',
                'path': 'Layouts/layout-details-urlrep2.json',
                'pack': second_pack,
                'source': second_source
            }
        })
        # Check for duplicates for existing id-sets,
        # Will not be considered as duplicate when two entities come from the same source
        has_duplicates = has_duplicate(id_set['Layouts'], 'urlRep', 'Layouts', False, is_create_new=False)
        assert has_duplicates == expected
        # Check for duplicates for a new id-set,
        # In this case all the examples above should be considered as duplicates
        assert has_duplicate(id_set['Layouts'], 'urlRep', 'Layouts', False, is_create_new=True),\
            "if it's a new pack it is always a duplicate"
    @staticmethod
    def test_has_no_duplicate():
        """
        Given
        - id_set.json with two non duplicate layouts. They have different kind
        When
        - checking for duplicate
        Then
        - Ensure duplicates not found
        """
        id_set = {
            'Layouts': []
        }
        id_set['Layouts'].append({
            'urlRep': {
                'typeID': 'urlRep',
                'kind': 'Details',
                'path': 'Layouts/layout-details-urlrep.json'
            }
        })
        id_set['Layouts'].append({
            'urlRep': {
                'typeID': 'urlRep',
                'kind': 'edit',
                'path': 'Layouts/layout-edit-urlrep.json'
            }
        })
        has_duplicates = has_duplicate(id_set['Layouts'], 'urlRep', 'Layouts', False)
        assert has_duplicates is False
class TestIntegrations:
    # Expected id_set entry for the package-format (non-unified) integration.
    INTEGRATION_DATA = {
        "Dummy Integration": {
            "name": "Dummy Integration",
            "file_path": TESTS_DIR + "/test_files/DummyPack/Integrations/DummyIntegration/DummyIntegration.yml",
            "fromversion": "4.1.0",
            "source": ['github.com', 'demisto', 'demisto-sdk'],
            "docker_image": "demisto/python3:3.7.4.977",
            "commands": ['xdr-get-incidents',
                         'xdr-get-incident-extra-data',
                         'xdr-update-incident',
                         'xdr-insert-parsed-alert',
                         'xdr-insert-cef-alerts',
                         'xdr-isolate-endpoint',
                         'xdr-unisolate-endpoint',
                         'xdr-get-endpoints',
                         'xdr-get-distribution-versions',
                         'xdr-create-distribution',
                         'xdr-get-distribution-url',
                         'xdr-get-create-distribution-status',
                         'xdr-get-audit-management-logs',
                         'xdr-get-audit-agent-reports'],
            "api_modules": "HTTPFeedApiModule",
            "classifiers": "dummy-classifier",
            "incident_types": "dummy-incident-type",
            "indicator_fields": "CommonTypes",
            "indicator_types": "CommonTypes",
            "mappers": [
                "dummy-mapper-in",
                "dummy-mapper-out"
            ]
        }
    }
    # Same expectation for the unified (single-YAML) form; it differs only
    # in file_path and in having no docker_image entry.
    UNIFIED_INTEGRATION_DATA = {
        "Dummy Integration": {
            "name": "Dummy Integration",
            "file_path": TESTS_DIR + "/test_files/DummyPack/Integrations/integration-DummyIntegration.yml",
            "fromversion": "4.1.0",
            "source": ['github.com', 'demisto', 'demisto-sdk'],
            "commands": ['xdr-get-incidents',
                         'xdr-get-incident-extra-data',
                         'xdr-update-incident',
                         'xdr-insert-parsed-alert',
                         'xdr-insert-cef-alerts',
                         'xdr-isolate-endpoint',
                         'xdr-unisolate-endpoint',
                         'xdr-get-endpoints',
                         'xdr-get-distribution-versions',
                         'xdr-create-distribution',
                         'xdr-get-distribution-url',
                         'xdr-get-create-distribution-status',
                         'xdr-get-audit-management-logs',
                         'xdr-get-audit-agent-reports'],
            "api_modules": "HTTPFeedApiModule",
            "classifiers": "dummy-classifier",
            "incident_types": "dummy-incident-type",
            "indicator_fields": "CommonTypes",
            "indicator_types": "CommonTypes",
            "mappers": [
                "dummy-mapper-in",
                "dummy-mapper-out"
            ]
        }
    }
    def test_process_integration__sanity(self, mocker):
        """
        Given
        - A valid script package folder located at Packs/DummyPack/Scripts/DummyScript.
        When
        - parsing script files
        Then
        - integration data will be collected properly
        """
        non_unified_file_path = os.path.join(TESTS_DIR, 'test_files',
                                             'DummyPack', 'Integrations', 'DummyIntegration')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_integration(non_unified_file_path, True)
        assert len(res) == 1
        non_unified_integration_data = res[0]
        unified_file_path = os.path.join(TESTS_DIR, 'test_files',
                                         'DummyPack', 'Integrations', 'integration-DummyIntegration.yml')
        res = process_integration(unified_file_path, True)
        assert len(res) == 1
        unified_integration_data = res[0]
        # Both forms must match their respective expected constants.
        test_pairs = [
            (non_unified_integration_data, TestIntegrations.INTEGRATION_DATA),
            (unified_integration_data, TestIntegrations.UNIFIED_INTEGRATION_DATA)
        ]
        for returned, constant in test_pairs:
            assert IsEqualFunctions.is_lists_equal(list(returned.keys()), list(constant.keys()))
            const_data = constant.get('Dummy Integration')
            returned_data = returned.get('Dummy Integration')
            assert IsEqualFunctions.is_dicts_equal(returned_data, const_data)
    @staticmethod
    def test_process_integration__exception(mocker):
        """
        Given
        - An invalid "integration" file located at invalid_file_structures where commonfields object is not a dict.
        When
        - parsing integration files
        Then
        - an exception will be raised
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'integration.yml')
        with pytest.raises(Exception):
            process_integration(test_file_path, True)
    @staticmethod
    def test_process_integration__marketplace_mismatch(mocker):
        """
        Given
        - An integration file
        When
        - parsing item files when there is a mismatch between the item's marketplaces and the current run marketplace.
        Then
        - return empty list
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=True)
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'integration.yml')
        res = process_integration(test_file_path, print_logs=False)
        assert res == []
class TestScripts:
    """Tests for script parsing (get_script_data / process_script)."""
    # Expected id_set entry for the legacy single-file script.
    SCRIPT_DATA = {
        "DummyScript": {
            "name": "DummyScript",
            "file_path": TESTS_DIR + "/test_files/DummyPack/Scripts/DummyScript.yml",
            "source": ['github.com', 'demisto', 'demisto-sdk'],
            "fromversion": "5.0.0",
            "docker_image": "demisto/python3:3.7.3.286",
            "tests": [
                "No test - no need to test widget"
            ]
        }
    }
    # Expected id_set entry for the pack-format script package.
    PACK_SCRIPT_DATA = {
        "DummyScript": {
            "name": "DummyScript",
            "source": ['github.com', 'demisto', 'demisto-sdk'],
            "docker_image": "demisto/python3:3.8.2.6981",
            "pack": "DummyPack",
            "file_path": TESTS_DIR + "/test_files/Packs/DummyPack/Scripts/DummyScript/DummyScript.yml",
        }
    }
    @staticmethod
    def test_get_script_data():
        """
        Test for getting the script data
        """
        file_path = TESTS_DIR + '/test_files/DummyPack/Scripts/DummyScript.yml'
        data = get_script_data(file_path)
        assert IsEqualFunctions.is_lists_equal(list(data.keys()), list(TestScripts.SCRIPT_DATA.keys()))
        const_data = TestScripts.SCRIPT_DATA.get('DummyScript')
        returned_data = data.get('DummyScript')
        assert IsEqualFunctions.is_dicts_equal(returned_data, const_data)
    @staticmethod
    def test_process_script__sanity_package(mocker):
        """
        Given
        - A valid script package folder located at Packs/DummyPack/Scripts/DummyScript.
        When
        - parsing script files
        Then
        - script data will be collected properly
        """
        test_file_path = os.path.join(TESTS_DIR, 'test_files',
                                      'Packs', 'DummyPack', 'Scripts', 'DummyScript')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_script(test_file_path, True)
        assert len(res) == 1
        data = res[0]
        assert IsEqualFunctions.is_lists_equal(list(data.keys()), list(TestScripts.PACK_SCRIPT_DATA.keys()))
        const_data = TestScripts.PACK_SCRIPT_DATA.get('DummyScript')
        returned_data = data.get('DummyScript')
        assert IsEqualFunctions.is_dicts_equal(returned_data, const_data)
    @staticmethod
    def test_process_script__marketplace_mismatch(mocker):
        """
        Given
        - A script file
        When
        - parsing script files when there is a mismatch between the item's marketplaces and the current run marketplace.
        Then
        - return empty list
        """
        # Fixed: the original patched should_skip_item_by_mp to True and then
        # immediately re-patched it to False, which defeated the mismatch
        # scenario under test (compare the sibling integration test, which
        # patches only once).
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=True)
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'integration.yml')
        res = process_script(test_file_path, print_logs=False)
        assert res == []
    @staticmethod
    def test_process_script__exception(mocker):
        """
        Given
        - An invalid "script" file located at invalid_file_structures where commonfields object is not a dict.
        When
        - parsing script files
        Then
        - an exception will be raised
        """
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'script.yml')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        with pytest.raises(Exception):
            process_script(test_file_path, True)
class TestPlaybooks:
    """Tests for parsing playbook files into id_set entries."""
    # Expected parsed representation of DummyPlaybook.yml (the full id_set entry).
    PLAYBOOK_DATA = {
        "name": "Dummy Playbook",
        "file_path": TESTS_DIR + "/test_files/DummyPack/Playbooks/DummyPlaybook.yml",
        "source": ['github.com', 'demisto', 'demisto-sdk'],
        "fromversion": "4.5.0",
        "filters": ["isEqualString"],
        "transformers": ["uniq"],
        "implementing_scripts": [
            "XDRSyncScript",
            "StopScheduledTask",
        ],
        "implementing_playbooks": [
            "Calculate Severity - Standard",
            "Palo Alto Networks - Malware Remediation",
        ],
        "command_to_integration": {
            "xdr-update-incident": "",
            "autofocus-sample-analysis": ""
        },
        "tests": [
            "No Test"
        ],
        "skippable_tasks": [
            "StopScheduledTask",
            "autofocus-sample-analysis"
        ]
    }

    @staticmethod
    def test_get_playbook_data():
        """
        Test for getting the playbook data
        """
        file_path = TESTS_DIR + '/test_files/DummyPack/Playbooks/DummyPlaybook.yml'
        data = get_playbook_data(file_path)['Dummy Playbook']
        assert IsEqualFunctions.is_dicts_equal(data, TestPlaybooks.PLAYBOOK_DATA)

    @staticmethod
    def test_get_playbook_data_2():
        """
        Given
        - A playbook file called playbook-with-incident-fields.yml
        When
        - parsing playbook files
        Then
        - parsing all the data from file successfully
        """
        test_dir = f'{git_path()}/demisto_sdk/commands/create_id_set/tests/test_data/playbook-with-incident-fields.yml'
        result = get_playbook_data(test_dir)
        result = result.get('Arcsight - Get events related to the Case')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'implementing_scripts' in result.keys()
        assert 'command_to_integration' in result.keys()
        assert 'tests' in result.keys()
        assert 'incident_fields' in result.keys()
        assert 'indicator_fields' in result.keys()

    @staticmethod
    def test_get_playbook_data_no_fields(mocker):
        """
        Given
        - A playbook file called playbook-no-incident-fields.yml without any
            incident or indicator fields that it depends on.
        When
        - parsing playbook files
        Then
        - parsing all the data from file successfully
        """
        test_dir = f'{git_path()}/demisto_sdk/commands/create_id_set/tests/test_data/playbook-no-incident-fields.yml'
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        result = get_playbook_data(test_dir)
        result = result.get('Arcsight - Get events related to the Case')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'implementing_scripts' in result.keys()
        assert 'command_to_integration' in result.keys()
        assert 'tests' in result.keys()
        # Fields keys must be absent (not merely empty) when nothing depends on them.
        assert 'incident_fields' not in result.keys()
        assert 'indicator_fields' not in result.keys()

    @staticmethod
    def test_process_playbook__exception(mocker):
        """
        Given
        - An invalid "playbook" file located at invalid_file_structures where tasks object is not a dict.
        When
        - parsing playbook files
        Then
        - an exception will be raised
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'playbook.yml')
        with pytest.raises(Exception):
            process_general_items(test_file_path, True, (FileType.PLAYBOOK,), get_playbook_data)

    @staticmethod
    def test_get_playbook_data_bad_graph():
        """
        Given
        - A playbook file called playbook-invalid-bad-graph.yml:
            - task 1 point to non-existing task
            - task 2 is not connected
        When
        - parsing playbook files
        Then
        - parsing flow graph from file successfully (only tasks 0 and 1 will be in the graph)
        """
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'playbook-invalid-bad-graph.yml')
        result = get_playbook_data(test_file_path)
        playbook_data = result.get('InvalidPlaybook-BadGraph', {})
        assert playbook_data.get('name') == 'InvalidPlaybook-BadGraph'
        assert playbook_data.get('command_to_integration', {}).get('ip') == ''
        assert playbook_data.get('command_to_integration', {}).get('domain') == ''
        assert 'domain' in playbook_data.get('skippable_tasks', [])
        # The disconnected 'ip' task must not be collected as skippable.
        assert 'ip' not in playbook_data.get('skippable_tasks', [])

    @staticmethod
    def test_get_playbook_data_bad_graph_2():
        """
        Given
        - A playbook file called playbook-invalid-bad-graph_2.yml:
            - starttaskid=5 but task 5 does not exist
        When
        - parsing playbook files
        Then
        - parsing flow graph from file successfully (no actual tasks will be in the graph)
        """
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'playbook-invalid-bad-graph_2.yml')
        result = get_playbook_data(test_file_path)
        playbook_data = result.get('InvalidPlaybook-BadGraph', {})
        assert playbook_data.get('name') == 'InvalidPlaybook-BadGraph'
        assert playbook_data.get('command_to_integration', {}).get('ip') == ''
        assert playbook_data.get('command_to_integration', {}).get('domain') == ''
        # domain task is marked as skippable so it will be included regardless to the graph.
        assert 'domain' in playbook_data.get('skippable_tasks', [])
        assert len(playbook_data.get('skippable_tasks', [])) == 1

    @staticmethod
    def test_get_filters_from_playbook_tasks():
        """
        Given
        - playbook with one task and 3 filters: isEqualString, isEqualString and StringContainsArray
        When
        - parsing filters from the playbook
        Then
        - parsing 2 filters successfully
        - isEqualString filter shows once
        """
        data = {'tasks': {'0': {'scriptarguments': {'value': {'complex': {'filters': [[{'operator': 'isEqualString'}],
                                                                                     [{'operator': 'isEqualString'}],
                                                                                     [{
                                                                                         'operator': 'StringContainsArray'}]
                                                                                     ]}}}}}}
        _, filters = get_filters_and_transformers_from_playbook(data)
        assert len(filters) == 2
        assert 'isEqualString' in filters
        assert 'StringContainsArray' in filters

    @staticmethod
    def test_get_transformers_from_playbook_tasks():
        """
        Given
        - playbook with one task and 3 transformers: Length, Length and toUpperCase
        When
        - parsing transformers from the playbook
        Then
        - parsing 2 transformers successfully
        - Length transformer shows once
        """
        data = {'tasks': {'0': {'scriptarguments': {'value': {'complex': {'transformers': [{'operator': 'toUpperCase'},
                                                                                           {'operator': 'Length'},
                                                                                           {'operator': 'Length'}
                                                                                           ]}}}}}}
        transformers, _ = get_filters_and_transformers_from_playbook(data)
        assert len(transformers) == 2
        assert 'toUpperCase' in transformers
        assert 'Length' in transformers

    @staticmethod
    def test_get_transformers_from_playbook_condition_task():
        """
        Given
        - playbook with one condition task with toUpperCase transformer
        When
        - parsing transformers from the playbook
        Then
        - parsing toUpperCase transformer successfully
        """
        data = {'tasks': {'0': {'type': 'condition', 'conditions': [
            {'condition': [[{'left': {'value': {'complex': {'transformers': [{'operator': 'toUpperCase'}
                                                                             ]}}}}]]}]}}}
        transformers, _ = get_filters_and_transformers_from_playbook(data)
        assert transformers == ['toUpperCase']

    @staticmethod
    def test_get_transformers_and_filters_from_playbook_two_conditions_task():
        """
        Given
        - playbook with one task that contains 2 conditions: one with filter and one with transformer
        When
        - parsing transformers and filters from the playbook
        Then
        - parsing toUpperCase transformer successfully
        - parsing isEqualString filter successfully
        """
        data = {'tasks': {'0': {'type': 'condition', 'conditions': [
            {'condition': [[{'left': {'value': {'complex': {'filters': [[{'operator': 'isEqualString'}]
                                                                        ]}}}}]]},
            {'condition': [[{'right': {'value': {'complex': {'transformers': [{'operator': 'toUpperCase'}
                                                                              ]}}}}]]}]}}}
        transformers, filters = get_filters_and_transformers_from_playbook(data)
        assert transformers == ['toUpperCase']
        assert filters == ['isEqualString']

    @staticmethod
    def test_get_transformers_from_playbook_inputs(mocker):
        """
        Given
        - playbook with 2 inputs that using Length and toUpperCase transformers
        When
        - parsing transformers from the playbook inputs
        Then
        - parsing 2 transformers successfully
        """
        data = {'inputs': [{'value': {'complex': {'transformers': [{'operator': 'toUpperCase'}
                                                                   ]}}},
                           {'value': {'complex': {'transformers': [{'operator': 'Length'}
                                                                   ]}}}]}
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        transformers, _ = get_filters_and_transformers_from_playbook(data)
        assert len(transformers) == 2
        assert 'toUpperCase' in transformers
        assert 'Length' in transformers
class TestLayouts:
    """Tests for parsing layout and layoutscontainer files into id_set entries."""

    @staticmethod
    def test_process_layouts__sanity(mocker):
        """
        Given
        - A layout file called layout-to-test.json
        When
        - parsing layout files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'layout-to-test.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_general_items(test_file, True, (FileType.LAYOUT,), get_layout_data)
        assert len(res) == 1
        result = res[0]
        result = result.get('urlRep')
        assert 'kind' in result.keys()
        assert 'name' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'toversion' in result.keys()
        assert 'file_path' in result.keys()
        assert 'typeID' in result.keys()
        assert 'incident_and_indicator_types' in result.keys()
        assert 'incident_and_indicator_fields' in result.keys()

    @staticmethod
    def test_process_layouts__no_incident_types_and_fields(mocker):
        """
        Given
        - A layout file called layout-to-test.json that doesnt have related incident fields and indicator fields
        When
        - parsing layout files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'layout-to-test-no-types-fields.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        # print_logs=False here, unlike the sanity test above.
        res = process_general_items(test_file, False, (FileType.LAYOUT,), get_layout_data)
        assert len(res) == 1
        result = res[0]
        result = result.get('urlRep')
        assert 'kind' in result.keys()
        assert 'name' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'toversion' in result.keys()
        assert 'file_path' in result.keys()
        assert 'typeID' in result.keys()
        assert 'incident_and_indicator_types' in result.keys()
        assert 'incident_and_indicator_fields' not in result.keys()

    @staticmethod
    def test_process_layoutscontainer__sanity(mocker):
        """
        Given
        - A layoutscontainer file called layoutscontainer-to-test.json
        When
        - parsing layoutscontainer files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'layoutscontainer-to-test.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_general_items(test_file, True, (FileType.LAYOUTS_CONTAINER,), get_layoutscontainer_data)
        assert len(res) == 1
        result = res[0]
        result = result.get('layouts_container_test')
        assert 'detailsV2' in result.keys()
        assert 'name' in result.keys()
        assert 'group' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'toversion' in result.keys()
        assert 'file_path' in result.keys()
        assert 'incident_and_indicator_types' in result.keys()
        assert 'incident_and_indicator_fields' in result.keys()
class TestIncidentFields:
    """Tests for parsing incident-field files into id_set entries."""

    @staticmethod
    def test_process_incident_fields__sanity(mocker):
        """
        Given
        - An incident field file called incidentfield-to-test.json
        When
        - parsing incident field files
        Then
        - parsing all the data from file successfully
        """
        test_dir = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                'test_data', 'incidentfield-to-test.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_incident_fields(test_dir, True, [])
        assert len(res) == 1
        result = res[0]
        result = result.get('incidentfield-test')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'toversion' in result.keys()
        assert 'incident_types' in result.keys()
        assert 'scripts' in result.keys()

    @staticmethod
    def test_process_incident_fields__no_types_scripts(mocker):
        """
        Given
        - An incident field file called incidentfield-to-test-no-types_scripts.json with no script or incident type
            related to it
        When
        - parsing incident field files
        Then
        - parsing all the data from file successfully
        """
        test_dir = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                'test_data', 'incidentfield-to-test-no-types_scripts.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_incident_fields(test_dir, True, [])
        assert len(res) == 1
        result = res[0]
        result = result.get('incidentfield-test')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'toversion' in result.keys()
        assert 'incident_types' not in result.keys()
        assert 'scripts' not in result.keys()

    @staticmethod
    def test_process_incident_fields__marketplace_mismatch(mocker):
        """
        Given
        - An incident field file
        When
        - parsing incident field files when there is a mismatch between the item's marketplaces and the current run marketplace.
        Then
        - return empty list
        """
        test_dir = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                'test_data', 'incidentfield-to-test-no-types_scripts.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=True)
        res = process_incident_fields(test_dir, True, [])
        assert res == []
class TestIndicatorType:
    """Tests for parsing indicator-type (reputation) files into id_set entries."""

    @staticmethod
    def test_process_indicator_type__sanity(mocker):
        """
        Given
        - An indicator type file called reputation-indicatortype.json.
        When
        - parsing indicator type files
        Then
        - all the data is parsed from the file successfully
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        reputation_path = f'{git_path()}/demisto_sdk/commands/create_id_set/tests/test_data/reputation-indicatortype.json'
        parsed = get_indicator_type_data(reputation_path, [{'integration': {'commands': ['ip']}}])
        entry = parsed.get('indicator-type-dummy')
        for expected_key in ('name', 'file_path', 'fromversion', 'integrations', 'scripts'):
            assert expected_key in entry
        for script_name in ('dummy-script', 'dummy-script-2', 'dummy-script-3'):
            assert script_name in entry.get('scripts')

    @staticmethod
    def test_get_indicator_type_data_no_integration_no_scripts():
        """
        Given
        - An indicator type file called reputation-indicatortype_no_script_no_integration.json without any
          integrations or scripts that it depends on.
        When
        - parsing indicator type files
        Then
        - all the data is parsed from the file successfully
        """
        reputation_path = f'{git_path()}/demisto_sdk/commands/create_id_set/tests/test_data/' \
                          f'reputation-indicatortype_no_script_no_integration.json'
        entry = get_indicator_type_data(reputation_path, []).get('indicator-type-dummy')
        for expected_key in ('name', 'file_path', 'fromversion'):
            assert expected_key in entry
        # With no dependencies, these keys must be absent entirely.
        for absent_key in ('integrations', 'scripts'):
            assert absent_key not in entry
class TestIncidentTypes:
    """Tests for parsing incident-type files into id_set entries."""

    @staticmethod
    def test_get_incident_type_data__sanity():
        """
        Given
        - An incident type file called incidenttype-to-test.json
        When
        - parsing incident type files
        Then
        - all the data is parsed from the file successfully
        """
        incident_type_path = f'{git_path()}/demisto_sdk/commands/create_id_set/tests/test_data/incidenttype-to-test.json'
        entry = get_incident_type_data(incident_type_path).get('dummy incident type')
        for expected_key in ('name', 'file_path', 'fromversion', 'playbooks', 'scripts'):
            assert expected_key in entry

    @staticmethod
    def test_get_incident_type_data__no_playbooks_scripts():
        """
        Given
        - An incident type file called incidenttype-to-test-no-playbook-script.json with no script or playbook
          related to it
        When
        - parsing incident type files
        Then
        - all the data is parsed from the file successfully
        """
        incident_type_path = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set',
                                          'tests', 'test_data', 'incidenttype-to-test-no-playbook-script.json')
        entry = get_incident_type_data(incident_type_path).get('dummy incident type')
        for expected_key in ('name', 'file_path', 'fromversion'):
            assert expected_key in entry
        # With no related content, these keys must be absent entirely.
        for absent_key in ('playbooks', 'scripts'):
            assert absent_key not in entry
class TestClassifiers:
    """Tests for parsing classifier files into id_set entries."""

    @staticmethod
    def test_process_classifiers__no_types_scripts(mocker):
        """
        Given
        - A classifier file called classifier-to-test-no-incidenttypes.json with no incident types
            related to it
        When
        - parsing classifier files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-to-test-no-incidenttypes.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = get_classifier_data(test_file)
        result = res.get('dummy classifier')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        # No related types/fields means the keys are omitted entirely.
        assert 'incident_types' not in result.keys()
        assert 'incident_fields' not in result.keys()
class TestMappers:
    """Tests for parsing classifier-mapper files into id_set entries."""

    @staticmethod
    def test_process_mappers__sanity():
        """
        Given
        - A mapper file called classifier-mapper-to-test.json
        When
        - parsing mapper files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-mapper-to-test.json')
        res = get_mapper_data(test_file)
        result = res.get('dummy mapper')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'incident_types' in result.keys()
        assert 'incident_fields' in result.keys()
        assert 'dummy incident type' in result['incident_types']
        assert 'dummy incident type 1' in result['incident_types']
        assert 'dummy incident type 2' in result['incident_types']
        assert 'dummy incident field' in result['incident_fields']
        assert 'dummy incident field 1' in result['incident_fields']
        assert 'dummy incident field 2' in result['incident_fields']
        assert 'dummy incident field 3' in result['incident_fields']
        # Built-in fields such as 'occurred' must be excluded from the collected fields.
        assert 'occurred' not in result['incident_fields']

    @staticmethod
    def test_process_mappers__no_types_fields():
        """
        Given
        - A mapper file called classifier-mapper-to-test-no-types-fields.json without incident types or
            incident fields related to it
        When
        - parsing mapper files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-mapper-to-test-no-types-fields.json')
        res = get_mapper_data(test_file)
        result = res.get('dummy mapper')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'incident_types' not in result.keys()
        assert 'incident_fields' not in result.keys()

    @staticmethod
    def test_process_mappers__outgoing_mapper(mocker):
        """
        Given
        - A mapper file called ServiceNow-outgoing-mapper with incident fields related to it
        When
        - running get_mapper_data
        Then
        - Validating parsing all the incident fields from the simple key.
        """
        # Inline fixture: a realistic outgoing mapper whose internalMapping mixes
        # "simple" values with nested "complex" filter/transformer structures.
        outgoing_mapper_snow = {
            "defaultIncidentType": "ServiceNow Ticket",
            "description": "Maps outgoing ServiceNow incident Fields.",
            "feed": False,
            "fromVersion": "6.0.0",
            "id": "ServiceNow-outgoing-mapper",
            "mapping": {
                "ServiceNow Ticket": {
                    "dontMapEventToLabels": False,
                    "internalMapping": {
                        "category": {
                            "complex": None,
                            "simple": "servicenowcategory"
                        },
                        "closed_at": {
                            "complex": {
                                "accessor": "",
                                "filters": [
                                    [
                                        {
                                            "ignoreCase": False,
                                            "left": {
                                                "isContext": True,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "closed"
                                                }
                                            },
                                            "operator": "isAfter",
                                            "right": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "0001-01-01T00:00:00Z"
                                                }
                                            }
                                        }
                                    ]
                                ],
                                "root": "closed",
                                "transformers": []
                            },
                            "simple": ""
                        },
                        "description": {
                            "complex": None,
                            "simple": "details"
                        },
                        "escalation": {
                            "complex": None,
                            "simple": "servicenowescalation"
                        },
                        "impact": {
                            "complex": None,
                            "simple": "servicenowimpact"
                        },
                        "notify": {
                            "complex": None,
                            "simple": "servicenownotify"
                        },
                        "priority": {
                            "complex": None,
                            "simple": "servicenowpriority"
                        },
                        "resolved_at": {
                            "complex": {
                                "accessor": "",
                                "filters": [
                                    [
                                        {
                                            "ignoreCase": False,
                                            "left": {
                                                "isContext": True,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "closed"
                                                }
                                            },
                                            "operator": "isAfter",
                                            "right": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "0001-01-01T00:00:00Z"
                                                }
                                            }
                                        }
                                    ]
                                ],
                                "root": "closed",
                                "transformers": []
                            },
                            "simple": ""
                        },
                        "severity": {
                            "complex": {
                                "accessor": "",
                                "filters": [],
                                "root": "severity",
                                "transformers": [
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "3 - Low"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "0"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    },
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "3 - Low"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "0.5"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    },
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "3 - Low"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "1"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    },
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "2 - Medium"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "2"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    },
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "1 - High"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "3"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    },
                                    {
                                        "args": {
                                            "limit": {
                                                "isContext": False,
                                                "value": None
                                            },
                                            "replaceWith": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "1 - High"
                                                }
                                            },
                                            "toReplace": {
                                                "isContext": False,
                                                "value": {
                                                    "complex": None,
                                                    "simple": "4"
                                                }
                                            }
                                        },
                                        "operator": "replace"
                                    }
                                ]
                            },
                            "simple": ""
                        },
                        "short_description": {
                            "complex": None,
                            "simple": "name"
                        },
                        "sla_due": {
                            "complex": None,
                            "simple": "remediationsla.dueDate"
                        },
                        "state": {
                            "complex": None,
                            "simple": "servicenowstate"
                        },
                        "subcategory": {
                            "complex": None,
                            "simple": "subcategory"
                        },
                        "urgency": {
                            "complex": None,
                            "simple": "servicenowurgency"
                        },
                        "work_start": {
                            "complex": None,
                            "simple": "timetoassignment.startDate"
                        }
                    }
                }
            },
            "name": "ServiceNow - Outgoing Mapper",
            "type": "mapping-outgoing",
            "version": -1
        }
        # get_file is patched, so the path argument is irrelevant here.
        mocker.patch("demisto_sdk.commands.common.tools.get_file", return_value=outgoing_mapper_snow)
        mapper = get_mapper_data('')
        mapper_data = mapper.get('ServiceNow-outgoing-mapper')
        assert mapper_data.get('name') == 'ServiceNow - Outgoing Mapper'
        assert mapper_data.get('fromversion') == '6.0.0'
        assert mapper_data.get('incident_types') == ['ServiceNow Ticket']
        assert set(mapper_data.get('incident_fields')) == {
            'closed', 'servicenowescalation', 'servicenowurgency', 'subcategory', 'servicenownotify',
            'servicenowcategory', 'remediationsla.dueDate', 'servicenowstate', 'timetoassignment.startDate',
            'servicenowimpact', 'servicenowpriority'}

    @staticmethod
    def test_process_mappers__complex_value():
        """
        Given
        - A mapper file called classifier-mapper-to-test-complex-value.json with one transformer and one filter
        When
        - parsing mapper files
        Then
        - parsing one filter and one transformer from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-mapper-to-test-complex-value.json')
        res = get_mapper_data(test_file)
        result = res.get('dummy mapper')
        transformers = result.get('transformers')
        filters = result.get('filters')
        assert len(transformers) == 1
        assert 'toUpperCase' in transformers
        assert len(filters) == 1
        assert 'isEqualString' in filters
class TestWidget:
    """Tests for parsing widget files into id_set entries."""

    @staticmethod
    def test_process_widget__with_script():
        """
        Given
        - A widget file called widget-with-scripts.json
        When
        - parsing widget files
        Then
        - all the data is parsed from the file successfully
        """
        widget_path = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                   'test_data', 'widget-with-scripts.json')
        widget_entry = get_widget_data(widget_path).get('dummy_widget')
        for expected_key in ('name', 'file_path', 'fromversion', 'scripts'):
            assert expected_key in widget_entry
        assert 'dummy_script' in widget_entry['scripts']

    @staticmethod
    def test_process_widget__no_script():
        """
        Given
        - A widget file called widget-no-scripts.json
        When
        - parsing widget files
        Then
        - all the data is parsed from the file successfully
        """
        widget_path = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                   'test_data', 'widget-no-scripts.json')
        widget_entry = get_widget_data(widget_path).get('dummy_widget')
        for expected_key in ('name', 'file_path', 'fromversion'):
            assert expected_key in widget_entry
        # A widget with no script must not expose a 'scripts' key at all.
        assert 'scripts' not in widget_entry
class TestDashboard:
    """Tests for parsing dashboard files into id_set entries."""
    # Dashboard fixture whose single widget queries a script (dataType "scripts").
    DASHBOARD_WITH_SCRIPT = {
        "id": "dummy_dashboard",
        "layout": [
            {
                "widget": {
                    "category": "",
                    "dataType": "scripts",
                    "fromServerVersion": "",
                    "id": "dummy_widget",
                    "name": "dummy_dashboard",
                    "query": "dummy_script",
                    "toServerVersion": "",
                },
            }
        ],
        "name": "dummy_dashboard",
        "fromVersion": "6.0.0",
    }
    # Dashboard fixture whose widget shows indicators, i.e. no script dependency.
    DASHBOARD_NO_SCRIPT = {
        "id": "dummy_dashboard",
        "layout": [
            {
                "widget": {
                    "category": "",
                    "dataType": "indicators",
                    "fromServerVersion": "",
                    "id": "dummy_widget",
                    "name": "dummy_dashboard",
                    "packID": "",
                    "toServerVersion": "",
                    "widgetType": "table"
                },
            }
        ],
        "name": "dummy_dashboard",
        "fromVersion": "6.0.0",
    }

    @staticmethod
    def test_process_dashboard__with_script(repo):
        """
        Given
        - A dashboard file called dashboard-with-scripts.json
        When
        - parsing dashboard files
        Then
        - parsing all the data from file successfully
        """
        pack = repo.create_pack("Pack1")
        dashboard = pack.create_dashboard('dummy_dashboard')
        dashboard.update(TestDashboard.DASHBOARD_WITH_SCRIPT)
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', dashboard.path)
        res = get_dashboard_data(test_file)
        result = res.get('dummy_dashboard')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'scripts' in result.keys()
        assert 'dummy_script' in result['scripts']

    @staticmethod
    def test_process_dashboard__no_script(repo):
        """
        Given
        - A dashboard file called dashboard-no-scripts.json
        When
        - parsing dashboard files
        Then
        - parsing all the data from file successfully
        """
        pack = repo.create_pack("Pack1")
        dashboard = pack.create_dashboard('dummy_dashboard')
        dashboard.update(TestDashboard.DASHBOARD_NO_SCRIPT)
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', dashboard.path)
        res = get_dashboard_data(test_file)
        result = res.get('dummy_dashboard')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'scripts' not in result.keys()
class TestReport:
    """Tests for parsing report files into id_set entries."""
    # Report fixture whose embedded dashboard widget queries a script.
    REPORT_WITH_SCRIPT = {
        "id": "dummy_report",
        "modified": "2020-09-23T07:54:57.783240299Z",
        "startDate": "0001-01-01T00:00:00Z",
        "name": "dummy_report",
        "dashboard": {
            "id": "dummy_report",
            "version": 0,
            "name": "dummy_report",
            "layout": [
                {
                    "id": "dummy_report",
                    "widget": {
                        "id": "dummy_report",
                        "version": 1,
                        "modified": "2020-09-09T14:02:27.423018192Z",
                        "name": "dummy_widget",
                        "dataType": "scripts",
                        "query": "dummy_script",
                    }
                }
            ]
        },
        "fromVersion": "6.0.0",
    }
    # Report fixture whose widget shows indicators, i.e. no script dependency.
    REPORT_NO_SCRIPT = {
        "id": "dummy_report",
        "name": "dummy_report",
        "dashboard": {
            "id": "dummy_report",
            "name": "dummy_report",
            "layout": [
                {
                    "id": "dummy_report",
                    "widget": {
                        "id": "dummy_report",
                        "name": "dummy_widget",
                        "dataType": "indicators",
                    }
                }
            ]
        },
        "fromVersion": "6.0.0",
    }

    @staticmethod
    def test_process_report__with_script(repo):
        """
        Given
        - A report file called report-with-scripts.json
        When
        - parsing report files
        Then
        - parsing all the data from file successfully
        """
        pack = repo.create_pack("Pack1")
        report = pack.create_report('dummy_report')
        report.update(TestReport.REPORT_WITH_SCRIPT)
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', report.path)
        res = get_report_data(test_file)
        result = res.get('dummy_report')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'scripts' in result.keys()
        assert 'dummy_script' in result['scripts']

    @staticmethod
    def test_process_report__no_script(repo):
        """
        Given
        - A report file called report-no-scripts.json
        When
        - parsing report files
        Then
        - parsing all the data from file successfully
        """
        pack = repo.create_pack("Pack1")
        report = pack.create_report('dummy_report')
        report.update(TestReport.REPORT_NO_SCRIPT)
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', report.path)
        res = get_report_data(test_file)
        result = res.get('dummy_report')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'scripts' not in result.keys()
class TestGenericFunctions:
    """Tests for the generic id-set parsing helpers (process_general_items,
    get_values_for_keys_recursively, playbook-input/field extraction)."""

    @staticmethod
    def test_process_general_items__sanity(mocker):
        """
        Given
        - A classifier file called classifier-to-test.json
        When
        - parsing classifier files
        Then
        - parsing all the data from file successfully
        """
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-to-test.json')
        # Marketplace filtering is out of scope here, so never skip the item.
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        res = process_general_items(test_file, True, (FileType.CLASSIFIER,), get_classifier_data)
        assert len(res) == 1
        result = res[0]
        result = result.get('dummy classifier')
        assert 'name' in result.keys()
        assert 'file_path' in result.keys()
        assert 'fromversion' in result.keys()
        assert 'incident_types' in result.keys()
        assert 'dummy incident type' in result['incident_types']
        assert 'dummy incident type 2' in result['incident_types']
        assert 'dummy incident type 3' in result['incident_types']

    @staticmethod
    def test_process_general_items__marketplace_mismatch(mocker):
        """
        Given
        - An item file
        When
        - parsing item files when there is a mismatch between the item's marketplaces and the current run marketplace.
        Then
        - return empty list
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=True)
        test_file = os.path.join(git_path(), 'demisto_sdk', 'commands', 'create_id_set', 'tests',
                                 'test_data', 'classifier-to-test.json')
        res = process_general_items(test_file, True, (FileType.CLASSIFIER,), get_classifier_data)
        assert res == []

    @staticmethod
    def test_process_general_items__exception(mocker):
        """
        Given
        - An invalid "dashboard" file located at invalid_file_structures where id is a list so it can't be used
          as a dict key.
        When
        - parsing dashboard files
        Then
        - an exception will be raised
        """
        test_file_path = os.path.join(TESTS_DIR, 'test_files', 'invalid_file_structures', 'dashboard.json')
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        with pytest.raises(Exception):
            process_general_items(test_file_path, True, (FileType.DASHBOARD,), get_general_data)

    @staticmethod
    def test_get_values_for_keys_recursively():
        """
        Given
        - A list of keys to extract their values from a dict
        When
        - Extracting data from nested elements in the json
        Then
        - Extracting the values from all the levels of nesting in the json
        """
        test_dict = {
            'id': 1,
            'nested': {
                'x1': 1,
                'x2': 'x2',
                'x3': False,
                'x4': [
                    {
                        'x1': 2,
                        'x2': 55
                    },
                    {
                        'x3': 1,
                        'x2': True
                    }
                ]
            },
            'x2': 4.0
        }
        test_keys = ['x1', 'x2', 'x3']
        # Values are collected across every nesting level, list items included.
        expected = {
            'x1': [1, 2],
            'x2': ['x2', 55, True, 4.0],
            'x3': [False, 1]
        }
        assert expected == get_values_for_keys_recursively(test_dict, test_keys)

    # Playbook-input fixtures: "simple"/"complex" value shapes, with and
    # without a reference to an incident field.
    INPUT_WITH_INCIDENT_FIELD_SIMPLE = {
        "key": "AlertID",
        "value": {
            "simple": "${incident.field_name}"
        },
        "required": False
    }

    INPUT_WITH_INCIDENT_FIELD_COMPLEX1 = {
        "key": "AlertID",
        "value": {
            "complex": {
                "root": "incident",
                "accessor": "field_name"
            }
        },
        "required": False
    }

    INPUT_WITH_INCIDENT_FIELD_COMPLEX2 = {
        "key": "AlertID",
        "value": {
            "complex": {
                "root": "incident.field_name",
                "accessor": "username"
            }
        },
        "required": False
    }

    INPUT_SIMPLE_WITHOUT_INCIDENT_FIELD = {
        "key": "AlertID",
        "value": {
            "simple": "${not_incident.field_name}"
        },
        "required": False
    }

    INPUT_COMPLEX_WITHOUT_INCIDENT_FIELD = {
        "key": "AlertID",
        "value": {
            "complex": {
                "root": "something",
                "accessor": "username"
            }
        },
        "required": False
    }

    # (playbook_input, are_there_incident_fields) pairs for the test below.
    INPUTS = [
        (INPUT_WITH_INCIDENT_FIELD_SIMPLE, True),
        (INPUT_WITH_INCIDENT_FIELD_COMPLEX1, True),
        (INPUT_WITH_INCIDENT_FIELD_COMPLEX2, True),
        (INPUT_SIMPLE_WITHOUT_INCIDENT_FIELD, False),
        (INPUT_COMPLEX_WITHOUT_INCIDENT_FIELD, False)
    ]

    @staticmethod
    @pytest.mark.parametrize('playbook_input, are_there_incident_fields', INPUTS)
    def test_get_incident_fields_by_playbook_input(playbook_input, are_there_incident_fields):
        """
        Given
        - A list of playbook inputs
        When
        - Searching for dependent incident fields
        Then
        - Finding all dependent incident fields in the input
        """
        result = get_incident_fields_by_playbook_input(playbook_input.get('value'))
        if are_there_incident_fields:
            assert "field_name" in result
        else:
            assert result == set()

    EXAMPLE_TASK_WITH_SIMPLE_SCRIPT_ARGUMENTS = {
        "id": "ID",
        "scriptarguments": {
            "field_name": {
                "simple": "${inputs.IndicatorTagName}"
            }
        }
    }

    EXAMPLE_TASK_WITH_CUSTOM_FIELDS_SCRIPT_ARGUMENTS = {
        "id": "ID",
        "scriptarguments": {
            "customFields": {
                "simple": '[{"field_name":"${inputs.IndicatorTagName}"}]'
            }
        }
    }

    # NOTE(review): the simple-arguments case is deliberately commented out
    # below; confirm whether it should be re-enabled.
    TASK_INPUTS = [
        # EXAMPLE_TASK_WITH_SIMPLE_SCRIPT_ARGUMENTS,
        EXAMPLE_TASK_WITH_CUSTOM_FIELDS_SCRIPT_ARGUMENTS
    ]

    @staticmethod
    @pytest.mark.parametrize('task', TASK_INPUTS)
    def test_get_fields_by_script_argument(task):
        """
        Given
        - A list of playbook tasks
        When
        - Searching for dependent incident fields in the task script arguments
        Then
        - Finding all dependent incident fields in the task
        """
        result = get_fields_by_script_argument(task)
        assert "field_name" in result
class TestFlow(unittest.TestCase):
    """End-to-end check that re_create_id_set + find_duplicates detect
    duplicated content ids across the content directories."""

    WIDGET_DATA = {
        "id": "temp-widget-dup-check",
        "version": -1,
        "fromVersion": "3.5.0",
        "name": "check duplicate",
        "dataType": "incidents",
        "widgetType": "pie"
    }

    REPORT_DATA = {
        "id": "temp-report-dup-check",
        "name": "Critical and High incidents",
        "description": "All critical and high severity incidents that may need the analyst attention.",
        "fromVersion": "3.5.0"
    }

    CLASSIFIER_DATA = {
        "id": "dup_check-classifier",
        "version": -1,
        "modified": "2018-05-21T12:41:29.542577629Z",
        "defaultIncidentType": "",
        "brandName": "dup_check-classifier-name"
    }

    LAYOUT_DATA = {
        "TypeName": "layout-dup-check-type-name",
        "kind": "details",
        "fromVersion": "5.0.0",
        "layout": {
            "TypeName": "",
            "id": "layout-dup-check-id",
            "kind": "details",
            "modified": "2019-09-01T12:25:49.808989+03:00",
            "name": "",
            "system": False
        },
        "name": "my-layout",
        "typeId": "layout-dup-check-id",
        "version": -1
    }

    DASHBOARD_DATA = {
        "id": "dup-check-dashbaord",
        "version": -1,
        "fromVersion": "4.0.0",
        "description": "",
        "name": "My Dashboard",
    }

    DASHBOARD_DATA2 = {
        "id": "dup-check-dashbaord",
        "version": -1,
        "fromVersion": "4.0.0",
        "description": "",
        "name": "My Dashboard2",
    }

    INCIDENT_FIELD_DATA = {
        "cliName": "accountid",
        "description": "",
        "fieldCalcScript": "",
        "group": 0,
        "id": "incident_account_field_dup_check",
        "name": "Account ID",
        "fromVersion": "5.0.0"
    }

    @staticmethod
    def _create_temp_json(data, prefix, dir_path):
        """Create a NamedTemporaryFile in *dir_path*, dump *data* into it and
        flush/fsync so re_create_id_set can read it.

        The caller must keep the returned handle alive: the file is deleted
        as soon as the handle is garbage-collected.
        """
        tmp = tempfile.NamedTemporaryFile(mode="w+", prefix=prefix,  # disable-secrets-detection
                                          suffix='.json', dir=dir_path)  # disable-secrets-detection
        json.dump(data, tmp)
        tmp.flush()
        os.fsync(tmp.fileno())
        return tmp

    # TODO: unskip
    @pytest.mark.skip
    def test_find_duplicates(self):
        """Create duplicated content items on disk, rebuild the id set, and
        verify that every duplicated id is reported by find_duplicates."""
        sys.path.insert(1, os.getcwd())
        # Make the script run from tests dir
        os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), TESTS_DIR)))

        # All handles below are kept in local variables so the temp files
        # survive until re_create_id_set has scanned the directories.
        temp_report = self._create_temp_json(TestFlow.REPORT_DATA, 'report-', 'Reports')
        temp_report2 = self._create_temp_json(TestFlow.REPORT_DATA, 'report-', 'Reports')

        temp_widget = self._create_temp_json(TestFlow.WIDGET_DATA, 'widget-', 'Widgets')
        temp_widget2 = self._create_temp_json(TestFlow.WIDGET_DATA, 'widget-', 'Widgets')

        # Bug fix: these two previously dumped WIDGET_DATA, so the classifier
        # duplicate (id "dup_check-classifier") was never actually created.
        temp_classifier = self._create_temp_json(TestFlow.CLASSIFIER_DATA, 'classifier-', 'Classifiers')
        temp_classifier2 = self._create_temp_json(TestFlow.CLASSIFIER_DATA, 'classifier-', 'Classifiers')

        # Same layout id in two different locations counts as a duplicate.
        temp_layout = self._create_temp_json(TestFlow.LAYOUT_DATA, 'layout-', 'Layouts')
        temp_layout2 = self._create_temp_json(TestFlow.LAYOUT_DATA, 'layout-', 'Packs/CortexXDR/Layouts')

        temp_dashboard = self._create_temp_json(TestFlow.DASHBOARD_DATA, 'dashboard-', 'Dashboards')
        temp_dashboard2 = self._create_temp_json(TestFlow.DASHBOARD_DATA2, 'dashboard-', 'Dashboards')

        # One incident-type field and one indicator-type field with same data.
        temp_incident_field = self._create_temp_json(TestFlow.INCIDENT_FIELD_DATA, 'incidentfield-',
                                                     'IncidentFields')
        temp_indicator_field = self._create_temp_json(TestFlow.INCIDENT_FIELD_DATA, 'incidentfield-',
                                                      'IndicatorFields')

        # Temporary file to hold the generated id_set.
        temp_id_set = tempfile.NamedTemporaryFile(mode="w+", prefix='temp_id_set-',  # disable-secrets-detection
                                                  suffix='.json', dir='Tests/scripts')  # disable-secrets-detection
        json_path = temp_id_set.name

        re_create_id_set(json_path, ['Reports', 'Layouts', 'Widgets', 'Classifiers', 'Dashboards',
                                     'IndicatorFields', 'IncidentFields'])
        with open(json_path) as json_file:
            data = json.load(json_file)
        dup_data = find_duplicates(data)

        assert any('temp-widget-dup-check' in i for i in dup_data)
        assert any('temp-report-dup-check' in i for i in dup_data)
        # Bug fix: this assertion previously re-checked the widget id; the
        # classifier duplicate is what this line is meant to verify.
        assert any('dup_check-classifier' in i for i in dup_data)
        assert any('dup-check-dashbaord' in i for i in dup_data)
        assert any('layout-dup-check-id' in i for i in dup_data)
        assert any('incident_account_field_dup_check' in i for i in dup_data)
class TestGenericFields:
    @staticmethod
    def test_process_generic_fields(pack):
        """
        Given
        - A generic field file
        When
        - parsing generic field files
        Then
        - parsing all the data from file successfully
        """
        raw_field = {
            "cliName": "operatigsystem",
            "id": "id",
            "name": "Operating System",
            "definitionId": "assets",
            "fromVersion": "6.5.0",
            "associatedTypes": ["Asset Type"],
        }
        types_list = [{
            "Asset Type": {
                "name": "Asset Type",
                "file_path": "path/path",
                "fromversion": "6.5.0",
                "pack": "ObjectsExample",
                "definitionId": "assets",
                "layout": "Workstation Layout",
            }
        }]

        field_file = pack.create_generic_field('test-generic-field')
        field_file.write_json(raw_field)

        parsed = get_generic_field_data(field_file.path, generic_types_list=types_list)
        field_entry = parsed.get('id')

        # Every id-set field must have been extracted from the file.
        for expected_key in ('name', 'file_path', 'fromversion', 'definitionId', 'generic_types'):
            assert expected_key in field_entry
class TestGenericType:
    @staticmethod
    def test_get_generic_type_data(pack):
        """
        Given
        - A generic type file
        When
        - parsing object type files
        Then
        - parsing all the data from file successfully
        """
        type_file = pack.create_generic_module('test-object-type')
        type_file.write_json({
            "id": "type-id",
            "name": "type-name",
            "fromVersion": "version",
            "definitionId": "Assets",
            "layout": "layout",
        })

        parsed = get_generic_type_data(type_file.path)
        type_entry = parsed.get('type-id')

        # Every id-set field must have been extracted from the file.
        for expected_key in ('name', 'file_path', 'fromversion', 'layout', 'definitionId'):
            assert expected_key in type_entry
class TestGenericDefinition:
    @staticmethod
    def test_get_generic_definition_data(pack):
        """
        Given
        - A generic definition file
        When
        - parsing definition type files
        Then
        - parsing all the data from file successfully
        """
        definition_file = pack.create_generic_definition('test-generic-definition')
        definition_file.write_json({
            "id": "type-id",
            "name": "type-name",
            "fromVersion": "version",
            "auditable": False,
        })

        parsed = get_general_data(definition_file.path)
        definition_entry = parsed.get('type-id')

        # Every id-set field must have been extracted from the file.
        for expected_key in ('name', 'file_path', 'fromversion', 'pack'):
            assert expected_key in definition_entry
class TestGenericModule:
    @staticmethod
    def test_get_generic_module_data(repo):
        """
        Given
        - A generic module file
        When
        - parsing generic generic module files
        Then
        - parsing all the data from file successfully
        """
        module_content = {
            "id": "id",
            "version": -1,
            "name": "Vulnerability Management",
            "fromVersion": "6.5.0",
            "definitionIds": ["assets"],
            "views": [{
                "name": "Vulnerability Management",
                "title": "Risk Base Vulnerability Management",
                "tabs": [{
                    "name": "Assets",
                    "newButtonDefinitionId": "assets",
                    "dashboard": {
                        "id": "assets_dashboard",
                        "version": -1,
                        "fromDate": "0001-01-01T00:00:00Z",
                        "toDate": "0001-01-01T00:00:00Z",
                        "name": "Assets Dashboard",
                        "prevName": "Assets Dashboard",
                    },
                }],
            }],
        }

        module_file = repo.create_pack('pack').create_generic_module('test-generic-module')
        module_file.write_json(module_content)

        parsed = get_generic_module_data(module_file.path)
        module_entry = parsed.get('id')

        # Every id-set field must have been extracted from the file.
        for expected_key in ('name', 'file_path', 'fromversion', 'definitionIds', 'views', 'pack'):
            assert expected_key in module_entry
class TestJob:
    """Tests for process_jobs: parsing Job content items into id-set entries."""

    @staticmethod
    @pytest.mark.parametrize('print_logs', (True, False))
    @pytest.mark.parametrize('is_feed', (True, False))
    def test_process_jobs(capsys, repo, is_feed: bool, print_logs: bool, mocker):
        """
        Given
        - A repo with a job object.
        - Whether to print logs.
        When
        - Parsing job files.
        Then
        - Verify output to logs.
        """
        pack = repo.create_pack()
        job_details = 'job details'
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        job = pack.create_job(is_feed, details=job_details)
        res = process_jobs(job.path, print_logs)

        captured = capsys.readouterr()

        assert len(res) == 1
        datum = res[0][job.pure_name]
        assert datum['name'] == job.pure_name

        # The recorded file_path must point at the job's JSON file inside the
        # pack's Jobs directory.
        path = Path(datum['file_path'])
        assert path.name == f'job-{job.pure_name}.json'
        assert path.exists()
        assert path.is_file()
        assert path.suffix == '.json'
        assert path.parts[-2] == JOBS_DIR
        assert path.parts[-3] == pack.name

        assert datum['fromversion'] == DEFAULT_JOB_FROM_VERSION
        assert datum['pack'] == pack.name
        assert datum['details'] == job_details
        assert datum['selectedFeeds'] == []

        # The "adding ... to id_set" log line must appear iff logging was on.
        assert (f'adding {job.path} to id_set' in captured.out) == print_logs

    @staticmethod
    @pytest.mark.parametrize('is_feed', (True, False))
    def test_process_jobs_non_job_extension(capsys, repo, is_feed: bool, mocker):
        """
        Given
        - A file that isn't a valid Job (wrong filetype)
        - Whether to print logs.
        When
        - Parsing job files.
        Then
        - Verify output to logs.
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        pack = repo.create_pack()
        job = pack.create_job(is_feed)
        job_path = Path(job.path)
        # Renaming to .yml makes the file fail the job filetype check.
        new_path = job_path.rename(job_path.with_suffix('.yml'))

        res = process_jobs(str(new_path), False)
        assert not res

    @staticmethod
    @pytest.mark.parametrize('print_logs', (True, False))
    @pytest.mark.parametrize('is_feed', (True, False))
    def test_process_jobs_file_nonexistent(capsys, repo, is_feed: bool, print_logs: bool, mocker):
        """
        Given
        - A file that isn't a valid Job (missing file)
        - Whether to print logs.
        When
        - Parsing job files.
        Then
        - Verify output to logs.
        """
        mocker.patch.object(uis, 'should_skip_item_by_mp', return_value=False)
        pack = repo.create_pack()
        job = pack.create_job(is_feed)
        job_json_path = Path(job.path)
        job_json_path_as_yml = job_json_path.with_suffix('.yml')
        # Move the file away so the original .json path no longer exists.
        job_json_path.rename(job_json_path_as_yml)

        with pytest.raises(FileNotFoundError):
            assert not process_jobs(str(job_json_path), print_logs)
        # The failure message is expected to be printed before the raise, so
        # this check runs after the exception is caught.
        assert f"failed to process job {job_json_path}" in capsys.readouterr().out
def test_merge_id_sets():
    """
    Given
    - two id_set files
    - id_sets don't contain duplicate items
    When
    - merged
    Then
    - ensure the output id_set contains items from both id_sets
    - ensure no duplicates found
    """
    # Fix: the original took the tmp_path fixture and created a directory
    # that was never used; merge_id_sets works purely in memory.
    first_id_set = {
        'playbooks': [
            {
                'playbook_foo1': {
                }
            }
        ],
        'integrations': [
            {
                'integration_foo1': {
                }
            }
        ],
        'Packs': {
            'pack_foo1': {
            }
        }
    }

    second_id_set = {
        'playbooks': [
            {
                'playbook_foo2': {
                }
            }
        ],
        'integrations': [
            {
                'integration_foo2': {
                }
            }
        ],
        'Packs': {
            'pack_foo2': {
            }
        }
    }

    output_id_set, duplicates = merge_id_sets(first_id_set, second_id_set)

    # Lists are concatenated and the Packs dicts are merged, keeping every
    # entry from both inputs.
    assert output_id_set.get_dict() == {
        'playbooks': [
            {
                'playbook_foo1': {
                }
            },
            {
                'playbook_foo2': {
                }
            }
        ],
        'integrations': [
            {
                'integration_foo1': {
                }
            },
            {
                'integration_foo2': {
                }
            }
        ],
        'Packs': {
            'pack_foo1': {
            },
            'pack_foo2': {
            }
        }
    }
    assert not duplicates
def test_merged_id_sets_with_duplicates(caplog):
    """
    Given
    - first_id_set.json
    - second_id_set.json
    - they both has the same script ScriptFoo
    When
    - merged
    Then
    - ensure output id_set contains items from both id_sets
    - ensure merge fails
    - ensure duplicate ScriptFoo found
    """
    caplog.set_level(logging.DEBUG)

    first_id_set = {
        'playbooks': [{'playbook_foo1': {'name': 'playbook_foo1'}}],
        'scripts': [{'ScriptFoo': {'name': 'ScriptFoo', 'pack': 'ScriptFoo1'}}],
    }
    second_id_set = {
        'playbooks': [{'playbook_foo2': {'name': 'playbook_foo2'}}],
        'scripts': [{'ScriptFoo': {'name': 'ScriptFoo', 'pack': 'ScriptFoo2'}}],
    }

    merged, duplicates = merge_id_sets(first_id_set, second_id_set)

    # Same script id coming from two different packs aborts the merge.
    assert merged is None
    assert duplicates == ['ScriptFoo']
def test_merged_id_sets_with_legal_duplicates(caplog):
    """
    Given
    - first_id_set.json
    - second_id_set.json
    - they both have the same playbook
    When
    - merged
    Then
    - ensure merge fails
    - ensure duplicate playbook_foo1 found
    """
    caplog.set_level(logging.DEBUG)

    first_id_set = {
        'playbooks': [{
            'playbook_foo1': {
                'name': 'playbook_foo1',
                'pack': 'foo_1',
                "source": ['github.com', 'demisto', 'repo1'],
            }
        }],
        'scripts': [{
            'Script_Foo1': {
                'name': 'ScriptFoo',
                'pack': 'foo_1',
                "source": ['github.com', 'demisto', 'repo1'],
            }
        }],
    }
    second_id_set = {
        'playbooks': [{
            'playbook_foo1': {
                'name': 'playbook_foo1',
                'pack': 'foo_1',
                "source": ['github.com', 'demisto', 'repo2'],
            }
        }],
        'scripts': [],
    }

    merged, duplicates = merge_id_sets(first_id_set, second_id_set)

    # Same playbook id from two different source repos still aborts the merge.
    assert merged is None
    assert duplicates == ['playbook_foo1']
def test_get_filters_and_transformers_from_complex_value():
    """
    Given
    - complex value with 3 transformers: Length, Length and toUpperCase
      and 3 filters: isEqualString, isEqualString and StringContainsArray
    When
    - parsing transformers and filters from the value
    Then
    - parsing 2 transformers successfully
    - Length transformer shows once
    - parsing 2 filters successfully
    - isEqualString filter shows once
    """
    complex_value = {
        'transformers': [{'operator': name} for name in ('toUpperCase', 'Length', 'Length')],
        'filters': [[{'operator': name}] for name in ('isEqualString', 'isEqualString', 'StringContainsArray')],
    }

    transformers, filters = get_filters_and_transformers_from_complex_value(complex_value)

    # Duplicated operator names must be collapsed to a single entry each.
    assert len(transformers) == 2
    assert len(filters) == 2
    assert 'toUpperCase' in transformers
    assert 'Length' in transformers
    assert 'isEqualString' in filters
    assert 'StringContainsArray' in filters
def test_should_skip_item_by_mp(mocker):
    """
    Given
    - path of content item, the current marketplace this id set is generated for.
    When
    - when creating the id set, checking a content item that is 'xsoar only' if it should be part of the mpV2
      id set.
    Then
    - return True since this item should be skipped.
    """
    import demisto_sdk.commands.common.update_id_set as uis
    # Force the metadata lookup to report the item as xsoar-only.
    mocker.patch.object(uis, 'get_mp_types_from_metadata_by_item', return_value=['xsoar'])

    xsoar_only_pack = os.path.join(TESTS_DIR, 'test_files', 'DummyPackXsoarMPOnly')
    xsoar_only_script = os.path.join(TESTS_DIR, 'test_files', 'DummyPackScriptIsXsoarOnly', 'Scripts', 'DummyScript')

    # Both a pack-level and a script-level item must be skipped for mpv2.
    assert should_skip_item_by_mp(xsoar_only_pack, 'mpv2')
    assert should_skip_item_by_mp(xsoar_only_script, 'mpv2')
| StarcoderdataPython |
4813652 | import inspect
from collections import UserList
class Operator:
    """A named n-ary operator for RPN (Reverse Polish Notation) evaluation.

    :param name: display symbol, e.g. ``"+"``.
    :param fn: callable implementing the operator.
    :param num: arity of the operator; when omitted it is inferred from
        *fn*'s signature.
    """

    def __init__(self, name, fn, num=None):
        self.name = name
        self.fn = fn
        # Bug fix: the original used `num or len(...)`, which silently
        # ignored an explicitly passed arity of 0 (0 is falsy). Compare
        # against None so any explicit value is honoured.
        self.num = len(inspect.signature(fn).parameters) if num is None else num

    def __repr__(self):
        return f"{self.name}"
class ReversePolishStack(UserList):
    """A list-backed RPN evaluator.

    Appending an :class:`Operator` immediately pops its operands and pushes
    the computed result; ``full_stacks`` keeps the raw token history so
    ``__repr__`` can show the original expression.
    """

    def __init__(self, init_list=None, finalize=False):
        init_list = init_list or []
        self.full_stacks = [*init_list]  # raw token history (numbers + operators)
        if finalize:
            # Re-feed the tokens through append() so operators are evaluated
            # now instead of being stored verbatim.
            init_list = self.finalize(init_list)
        super().__init__(init_list)

    @staticmethod
    def finalize(l):
        # Build a fresh stack token by token; each append() may collapse
        # operands into a result (see append below).
        new_rpn_stack = ReversePolishStack()
        for element in l:
            new_rpn_stack.append(element)
        return new_rpn_stack

    def append(self, item) -> None:
        self.full_stacks.append(item)  # for tracking full stacks
        if isinstance(item, Operator):
            # Pop the operator's operands (most recent first), then reverse
            # them so fn receives the original left-to-right order.
            nums = (self.pop() for _ in range(item.num))
            item = item.fn(*tuple(nums)[::-1])
        super().append(item)

    @property
    def result(self):
        # More than one item left: re-feed the current items through
        # finalize() (evaluating any stored operators) and accept the outcome
        # only if it collapses to a single value; otherwise return None.
        if len(self) != 1:
            res_maybe = self.finalize(self)
            return res_maybe[-1] if len(res_maybe) == 1 else None
        return self[-1]

    def __repr__(self):
        return " ".join((f"{x}" for x in self.full_stacks))
# Binary arithmetic operators, keyed by their display symbol.
ops = {
    "+": Operator("+", lambda x, y: x + y),
    "-": Operator("-", lambda x, y: x - y),
    "*": Operator("*", lambda x, y: x * y),
    "/": Operator("/", lambda x, y: x / y),
}
def permutation_with_binary(numbers):
    """Placeholder: enumerate the distinct RPN orderings of *numbers*
    combined with binary operators (not implemented yet)."""
    # Given numbers a, b, c, d and a binary operator token `!`, distinct
    # postfix strings correspond to distinct infix groupings, e.g.:
    #   ab!c!d!  <->  (((a!b)!c)!d)
    #   ab!cd!!  <->  ((a!b)!(c!d))
    # NOTE(review): the original comment listed the same mapping for all
    # five numbered cases, which looks like a copy-paste slip; the intended
    # enumeration should be confirmed before implementing.
    pass
if __name__ == '__main__':
    # Demo programs as (tokens, finalize) pairs; each prints its evaluated
    # result. The last one defers evaluation until .result is read.
    demos = [
        ([2, 5, ops["-"]], True),
        ([1, 2, ops["+"], 4, ops["*"]], True),
        ([1, 2, ops["+"], 4, ops["*"], 2, ops["*"]], True),
        ([1, 2, ops["+"], 4, ops["*"], 2, ops["*"], 3], True),
        ([1, 2, ops["+"], 4, ops["*"], 2, ops["*"], 3, ops["+"]], False),
    ]
    for tokens, do_finalize in demos:
        print(ReversePolishStack(tokens, do_finalize).result)
| StarcoderdataPython |
1775776 | import unittest
from swamp.parsers.mtzparser import MtzParser
class MockGemmiMtz(object):
    """A class to mock :py:obj:gemmi.Mtz for testing purposes

    :param list columns: a list containing instances of :py:obj:`~swmap.parsers.tests.test_mtz_parser.MockGemmiMtzColumn`
    """

    def __init__(self, columns):
        # Only the `columns` attribute is consumed by the parser under test.
        self.columns = columns
class MockGemmiMtzColumn(object):
    """A class to mock :py:obj:gemmi.Mtz.Column for testing purposes

    :param str label: label of the mocked MTZ column
    :param str type: type of the mocked MTZ column
    """

    def __init__(self, label, type):
        self.label = label
        # NOTE: `type` shadows the builtin, but it mirrors gemmi's attribute
        # name and is part of the mocked interface, so it is kept as-is.
        self.type = type
class MockMtzParser(MtzParser):
    """A class to mock :py:obj:`~swmap.parsers.mtzparser.MtzParser` for testing purposes"""

    def read_reflections(self):
        """Override :py:func:`~swamp.parsers.mtzparser.Mtzparser.read_reflections` for testing purposes"""
        # No-op: tests inject `reflection_file` directly instead of reading
        # an actual MTZ file from disk.
        pass
class MtzParserTestCase(unittest.TestCase):
    """Exercise MtzParser.parse() against mocked gemmi MTZ column layouts.

    Each test injects a MockGemmiMtz with a different set of column
    label/type pairs and checks the 15-slot summary tuple:
    (F, SIGF, I, SIGI, free-flag, DANO, SIGDANO,
     F(+), SIGF(+), I(+), SIGI(+), F(-), SIGF(-), I(-), SIGI(-)).
    """

    def test_1(self):
        # Anomalous pairs with mixed-case labels; no DANO columns present.
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("SIGI(-)", "M"),
                        MockGemmiMtzColumn("FP", "F"),
                        MockGemmiMtzColumn("F(+)", "G"),
                        MockGemmiMtzColumn("fp(-)", "G"),
                        MockGemmiMtzColumn("sigF(+)", "L"),
                        MockGemmiMtzColumn("sigi(+)", "M"),
                        MockGemmiMtzColumn("SIGFP(-)", "L"),
                        MockGemmiMtzColumn("dummy-label", "E"),
                        MockGemmiMtzColumn("Free", "I"),
                        MockGemmiMtzColumn("SIGFP", "Q"),
                        MockGemmiMtzColumn("sigi", "Q"),
                        MockGemmiMtzColumn("i", "J"),
                        MockGemmiMtzColumn("I(+)", "K"),
                        MockGemmiMtzColumn("i(-)", "K")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        parser.error = False
        parser.parse()
        self.assertTupleEqual(parser.summary, (b'FP', b'SIGFP', b'i', b'sigi', b'Free', None, None, b'F(+)', b'sigF(+)',
                                               b'I(+)', b'sigi(+)', b'fp(-)', b'SIGFP(-)', b'i(-)', b'SIGI(-)'))
        self.assertEqual(parser.fname, '/empty/path/fname.mtz')

    def test_2(self):
        # No usable amplitude/intensity columns: parse() must flag an error.
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("dummy-label", "E")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        # NOTE(review): parser.error is expected to already be truthy here,
        # presumably set during MtzParser construction — confirm against the
        # parser implementation.
        self.assertTrue(parser.error)
        parser.error = False
        parser.parse()
        self.assertTrue(parser.error)

    def test_3(self):
        # Refmac-style output columns: only F/SIGF and the free flag map.
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("H", "H"),
                        MockGemmiMtzColumn("K", "H"),
                        MockGemmiMtzColumn("L", "H"),
                        MockGemmiMtzColumn("FREE", "I"),
                        MockGemmiMtzColumn("FP", "F"),
                        MockGemmiMtzColumn("SIGFP", "Q"),
                        MockGemmiMtzColumn("FC", "F"),
                        MockGemmiMtzColumn("PHIC", "P"),
                        MockGemmiMtzColumn("FC_ALL", "F"),
                        MockGemmiMtzColumn("PHIC_ALL", "P"),
                        MockGemmiMtzColumn("FWT", "F"),
                        MockGemmiMtzColumn("PHWT", "P"),
                        MockGemmiMtzColumn("DELFWT", "F"),
                        MockGemmiMtzColumn("PHDELWT", "P"),
                        MockGemmiMtzColumn("FOM", "W"),
                        MockGemmiMtzColumn("FC_ALL_LS", "F"),
                        MockGemmiMtzColumn("PHIC_ALL_LS", "P")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        parser.error = False
        parser.parse()
        self.assertTupleEqual(parser.summary, (b'FP', b'SIGFP', None, None, b'FREE', None, None, None, None, None,
                                               None, None, None, None, None))
        self.assertEqual(parser.fname, '/empty/path/fname.mtz')

    def test_4(self):
        # Full XDS/aimless-style dataset: all 15 summary slots populated.
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("H", "H"),
                        MockGemmiMtzColumn("K", "H"),
                        MockGemmiMtzColumn("L", "H"),
                        MockGemmiMtzColumn("FreeR_flag", "I"),
                        MockGemmiMtzColumn("F_XDSdataset", "F"),
                        MockGemmiMtzColumn("SIGF_XDSdataset", "Q"),
                        MockGemmiMtzColumn("DANO_XDSdataset", "D"),
                        MockGemmiMtzColumn("SIGDANO_XDSdataset", "Q"),
                        MockGemmiMtzColumn("F_XDSdataset(+)", "G"),
                        MockGemmiMtzColumn("SIGF_XDSdataset(+)", "L"),
                        MockGemmiMtzColumn("F_XDSdataset(-)", "G"),
                        MockGemmiMtzColumn("SIGF_XDSdataset(-)", "L"),
                        MockGemmiMtzColumn("ISYM_XDSdataset", "Y"),
                        MockGemmiMtzColumn("IMEAN_XDSdataset", "J"),
                        MockGemmiMtzColumn("SIGIMEAN_XDSdataset", "Q"),
                        MockGemmiMtzColumn("I_XDSdataset(+)", "K"),
                        MockGemmiMtzColumn("SIGI_XDSdataset(+)", "M"),
                        MockGemmiMtzColumn("I_XDSdataset(-)", "K"),
                        MockGemmiMtzColumn("SIGI_XDSdataset(-)", "M")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        parser.error = False
        parser.parse()
        self.assertTupleEqual(parser.summary, (b'F_XDSdataset', b'SIGF_XDSdataset', b'IMEAN_XDSdataset',
                                               b'SIGIMEAN_XDSdataset', b'FreeR_flag', b'DANO_XDSdataset',
                                               b'SIGDANO_XDSdataset', b'F_XDSdataset(+)', b'SIGF_XDSdataset(+)',
                                               b'I_XDSdataset(+)', b'SIGI_XDSdataset(+)', b'F_XDSdataset(-)',
                                               b'SIGF_XDSdataset(-)', b'I_XDSdataset(-)', b'SIGI_XDSdataset(-)'))
        self.assertEqual(parser.fname, '/empty/path/fname.mtz')

    def test_5(self):
        # "Plus"/"Minus" suffix spellings instead of "(+)"/"(-)".
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("SIGIminus", "M"),
                        MockGemmiMtzColumn("F", "F"),
                        MockGemmiMtzColumn("FWT", "F"),
                        MockGemmiMtzColumn("F_Plus", "G"),
                        MockGemmiMtzColumn("f(-)", "G"),
                        MockGemmiMtzColumn("sigF_PLUS", "L"),
                        MockGemmiMtzColumn("sigi(+)", "M"),
                        MockGemmiMtzColumn("SIGFP(-)", "L"),
                        MockGemmiMtzColumn("dummy-label", "E"),
                        MockGemmiMtzColumn("Free_FLAG", "I"),
                        MockGemmiMtzColumn("SIGFP", "Q"),
                        MockGemmiMtzColumn("sigi", "Q"),
                        MockGemmiMtzColumn("i", "J"),
                        MockGemmiMtzColumn("Iplus", "K"),
                        MockGemmiMtzColumn("iMINUS", "K"),
                        MockGemmiMtzColumn("sigdano", "Q"),
                        MockGemmiMtzColumn("danoP", "D")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        parser.error = False
        parser.parse()
        self.assertTupleEqual(parser.summary, (b'F', b'SIGFP', b'i', b'sigi', b'Free_FLAG', b'danoP', b'sigdano',
                                               b'F_Plus', b'sigF_PLUS', b'Iplus', b'sigi(+)', b'f(-)', b'SIGFP(-)',
                                               b'iMINUS', b'SIGIminus'))
        self.assertEqual(parser.fname, '/empty/path/fname.mtz')

    def test_6(self):
        # Labels with extra infixes (e.g. "sigi_something_(+)") and a free
        # flag ("SA_FLAG") that is not recognised -> free slot stays None.
        parser = MockMtzParser('/empty/path/fname.mtz')
        mock_columns = [MockGemmiMtzColumn("Sigi_(-)", "M"),
                        MockGemmiMtzColumn("F", "F"),
                        MockGemmiMtzColumn("FWT", "F"),
                        MockGemmiMtzColumn("F_Plus", "G"),
                        MockGemmiMtzColumn("f(-)", "G"),
                        MockGemmiMtzColumn("sigF_PLUS", "L"),
                        MockGemmiMtzColumn("sigi_something_(+)", "M"),
                        MockGemmiMtzColumn("SIGFPMINUS", "L"),
                        MockGemmiMtzColumn("dummy-label", "E"),
                        MockGemmiMtzColumn("SA_FLAG", "I"),
                        MockGemmiMtzColumn("SIGFP", "Q"),
                        MockGemmiMtzColumn("sigi_something", "Q"),
                        MockGemmiMtzColumn("i", "J"),
                        MockGemmiMtzColumn("I_plus", "K"),
                        MockGemmiMtzColumn("iMINUS", "K"),
                        MockGemmiMtzColumn("sigdanoP", "Q"),
                        MockGemmiMtzColumn("danoP", "D")]
        parser.reflection_file = MockGemmiMtz(mock_columns)
        parser.error = False
        parser.parse()
        self.assertTupleEqual(parser.summary, (b'F', b'SIGFP', b'i', b'sigi_something', None, b'danoP', b'sigdanoP',
                                               b'F_Plus', b'sigF_PLUS', b'I_plus', b'sigi_something_(+)', b'f(-)',
                                               b'SIGFPMINUS', b'iMINUS', b'Sigi_(-)'))
        self.assertEqual(parser.fname, '/empty/path/fname.mtz')
| StarcoderdataPython |
3292853 | import logging
import hikaru
from typing import Dict, Any, Optional
from pydantic import BaseModel, PrivateAttr
from .autogenerated.events import KIND_TO_EVENT_CLASS
from .autogenerated.models import get_api_version
from .model_not_found_exception import ModelNotFoundException
from ..helper import prefix_match, exact_match
from ...core.playbooks.base_trigger import BaseTrigger
from ...core.playbooks.base_trigger import TriggerEvent
from ...core.model.k8s_operation_type import K8sOperationType
from ...core.model.events import ExecutionBaseEvent
from ...core.reporting.base import Finding
class IncomingK8sEventPayload(BaseModel):
    """
    The format of incoming payloads containing kubernetes events. This is mostly used for deserialization.
    """

    operation: str  # converted to K8sOperationType when matching triggers
    kind: str  # Kubernetes resource kind, e.g. "Pod"; compared against trigger.kind
    apiVersion: str = ""
    clusterUid: str
    description: str  # human-readable change description (newlines stripped downstream)
    obj: Dict[Any, Any]  # current object manifest as a raw dict (parsed via hikaru)
    oldObj: Optional[Dict[Any, Any]]  # previous manifest when available, else None
class K8sTriggerEvent(TriggerEvent):
    """TriggerEvent wrapping a raw Kubernetes change payload."""

    k8s_payload: IncomingK8sEventPayload

    def get_event_name(self) -> str:
        # Deliberately the literal class name (not type(self).__name__) so
        # that routing stays stable for any subclass.
        return K8sTriggerEvent.__name__
class K8sBaseTrigger(BaseTrigger):
    """Base trigger that matches incoming Kubernetes change events against
    kind/operation/name-prefix/namespace-prefix/label filters and builds the
    concrete execution event for matching payloads."""

    kind: str
    operation: K8sOperationType = None
    name_prefix: str = None
    namespace_prefix: str = None
    labels_selector: str = None  # comma-separated "key=value" pairs
    _labels_map: Dict = PrivateAttr()

    def __init__(self, *args, **data):
        super().__init__(*args, **data)
        # Fix: always initialize the private attribute. The original only
        # assigned it when labels_selector was set, forcing should_fire() to
        # guard the access with getattr(self, "_labels_map", None).
        self._labels_map = {}
        if self.labels_selector:
            for label in self.labels_selector.split(","):
                label_parts = label.split("=")
                if len(label_parts) != 2:
                    msg = f"Illegal label selector {label}"
                    logging.error(msg)
                    raise Exception(msg)
                self._labels_map[label_parts[0].strip()] = label_parts[1].strip()

    def get_trigger_event(self):
        return K8sTriggerEvent.__name__

    def should_fire(self, event: TriggerEvent):
        """Return True when *event* is a K8sTriggerEvent passing every
        configured filter (kind, operation, name/namespace prefix, labels)."""
        if not isinstance(event, K8sTriggerEvent):
            return False
        k8s_trigger_event = K8sTriggerEvent(**event.dict())
        k8s_payload = k8s_trigger_event.k8s_payload

        # "Any" is a wildcard kind accepting every resource kind.
        if self.kind != "Any" and self.kind != k8s_payload.kind:
            return False
        if not exact_match(self.operation, K8sOperationType(k8s_payload.operation)):
            return False

        meta = k8s_payload.obj.get("metadata", {})
        if not prefix_match(self.name_prefix, meta.get("name", "")):
            return False
        if not prefix_match(self.namespace_prefix, meta.get("namespace", "")):
            return False

        if self._labels_map:
            obj_labels = meta.get("labels", {})
            # Every selector pair must match exactly.
            for label_key, label_value in self._labels_map.items():
                if obj_labels.get(label_key, "") != label_value:
                    return False
        return True

    @classmethod
    def __parse_kubernetes_objs(cls, k8s_payload: IncomingK8sEventPayload):
        """Deserialize obj/oldObj dicts into hikaru model instances.

        Raises ModelNotFoundException when no model class exists for the
        payload's kind/apiVersion combination.
        """
        model_class = get_api_version(k8s_payload.apiVersion).get(k8s_payload.kind)
        if model_class is None:
            msg = f"classes for kind {k8s_payload.kind} cannot be found. skipping. description {k8s_payload.description}"
            logging.error(msg)
            raise ModelNotFoundException(msg)

        obj = hikaru.from_dict(k8s_payload.obj, cls=model_class)
        old_obj = None
        if k8s_payload.oldObj is not None:
            old_obj = hikaru.from_dict(k8s_payload.oldObj, cls=model_class)
        return obj, old_obj

    def build_execution_event(
        self, event: K8sTriggerEvent, findings: Dict[str, Finding]
    ) -> Optional[ExecutionBaseEvent]:
        """Build the concrete Kubernetes execution event, or None when the
        payload's kind has no registered event class."""
        # we can't use self.get_execution_event_type() because for KubernetesAnyAllChangesTrigger we need to filter out
        # stuff like ConfigMaps and we do that filtering here by checking if there is a real event_class
        # it might be better to move that filtering logic to should_fire() where it belongs and to use
        # self.get_execution_event_type() here instead of KIND_TO_EVENT_CLASS. Using KIND_TO_EVENT_CLASS leads to
        # inconsistencies for KubernetesAnyAllChangesTrigger (and possibly elsewhere) which claims in
        # get_execution_event_type() that it creates a KubernetesAnyChangeEvent object but it really creates
        # a different concrete event class using the logic below
        event_class = KIND_TO_EVENT_CLASS.get(event.k8s_payload.kind.lower())
        if event_class is None:
            logging.info(
                f"classes for kind {event.k8s_payload.kind} cannot be found. skipping. description {event.k8s_payload.description}"
            )
            return None

        (obj, old_obj) = self.__parse_kubernetes_objs(event.k8s_payload)
        operation_type = K8sOperationType(event.k8s_payload.operation)
        return event_class(
            findings=findings,
            operation=operation_type,
            description=event.k8s_payload.description.replace("\n", ""),
            obj=obj,
            old_obj=old_obj,
        )
| StarcoderdataPython |
1779202 | from typing import List
class Solution:
    def calPoints(self, ops: List[str]) -> int:
        """Baseball scoring: apply each operation to a running score record.

        "C" removes the previous valid score, "D" doubles it, "+" adds the
        previous two scores, and any other token is a literal integer score.
        Returns the sum of all remaining scores.
        """
        scores = []
        for op in ops:
            if op == "C":
                scores.pop()
            elif op == "D":
                scores.append(scores[-1] * 2)
            elif op == "+":
                scores.append(scores[-1] + scores[-2])
            else:
                scores.append(int(op))
        return sum(scores)
# Demo run with the sample input from the problem statement (prints 27).
ops = ["5", "-2", "4", "C", "D", "9", "+", "+"]
print(Solution().calPoints(ops))
| StarcoderdataPython |
87025 | from selenium import webdriver
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Github:
    """Selenium bot that signs in to github.com and then signs out again.

    NOTE(review): credentials are blank and the geckodriver path is
    hard-coded for the original author's machine — confirm before running.
    """

    def __init__(self):
        # Launch Firefox via a local geckodriver binary (Windows path).
        self.brower = webdriver.Firefox(executable_path='C:\\Users\\<NAME>\\Downloads\\geckodriver')

    def abrir_site(self):
        """Open the GitHub landing page."""
        self.brower.get("https://github.com/")
        sleep(1)

    def navegar(self):
        """Run the full flow: open site, sign in, sign out, close the browser."""
        self.abrir_site()
        self.brower.maximize_window()
        # "Sign in" link on the landing page header.
        self.brower.find_element_by_xpath("/html/body/div[1]/header/div/div[2]/div[2]/a[1]").click()
        self.brower.implicitly_wait(5)
        self.logins()
        self.brower.implicitly_wait(5)
        # Open the avatar dropdown menu.
        self.brower.find_element_by_css_selector('.js-feature-preview-indicator-container > summary:nth-child(1)').click()
        self.brower.implicitly_wait(5)
        # Click the "Sign out" form button inside the dropdown.
        self.brower.find_element_by_xpath('/html/body/div[1]/header/div[7]/details/details-menu/form/button').click()
        sleep(5)
        self.brower.close()

    def logins(self):
        """Fill the login form and submit (credentials intentionally blank)."""
        self.brower.find_element_by_xpath("//*[@id='login_field']").send_keys('')
        self.brower.implicitly_wait(5)
        self.brower.find_element_by_xpath("//*[@id='password']").send_keys('')
        self.brower.implicitly_wait(5)
        self.brower.find_element_by_name('commit').click()
# Script behaviour: build the bot and run the whole flow (opens Firefox).
x = Github()
x.navegar()
| StarcoderdataPython |
# Survey of 4 people: reports the average age, the name/age of the oldest
# man, and the number of women under 20.
somaidade = 0
mediaidade = 0
maioridadehomen = 0
nomevelho = ''
totmulher20 = 0
for p in range(1, 5):
    print('-------{} pessoa-------'.format(p))
    nome = str(input('nome:')).strip()
    idade = int(input('idade'))
    sexo = str(input('sexo: [m/f]: ')).strip()
    somaidade += idade
    # The first man seeds the "oldest man" tracker.
    if p == 1 and sexo in 'Mm':
        maioridadehomen = idade
        nomevelho = nome
    # BUG FIX: the original tested `sexo in 'Mn'` (typo), so men entered as
    # 'm' after the first person could never become the oldest man.
    if sexo in 'Mm' and idade > maioridadehomen:
        maioridadehomen = idade
        nomevelho = nome
    if sexo in 'Ff' and idade < 20:
        totmulher20 += 1
mediaidade = somaidade / 4
print('a media de idade foi {}'.format(mediaidade))
print('o homen mais velho tem {} anos e se chama {}'.format(maioridadehomen, nomevelho))
print('ao todo são {} mulheres com menos de 20 anos'.format(totmulher20))
| StarcoderdataPython |
1666600 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
import math
pi = 3.14159265359
pose = [0 , 0 ,0 ]
def Waypoints():
    """Sample the path y = 2*sin(x)*sin(x/2) at 5-degree steps.

    Returns [xs, ys]: two parallel lists of samples (75 each, for angles
    5..375 degrees), every value rounded to 2 decimal places. Uses the
    module-level `pi` constant.
    """
    xs = []
    ys = []
    for step_deg in range(5, 380, 5):
        x = pi * step_deg / 180
        y = 2 * (math.sin(x)) * (math.sin(x / 2))
        xs.append(round(x, 2))
        ys.append(round(y, 2))
    return [xs, ys]
def error_in_theatha(x, y):
    """Heading (radians, 2 d.p.) from each waypoint to the next.

    Walks the first 73 points, taking atan of the slope from the previous
    point (starting at the origin) to the current one.
    NOTE(review): raises ZeroDivisionError if two consecutive x samples are
    equal — presumably never happens for the monotonic x from Waypoints().
    """
    headings = []
    prev_x, prev_y = 0, 0
    for idx in range(73):
        cur_x, cur_y = x[idx], y[idx]
        headings.append(round(math.atan((cur_y - prev_y) / (cur_x - prev_x)), 2))
        prev_x, prev_y = cur_x, cur_y
    print("x :{0} ,y:{1}".format(prev_x, prev_y))
    return headings
def odom_callback(data):
    """Update the global `pose` list ([x, y, yaw]) from an Odometry message."""
    global pose
    quat = data.pose.pose.orientation
    pose[0] = data.pose.pose.position.x
    pose[1] = data.pose.pose.position.y
    # Yaw is the third Euler angle of the orientation quaternion.
    pose[2] = euler_from_quaternion([quat.x, quat.y, quat.z, quat.w])[2]
def laser_callback(msg):
    """Summarise the 720-beam laser scan into five named sectors.

    Each sector value is the minimum range in its slice, capped at 144.
    Results are stored in the module-level `regions` dict.
    NOTE(review): beam indices 144, 288 and 432 fall into no sector —
    boundaries look off by one, kept as-is to preserve behaviour.
    """
    global regions
    range_max = 144
    sector_bounds = [
        ('bright', 0, 144),
        ('fright', 145, 288),
        ('front', 289, 432),
        ('fleft', 433, 576),
        ('bleft', 576, 720),
    ]
    regions = {
        name: min(min(msg.ranges[lo:hi]), range_max)
        for name, lo, hi in sector_bounds
    }
def control_loop():
    """Drive the ebot along the precomputed waypoint path, then past obstacles.

    Publishes Twist commands on /cmd_vel at 10 Hz, steered by the global
    `pose` (odometry) and `regions` (laser) callbacks. After waypoint 10 it
    switches to a wall-following sequence, then heads to (12.5, 0).
    NOTE(review): the inner while-loops block without rate.sleep(), so they
    publish as fast as the callbacks update — intentional here, but fragile.
    """
    rospy.init_node('ebot_controller')
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    rospy.Subscriber('/scan', LaserScan, laser_callback)
    rospy.Subscriber('/odom', Odometry, odom_callback)
    rate = rospy.Rate(10)
    velocity_msg = Twist()
    velocity_msg.linear.x = 0
    velocity_msg.angular.z = 0
    pub.publish(velocity_msg)
    # Precompute the path samples and the per-segment heading errors.
    x2,y2=Waypoints()
    error=error_in_theatha(x2,y2)
    i = 0
    while not rospy.is_shutdown():
        #
        # Your algorithm to complete the obstacle course
        #
        rospy.loginfo("x:%f",pose[0])
        rospy.loginfo("y:%f",pose[1])
        rospy.loginfo("p:%f",pose[2])
        rospy.loginfo("i:%d",i)
        # Chase the next waypoint: forward at 0.5, yaw proportional to
        # heading error (gain 20).
        while pose[0]< x2[i]:
            velocity_msg.linear.x = 0.5
            velocity_msg.angular.z = (error[i]-pose[2])*20
            pub.publish(velocity_msg)
        i =i+1
        # Stop between segments.
        velocity_msg.linear.x = 0
        velocity_msg.angular.z =0
        pub.publish(velocity_msg)
        if i ==10:
            # Obstacle phase: turn left until the front sector is clear
            # (144 is the "nothing in range" cap from laser_callback).
            rospy.loginfo(regions)
            while not regions['front']==144:
                velocity_msg.linear.x = 0.9
                velocity_msg.angular.z =0.8
                pub.publish(velocity_msg)
                print("front:%f",regions['front'])
            # Drive straight until the front-right sector clears.
            while not regions['fright']==144:
                velocity_msg.linear.x = 0.3
                velocity_msg.angular.z = 0
                pub.publish(velocity_msg)
                print("fright:%f",regions['fright'])
            # Curl right around the obstacle while it is seen back-right.
            while regions['bright']!=144 and regions['bright']>0.5:
                velocity_msg.linear.x = 0.2
                velocity_msg.angular.z = -0.5
                pub.publish(velocity_msg)
                print("bright:%f",regions['bright'])
                print("fright:%f",regions['fright'])
            while regions['fright']!=144 and regions['fright']>0.5:
                velocity_msg.linear.x = 0.25
                velocity_msg.angular.z = -0.5
                pub.publish(velocity_msg)
                print("fright:%f",regions['fright'])
            # Final approach: steer toward the goal point (12.5, 0).
            while pose[0]<12.5 and pose[1]!=0 and pose[2]!=0:
                dx,dy = 12.5,0
                px,py = pose[0],pose[1]
                e = round(math.atan((dy-py)/(dx-px)),2)
                velocity_msg.linear.x = 0.5
                velocity_msg.angular.z = (e-pose[2])*20
                pub.publish(velocity_msg)
            velocity_msg.linear.x = 0
            velocity_msg.angular.z =0
            pub.publish(velocity_msg)
            return 0
        print("Controller message pushed at {}".format(rospy.get_time()))
        rate.sleep()
# Script entry point: run the controller until ROS shuts down.
if __name__ == '__main__':
    try:
        control_loop()
    except rospy.ROSInterruptException:
        # Raised on Ctrl-C / node shutdown; exit quietly.
        pass
| StarcoderdataPython |
1687215 | # python3 ResultsVisualisation.py
# -*- coding: utf-8 -*-
# ===========================================================================================
# Created by: <NAME>
# Description: Plots the sales prediction results of the different machine learning models
# ===========================================================================================
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
from setuptools.msvc import winreg
import MachineLearning.LinearRegression.LinearRegression as lr
import MachineLearning.LinearRegression.PolynomialRegression as plr
def linearRegressionVisualisation(articleId):
    """Plot real sales and linear-regression predictions for one article (2D).

    :param articleId: (int) identifier for an article
    """
    sales, realSales, predictions, articleName = lr.linearRegression(articleId)
    parsed_dates = [dt.datetime.strptime(day, '%Y-%m-%d').date() for day in sales['date']]
    date_axis = plt.gca().xaxis
    date_axis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    # Show roughly 10 evenly spaced date ticks.
    date_axis.set_major_locator(mdates.DayLocator(interval=int(len(parsed_dates) / 10)))
    plt.scatter(parsed_dates, realSales, s=10, label="Verkaufsmenge am Tag")
    plt.plot(parsed_dates, predictions, color='red', label='Vorhersage')
    plt.gcf().autofmt_xdate()
    plt.title("Verkaufsverlauf für das Produkt {}".format(articleName))
    plt.xlabel("Datum")
    plt.ylabel("Verkaufsmenge")
    plt.legend(loc="best")
    plt.show()
def linearRegression3dVisualisation(articleId):
    '''
    Plots the sales data and predictions of the linear regression machine learning model for a article in a 3d plot
    :param articleId: (int)
        identifier for a article
    '''
    sales, realSales, predictions, articleName = lr.linearRegression(articleId)
    dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
    ax = plt.axes(projection='3d')
    ax.set_xlabel('Date')
    ax.set_ylabel('Average Temperature')
    ax.set_zlabel('Sales')
    # Dates are plotted by index; the labels below map indices back to dates.
    xAxis = range(len(sales['date']))
    # Data for a three-dimensional line
    ax.plot3D(xAxis, sales['tavg'], predictions, 'red')
    # Data for three-dimensional scattered points
    ax.scatter3D(xAxis, sales['tavg'], realSales, alpha=0.3, facecolors='none', edgecolors='blue')
    ax.xaxis.set_ticks(xAxis)
    ax.xaxis.set_ticklabels(dates)
    plt.title("Sales history for the article {}".format(articleName))
    # Thin the tick labels to ~5 evenly spaced dates.
    plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates) / 5)))
    plt.show()
def polynomialRegressionVisualisation(articleId):
    """Plot real sales and polynomial-regression predictions for one article (2D).

    :param articleId: (int) identifier for an article
    """
    sales, realSales, predictions, articleName = plr.polynomialRegression(articleId)
    parsed_dates = [dt.datetime.strptime(day, '%Y-%m-%d').date() for day in sales['date']]
    date_axis = plt.gca().xaxis
    date_axis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    # Show roughly 10 evenly spaced date ticks.
    date_axis.set_major_locator(mdates.DayLocator(interval=int(len(parsed_dates) / 10)))
    plt.scatter(parsed_dates, realSales, s=10, label="Verkaufsmenge am Tag")
    plt.plot(parsed_dates, predictions, color='red', label='Vorhersage')
    plt.gcf().autofmt_xdate()
    plt.title("Verkaufsverlauf für das Produkt {}".format(articleName))
    plt.xlabel("Datum")
    plt.ylabel("Verkaufsmenge")
    plt.legend(loc="best")
    plt.show()
def polynomialRegression3dVisualisation(articleId):
    '''
    Plots the sales data and predictions of the polynomial regression machine learning model for a article in a 3d plot
    :param articleId: (int)
        identifier for a article
    '''
    sales, realSales, predictions, articleName = plr.polynomialRegression(articleId)
    dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
    ax = plt.axes(projection='3d')
    ax.set_xlabel('Date')
    ax.set_ylabel('Average Temperature')
    ax.set_zlabel('Sales')
    # Dates are plotted by index; the labels below map indices back to dates.
    xAxis = range(len(sales['date']))
    # Data for a three-dimensional line
    ax.plot3D(xAxis, sales['tavg'], predictions, 'red')
    # Data for three-dimensional scattered points
    ax.scatter3D(xAxis, sales['tavg'], realSales, alpha=0.3, facecolors='none', edgecolors='blue')
    ax.xaxis.set_ticks(xAxis)
    ax.xaxis.set_ticklabels(dates)
    plt.title("Sales history for the article {}".format(articleName))
    # Thin the tick labels to ~5 evenly spaced dates.
    plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates) / 5)))
    plt.show()
def SARIMAXVisualisation(articleId):
    '''
    Plots the sales data and predictions of the SARIMAX machine learning model for a article in a 2d plot
    :param articleId: (int)
        identifier for a article
    '''
    # TODO: not implemented yet — intentionally a no-op stub.
    pass
def RNNVisualisation(articleId):
    '''
    Plots the sales data and predictions of the RNN machine learning model for a article in a 2d plot
    :param articleId: (int)
        identifier for a article
    '''
    # TODO: not implemented yet — intentionally a no-op stub.
    pass
# Script entry point: render both 3D visualisations for one article.
if __name__ == '__main__':
    # Change for different articles
    wishedArticleId = 1
    linearRegression3dVisualisation(wishedArticleId)
    polynomialRegression3dVisualisation(wishedArticleId)
| StarcoderdataPython |
3285949 | <reponame>y-srini/opentitan<filename>hw/ip/otbn/dv/rig/rig/gens/bad_bnmovr.py
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Optional
from shared.operand import RegOperandType, OptionOperandType
from shared.insn_yaml import InsnsFile
from ..config import Config
from ..model import Model
from ..program import ProgInsn, Program
from ..snippet import ProgSnippet
from ..snippet_gen import GenCont, GenRet, SnippetGen
class BadBNMovr(SnippetGen):
    '''A snippet generator that generates program ending BN.MOVR instructions.

    This includes incrementing both GPRs or having *grd or *grs > 31
    (both are faulting conditions, so the generated snippet ends the program).
    '''
    ends_program = True

    def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
        super().__init__()
        self.insn = self._get_named_insn(insns_file, 'bn.movr')
        # bn.movr expects the operands: grd, grs, grd_inc, grs_inc
        if len(self.insn.operands) != 4:
            raise RuntimeError('Unexpected number of operands for bn.movr')
        grd, grs, grd_inc, grs_inc = self.insn.operands
        # Sanity-check the operand shapes so index-based op_vals below are safe.
        exp_shape = (
            # grd
            isinstance(grd.op_type, RegOperandType) and
            grd.op_type.reg_type == 'gpr' and
            not grd.op_type.is_dest() and
            # grs
            isinstance(grs.op_type, RegOperandType) and
            grs.op_type.reg_type == 'gpr' and
            not grs.op_type.is_dest() and
            # grd_inc
            isinstance(grd_inc.op_type, OptionOperandType) and
            # grs_inc
            isinstance(grs_inc.op_type, OptionOperandType)
        )
        if not exp_shape:
            raise RuntimeError('Unexpected shape for bn.movr')
        self.weight = cfg.insn_weights.get(self.insn.mnemonic)
        # Check that the instruction has a positive weight
        if not self.weight:
            self.disabled = True

    def gen(self,
            cont: GenCont,
            model: Model,
            program: Program) -> Optional[GenRet]:
        """Generate one faulting bn.movr instruction at the current PC."""
        # Get known registers
        known_regs = model.regs_with_known_vals('gpr')
        bad_regs = []
        good_regs = []
        # Partition registers by whether their known value exceeds the valid
        # WDR index range (> 31 means an out-of-bounds indirect access).
        for reg_idx, reg_val in known_regs:
            if 31 < reg_val:
                bad_regs.append(reg_idx)
            else:
                good_regs.append(reg_idx)
        # We have 4 different options for increments * 4 different options for
        # out of bound register values.
        # reg_val_choice[1:0] -> grd_val, grs_val
        # inc_choice[1:0] -> grd_inc, grs_inc
        # bad = 1, good = 0
        # choices[3:0] = {reg_val_choice, inc_choice}
        # We always have an element (x0) in good_regs but if bad_regs is empty
        # hard-code grd_val/grs_val = Good/Good and we have to use two incr.
        # for producing an error.
        choices = 3 if not bad_regs else random.randint(3, 15)
        reg_val_choice = choices // 4
        inc_choice = choices % 4
        bad_grd = reg_val_choice in [2, 3]  # grd_val/grs_val = Bad/Good or Bad/Bad
        bad_grs = reg_val_choice in [1, 3]  # grd_val/grs_val = Good/Bad or Bad/Bad
        # If grs_val/grd_val = good/good, we have to force +/+ for a fault
        grd_inc = int(inc_choice in [2, 3])
        grs_inc = int(inc_choice in [1, 3])
        op_val_grs = random.choice(bad_regs if bad_grs else good_regs)
        op_val_grd = random.choice(bad_regs if bad_grd else good_regs)
        op_val = [op_val_grd, op_val_grs, grd_inc, grs_inc]
        prog_insn = ProgInsn(self.insn, op_val, None)
        snippet = ProgSnippet(model.pc, [prog_insn])
        snippet.insert_into_program(program)
        # The True flag marks this snippet as program-ending.
        return (snippet, True, model)
| StarcoderdataPython |
1718715 | #!/usr/bin/env python
#
# Paramiko Expect Demo Helper
#
# Written by <NAME>
# http://github.com/fgimian
#
# This interactive script is used to help demonstrate the paramiko_expect-demo.py
# script
from __future__ import print_function
import sys
if sys.version_info.major == 3:
raw_input = input
def main():
    """Prompt for the user's name and echo it back (demo helper)."""
    user_name = raw_input('Please enter your name: ')
    print('Your name is', user_name)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1631032 | ## sql definition to import and export data
import psycopg2 as pgsql
import pandas as pd
import sqlalchemy, os
from ctdays import raw_ctd_to_df
def export_sql(database_name, edit, location):
    # export table from sql
    # cruise name (in list, case insensitive) for AWI server,
    # table name for local
    #
    # Returns a pandas DataFrame with the query result.
    # NOTE(review): passwords are redacted placeholders; `edit` is
    # interpolated into SQL without escaping — trusted input only.
    if location == 'local':  # from local
        mydb = pgsql.connect(dbname='%s'%(database_name,), host='localhost', user='dddong', password='<PASSWORD>!')
        cur = mydb.cursor()
        df = pd.read_sql('''SELECT * FROM %s'''%(edit), mydb)
    ###########################
    ##### LOKI data Export ####
    # LOKI export restricted to the given cruises (edit = SQL ARRAY literal).
    elif (location == 'awi_server') & (edit != 'all') & (database_name == 'loki'):  # loki edit
        mydb = pgsql.connect(dbname='loki', host='postgres5.awi.de', user='loki', password='<PASSWORD>!')
        cur = mydb.cursor()
        df = pd.read_sql('''SELECT v.name as vessel, c.name as cruise,
            s.name as station, s.region as region, s.latitude as latitude, s.longitude as longitude, s.bottom_depth as bottom_depth,
            l.datetime as date_time,
            l.loki_depth as loki_depth, l.temperature as temperatue, l.salinity as salinity, l.oxygen_conc as oxygen, l.fluorescence as fluorescence,
            l.manual_classification as manual_classification, l.developmental_stage as developmental_stage, l.area_pixel as area_pixel,
            l.area_sqrmm as area_sqrmm, l.spec_length as spec_length, l.spec_width as spec_width, l.image_filename as image_filename,
            t.animal as animal, t.copepod as copepod, t.phylum as phylum, t.class as class, t.spec_order as spec_order,
            t.family as family, t.genus as genus, t.species as species
            FROM vessel v, cruise c, station s, loki_data l, taxa_group t
            WHERE c.id_vessel = v.id_vessel AND c.id_cruise = s.id_cruise
            AND s.id_station = l.id_station AND l.id_taxa_group = t.id_taxa_group
            AND c.name ILIKE ANY ( ARRAY %s ) ORDER BY s.name, l.loki_depth '''%(edit,), mydb)
    # LOKI export for all cruises.
    elif (location == 'awi_server') & (edit == 'all') & (database_name == 'loki'):  # loki all
        mydb = pgsql.connect(dbname='loki', host='postgres5.awi.de', user='loki', password='<PASSWORD>!')
        cur = mydb.cursor()
        df = pd.read_sql('''SELECT v.name as vessel, c.name as cruise,
            s.name as station, s.region as region, s.latitude as latitude, s.longitude as longitude, s.bottom_depth as bottom_depth,
            l.datetime as date_time,
            l.loki_depth as loki_depth, l.temperature as temperatue, l.salinity as salinity, l.oxygen_conc as oxygen, l.fluorescence as fluorescence,
            l.manual_classification as manual_classification, l.developmental_stage as developmental_stage, l.area_pixel as area_pixel,
            l.area_sqrmm as area_sqrmm, l.spec_length as spec_length, l.spec_width as spec_width, l.image_filename as image_filename,
            t.animal as animal, t.copepod as copepod, t.phylum as phylum, t.class as class, t.spec_order as spec_order,
            t.family as family, t.genus as genus, t.species as species
            FROM vessel v, cruise c, station s, loki_data l, taxa_group t
            WHERE c.id_vessel = v.id_vessel AND c.id_cruise = s.id_cruise
            AND s.id_station = l.id_station AND l.id_taxa_group = t.id_taxa_group
            ORDER BY s.name, l.loki_depth ''', mydb)
    ###############################
    ##### MultiNet data Export ####
    # MultiNet export restricted to the given cruises.
    elif (location == 'awi_server') & (edit != 'all') & (database_name == 'mn'):  # loki edit
        mydb = pgsql.connect(dbname='loki', host='postgres5.awi.de', user='loki', password='<PASSWORD>!')
        cur = mydb.cursor()
        df = pd.read_sql('''SELECT v.name as vessel, c.name as cruise,
            s.name as station, s.region as region, s.latitude as latitude, s.longitude as longitude, s.bottom_depth as bottom_depth,
            m.datetime as date_time,
            m.depth_min as depth_min, m.depth_max as depth_max, m.manual_classification as manual_classification, m.developmental_stage as developmental_stage,
            m.individuals_cub_m as individuals_cub_m, m.individuals_sqr_m as individuals_sqr_m,
            t.animal as animal, t.copepod as copepod, t.phylum as phylum, t.class as class, t.spec_order as spec_order,
            t.family as family, t.genus as genus, t.species as species
            FROM vessel v, cruise c, station s, mn_data m, taxa_group t
            WHERE c.id_vessel = v.id_vessel AND c.id_cruise = s.id_cruise
            AND s.id_station = m.id_station AND m.id_taxa_group = t.id_taxa_group
            AND c.name ILIKE ANY ( ARRAY %s ) ORDER BY s.name, m.depth_max '''%(edit,), mydb)
    # MultiNet export for all cruises.
    elif (location == 'awi_server') & (edit == 'all') & (database_name == 'mn'):  # loki all
        mydb = pgsql.connect(dbname='loki', host='postgres5.awi.de', user='loki', password='<PASSWORD>!')
        cur = mydb.cursor()
        df = pd.read_sql('''SELECT v.name as vessel, c.name as cruise,
            s.name as station, s.region as region, s.latitude as latitude, s.longitude as longitude, s.bottom_depth as bottom_depth,
            m.datetime as date_time,
            m.depth_min as depth_min, m.depth_max as depth_max, m.manual_classification as manual_classification, m.developmental_stage as developmental_stage,
            m.individuals_cub_m as individuals_cub_m, m.individuals_sqr_m as individuals_sqr_m,
            t.animal as animal, t.copepod as copepod, t.phylum as phylum, t.class as class, t.spec_order as spec_order,
            t.family as family, t.genus as genus, t.species as species
            FROM vessel v, cruise c, station s, mn_data m, taxa_group t
            WHERE c.id_vessel = v.id_vessel AND c.id_cruise = s.id_cruise
            AND s.id_station = m.id_station AND m.id_taxa_group = t.id_taxa_group
            ORDER BY s.name, m.depth_max ''', mydb)
    return df
def import_sql (database_name, table_name, df, replace_or_append):
    """Write *df* into table *table_name* of the local Postgres database.

    *replace_or_append* is forwarded to pandas' ``to_sql`` ``if_exists``
    ('replace' or 'append'); every column is stored as a SQL string type.
    """
    engine = sqlalchemy.create_engine('postgresql+psycopg2://dong:Lava10203!@localhost/%s' % (database_name,), paramstyle='format')
    column_types = {col_name: sqlalchemy.types.String() for col_name in df}
    df.to_sql('%s' % (table_name,), engine, if_exists=replace_or_append,
              index=False, dtype=column_types)
| StarcoderdataPython |
3238002 | """Implement extraction of 3D voxel-based morphological features."""
from typing import List, Optional
import numpy.typing as npt
from ..gpu import get_image_method
def regionprops(image_bin: npt.ArrayLike, image: npt.ArrayLike, features: Optional[List] = None):
    """Extract region-based morphological features from a binary mask.

    Labels connected components of *image_bin* and measures "label" plus any
    requested *features* against the intensity *image*.
    """
    requested = features if features is not None else []
    # Resolve the skimage/cucim implementation matching the array's backend.
    label_fn = get_image_method(image, "skimage.measure.label")
    props_fn = get_image_method(image, "skimage.measure.regionprops_table")
    labeled_mask = label_fn(image_bin)
    # this will fail on GPU if any properties return objects, see https://github.com/rapidsai/cucim/pull/272
    return props_fn(labeled_mask, image, properties=["label"] + requested)
| StarcoderdataPython |
1615015 | <gh_stars>0
import os
import shutil
from .base import GnuRecipe
class Extra:
    # Lightweight record for an auxiliary download (e.g. a PDF manual);
    # the owning recipe fills in url/sha256/version after construction.
    def __init__(self, name):
        self.name = name
        # Checksum unknown until assigned by the owning recipe.
        self.sha256 = None
class SwiPrologRecipe(GnuRecipe):
    """Build recipe for SWI-Prolog 7.4.2; also installs the PDF manual."""

    def __init__(self, *args, **kwargs):
        super(SwiPrologRecipe, self).__init__(*args, **kwargs)
        # Checksum of the source tarball.
        self.sha256 = '7f17257da334bc1e7a35e9cf5cb8fca0' \
                      '1d82f1ea406c7ace76e9062af8f0df8b'
        self.name = 'swi-prolog'
        self.version = '7.4.2'
        self.version_regex = r'(?P<version>\d+\.\d+\.\d+)'
        self.url = 'http://www.swi-prolog.org/download/stable/src/' \
                   'swipl-$version.tar.gz'
        # The manual PDF is fetched as an extra download alongside the source.
        self.doc = Extra('swi-prolog-manual')
        self.doc.url = 'http://www.swi-prolog.org/download/stable/doc/' \
                       'SWI-Prolog-$version.pdf'
        self.doc.sha256 = '7aca52426ad254a986ed410eeda9590b' \
                          'cd0f683bc41190f08853a82212d59d7c'
        self.doc.version = self.version
        self.extra_downloads = [self.doc]
        self.configure_strip_cross_compile()

    def install(self):
        # Install the package, then copy the manual PDF into <prefix>/doc.
        super(SwiPrologRecipe, self).install()
        self.log_dir('install', self.directory, 'install manual')
        filename = os.path.join(self.prefix_dir, 'doc',
                                os.path.basename(self.doc.filename))
        shutil.copy2(self.doc.filename, filename)
| StarcoderdataPython |
3248425 | <reponame>happyseayou/tello_the_force
"""
如何调用:
mapcom=Mapcom()
while:#视频帧循环里
if userc[4]==2 or userc[4]==3:进入map
mapcom.readflightdata(data)#第一步更新飞行数据data由飞机类给出
comd=mapcom.com(userc)#需要传入用户控制指令,不然万一出错就放生了
flightdata=mapcom.send_flightdata()#将所有飞行数据传出用于显示和其他端口调用
if mapcom.checkalldone():
...退出该模式
userc[4]=0#拨回控制开关
keyuser.us[4]=0#拨回控制开关
mapcom.cheakdone=None#这样子可以重新调用
"""
import math
import pandas as pd
from simple_pid import PID
import time
import datetime
import csv
class Mapcom:
    def __init__(self):
        """Load the flight plan and initialise all mission/telemetry state."""
        # Flight-plan file: each row is [opcode, v1, v2, v3].
        try:
            data = pd.read_csv("./map/map_.csv", usecols=[0, 1, 2, 3])
            ls = data.values.tolist()
            self.listgo = ls
        except:
            print('打不开文件,请检查目录正确')
            self.listgo = None
        # Command/sequencing state
        self.index = None
        self.nowop = None
        self.nowdo = None
        self.startcomtime = None
        self.isopsuccessful = None
        self.changeoptime = None
        self.istakeoffok = None
        self.checkdone = None  # set to 1 once every command has executed
        self.userstatemod = None  # tracks whether the keyboard allows mapcom to start
        self.comd = None
        self.flymode = None
        # Telemetry mirrored from the drone
        self.battery = None
        self.isfly = None
        self.wifi = None
        self.anlroll = None
        self.anlpitch = None
        self.velz = None
        self.velxy = None
        self.state = None
        # PID controllers
        self.tpid = 0
        self.pid_yaw = PID(2, 1.8, 0, setpoint=0,
                           output_limits=(-50, 50))  # best gains so far at ~27 fps
        self.pid_thro = PID(1.5, 1.4, 1, setpoint=0,
                            output_limits=(-50, 50))  # slightly oscillatory
        self.pid_pith = PID(0.2, 0.01, 0.2, setpoint=0,
                            output_limits=(-20, 20))
        self.pid_roll = PID(1, 0, 0, setpoint=0, output_limits=(-20, 20))
        # PIDs for close-in mode (no heading correction)
        self.pidfoward = PID(1.8, 1.8, 2.5, setpoint=0,
                             output_limits=(-14, 14))
        self.pidroll = PID(1.8, 1.8, 2.5, setpoint=0, output_limits=(-14, 14))
        # Raw (unprocessed) coordinates as read from telemetry
        self.heightraw = None
        self.posxraw = None
        self.posyraw = None
        self.poszraw = None
        self.pointyawraw = None
        # Initial coordinates recorded at takeoff (transform reference)
        self.height0 = None
        self.posx0 = None
        self.posy0 = None
        self.posz0 = None
        self.pointyaw0 = None
        # Coordinates transformed into the takeoff frame
        self.heightnow = None
        self.posxnow = None
        self.posynow = None
        self.posznow = None
        self.pointyawnow = None
        # In-flight error statistics
        self.offdistance = 0.0
        self.offheight = 0.0
        self.offpoint = 0.0
        self.offroll = 0.0
        self.offforword = 0.0
        # Position logging to file
        #pathpos='./map/pos_%s.csv'% datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
        self.posdatefile = None
        self.posdatewriter = None
        # self.posdatewriter.writerow(["time","x","y","z"])
        self.preposxnow = 100
        self.preposynow = 100
        self.preposznow = 100
    def reset(self):  # restore initial state after a flight so the mode can be re-entered; intentionally does NOT reset self.checkdone or the PIDs
        self.listgo = None
        self.index = None
        self.nowop = None
        self.nowdo = None
        self.startcomtime = None
        self.isopsuccessful = None
        self.changeoptime = None
        self.istakeoffok = None
        self.userstatemod = None
        self.comd = None
        self.flymode = None
        # self.checkdone=None  # kept so "all commands done" survives the reset
        # Telemetry mirrored from the drone
        self.battery = None
        self.isfly = None
        self.wifi = None
        self.anlroll = None
        self.anlpitch = None
        self.velz = None
        self.velxy = None
        self.state = None
        self.tpid = 0
        # PID gains intentionally not reset
        # self.pid_yaw=PID(0.25,0,0,setpoint=0,output_limits=(-30,30))
        # self.pid_thro=PID(0.3,0.005,0.1,setpoint=0,output_limits=(-30,30))
        # self.pid_pith=PID(0.3,0.01,0.3,setpoint=0,output_limits=(-30,30))
        # self.pid_roll= PID(0.2,0.005,0.2,setpoint=0,output_limits=(-30,30))
        # Raw (unprocessed) coordinates
        self.heightraw = None
        self.posxraw = None
        self.posyraw = None
        self.poszraw = None
        self.pointyawraw = None
        # Initial (takeoff reference) coordinates
        self.height0 = None
        self.posx0 = None
        self.posy0 = None
        self.posz0 = None
        self.pointyaw0 = None
        # Coordinates transformed into the takeoff frame
        self.heightnow = None
        self.posxnow = None
        self.posynow = None
        self.posznow = None
        self.pointyawnow = None
        # In-flight error statistics
        self.offdistance = 0.0
        self.offheight = 0.0
        self.offpoint = 0.0
        self.offroll = 0.0
        self.offforword = 0.0
        # Position logging to file
        #pathpos='./map/pos_%s.csv'% datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
        self.posdatefile = None
        self.posdatewriter = None
        # self.posdatewriter.writerow(["time","x","y","z"])
        self.preposxnow = 100
        self.preposynow = 100
        self.preposznow = 100
    def flashdata(self):
        """Transform raw telemetry into the takeoff frame and update errors.

        Rotates/translates raw positions by the takeoff pose, then (for the
        go/backhome/close ops 2, 3, 8) recomputes distance, height, heading,
        roll and forward offsets toward the current waypoint.
        """
        self.heightnow = self.heightraw
        # Rotation about the takeoff yaw: xt=x*cos(a)+y*sin(a),
        # yt=y*cos(a)-x*sin(a) with a=-pointyaw0
        self.posxnow = (self.posxraw-self.posx0)*math.cos(-math.radians(self.pointyaw0)) + \
            (self.posyraw-self.posy0)*math.sin(-math.radians(self.pointyaw0))
        self.posynow = (self.posyraw-self.posy0)*math.cos(-math.radians(self.pointyaw0)) - \
            (self.posxraw-self.posx0)*math.sin(-math.radians(self.pointyaw0))
        # NOTE(review): height0 appears to be in a 10x coarser unit than
        # poszraw, hence the *10 — confirm against the telemetry spec.
        self.posznow = self.poszraw-self.posz0+self.height0*10
        self.pointyawnow = self.pointyawraw-self.pointyaw0
        if self.nowop == 2 or self.nowop == 3 or self.nowop == 8:
            # Horizontal distance and vertical offset to the waypoint.
            self.offdistance = math.sqrt(
                (self.posxnow-self.nowdo[1])**2+(self.posynow-self.nowdo[2])**2)
            self.offheight = self.posznow-self.nowdo[3]
            # Bearing of the target relative to the y axis, range [-180, 180].
            anglepoint2y = math.degrees(math.atan2(
                self.nowdo[1]-self.posxnow, self.nowdo[2]-self.posynow))
            self.offpoint = self.pointyawnow-anglepoint2y
            # Wrap the heading error into (-180, 180].
            if self.offpoint <= -180:
                self.offpoint = 360+self.offpoint
            elif self.offpoint > 180:
                self.offpoint = 360-self.offpoint
            # Decompose the distance along the current heading; offroll keeps
            # the same sign convention as the PID inputs (beware when tuning).
            self.offroll = self.offdistance * \
                math.sin(math.radians(self.offpoint))
            self.offforword = self.offdistance * \
                math.cos(math.radians(self.offpoint))
    def checkop(self):  # advance to the next operation when the current one succeeded
        self.startcomtime = time.time()
        if self.isfly == 1:
            if self.state == 6:  # only while hovering
                if self.isopsuccessful == 1:
                    self.index += 1  # move to the next command
                    if self.index < len(self.listgo):
                        self.nowdo = self.listgo[self.index]
                        self.nowop = self.nowdo[0]
                        self.isopsuccessful = 0
                        self.changeoptime = time.time()
                if self.istakeoffok == 1:  # after a successful takeoff
                    self.flashdata()  # refresh the in-flight data
        # else:
        #     if self.state==6:  # ready to take off?
        #         # takeoff countdown
        #         if time.time()-self.changeoptime>=5:
        #             self.index=0
        #             self.nowdo=self.listgo[self.index]
        #             self.changeoptime=time.time()
    def takeoffop(self):
        """Op 0: take off and climb to nowdo[1]; returns [yaw,roll,pitch,thro,aux]."""
        com = [0, 0, 0, 0, 0]
        if self.isfly == 1:
            if self.state == 6:
                if abs(self.heightraw-self.nowdo[1]) <= 1.5:
                    com[3] = 0
                    if self.velxy < 1 and abs(self.velz) < 1:  # stationary?
                        self.isopsuccessful = 1
                        # Record the initial pose as the reference for all
                        # later coordinate transforms.
                        self.posx0 = self.posxraw
                        self.posy0 = self.posyraw
                        self.posz0 = self.poszraw
                        self.height0 = self.heightraw
                        self.pointyaw0 = self.pointyawraw
                        self.istakeoffok = 1
                        # print('takeoff succeeded')
                else:
                    # Climb/descend toward the target height (x10 scaling).
                    com[3] = int(self.pid_thro(
                        (self.heightraw-self.nowdo[1])*10))
                    # print('correcting height')
        else:
            if self.state == 6:
                # Not flying yet: issue the takeoff command (aux slot = 1).
                com = [0, 0, 0, 0, 1]
                self.isopsuccessful = 0
            else:
                # Drone state does not allow takeoff: abort the sequence.
                self.index = None
                self.nowdo = None
                self.nowop = None
                # print('drone state does not allow takeoff')
        # print(com)
        return com
    def holdonop(self):
        """Op 1: hover in place for nowdo[1] seconds, then mark success."""
        com = [0, 0, 0, 0, 0]
        if self.isfly == 1:
            if self.state == 6:
                if time.time()-self.changeoptime <= self.nowdo[1]:
                    com = [0, 0, 0, 0, 0]
                else:
                    self.isopsuccessful = 1
                    com = [0, 0, 0, 0, 0]
        return com
def goop(self):
com = [0, 0, 0, 0, 0]
if self.isfly == 1:
if self.state == 6: # 是否悬停正常
if self.offdistance < 11: # 缓冲区大小未知
if self.velxy < 1 and abs(self.velz < 1): # 判断是否静止
self.isopsuccessful = 1
com = [0, 0, 0, 0, 0]
else:
com = [0, 0, 0, 0, 0]
else: # pid上
# if self.tpid==0:
if abs(self.offheight) < 12:
# if self.tpid==0:
if abs(self.offpoint) < 5: # 先对准指向
com[2] = int(self.pid_pith(-self.offdistance))
com[1] = int(self.pid_roll(self.offroll))
# com[3]=int(self.pid_thro(self.offheight))
# com[0]=int(self.pid_yaw(self.offpoint))
else:
com[0] = int(self.pid_yaw(self.offpoint))
# else:
# com[0]=int(self.pid_yaw(self.offpoint))
else:
com[3] = int(self.pid_thro(self.offheight))
# else:
# com[3]=int(self.pid_thro(self.offheight))
else:
com = [0, 0, 0, 0, 0]
# print(com)
return com
def closeop(self):
com = [0, 0, 0, 0, 0]
if self.isfly == 1:
if self.state == 6: # 是否悬停正常
if self.offdistance < 11: # 缓冲区大小未知
if self.velxy < 1 and abs(self.velz < 1): # 判断是否静止
self.isopsuccessful = 1
com = [0, 0, 0, 0, 0]
else:
com = [0, 0, 0, 0, 0]
else:
if abs(self.offheight) < 12:
# 不修正指向
com[2] = int(self.pidfoward(-self.offforword))
com[1] = int(self.pidroll(self.offroll))
else:
com[3] = int(self.pid_thro(self.offheight))
else:
com = [0, 0, 0, 0, 0]
# print(com)
return com
def backhome(self):
com = [0, 0, 0, 0, 0]
if self.isfly == 1:
if self.state == 6: # 是否悬停正常
if self.offdistance < 10: # 缓冲区大小未知
if self.velxy < 1 and abs(self.velz < 1): # 判断是否静止
self.isopsuccessful = 1
com = [0, 0, 0, 0, 0]
else: # pid上
if abs(self.offheight) < 12:
if abs(self.offpoint) < 5: # 先对准指向
com[2] = int(self.pid_pith(-self.offdistance))
com[1] = int(self.pid_roll(self.offroll))
# com[3]=int(self.pid_thro(self.offheight))
# com[0]=int(self.pid_yaw(self.offpoint))
else:
com[0] = int(self.pid_yaw(self.offpoint))
else:
com[3] = int(self.pid_thro(self.offheight))
else:
com = [0, 0, 0, 0, 0]
return com
def land(self):
com = [0, 0, 0, 0, 0]
if self.isfly == 1:
if self.state == 6: # 是否悬停正常
if self.velxy < 1 and abs(self.velz < 1): # 判断是否静止
if abs(self.pointyawnow) < 1:
self.isopsuccessful = 1
com[4] = 4
self.checkdone = 1
else:
com[0] = int(self.pid_yaw(self.pointyawnow))
else:
com = [0, 0, 0, 0, 0]
return com
    def stopmap(self):
        # Op 5: abort the mission by marking the whole plan as done.
        self.checkdone = 1
def checkfile(self):
if self.listgo is None:
try:
data = data = pd.read_csv(
"./map/map_.csv", usecols=[0, 1, 2, 3])
ls = data.values.tolist()
self.listgo = ls
ok = 1
except:
ok = 0
print('打不开文件,请检查目录正确')
# self.reset()
self.checkdone = 1 # 在comd里找不到文件的话直接弹出执行完成退出模式
else:
ok = 1
return ok
    def userstate(self, userc):  # interpret user control: waiting / running / paused / force-exit
        if userc[5] == 1:  # takeoff pressed (also valid mid-air): start the plan
            self.index = 0
            self.nowdo = self.listgo[self.index]
            self.nowop = self.nowdo[0]
            self.flymode = self.nowop
            self.userstatemod = 2  # running
        elif userc[5] == 4:  # exit straight to normal mode and hover
            self.nowdo = [5, 0, 0, 0]
            self.nowop = self.nowdo[0]
            self.flymode = 5
            self.userstatemod = 2
        elif userc[4] == 3:
            self.userstatemod = 3  # paused
            # self.nowdo=[7,0,0,0]
            self.flymode = 7
        else:
            if self.isfly == 1 and (self.index is not None):
                # Already mid-plan: keep running.
                self.userstatemod = 2
            else:
                self.userstatemod = 0  # waiting
                # self.nowdo=[6,0,0,0]
                self.flymode = 6
    # Produce the flight command for this frame; called from the main
    # video-frame loop with the raw user control array `userc`.
    def com(self, userc):
        ok = self.checkfile()
        self.userstate(userc)
        com = [0, 0, 0, 0, 0]  # reset the command every cycle
        # if self.tpid==1:
        # self.pidtuning(pid)
        if self.userstatemod == 2:
            if ok == 1:  # only dispatch when a plan file is loaded
                self.checkop()
                # Dispatch on the current opcode.
                if self.nowop == 0:
                    com = self.takeoffop()
                    if self.nowop is not None:
                        self.flymode = self.nowop
                    else:
                        self.flymode = 6
                elif self.nowop == 1:
                    com = self.holdonop()
                    self.flymode = self.nowop
                elif self.nowop == 2:
                    com = self.goop()
                    self.flymode = self.nowop
                elif self.nowop == 8:
                    com = self.closeop()
                    self.flymode = self.nowop
                elif self.nowop == 3:
                    com = self.backhome()
                    self.flymode = self.nowop
                elif self.nowop == 4:
                    com = self.land()
                    self.flymode = self.nowop
                elif self.nowop == 5:
                    self.stopmap()
                    self.flymode = self.nowop
                else:
                    pass
        elif self.userstatemod == 0 or self.userstatemod == 3:
            # Waiting/paused: keep the transformed pose fresh while hovering.
            if self.isfly == 1:
                if self.state == 6:  # hovering?
                    if self.istakeoffok == 1:  # takeoff completed
                        self.flashdata()  # refresh in-flight data
        # Manual override: any non-zero user stick input replaces the
        # autonomous command (so the pilot is never locked out).
        if userc[0] != 0 or userc[1] != 0 or userc[2] != 0 or userc[3] != 0:
            com[0] = userc[3]
            com[1] = userc[2]
            com[2] = userc[1]
            com[3] = userc[0]
        # print(self.nowop)
        self.comd = com
        return com
# def pidtuning(self,value):
# #value=[[p,i,d,down,up] yaw
# # [p,i,d,down,up] thro
# # [p,i,d,down,up] pith
# # [p,i,d,down,up]] roll
# self.pid_yaw=PID(value[0][0],value[0][1],value[0][2],setpoint=0,output_limits=(value[0][3],value[0][4]))
# self.pid_thro=PID(value[1][0],value[1][1],value[1][2],setpoint=0,output_limits=(value[1][3],value[1][4]))
# self.pid_pith=PID(value[2][0],value[2][1],value[2][2],setpoint=0,output_limits=(value[2][3],value[2][4]))
# self.pid_roll= PID(value[3][0],value[3][1],value[3][2],setpoint=0,output_limits=(value[3][3],value[3][4]))
    def readflightdata(self, data):  # parse telemetry received from the drone
        """Unpack one telemetry frame `data` (indexable sequence) into
        instance attributes. Indices 2, 12 and 13 are not used here."""
        self.battery = data[0]
        self.isfly = data[1]
        self.heightraw = data[3]
        self.wifi = data[4]
        self.anlroll = data[5]
        self.anlpitch = data[6]
        self.velz = data[7]
        self.velxy = data[8]
        # x and y are swapped; up is +z, forward is +y, right is +x
        self.posxraw = data[10]
        self.posyraw = data[9]
        self.poszraw = -data[11]  # flip sign so that up is the positive direction
        self.pointyawraw = data[14]
        self.state = data[15]  # state needs an extra sanity check, done below
        if self.isfly:
            if abs(data[9]) <= 2 and abs(data[10]) <= 2 and abs(data[11]) <= 2:
                self.state = 1  # all three near zero while flying => drifting
        # if self.isfly==0:
        #     if abs(self.anlpitch)>16 or abs(self.anlroll)>16:# tilt too large, sin1=1/(1+tan1^2+tan2^2)
        #         self.state=1]
    def send_flightdata(self):  # package telemetry for the UI and other components
        """Assemble and return a 26-element status list; also appends the
        current position to the waypoint log via `posloger`.

        Slots guarded by `if self.<attr>:` keep their 0 default whenever the
        attribute is falsy (None or 0).
        """
        flightdata = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # basic status
        flightdata[0] = self.isfly
        flightdata[1] = self.battery
        # current command
        flightdata[2] = self.flymode+20  # [op,v1,v2,v3]; +20 distinguishes this module
        flightdata[3] = self.index
        # control-channel monitoring
        flightdata[4] = self.comd[3]
        flightdata[5] = self.comd[2]
        flightdata[6] = self.comd[1]
        flightdata[7] = self.comd[0]
        # map-tracking offsets
        if self.offdistance:
            flightdata[8] = self.offdistance
        if self.offheight:
            flightdata[9] = self.offheight
        if self.offpoint:
            flightdata[10] = self.offpoint
        flightdata[11] = self.heightraw
        flightdata[12] = self.wifi
        if self.offroll:
            flightdata[13] = self.offroll
        if self.offforword:
            flightdata[22] = self.offforword
        # attitude and position
        flightdata[14] = self.userstatemod
        flightdata[15] = self.anlroll
        flightdata[16] = self.anlpitch
        flightdata[17] = self.velxy
        flightdata[18] = self.velz
        if self.posxnow:
            flightdata[19] = self.posxnow
        if self.posynow:
            flightdata[20] = self.posynow
        if self.posznow:
            flightdata[21] = self.posznow
        # flightdata[22]=0
        flightdata[23] = self.pointyawraw
        if self.pointyawnow:
            flightdata[24] = self.pointyawnow
        flightdata[25] = self.state
        # persist the position to the waypoint log file
        self.posloger(flightdata)
        return flightdata
def posloger(self, flightdata):
# 检查文件是否有打开
if self.posdatefile is None:
try:
pathpos = './map/pos_%s.csv' % datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
self.posdatefile = open(pathpos, 'w', newline='')
self.posdatewriter = csv.writer(self.posdatefile)
self.posdatewriter.writerow(["time", "x", "y", "z"])
except:
print('初始化航点记录失败,无法记录航点')
self.posdatefile = None
if self.posdatefile is not None:
offposx = abs(flightdata[19]-self.preposxnow)
offposy = abs(flightdata[20]-self.preposynow)
offposz = abs(flightdata[21]-self.preposznow)
if offposx >= 0.3 and offposy >= 0.3 and offposz >= 0.3: # 不是原地悬停
# 写入数据
index = int(time.time())
self.posdatewriter.writerow(
[index, flightdata[19], flightdata[20], flightdata[21]])
# 重新pre值
self.preposxnow = flightdata[19]
self.preposynow = flightdata[20]
self.preposznow = flightdata[21]
def checkalldone(self): # 检查是否完成降落,是则退出模式
if self.checkdone == 1:
ok = 1
self.posdatefile.close() # 先关闭航点记录文件
self.reset() # 初始化并且自动退出map模式
else:
ok = 0
return ok
| StarcoderdataPython |
3311656 | <gh_stars>1-10
from rest_framework import status
from rest_framework.authtoken.models import Token
from .basetests import BaseTest
from questions.models import Question
class QuestionModelTest(BaseTest):
    """Model-level checks for question records."""

    def test_delete_question(self):
        """Deleting the fixture question leaves the table empty."""
        self.question.delete()
        self.assertEqual(Question.objects.all().count(), 0)
class DeleteQuestionTest(BaseTest):
    """Endpoint tests covering deletion of questions."""

    def test_deleting_question(self):
        """An authenticated owner can delete their own question."""
        self.is_authenticated()
        resp = self.delete_question()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_is_unauthenticated(self):
        """Deleting without credentials is rejected with 401."""
        resp = self.delete_question()
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_delete_invalid_question(self):
        """Deleting a question that does not exist yields 404."""
        self.is_authenticated()
        resp = self.delete_invalid_question()
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_with_invalid_meetup(self):
        """Deleting under a meetup that does not exist yields 404."""
        self.is_authenticated()
        resp = self.delete_with_invalid_meetup()
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_with_unmatch_meetup(self):
        """Deleting under a meetup that does not match yields 404."""
        self.is_authenticated()
        resp = self.delete_with_unmatch_meetup()
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_is_not_owner(self):
        """A user who does not own the question gets 401 on delete."""
        token, _ = Token.objects.get_or_create(user=self.user2)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        resp = self.delete_question()
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
| StarcoderdataPython |
3289370 | <gh_stars>0
from honeybee import HoneyBee

# Connect to a local HoneyBee server. Arguments look like (host, port,
# username, password) from the values used -- TODO confirm parameter order.
hb = HoneyBee('localhost', 8080, 'talbor49', '1234')

# Exercise the basic key-value API: select store 'x', then set and read a key.
print(hb.use('x'))
print(hb.set('tal', 'bae'))
print(hb.get('tal'))
| StarcoderdataPython |
3246043 | <filename>ex.8(2).py
list1 = ['d', 'cccc', 'bb', 'aaa']


def mysort(mylist=None, numflag=False):
    """Sort `mylist` in place and print it.

    numflag False -> sort alphabetically by first character;
    numflag True  -> sort by string length.
    Returns None (unchanged from the original behaviour).
    """
    # Bug fix: the original used a mutable default argument (mylist=[]),
    # which is shared between calls; use the None sentinel instead.
    if mylist is None:
        mylist = []
    if not numflag:
        mylist.sort(key=lambda x: x[0])
    else:
        mylist.sort(key=len)
    print(mylist)


mysort(list1)
mysort(list1, True)
print('DONE')
| StarcoderdataPython |
197686 | <reponame>andrewcharlesjones/cplvm<filename>experiments/simulation_experiments/hypothesis_testing/global_ebfs_cplvm_splatter.py
import matplotlib
from cplvm import CPLVM
from cplvm import CPLVMLogNormalApprox
from os.path import join as pjoin
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from scipy.stats import poisson
from scipy.special import logsumexp
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from tensorflow_probability import bijectors as tfb
import matplotlib
font = {"size": 20}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
matplotlib.rcParams["lines.markersize"] = 10
tf.enable_v2_behavior()
warnings.filterwarnings("ignore")
if __name__ == "__main__":
    # Compare ELBO-based Bayes factors (EBFs) for three conditions:
    # the real perturbed data, a shuffled null, and an unperturbed null.
    latent_dim_shared = 2
    latent_dim_foreground = 2

    ## Load data simulated from Splatter
    data_dir = "/Users/andrewjones/Documents/beehive/cplvm/data/splatter/two_clusters"
    bg_path = pjoin(data_dir, "bg.csv")
    fg_path = pjoin(data_dir, "fg.csv")
    fg_labels_path = pjoin(data_dir, "fg_labels.csv")
    bg_data = pd.read_csv(bg_path, index_col=0)
    fg_data = pd.read_csv(fg_path, index_col=0)
    # Per-cell labels for the foreground; 0 marks unresponsive cells.
    fg_labels = pd.read_csv(fg_labels_path, index_col=0).iloc[:, 0].values
    n_bg = bg_data.shape[0]
    n_fg = fg_data.shape[0]

    num_repeats = 5
    bfs_experiment = []
    bfs_shuffled = []
    bfs_control = []
    for ii in range(num_repeats):
        ######################################
        ######### Original datasets ##########
        ######################################
        # CPLVM expects genes x cells, hence the transposes.
        X = bg_data.values.T
        Y = fg_data.values.T
        num_datapoints_x = X.shape[1]
        num_datapoints_y = Y.shape[1]
        cplvm = CPLVM(k_shared=latent_dim_shared, k_foreground=latent_dim_foreground)
        # H0: no foreground-specific structure; H1: full CPLVM.
        approx_model_H0 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=True
        )
        approx_model_H1 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=False
        )
        H0_results = cplvm._fit_model_vi(X, Y, approx_model_H0, compute_size_factors=True, offset_term=False, is_H0=True)
        H1_results = cplvm._fit_model_vi(X, Y, approx_model_H1, compute_size_factors=True, offset_term=False, is_H0=False)
        # Final ELBO per datapoint (loss trace holds the negative ELBO).
        H1_elbo = (
            -1
            * H1_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        H0_elbo = (
            -1
            * H0_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        # log(EBF) = ELBO(H1) - ELBO(H0)
        curr_bf = H1_elbo - H0_elbo
        print("BF treatment: {}".format(curr_bf))
        bfs_experiment.append(curr_bf)

        ######################################
        ######### Shuffled datasets ##########
        ######################################
        # Null: randomly reassign cells to background/foreground.
        shuffled_idx = np.random.permutation(np.arange(n_bg + n_fg))
        shuffled_idx_X = shuffled_idx[:n_bg]
        shuffled_idx_Y = shuffled_idx[n_bg:]
        all_data = np.concatenate([bg_data.values, fg_data.values], axis=0).T
        X = all_data[:, shuffled_idx_X]
        Y = all_data[:, shuffled_idx_Y]
        num_datapoints_x = X.shape[1]
        num_datapoints_y = Y.shape[1]
        cplvm = CPLVM(k_shared=latent_dim_shared, k_foreground=latent_dim_foreground)
        approx_model_H0 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=True
        )
        approx_model_H1 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=False
        )
        H0_results = cplvm._fit_model_vi(X, Y, approx_model_H0, compute_size_factors=True, offset_term=False, is_H0=True)
        H1_results = cplvm._fit_model_vi(X, Y, approx_model_H1, compute_size_factors=True, offset_term=False, is_H0=False)
        H1_elbo = (
            -1
            * H1_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        H0_elbo = (
            -1
            * H0_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        curr_bf = H1_elbo - H0_elbo
        print("BF shuffled: {}".format(curr_bf))
        bfs_shuffled.append(curr_bf)

        ######################################
        ######### Null datasets ##########
        ######################################
        # Null: use only unresponsive foreground cells (label 0) together
        # with the background, then split the pooled cells at random.
        fg_unresponsive_data = fg_data.iloc[fg_labels == 0, :]
        all_data = np.concatenate([bg_data.values, fg_unresponsive_data.values], axis=0).T
        shuffled_idx = np.random.permutation(np.arange(all_data.shape[1]))
        shuffled_idx_X = shuffled_idx[:bg_data.shape[0]]
        shuffled_idx_Y = shuffled_idx[bg_data.shape[0]:]
        X = all_data[:, shuffled_idx_X]
        Y = all_data[:, shuffled_idx_Y]
        num_datapoints_x = X.shape[1]
        num_datapoints_y = Y.shape[1]
        cplvm = CPLVM(k_shared=latent_dim_shared, k_foreground=latent_dim_foreground)
        approx_model_H0 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=True
        )
        approx_model_H1 = CPLVMLogNormalApprox(
            X, Y, latent_dim_shared, latent_dim_foreground, offset_term=False, is_H0=False
        )
        H0_results = cplvm._fit_model_vi(X, Y, approx_model_H0, compute_size_factors=True, offset_term=False, is_H0=True)
        H1_results = cplvm._fit_model_vi(X, Y, approx_model_H1, compute_size_factors=True, offset_term=False, is_H0=False)
        H1_elbo = (
            -1
            * H1_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        H0_elbo = (
            -1
            * H0_results["loss_trace"][-1].numpy()
            / (num_datapoints_x + num_datapoints_y)
        )
        curr_bf = H1_elbo - H0_elbo
        print("BF control: {}".format(curr_bf))
        bfs_control.append(curr_bf)

    # Box plot of the three EBF distributions, saved as CSV and PNG.
    plt.figure(figsize=(9, 8))
    results_df = pd.DataFrame({"control": bfs_control, "shuffled": bfs_shuffled, "experiment": bfs_experiment})
    results_df_melted = pd.melt(results_df)
    results_df_melted.to_csv("../out/splatter/global_ebfs.csv")
    ax = sns.boxplot(data=results_df_melted, x="variable", y="value", color="black")
    for patch in ax.artists:
        r, g, b, a = patch.get_facecolor()
        patch.set_facecolor((r, g, b, .3))
    plt.title("Global ELBO Bayes factors")
    plt.xticks(np.arange(3), labels=[
        "Unperturbed\nnull", "Shuffled\nnull", "Perturbed"])
    plt.ylabel("log(EBF)")
    plt.tight_layout()
    plt.savefig("../out/splatter/global_ebfs.png")
    plt.close()
| StarcoderdataPython |
1769248 | from GradientDescent import *
| StarcoderdataPython |
25668 | <filename>chapter03/python/situation.py
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
# 1. Load Data and Item Profile
ratings = pd.read_csv('chapter03/data/movie_rating.csv')
# title x critic matrix of ratings (NaN where a critic did not rate).
movie_ratings = pd.pivot_table(
    ratings,
    values='rating',
    index='title',
    columns='critic'
)
print(movie_ratings)

# 2. Construct profiles
# Situation profile: genre weights per situation (weekday/weekend/holiday).
profile_dict = {
    'Romance': [0.3, 0.4, 0],
    'Thriller': [0, 0, 0.5],
    'Action': [0.2, 0, 0],
    'Sci-fi': [0, 0.3, 0.4],
    'Mystery': [0, 0, 0],
    'Comedy': [0.5, 0, 0],
    'Fantasy': [0, 0.3, 0.1],
    'Crime': [0, 0, 0]
}
situation_profile = pd.DataFrame(
    profile_dict,
    index=['weekday', 'weekend', 'holiday']
)
print(situation_profile)

# Item profile: binary genre membership per movie (one row per title).
profile_dict = {
    'Romance': [1, 0, 0, 0, 0, 1],
    'Thriller': [0, 1, 1, 0, 0, 0],
    'Action': [0, 0, 1, 0, 0, 0],
    'Sci-fi': [0, 0, 0, 1, 0, 0],
    'Mystery': [0, 0, 0, 0, 1, 0],
    'Comedy': [0, 0, 0, 0, 0, 1],
    'Fantasy': [1, 1, 0, 1, 0, 0],
    'Crime': [0, 0, 0, 0, 1, 0]
}
item_profile = pd.DataFrame(profile_dict, index=movie_ratings.index)

# TF-IDF weighting of the genre matrix.
idf = dict()
N = len(item_profile)
for feature in item_profile.columns:
    # Number of movies tagged with this genre. NOTE(review): value_counts()[1]
    # raises KeyError if a genre is tagged on no movie -- verify the data.
    df = item_profile[feature].value_counts()[1]
    idf[feature] = np.log(N / df)
    item_profile.loc[:, feature] =\
        [tf * idf[feature] for tf in item_profile.loc[:, feature]]
print(item_profile)

# User profiles: critics x genres, ratings-weighted sums (NaN treated as 0).
user_profile = np.dot(np.transpose(movie_ratings.fillna(0)), item_profile)
user_profile = pd.DataFrame(
    user_profile,
    index=movie_ratings.columns,
    columns=item_profile.columns
)
print(user_profile)

# NOTE(review): user_id is set but index[5] is hard-coded below -- presumably
# these are meant to be the same user; confirm before reusing.
user_id = 5
user_5_profile = user_profile.loc[user_profile.index[5]]
# Combine the user's genre taste with each situation's genre weights.
user_situation_profile = situation_profile * user_5_profile
print(user_situation_profile)

# 3. calculate similarity
# Cosine similarity of each situation-conditioned profile against every movie.
result = cosine_similarity(user_situation_profile, item_profile)
result = pd.DataFrame(
    result,
    index=user_situation_profile.index,
    columns=item_profile.index
)
print(result)
| StarcoderdataPython |
3210240 | #!/usr/bin/env python3
'''Simple script to verify SHA256 hashes and rename downloaded versions with their version number'''
import json
import requests
import hashlib
from pathlib import Path
class DownloadException(Exception):
    """Raised when the hash manifest cannot be downloaded or is incomplete."""
def sha256(path):
    """Return the hex SHA-256 digest of the file at `path`."""
    # Bug fix: the body read the loop global `filename` instead of the
    # `path` parameter, so the function only worked by accident when
    # called from inside the download loop below.
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()
# Download the published hash manifest: {"version": ..., "hashes": {file: sha256}}.
url = 'https://binary.ninja/js/hashes.json'
r = requests.get(url)
results = json.loads(r.text)
if not results['version']:
    raise DownloadException('Hash file does not exist or is incomplete.')
version = results["version"]
for filename in results["hashes"]:
    # NOTE: `hash` shadows the builtin of the same name within this loop.
    hash = results["hashes"][filename]
    binfile = Path(filename)
    if binfile.is_file():
        if sha256(binfile) == hash:
            # Insert the version before the 4-char extension (e.g. ".zip").
            newname = f"{filename[0:-4]}-{version}{filename[-4:]}"
            # NOTE(review): "(unknown)" below looks like a redacted filename
            # placeholder; the f-strings have no substitution here -- verify.
            print(f"SHA256 matches for (unknown), renaming to {newname}")
            binfile.rename(newname)
        else:
            print(f"HASH FAILED for (unknown)")
    else:
        print(f"No (unknown) found")
| StarcoderdataPython |
1683290 | <filename>config/__init__.py
__author__ = 'tuanvu'
__create_time__ = '29/05/2015 2:17 PM' | StarcoderdataPython |
3228872 | # Generated by Django 2.2.6 on 2020-04-15 14:34
from django.db import migrations, models
# Auto-generated Django migration (Django 2.2.6): adds the
# `organization_type` choice field to the `organization` model.
class Migration(migrations.Migration):

    dependencies = [
        ("customers", "0009_rename_company_to_organization"),
    ]

    operations = [
        migrations.AddField(
            model_name="organization",
            name="organization_type",
            field=models.CharField(
                choices=[
                    ("company", "Company"),
                    ("internal", "Internal"),
                    ("non-billable", "Non-billable"),
                    ("other", "Other"),
                ],
                # With preserve_default=False below, "company" only
                # back-fills existing rows; it is not kept as the default.
                default="company",
                max_length=16,
                verbose_name="organization type",
            ),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3383351 | <filename>misc_codes/seshat_misc.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 12 12:55:40 2019
@author: hajime
"""
'''
Asymmetric regression and MHG graphing.
'''
#%%
'''
Asymmetry regression
'''
from sklearn.linear_model import LinearRegression
# NOTE(review): LinearRegression is imported but never used in this section.

# `CC_scaled`, `CC_df`, `velArrayOut`, `flowInfo`, `dt`, `pos_v_not_nan_out`,
# `normalize`, `GMM`, the d_* arrays and the x_grid* arrays are assumed to be
# defined earlier in the full script (not visible in this chunk).

# Build a per-(NGA, Time) table of the nine scaled complexity characteristics.
CC_scaled_df = pd.DataFrame(CC_scaled,columns=[ 'PolPop', 'PolTerr', 'CapPop', 'levels', 'government','infrastr', 'writing', 'texts', 'money' ])
CC_scaled_df[['NGA','Time']] = CC_df[['NGA','Time']]
CC_reshape = CC_scaled_df.groupby(['NGA','Time']).mean().reset_index()
# Difference each NGA's next observation against the current one.
CC_fwd = CC_reshape.groupby(['NGA']).shift(-1)
CC_out = CC_fwd[['Time', 'PolPop', 'PolTerr', 'CapPop', 'levels', 'government','infrastr', 'writing', 'texts', 'money']] - CC_reshape[['Time', 'PolPop', 'PolTerr', 'CapPop', 'levels', 'government','infrastr', 'writing', 'texts', 'money']]
CC_out['NGA'] = CC_reshape['NGA']
# Convert differences to velocities per century (hence the *100).
CC_out_vel = CC_out[['Time', 'PolPop', 'PolTerr', 'CapPop', 'levels', 'government','infrastr', 'writing', 'texts', 'money']].div(CC_out['Time'],axis=0) *100.
CC_out_vel.columns = [str(col) + '_vel' for col in CC_out_vel.columns]
CC_out_vel['NGA'] = CC_out['NGA']

# PC1 velocity for each observation.
y = velArrayOut[:,0,1].reshape([-1,1])

##Average vel of CCs
CC_vel_mean = CC_out_vel.mean()
##Average vel of CCs when PC1 vel is positive
CC_vel_mean_PC1vel_positive = CC_out_vel.loc[(y>0).flatten() ].mean()
##Average vel of CCs when PC1 vel is negative
CC_vel_mean_PC1vel_negative = CC_out_vel.loc[(y<=0).flatten() ].mean()

#%%
#%%
##MGH plot: PC1/PC2 flow arrows coloured by moralising-gods / doctrinal-mode era
MHG_df = pd.read_csv('first_dates.csv') # A pandas dataframe
MHG_df = MHG_df.loc[MHG_df.MoralisingGods.notna()]
NGA_14 =['Big Island Hawaii',
 'Cuzco',
 'Deccan',
 'Kachi Plain',
 'Kansai',
 'Konya Plain',
 'Middle Yellow River Valley',
 'Niger Inland Delta',
 'North Colombia',
 'Orkhon Valley',
 'Paris Basin',
 'Sogdiana',
 'Susiana',
 'Upper Egypt']
plt.figure(figsize=(17,8.85))
plt.axis('scaled')
plt.xlim(-6,5)
plt.ylim(-3,3)
scale=100.
for i in range(0,velArrayOut.shape[0]):
    if not np.isnan(velArrayOut[i,0,1]):
        if flowInfo.NGA[i] in MHG_df.NGA.values:
            if flowInfo.NGA[i] in NGA_14:
                MHG_year = MHG_df.loc[MHG_df.NGA==flowInfo.NGA[i]].MoralisingGods.values[0]
                DM_year = MHG_df.loc[MHG_df.NGA==flowInfo.NGA[i]].DoctrinalMode.values[0]
                # Colour: red-ish after MHG, a second hue after DM, else third.
                if flowInfo.Time[i]>=MHG_year:
                    rgb = cm.hsv(0)
                elif flowInfo.Time[i]>=DM_year:
                    rgb = cm.hsv(100)
                else:
                    rgb = cm.hsv(50)
                plt.arrow(velArrayOut[i,0,0],velArrayOut[i,1,0],velArrayOut[i,0,1]*scale,velArrayOut[i,1,1]*scale,width=.01,head_width=.06,head_length=.04,color=rgb)
                # Blue dot marks the exact MHG appearance time.
                if flowInfo.Time[i]==MHG_year:
                    plt.plot(velArrayOut[i,0,0],velArrayOut[i,1,0], 'bo')
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.savefig("OUT_in_the_data_MHG_DM_14NGAs.pdf")
plt.show()
plt.close()

#%%
#01/25 filter data points that i) have time gap and ii) move to different gaussian
#Fit GMM on 2D space
gmm_x_fit = GMM(n_components=2).fit(d_x_pos)
gmm_x_cov = gmm_x_fit.covariances_
gmm_x_mean = gmm_x_fit.means_
gmm_x_weights = gmm_x_fit.weights_
gmm_x_prob = gmm_x_fit.predict_proba(d_x_pos_notnan)
gmm_x_pred = gmm_x_fit.predict(d_x_pos_notnan)
# Predicted component after each observed movement.
d_x_pos_next_obs = d_x_pos_notnan + d_mov_notnan
gmm_x_prob_next = gmm_x_fit.predict_proba(d_x_pos_next_obs)
gmm_x_pred_next = gmm_x_fit.predict(d_x_pos_next_obs)
dt_notnan = dt[pos_v_not_nan_out]
# Keep points unless they both switch Gaussian AND have a >100-year gap.
filtered_loc = ~( (gmm_x_pred!=gmm_x_pred_next)*(dt_notnan.flatten()>100) )
d_x_pos_filtered = d_x_pos_notnan[filtered_loc]
d_x_dt_filtered = d_x_dt_notnan[filtered_loc]
d_v_filtered = d_v_notnan[filtered_loc]
d_mov_filtered = d_mov_notnan[filtered_loc]
# Re-normalise the filtered arrays (normalize returns (data, mean, std)).
d_x_pos_fil_normalized,d_x_pos_fil_mean,d_x_pos_fil_std = normalize(d_x_pos_filtered)
d_x_dt_fil_normalized,d_x_dt_fil_mean,d_x_dt_fil_std = normalize(d_x_dt_filtered)
d_v_fil_normalized,d_v_fil_mean,d_v_fil_std = normalize(d_v_filtered)
d_mov_fil_normalized,d_mov_fil_mean,d_mov_fil_std = normalize(d_mov_filtered)
x_grid_pos_fil_normalized = (x_grids_out-d_x_pos_fil_mean)/d_x_pos_fil_std
x_grid_dt_fil_normalized = (x_grid_dt-d_x_dt_fil_mean)/d_x_dt_fil_std

# Export for MATLAB (Gaussian labels shifted to 1-based indexing).
dict_for_matlab = {}
dict_for_matlab['velArrayOut_filtered'] = velArrayOut[pos_v_not_nan_out][filtered_loc]
dict_for_matlab['velArrayOut_notnan'] = velArrayOut[pos_v_not_nan_out]
dict_for_matlab['Gaussian'] = gmm_x_pred+1
#savemat('velArrayOut_plus_Gaussian.mat',dict_for_matlab)
'''
#Graph Original Flow with filtered data
plt.figure(figsize=(17,8.85))
plt.axis('scaled')
plt.xlim(-6,5)
plt.ylim(-3,3)
scale=100.
flowInfo_NGA_filtered = np.array(flowInfo.NGA[pos_v_not_nan_out][filtered_loc] )
for i in range(0,d_x_pos_filtered.shape[0]):
rgb = cm.hsv(np.where(np.unique(flowInfo_NGA_filtered) == flowInfo_NGA_filtered[i])[0][0]/len(np.unique(flowInfo_NGA_filtered)))
plt.arrow(d_x_pos_filtered[i,0],d_x_pos_filtered[i,1],d_v_filtered[i,0]*scale,d_v_filtered[i,1]*scale,width=.01,head_width=.06,head_length=.04,color=rgb)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.savefig("OUT_in_the_data_filtered.pdf")
plt.show()
plt.close()
#Flows that are filtered out
#Graph Original Flow with filtered data
plt.figure(figsize=(17,8.85))
plt.axis('scaled')
plt.xlim(-6,5)
plt.ylim(-3,3)
scale=100.
flowInfo_NGA_removed = np.array(flowInfo.NGA[pos_v_not_nan_out][~filtered_loc] )
for i in range(0,d_x_pos_notnan[~filtered_loc].shape[0]):
#rgb = cm.hsv(np.where(np.unique(flowInfo_NGA_filtered) == flowInfo_NGA_filtered[i])[0][0]/len(np.unique(flowInfo_NGA_filtered)))
plt.arrow(d_x_pos_notnan[~filtered_loc][i,0],d_x_pos_notnan[~filtered_loc][i,1],d_v_notnan[~filtered_loc][i,0]*scale,d_v_notnan[~filtered_loc][i,1]*scale,width=.01,head_width=.06,head_length=.04,color=rgb)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.savefig("removed_data_by_filtering.pdf")
plt.show()
plt.close()
'''
| StarcoderdataPython |
1685654 | <gh_stars>100-1000
"""
Copyright (c) 2015-2020 <NAME>(<EMAIL>), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
####################################################################
# DEPRECATED AFTER ADDING `following` table
####################################################################
# web2py globals: `current`, `URL` come from the framework environment.
db = current.db
atable = db.auth_user
frtable = db.friend_requests

# Every distinct e-mail address with at least one pending friend request.
join_query = (atable.id == frtable.to_h)
email_ids = db(frtable).select(atable.email,
                               join=frtable.on(join_query),
                               distinct=True)

# Send one bulk reminder e-mail per address.
for email in email_ids:
    current.send_mail(to=email["email"],
                      subject="You have pending requests!",
                      message=
                      """<html>
                          Hello StopStalker!! <br />
                          You have pending friend requests on StopStalk <br />
                          Connect with more to make best use of StopStalk - %s <br />
                          To stop receiving mails - <a href="%s">Unsubscribe</a> <br />
                          Cheers, <br />
                          StopStalk
                      </html>
                      """ % (URL("default", "notifications",
                                 scheme="https",
                                 host="www.stopstalk.com"),
                             URL("default", "unsubscribe",
                                 scheme="https",
                                 host="www.stopstalk.com")),
                      mail_type="pending_requests",
                      bulk=True)
# END =========================================================================
| StarcoderdataPython |
29958 | #!/usr/bin/env python
'''
Calculating the emissions from deposits in Platypus stable accounts
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, EngFormatter, PercentFormatter
from strategy_const import *
from const import *
def boosted_pool_emission_rate(your_stable_deposit, vePTP_held, other_deposit_weights):
    """Fraction of boosted-pool emissions earned: your pool weight is the
    geometric mean sqrt(deposit * vePTP), divided by the total weights."""
    my_weight = np.sqrt(your_stable_deposit * vePTP_held)
    return my_weight / other_deposit_weights
def base_pool_emission_rate(your_stable_deposit, other_stable_deposits):
    """Fraction of base-pool emissions earned by your stable deposit."""
    return your_stable_deposit / (other_stable_deposits + your_stable_deposit)
# define function with vectorize decorator for extensibility
@np.vectorize
def total_emissions_rate(stable_bankroll,
                         ptp_marketbuy_proportion):
    '''
    :stable_bankroll: total USD value of the stables you'd invest in the Platypus protocol
    :ptp_marketbuy_proportion: proportion of stable_bankroll you'd use to marketbuy PTP for staking to vePTP

    Returns the strategy's share of total PTP emissions (base + boosted),
    given the constants imported from strategy_const / const
    (PTP_PRICE, HOURS_SPENT_STAKING, HOURLY_STAKED_PTP_vePTP_YIELD,
    GLOBAL_PTP_STAKED, TVL, and the pool allocation fractions).
    '''
    # PTP bought with the market-buy slice, then its accrued vePTP.
    n_PTP = (stable_bankroll * ptp_marketbuy_proportion) / PTP_PRICE
    n_vePTP = HOURS_SPENT_STAKING * HOURLY_STAKED_PTP_vePTP_YIELD * n_PTP
    # The remainder of the bankroll is deposited as stables.
    stable_deposit = stable_bankroll * (1 - ptp_marketbuy_proportion)

    # calculating lower bound on total deposit weights:
    # assume all other deposits are from one wallet with all other staked PTP
    # and it's been staking as long as you have
    total_deposit_weights = GLOBAL_PTP_STAKED * HOURLY_STAKED_PTP_vePTP_YIELD * HOURS_SPENT_STAKING

    boosted = boosted_pool_emission_rate(stable_deposit, n_vePTP, total_deposit_weights)
    base = base_pool_emission_rate(stable_deposit, TVL - stable_deposit)
    # Weight each pool's share by its emissions allocation.
    return (BOOSTING_POOL_ALLOCATION * boosted) + (BASE_POOL_ALLOCATION * base)
def plot_2d_returns(stable_bankroll, ptp_proportion, returns_array, as_percents = True):
    """Use matplotlib to plot the slope of returns across different bankroll strategies.

    stable_bankroll / ptp_proportion: meshgrid arrays (from np.meshgrid);
    returns_array: emission shares evaluated on that grid.
    NOTE(review): the `as_percents` flag is currently unused in the body.
    """
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize=(18,9))
    manifold = ax.plot_surface(stable_bankroll, ptp_proportion, returns_array,
                               cmap=cm.plasma, linewidth=0.5, antialiased=False)

    # labels, titles, and axes
    ax.set_title(f"Monthly Strategy Emissions given PTP staking for {round(HOURS_SPENT_STAKING / 24)} Days")
    ax.xaxis.set_major_formatter(EngFormatter(unit="$", places=1, sep="\N{THIN SPACE}"))
    ax.set_xlabel("Strategy Bankroll")
    ax.yaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=1))
    ax.set_ylabel("Percent Market-Bought and Staked")
    ax.zaxis.set_major_locator(LinearLocator(9))
    ax.zaxis.set_major_formatter(PercentFormatter(xmax=1, decimals=4))
    ax.set_zlabel("Percent of Emissions for Strategy")

    # colorbar for scale
    fig.colorbar(manifold, shrink=0.5, aspect=5, format=PercentFormatter(xmax=1, decimals=4))
    plt.show()
def main():
    """Print the market assumptions, evaluate the emission-share surface over
    the configured bankroll/market-buy grid, and plot it."""
    print(f"Emissions calculations consider PTP/USD: ${round(PTP_PRICE, 3)}\n" +
          f"Reflecting a FDMC of \t${round(FDMC / 10**6)}MM " +
          f"({round(PERCENT_COINS_CIRCULATING * 100)}% of coins available)\n" +
          f"and implying TVL of \t${round(TVL / 10**6)}MM " +
          f"(Mcap/TVL: {round(1 / TVL_TO_CMC_RATIO, 4)})\n" +
          f"with {round(GLOBAL_PTP_STAKED / 10**6, 2)}MM PTP staked for vePTP ({round(PERCENT_PTP_STAKED * 100)}%)")

    # Create the mesh and calculate return rates
    # (grid ranges come from strategy_const).
    stable_bankroll, ptp_proportion = np.meshgrid(stable_deposit_range, ptp_market_buy_bankroll_proportion)
    returns = total_emissions_rate(stable_bankroll, ptp_proportion)

    # plotting time
    plot_2d_returns(stable_bankroll, ptp_proportion, returns)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3263203 | # Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
from ocellaris.utils import shift_fields
def before_simulation(simulation, force_steady=False):
    """
    Handle timestepping issues before starting the simulation. There are
    basically two options, either we have full velocity history available,
    either from initial conditions on the input file or from a restart file,
    or there is only access to one previous time step and we need to start
    up using first order timestepping
    """
    starting_order = 1

    # Check if there are non-zero values in the upp vectors
    maxabs = 0
    for d in range(simulation.ndim):
        this_maxabs = abs(simulation.data['upp%d' % d].vector().get_local()).max()
        maxabs = max(maxabs, this_maxabs)
    # Reduce over MPI ranks so every process takes the same branch below
    maxabs = dolfin.MPI.max(dolfin.MPI.comm_world, float(maxabs))
    if maxabs > 0:
        starting_order = 2

    if force_steady:
        simulation.log.info('Setting time derivatives to zero')
        simulation.data['time_coeffs'].assign(dolfin.Constant([0.0, 0.0, 0.0]))
    elif starting_order == 2:
        # Switch to second order time stepping (BDF2 coefficients)
        simulation.log.info(
            'Initial values for upp are found and used, '
            'starting with second order time stepping.'
        )
        simulation.data['time_coeffs'].assign(dolfin.Constant([3 / 2, -2.0, 1 / 2]))
    else:
        # Standard first order time stepping (backward Euler coefficients)
        simulation.log.info(
            'Initial values for upp are not found, ' 'starting with first order time stepping.'
        )
        simulation.data['time_coeffs'].assign(dolfin.Constant([1.0, -1.0, 0.0]))

    # Prepare the linearised convecting velocity for the first step
    update_convection(simulation, starting_order, force_steady=force_steady)
    simulation.log.info('\nTime loop is now starting\n', flush='force')
def update_timestep(simulation):
    """
    Read the timestep from the input file; if it differs from the previous
    timestep, temporarily drop back to first order time stepping.
    Returns the (possibly changed) timestep.
    """
    new_dt = simulation.input.get_value('time/dt', required_type='float')
    if new_dt != simulation.dt:
        simulation.log.info('Temporarily changing to first order time stepping')
        simulation.data['time_coeffs'].assign(dolfin.Constant([1.0, -1.0, 0.0]))
    return new_dt
def after_timestep(simulation, is_steady, force_steady=False):
    """
    Move u -> up, up -> upp and prepare for the next time step.

    When `is_steady`, also returns the max-norm change between the new and
    previous velocity components (used as a steady-state stopping criterion);
    otherwise returns None.
    """
    # Stopping criteria for steady state simulations
    vel_diff = None
    if is_steady:
        vel_diff = 0
        for d in range(simulation.ndim):
            u_new = simulation.data['u%d' % d]
            up = simulation.data['up%d' % d]
            diff = abs(u_new.vector().get_local() - up.vector().get_local()).max()
            vel_diff = max(vel_diff, diff)

    # Shift the velocity history one step back
    shift_fields(simulation, ['u%d', 'up%d', 'upp%d'])
    shift_fields(simulation, ['u_conv%d', 'up_conv%d', 'upp_conv%d'])

    if force_steady:
        simulation.data['time_coeffs'].assign(dolfin.Constant([0.0, 0.0, 0.0]))
    else:
        # Change time coefficient to second order (BDF2)
        simulation.data['time_coeffs'].assign(dolfin.Constant([3 / 2, -2, 1 / 2]))

    # Extrapolate the convecting velocity to the next step
    update_convection(simulation, force_steady=force_steady)

    return vel_diff
def update_convection(simulation, order=2, force_steady=False):
    """
    Update terms used to linearise and discretise the convective term.

    With order 1 (or force_steady) the convecting velocity is simply the
    previous value; with order 2 it is linearly extrapolated from the two
    previous values.
    """
    ndim = simulation.ndim
    data = simulation.data

    # Update convective velocity field components
    for d in range(ndim):
        uic = data['u_conv%d' % d]
        uip = data['up_conv%d' % d]
        uipp = data['upp_conv%d' % d]

        if order == 1 or force_steady:
            uic.assign(uip)
        else:
            # Backwards difference formulation - standard linear extrapolation
            # u_conv = 2*up - upp, assembled via in-place vector operations
            uic.vector().zero()
            uic.vector().axpy(2.0, uip.vector())
            uic.vector().axpy(-1.0, uipp.vector())
            uic.vector().apply('insert')
| StarcoderdataPython |
1770450 | import unittest
import tempfile
import os
from .ParsedArgs import ParsedArgs
from meraki_cli.__main__ import _args_from_file
class TestArgsFromFile(unittest.TestCase):
    """Tests for `_args_from_file`, which loads CLI defaults from a config
    file located via an explicit path, the working directory, or APPDATA."""

    def setUp(self):
        # Fresh fake parsed-arguments namespace for every test
        self.parsed_args = ParsedArgs()

    def testArgsFromFileExplicitPathNoExist(self):
        """A missing explicitly-given config file logs CRITICAL and exits."""
        self.parsed_args.configFile = '~/idontexist.conf'
        with self.assertLogs(level='CRITICAL'):
            with self.assertRaises(SystemExit):
                _args_from_file(self.parsed_args)

    def testArgsFromFileGoodPath(self):
        """A valid config file populates args (apiKey here)."""
        # If an env variable exists, delete it so it doesn't interfere
        if os.environ.get('MERAKI_DASHBOARD_API_KEY'):
            del os.environ['MERAKI_DASHBOARD_API_KEY']
        file = tempfile.NamedTemporaryFile('w', delete=False)
        file.write('{"apiKey": "abcdef"}')
        file.close()
        self.parsed_args.configFile = file.name
        _args_from_file(self.parsed_args)
        os.remove(file.name)
        assert self.parsed_args.apiKey == 'abcdef'

    def testArgsFromFileExplicitPathSuccess(self):
        """A meraki-cli.conf in the current directory is picked up."""
        # Create a temp meraki.conf file in the current dir
        configFile = open(os.path.join(os.getcwd(), 'meraki-cli.conf'), 'w')
        configFile.write('{"logfile": "expathtest.log"}')
        configFile.close()
        _args_from_file(self.parsed_args)
        os.remove(configFile.name)  # Delete that temp file
        assert self.parsed_args.logfile == 'expathtest.log'

    def testArgsFromFileEnvPath(self):
        """A config under $APPDATA/meraki-cli/ is picked up."""
        folder = tempfile.TemporaryDirectory()
        # Set the APPDATA env var so we can test with it
        os.environ['APPDATA'] = folder.name
        # Create a ./meraki-cli/ directory in the temp directory
        mdir = os.path.join(folder.name, 'meraki-cli')
        os.mkdir(mdir)
        # Create a ./meraki-cli/meraki-cli.conf temp config file
        configFile = open(os.path.join(mdir, 'meraki-cli.conf'), 'w')
        configFile.write('{"logfile": "envpathtest.log"}')
        configFile.close()
        _args_from_file(self.parsed_args)
        # Delete that env var
        del os.environ['APPDATA']
        assert self.parsed_args.logfile == 'envpathtest.log'
| StarcoderdataPython |
3371650 | <filename>hi-ml/testhiml/testhiml/test_attentionlayers.py
import pytest
from typing import Type, Union
from torch import nn, rand, sum, allclose, ones_like
from health_ml.networks.layers.attention_layers import (AttentionLayer, GatedAttentionLayer,
MeanPoolingLayer, TransformerPooling,
MaxPoolingLayer)
def _test_attention_layer(attentionlayer: nn.Module, dim_in: int, dim_att: int,
                          batch_size: int,) -> None:
    """Shared shape/normalisation checks for attention and pooling layers.

    Feeds a random (batch_size, dim_in) feature matrix through the layer and
    validates the attention weights and pooled output.

    :param attentionlayer: layer under test; called as ``layer(features)`` and
        expected to return ``(attn_weights, output_features)``.
    :param dim_in: feature dimension L of each instance.
    :param dim_att: number of attention outputs K expected from the layer.
    :param batch_size: number of instances N fed to the layer.
    """
    features = rand(batch_size, dim_in)  # (N, L) random input features
    attn_weights, output_features = attentionlayer(features)
    assert attn_weights.shape == (dim_att, batch_size)  # K x N
    assert output_features.shape == (dim_att, dim_in)  # K x L
    assert ((attn_weights >= 0) & (attn_weights <= 1 + 1e-5)).all()  # added tolerance due to rounding issues
    # Each attention output's weights over the batch should sum to one.
    row_sums = sum(attn_weights, dim=1, keepdim=True)
    assert allclose(row_sums, ones_like(row_sums))
    if isinstance(attentionlayer, TransformerPooling):
        pass  # transformer pooling output is not a plain weights @ features product
    elif isinstance(attentionlayer, MaxPoolingLayer):
        pass  # max pooling output is not a weighted average of the inputs
    else:
        # Averaging-style layers: pooled output must equal the
        # attention-weighted combination of the input features.
        pooled_features = attn_weights @ features.flatten(start_dim=1)
        assert allclose(pooled_features, output_features)
@pytest.mark.parametrize("dim_in", [1, 3])
@pytest.mark.parametrize("dim_hid", [1, 4])
@pytest.mark.parametrize("dim_att", [1, 5])
@pytest.mark.parametrize("batch_size", [1, 7])
@pytest.mark.parametrize('attention_layer_cls', [AttentionLayer, GatedAttentionLayer])
def test_attentionlayer(dim_in: int, dim_hid: int, dim_att: int, batch_size: int,
                        attention_layer_cls: Type[Union[AttentionLayer, GatedAttentionLayer]]) -> None:
    """Run the shared checks for (gated) attention layers over a grid of sizes."""
    layer = attention_layer_cls(input_dims=dim_in,
                                hidden_dims=dim_hid,
                                attention_dims=dim_att)
    _test_attention_layer(layer, dim_in, dim_att, batch_size)
@pytest.mark.parametrize("dim_in", [1, 3])
@pytest.mark.parametrize("batch_size", [1, 7])
def test_mean_pooling(dim_in: int, batch_size: int,) -> None:
    """Run the shared attention-layer checks for MeanPoolingLayer."""
    layer = MeanPoolingLayer()
    _test_attention_layer(layer, dim_in=dim_in, dim_att=1, batch_size=batch_size)
@pytest.mark.parametrize("dim_in", [1, 3])
@pytest.mark.parametrize("batch_size", [1, 7])
def test_max_pooling(dim_in: int, batch_size: int,) -> None:
    """Run the shared attention-layer checks for MaxPoolingLayer."""
    layer = MaxPoolingLayer()
    _test_attention_layer(layer, dim_in=dim_in, dim_att=1, batch_size=batch_size)
@pytest.mark.parametrize("num_layers", [1, 4])
@pytest.mark.parametrize("num_heads", [1, 2])
@pytest.mark.parametrize("dim_in", [4, 8])  # dim_in % num_heads must be 0
@pytest.mark.parametrize("batch_size", [1, 7])
def test_transformer_pooling(num_layers: int, num_heads: int, dim_in: int, batch_size: int) -> None:
    """Run the shared attention-layer checks for TransformerPooling (eval mode)."""
    pooling = TransformerPooling(
        num_layers=num_layers, num_heads=num_heads, dim_representation=dim_in
    ).eval()
    _test_attention_layer(pooling, dim_in=dim_in, dim_att=1, batch_size=batch_size)
| StarcoderdataPython |
3285661 | import numpy
from .dtypes import index_type,value_type,label_type
class FactorSubset(object):
    """ Holds a subset of factor indices of a graphical model.

        This class is used to compute queries for a a subset of a gm.
        This queries are very efficient since allmost all members
        are implemented in pure C++.
        The members are a vectorized subset of the regular
        factor api of a graphical model. Therefore allmost all factor queries
        can be vectorized with this class.

        Args :
            gm : the graphical model to which the factors belong

            factorIndices : the factor indices w.r.t. the gm which are in the subset .
                If factorIndices is not given, the indices of all factors will be used

        Example: ::

            >>> import opengm
            >>> import numpy
            >>> unaries=numpy.random.rand(3,2,2)
            >>> gm=opengm.grid2d2Order(unaries,opengm.PottsFunction([2,2],0.0,0.4))
            >>> factorSubset=opengm.FactorSubset(gm)
            >>> len(factorSubset)==gm.numberOfFactors
            True
            >>> numberOfVariables=factorSubset.numberOfVariables()
            >>> len(numberOfVariables)==gm.numberOfFactors
            True
            >>> unaryFactorIndices=factorSubset.factorsWithOrder(1)
            >>> unaryFactorSubset=opengm.FactorSubset(gm,unaryFactorIndices)
            >>> len(unaryFactorSubset)
            6
            >>> secondOrderFactorIndices=factorSubset.factorsWithOrder(2)
            >>> secondOrderFactorSubset=opengm.FactorSubset(gm,secondOrderFactorIndices)
            >>> len(secondOrderFactorSubset)
            7
    """
    def __init__(self, gm, factorIndices=None):
        self.gm = gm
        if factorIndices is None:
            # Default: the subset covers every factor of the gm.
            self.factorIndices = numpy.arange(gm.numberOfFactors, dtype=index_type)
        else:
            self.factorIndices = factorIndices

    def __len__(self):
        """ get the number of factors within the factorSubset """
        return len(self.factorIndices)

    def numberOfVariables(self):
        """ get the number variables for each factors within the factorSubset """
        return self.gm._factor_numberOfVariables(self.factorIndices)

    def gmLabelsToFactorLabels(self, labels):
        """ convert a gm-wide labeling into per-factor labelings """
        numpyLabels = numpy.require(labels, dtype=label_type)
        # BUGFIX: the converted array is now passed on (the original passed the
        # raw ``labels`` argument and ignored the dtype conversion above).
        return self.gm._factor_gmLablingToFactorLabeling(self.factorIndices, numpyLabels)

    def variableIndices(self):
        """ get the variable indices for each factor within the factorSubset """
        return self.gm._factor_variableIndices(self.factorIndices)

    def numberOfLabels(self):
        """ get the number of labels for each variable of each factor in the subset """
        return self.gm._factor_numberOfLabels(self.factorIndices)

    def isSubmodular(self):
        """ check submodularity for each factor within the factorSubset """
        return self.gm._factor_isSubmodular(self.factorIndices)

    def mapScalarReturning(self, function, dtype):
        """ apply a scalar-returning function to each factor in the subset.

        ``dtype`` selects the C++ return-type specialization; only float32,
        float64, int64, uint64 and bool are supported.
        """
        if dtype == numpy.float32:
            return self.gm._factor_scalarRetFunction_float32(function, self.factorIndices)
        elif dtype == numpy.float64:
            return self.gm._factor_scalarRetFunction_float64(function, self.factorIndices)
        elif dtype == numpy.uint64:
            return self.gm._factor_scalarRetFunction_uint64(function, self.factorIndices)
        elif dtype == numpy.int64:
            return self.gm._factor_scalarRetFunction_int64(function, self.factorIndices)
        elif dtype in (bool, numpy.bool_):
            # ``numpy.bool`` (an alias of builtin bool) was removed in
            # numpy >= 1.24; accept both the builtin and numpy.bool_.
            return self.gm._factor_scalarRetFunction_bool(function, self.factorIndices)
        else:
            # BUGFIX: the ``%`` formatting now happens inside the constructor
            # call; the original applied ``%`` to the RuntimeError instance,
            # which raised a TypeError instead of the intended message.
            raise RuntimeError(
                "dtype %s is not supported, so far only float32, float64, int64, uint64 and bool are supported"
                % (str(dtype),))

    def fullIncluedFactors(self, vis):
        """ get factors whose variables are fully included in ``vis``.

        NOTE(review): the method name ('Inclued') is a historical typo but is
        part of the public API, so it is kept for backward compatibility.
        """
        visNumpy = numpy.require(vis, dtype=index_type)
        return self.gm._factor_fullIncluedFactors(self.factorIndices, visNumpy)

    def evaluate(self, labels):
        """ evaluate each factor in the subset for a given labeling.

        ``labels`` may be a gm-wide labeling or per-factor labelings.
        """
        labelsNumpy = numpy.require(labels, dtype=label_type)
        # NOTE(review): this compares the labeling length against
        # gm.numberOfLabels; a gm-wide labeling has one entry per *variable*,
        # so numberOfVariables may have been intended -- confirm against the
        # C++ API before changing.
        if labelsNumpy.ndim == 1 and labelsNumpy.shape[0] == self.gm.numberOfLabels:
            return self.gm._factor_evaluateGmLabeling(self.factorIndices, labelsNumpy)
        else:
            if labelsNumpy.ndim == 1:
                # A single per-factor labeling: lift to a 1 x L matrix.
                labelsNumpy = labelsNumpy.reshape([1, -1])
            return self.gm._factor_evaluateFactorLabeling(self.factorIndices, labelsNumpy)

    def factorsWithOrder(self, order):
        """ get the indices of all factors in the subset with the given order """
        return self.gm._factor_withOrder(self.factorIndices, int(order))
# Run the docstring examples above as doctests when executed directly.
# opengm is imported so the doctest namespace can resolve it.
if __name__ == "__main__":
    import doctest
    import opengm
    doctest.testmod()
| StarcoderdataPython |
132682 | <reponame>HugoS99/DagTest<filename>dag.py
from airflow import DAG
from datetime import datetime, timedelta
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.operators.dummy_operator import DummyOperator
# Default arguments applied to every task in this DAG.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    # NOTE(review): a dynamic start_date (utcnow at parse time) is a
    # well-known Airflow anti-pattern -- the value changes on every DAG
    # parse. A fixed datetime is recommended; confirm intended scheduling
    # before changing (interacts with schedule_interval and catchup).
    'start_date': datetime.utcnow(),
    'email': ['<EMAIL>'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}
# Sample DAG exercising KubernetesPodOperator with one passing and one
# failing pod task, fanned out from a single dummy entry task.
dag = DAG(
    'kubernetes_sample', default_args=default_args, schedule_interval=timedelta(minutes=10))

# No-op entry point; both pod tasks run downstream of it.
start = DummyOperator(task_id='run_this_first', dag=dag)
# Task expected to succeed: runs a trivial one-liner in the official
# Python 3.6 image.
# BUGFIX: Docker image (repository) names must be lowercase, so
# "Python:3.6" is an invalid reference and the pull would fail; likewise
# the interpreter binary inside the image is "python", not "Python".
passing = KubernetesPodOperator(namespace='default',
                          image="python:3.6",
                          cmds=["python", "-c"],
                          arguments=["print('hello world')"],
                          labels={"foo": "bar"},
                          name="passing-test",
                          task_id="passing-task",
                          get_logs=True,
                          dag=dag
                          )
# Task expected to fail (task name "fail"): "ubuntu:1604" is not a valid
# tag (the real tag is "ubuntu:16.04"), so the image pull fails.
# NOTE(review): presumably the failure is intentional to demonstrate a
# failing pod -- confirm before "fixing" the tag. The capitalised
# "Python" cmds would also fail inside a stock ubuntu image.
failing = KubernetesPodOperator(namespace='default',
                          image="ubuntu:1604",
                          cmds=["Python","-c"],
                          arguments=["print('hello world')"],
                          labels={"foo": "bar"},
                          name="fail",
                          task_id="failing-task",
                          get_logs=True,
                          dag=dag
                          )
# Fan out: both pod tasks depend only on the initial dummy task.
passing.set_upstream(start)
failing.set_upstream(start) | StarcoderdataPython |
# NOTE(review): this snippet is Python 2 (see the trailing print statement).
# Py2 input() *evaluates* the typed expression -- raw_input() + int() would
# be safer; confirm the intended runtime before changing.
n = input()
def gcd(m, n):
    """Return the greatest common divisor of m and n (Euclid's algorithm).

    Intended for non-negative integers. BUGFIX: the original version
    raised ZeroDivisionError when n == 0; gcd(m, 0) is m by definition.
    """
    while n:
        m, n = n, m % n
    return m
def lcm(m, n):
    """Return the least common multiple of m and n.

    BUGFIX: uses floor division so the result stays an int under Python 3
    as well (the original '/' returns a float there; under Python 2 int '/'
    was already floor division, so behaviour is unchanged).
    """
    g = gcd(m, n)
    return m * n // g
def omin(n):
    """Return the smallest positive integer divisible by every integer up to n.

    Equivalent to lcm(1, 2, ..., n); relies on the module-level lcm helper.
    """
    result = n
    for divisor in range(2, n):
        # Only fold in divisors that do not already divide the running lcm.
        if result % divisor:
            result = lcm(result, divisor)
    return result
print omin(n) | StarcoderdataPython |
4834542 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import logging
from syne_tune.optimizer.schedulers.searchers.gp_searcher_utils import (
SUPPORTED_RESOURCE_FOR_ACQUISITION,
)
from benchmarking.cli.benchmark_factory import supported_benchmarks, benchmark_factory
logger = logging.getLogger(__name__)
__all__ = [
"parse_args",
"make_searcher_and_scheduler",
]
def parse_args(allow_lists_as_values=True):
    """
    Argument parser for CLI. Normally, this parameterizes a single experiment.
    But if `allow_lists_as_values == True`, certain arguments admit lists as
    values. In this case, experiments of all combinations of values (Cartesian
    product) are launched.

    :param allow_lists_as_values: See above
    :return: params dict. Note that if an argument added to the parser is not
        provided a value for, it is contained in the dict with value None
    """
    parser = argparse.ArgumentParser(
        description="Asynchronous Hyperparameter Optimization"
    )
    # We parse the CL args twice. The first pass parses all global arguments
    # (not specific to the benchmark). From that pass, we know what the
    # benchmark is. In a second pass, we parse additional benchmark-specific
    # arguments, as defined in the default_params for the benchmark.
    if allow_lists_as_values:
        allow_list = dict(nargs="+")
    else:
        allow_list = dict()
    if allow_lists_as_values:
        parser.add_argument(
            "--argument_groups",
            type=str,
            help="Specify groups of list arguments, separated "
            "by |. Arguments in a group are iterated "
            "over together",
        )
    # Note: The benchmark cannot be a list argument, since it can define its
    # own CL arguments
    parser.add_argument(
        "--benchmark_name",
        type=str,
        default="mlp_fashionmnist",
        choices=supported_benchmarks(),
        help="Benchmark to run experiment on",
    )
    parser.add_argument(
        "--skip_initial_experiments",
        type=int,
        default=0,
        help="When multiple experiments are launched (due to "
        "list arguments), this number of initial "
        "experiments are skipped",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="local",
        choices=("local", "sagemaker", "simulated"),
        help="Backend for training evaluations",
    )
    parser.add_argument(
        "--local_tuner",
        action="store_true",
        help="Run tuning experiment locally? Otherwise, it is "
        "run remotely (which allows to run multiple "
        "tuning experiments in parallel)",
    )
    parser.add_argument(
        "--run_id",
        type=int,
        help="Identifier to distinguish between runs " "(nonnegative integers)",
        **allow_list,
    )
    parser.add_argument(
        "--num_runs",
        type=int,
        help="Number of repetitions, with run_id 0, 1, ..."
        "Only if run_id not given (ignored otherwise)",
    )
    parser.add_argument(
        "--random_seed_offset",
        type=int,
        help="Master random seed is this plus run_id, modulo "
        "2 ** 32. Drawn at random if not given",
    )
    parser.add_argument(
        "--instance_type",
        type=str,
        help="SageMaker instance type for workers",
        **allow_list,
    )
    parser.add_argument(
        "--tuner_instance_type",
        type=str,
        default="ml.c5.xlarge",
        help="SageMaker instance type for tuner (only for "
        "sagemaker backend and remote tuning)",
        **allow_list,
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        help="Number of workers (parallel evaluations)",
        **allow_list,
    )
    parser.add_argument(
        "--image_uri", type=str, help="URI of Docker image (sagemaker backend)"
    )
    parser.add_argument(
        "--sagemaker_execution_role",
        type=str,
        help="SageMaker execution role (sagemaker backend)",
    )
    parser.add_argument(
        "--experiment_name",
        type=str,
        help="Experiment name (used as job_name_prefix in " "sagemaker backend)",
    )
    parser.add_argument(
        "--no_debug_log", action="store_true", help="Switch off verbose logging"
    )
    parser.add_argument(
        "--debug_log_level",
        action="store_true",
        help="Set logging level to DEBUG (default is INFO)",
    )
    parser.add_argument(
        "--no_tuner_logging",
        action="store_true",
        help="By default, the full tuning status is logged "
        "in the tuning loop every --print_update_interval"
        " secs. If this is set, this logging is suppressed",
    )
    parser.add_argument(
        "--enable_sagemaker_profiler",
        action="store_true",
        help="Enable SageMaker profiler (this needs one "
        "processing job for each training job",
    )
    parser.add_argument(
        "--no_experiment_subdirectory",
        action="store_true",
        help="When storing results, do not use subdirectory " "experiment_name",
    )
    parser.add_argument(
        "--cost_model_type",
        type=str,
        help="Selects cost model of benchmark",
        **allow_list,
    )
    parser.add_argument(
        "--scheduler", type=str, default="fifo", help="Scheduler name", **allow_list
    )
    parser.add_argument("--searcher", type=str, help="Searcher name", **allow_list)
    parser.add_argument(
        "--results_update_interval",
        type=int,
        default=300,
        help="Results and tuner state are stored every this " "many seconds",
    )
    parser.add_argument(
        "--print_update_interval",
        type=int,
        default=300,
        help="Tuner status printed every this many seconds",
    )
    parser.add_argument(
        "--tuner_sleep_time",
        type=float,
        default=5,
        help="Tuner tries to fetch new results every this " "many seconds",
    )
    parser.add_argument(
        "--max_resource_level",
        type=int,
        help="Largest resource level (e.g., epoch number) " "for training evaluations",
        **allow_list,
    )
    parser.add_argument(
        "--epochs",
        type=int,
        help="Deprecated: Use max_resource_level instead",
        **allow_list,
    )
    parser.add_argument(
        "--num_trials", type=int, help="Maximum number of trials", **allow_list
    )
    parser.add_argument(
        "--scheduler_timeout",
        type=int,
        help="Trials started until this cutoff time (in secs)",
        **allow_list,
    )
    parser.add_argument(
        "--max_failures",
        type=int,
        default=1,
        help="The tuning job terminates once this many " "training evaluations failed",
        **allow_list,
    )
    parser.add_argument(
        "--s3_bucket",
        type=str,
        help="S3 bucket to write checkpoints and results to. "
        "Defaults to default bucket of session",
    )
    parser.add_argument(
        "--no_gpu_rotation",
        action="store_true",
        help="For local back-end on a GPU instance: By "
        "default, trials are launched in parallel "
        "on different GPU cores (GPU rotation). If "
        "this is set, all GPU cores are used for a "
        "single evaluation",
    )
    parser.add_argument(
        "--blackbox_repo_s3_root",
        type=str,
        help="S3 root directory for blackbox repository. "
        "Defaults to default bucket of session",
    )
    parser.add_argument(
        "--blackbox_seed",
        type=int,
        help="Fixed seeds of blackbox queries to this value "
        "(0 is safe), so that they return the same "
        "metric values for the same config",
    )
    # Arguments for scheduler
    parser.add_argument(
        "--brackets",
        type=int,
        help="Number of brackets in HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--reduction_factor",
        type=float,
        help="Reduction factor in HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--grace_period",
        type=int,
        help="Minimum resource level (e.g., epoch number) " "in HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--rung_levels",
        type=str,
        help="List of resource levels to use for the rungs "
        "in HyperbandScheduler. Entries must be positive "
        "ints. Overrides --grace_period, "
        "--reduction_factor if given",
        **allow_list,
    )
    parser.add_argument(
        "--rung_system_per_bracket",
        action="store_true",
        help="Parameter of HyperbandScheduler",
    )
    parser.add_argument(
        "--searcher_data",
        type=str,
        help="Parameter of HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--register_pending_myopic",
        action="store_true",
        help="Parameter of HyperbandScheduler",
    )
    parser.add_argument(
        "--not_normalize_targets",
        action="store_true",
        help="Do not normalize targets to mean 0, variance 1"
        " before fitting surrogate model",
    )
    parser.add_argument(
        "--pasha_ranking_criterion",
        type=str,
        help="Parameter of PASHA scheduler",
        **allow_list,
    )
    parser.add_argument(
        "--pasha_epsilon", type=float, help="Parameter of PASHA scheduler", **allow_list
    )
    parser.add_argument(
        "--pasha_epsilon_scaling",
        type=str,
        help="Parameter of PASHA scheduler",
        **allow_list,
    )
    # Arguments for bayesopt searcher
    parser.add_argument(
        "--searcher_model",
        type=str,
        help="Surrogate model for bayesopt searcher with " "HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_num_init_random",
        type=int,
        help="Number of initial trials not chosen by searcher",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_num_init_candidates",
        type=int,
        help="Number of random candidates scored to seed search",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_num_fantasy_samples",
        type=int,
        help="Number of fantasy samples",
        **allow_list,
    )
    help_str = (
        "Rule for resource level at which acquisition function is used "
        + f"[{SUPPORTED_RESOURCE_FOR_ACQUISITION}]"
    )
    parser.add_argument(
        "--searcher_resource_acq", type=str, help=help_str, **allow_list
    )
    parser.add_argument(
        "--searcher_resource_acq_bohb_threshold",
        type=int,
        help="Parameter for resource_acq == bohb",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_gp_resource_kernel",
        type=str,
        help="Multi-task kernel for HyperbandScheduler",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_opt_skip_period",
        type=int,
        help="Update GP hyperparameters only every (...) times",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_opt_skip_init_length",
        type=int,
        help="Update GP hyperparameters every time until "
        "(...) observations are done",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_opt_skip_num_max_resource",
        action="store_true",
        help="Update GP hyperparameters only when training " "runs reach max_t",
    )
    parser.add_argument(
        "--searcher_opt_nstarts",
        type=int,
        help="GP hyperparameter optimization restarted (...) " "times",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_opt_maxiter",
        type=int,
        help="Maximum number of iterations of GP " "hyperparameter optimization",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_initial_scoring",
        type=str,
        help="Scoring function to rank initial candidates "
        "for seeding search [thompson_indep, acq_func]",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_skip_local_optimization",
        action="store_true",
        help="Skip local optimization of acquisition function "
        "and just pick top-scorer of initial candidates",
    )
    parser.add_argument(
        "--searcher_issm_gamma_one",
        action="store_true",
        help="Fix gamma parameter of ISSM to one?",
    )
    parser.add_argument(
        "--searcher_exponent_cost",
        type=float,
        help="Exponent of cost term in cost-aware expected "
        "improvement acquisition function",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_expdecay_normalize_inputs",
        action="store_true",
        help="Normalize resource values to [0, 1] in "
        "GP-expdecay surrogate model (only if "
        "searcher_model = gp_expdecay)",
    )
    parser.add_argument(
        "--searcher_num_init_candidates_for_batch",
        type=int,
        help="Relevant for synchronous Hyperband with bayesopt "
        "searcher. If batch of size B is suggested, the "
        "first suggest uses searcher_num_init_candidates, "
        "the B-1 subsequent suggests use this value",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_use_old_code",
        action="store_true",
        help="DEBUG: Use old code for gp_issm, gp_expdecay",
    )
    parser.add_argument(
        "--searcher_no_fantasizing",
        action="store_true",
        help="Ignore pending evaluations, do not use fantasizing",
    )
    # Arguments for kde searcher
    parser.add_argument(
        "--searcher_num_min_data_points",
        type=int,
        help="KDE: Minimum number of datapoints needed to fit models",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_top_n_percent",
        type=int,
        help="KDE: Top (bottom) model fit on this top (bottom) fraction of data",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_min_bandwidth",
        type=float,
        help="KDE: Minimum bandwidth",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_num_candidates",
        type=int,
        help="KDE: Number of candidates that are sampled to optimize the acquisition function",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_bandwidth_factor",
        type=int,
        help="KDE: Parameter to scale bandwidth",
        **allow_list,
    )
    parser.add_argument(
        "--searcher_random_fraction",
        type=float,
        help="KDE: Fraction of configs suggested at random",
        **allow_list,
    )

    # First pass: All global arguments
    # Why do we parse all global args here, and not just benchmark_name?
    # This is to make sure that the help option of the parser lists all
    # global arguments and their help strings.
    _params = parser.parse_known_args()[0]
    benchmark_name = _params.benchmark_name
    # Add benchmark-specific CL args (if any)
    # These are the ones listed in benchmark['default_params'], minus args which
    # are already global (i.e., added above)
    _, default_params = benchmark_factory({"benchmark_name": benchmark_name})
    help_str = f"Additional parameter for {benchmark_name} benchmark"
    have_extra_args = False
    for name, value in default_params.items():
        try:
            # We don't need to set defaults here
            if value is None:
                _type = str
            else:
                _type = type(value)
            parser.add_argument("--" + name, type=_type, help=help_str)
            have_extra_args = True
        except argparse.ArgumentError:
            # Argument already registered globally above; skip it.
            pass
    # Second pass: All args (global and benchmark-specific)
    if have_extra_args:
        params = vars(parser.parse_args())
    else:
        # BUGFIX: _params is an argparse.Namespace; it must be converted to a
        # dict here, otherwise the item assignments/deletions and .get() calls
        # below raise TypeError/AttributeError in the no-extra-args case.
        params = vars(_params)
    # Post-processing: translate the negated CL flags into positive dict keys.
    params["debug_log"] = not params["no_debug_log"]
    del params["no_debug_log"]
    params["rotate_gpus"] = not params["no_gpu_rotation"]
    del params["no_gpu_rotation"]
    # --epochs is a deprecated alias of --max_resource_level.
    epochs = params.get("epochs")
    if params.get("max_resource_level") is None:
        if epochs is not None:
            logger.info(
                "--epochs is deprecated, please use "
                "--max_resource_level in the future"
            )
            params["max_resource_level"] = epochs
    elif epochs is not None:
        logger.info(
            "Both --max_resource_level and the deprecated "
            "--epochs are set. The latter is ignored"
        )
    if "epochs" in params:
        del params["epochs"]
    params["normalize_targets"] = not params["not_normalize_targets"]
    del params["not_normalize_targets"]
    params["searcher_use_new_code"] = not params["searcher_use_old_code"]
    del params["searcher_use_old_code"]
    return params
def _enter_not_none(dct, key, val, tp=None):
if tp is None:
tp = str
if val is not None:
dct[key] = tp(val)
def make_searcher_and_scheduler(params) -> (dict, dict):
    """Translate the flat CLI ``params`` dict into ``search_options`` and
    ``scheduler_options`` dicts for the chosen searcher/scheduler.

    :param params: dict returned by :func:`parse_args`
    :return: tuple ``(search_options, scheduler_options)``
    """
    scheduler = params["scheduler"]
    searcher = params["searcher"]
    # Surrogate models with additive resource structure.
    # BUGFIX(robustness): hoisted to function scope -- it was previously
    # defined only inside the bayesopt branch, so the reference in the
    # "special constraints" check below relied on boolean short-circuit
    # evaluation to avoid a NameError.
    gp_add_models = {"gp_issm", "gp_expdecay"}
    # Options for searcher
    search_options = dict()
    _enter_not_none(search_options, "debug_log", params.get("debug_log"), tp=bool)
    _enter_not_none(
        search_options, "normalize_targets", params.get("normalize_targets"), tp=bool
    )
    model = params.get("searcher_model")
    _enter_not_none(search_options, "model", model)
    if searcher.startswith("bayesopt"):
        # Options for bayesopt searcher. The third tuple entry flags
        # arguments that are ignored by the gp_add_models surrogates.
        searcher_args = (
            ("num_init_random", int, False),
            ("num_init_candidates", int, False),
            ("num_fantasy_samples", int, False),
            ("resource_acq", str, True),
            ("resource_acq_bohb_threshold", int, True),
            ("gp_resource_kernel", str, True),
            ("opt_skip_period", int, False),
            ("opt_skip_init_length", int, False),
            ("opt_skip_num_max_resource", bool, False),
            ("opt_nstarts", int, False),
            ("opt_maxiter", int, False),
            ("initial_scoring", str, False),
            ("skip_local_optimization", bool, False),
            ("issm_gamma_one", bool, False),
            ("exponent_cost", float, False),
            ("expdecay_normalize_inputs", bool, False),
            ("use_new_code", bool, False),
            ("num_init_candidates_for_batch", int, False),
            ("no_fantasizing", bool, False),
        )
        for name, tp, warn in searcher_args:
            _enter_not_none(search_options, name, params.get("searcher_" + name), tp=tp)
            if warn and name in search_options and model in gp_add_models:
                logger.warning(f"{name} not used with searcher_model = {model}")
        if "issm_gamma_one" in search_options and model != "gp_issm":
            logger.warning(
                f"searcher_issm_gamma_one not used with searcher_model = {model}"
            )
        if "expdecay_normalize_inputs" in search_options and model != "gp_expdecay":
            logger.warning(
                "searcher_expdecay_normalize_inputs not used with searcher_model "
                f"= {model}"
            )
    elif searcher == "kde":
        # Options for kde searcher
        searcher_args = (
            ("num_min_data_points", int),
            ("top_n_percent", int),
            ("min_bandwidth", float),
            ("num_candidates", int),
            ("bandwidth_factor", int),
            ("random_fraction", float),
        )
        for name, tp in searcher_args:
            _enter_not_none(search_options, name, params.get("searcher_" + name), tp=tp)
    # Options for scheduler
    random_seed_offset = params.get("random_seed_offset")
    if random_seed_offset is None:
        random_seed_offset = 0
    # Seed is derived from offset + run_id so repeated runs differ.
    random_seed = (random_seed_offset + params["run_id"]) % (2**32)
    scheduler_options = {"random_seed": random_seed}
    name = "max_resource_level" if scheduler == "hyperband_synchronous" else "max_t"
    _enter_not_none(scheduler_options, name, params.get("max_resource_level"), tp=int)
    scheduler_args = ()
    if scheduler != "fifo":
        # Only process these arguments for HyperbandScheduler
        prefix = "hyperband_"
        assert scheduler.startswith(prefix)
        scheduler_args = scheduler_args + (
            ("reduction_factor", int),
            ("grace_period", int),
            ("brackets", int),
        )
        if scheduler != "hyperband_synchronous":
            sch_type = scheduler[len(prefix) :]
            _enter_not_none(scheduler_options, "type", sch_type)
            rung_levels = params.get("rung_levels")
            if rung_levels is not None:
                scheduler_options["rung_levels"] = sorted(
                    [int(x) for x in rung_levels.split()]
                )
            scheduler_args = scheduler_args + (
                ("searcher_data", str),
                ("register_pending_myopic", bool),
                ("rung_system_per_bracket", bool),
            )
    for name, tp in scheduler_args:
        _enter_not_none(scheduler_options, name, params.get(name), tp=tp)
    # Special constraints: additive GP surrogates require searcher_data='all'
    if (
        scheduler != "fifo"
        and searcher.startswith("bayesopt")
        and model in gp_add_models
    ):
        searcher_data = scheduler_options.get("searcher_data")
        if searcher_data is not None and searcher_data != "all":
            logger.warning(
                f"searcher_model = '{model}' requires "
                f"searcher_data = 'all' (and not '{searcher_data}')"
            )
            scheduler_options["searcher_data"] = "all"
    return search_options, scheduler_options
| StarcoderdataPython |
1752153 | #!/usr/bin/env python
"""
_New_
MySQL implementation of Jobs.New
"""
__all__ = []
import time
import logging
from WMCore.Database.DBFormatter import DBFormatter
class New(DBFormatter):
    """MySQL DAO inserting new jobs (state 'new') into the wmbs_job table.

    Supports a bulk mode (jobList) that inserts many jobs and returns their
    database ids, and a single-job mode (jobgroup + name).
    """
    sql = """INSERT INTO wmbs_job (jobgroup, name, state, state_time,
                                   couch_record, cache_dir, location, outcome,
                                   fwjr_path) VALUES
               (:jobgroup, :name,
                (SELECT id FROM wmbs_job_state WHERE name = 'new'),
                :state_time, :couch_record, :cache_dir,
                (SELECT id FROM wmbs_location WHERE site_name = :location),
                :outcome, :fwjr_path)"""

    getIDsql = """SELECT id as id, name as name FROM wmbs_job WHERE name= :name AND jobgroup= :jobgroup"""

    def getBinds(self, jobList):
        """Build one bind dict per job for the bulk INSERT."""
        binds = []
        for job in jobList:
            tmpDict = {}
            tmpDict["jobgroup"] = job.get("jobgroup")
            tmpDict["name"] = job.get("name")
            tmpDict["couch_record"] = job.get("couch_record", None)
            tmpDict["location"] = job.get("location", None)
            tmpDict["cache_dir"] = job.get("cache_dir", None)
            tmpDict["state_time"] = int(time.time())
            # outcome is stored as 1 (success) / 0 (failure) in the DB.
            if job.get("outcome", 'failure') == 'success':
                tmpDict['outcome'] = 1
            else:
                tmpDict['outcome'] = 0
            tmpDict["fwjr_path"] = job.get("fwjr", None)
            binds.append(tmpDict)
        return binds

    def format(self, input):
        """Map the id-query result rows to a {job name: job id} dict.

        NOTE: the parameter name 'input' shadows the builtin but is kept
        for backward compatibility with existing callers.
        """
        result = {}
        jobList = self.formatDict(input)
        for job in jobList:
            result[job['name']] = job['id']
        return result

    def execute(self, jobgroup = None, name = None, couch_record = None, location = None, cache_dir = None,
                outcome = None, fwjr = None, conn = None, transaction = False, jobList = None):
        """Insert new jobs.

        In bulk mode (jobList given) inserts all jobs and returns a
        {name: id} dict; in single-job mode (jobgroup + name) inserts one
        job and returns None.
        """
        # Map the textual outcome onto the 0/1 column value.
        # BUGFIX: the original left boolOutcome unassigned when outcome was
        # None or not a string (the default!), raising NameError on the
        # single-job code path below.
        if isinstance(outcome, str) and outcome.lower() == 'success':
            boolOutcome = 1
        else:
            boolOutcome = 0

        # Adding jobList enters bulk mode
        if jobList:
            binds = self.getBinds(jobList)
            self.dbi.processData(self.sql, binds, conn = conn, transaction = transaction)
            binds2 = []
            for d in binds:
                binds2.append({'name': d['name'], 'jobgroup': d['jobgroup']})
            # Now we need the IDs
            result = self.dbi.processData(self.getIDsql, binds2, conn = conn, transaction = transaction)
            return self.format(result)
        elif jobgroup and name:
            binds = {"jobgroup": jobgroup, "name": name,
                     "couch_record": couch_record, "state_time": int(time.time()),
                     "location": location, "cache_dir": cache_dir, "outcome": boolOutcome, "fwjr_path": fwjr}
            self.dbi.processData(self.sql, binds, conn = conn,
                                 transaction = transaction)
            return
        else:
            logging.error('Asked for new jobs in Jobs.New without jobgroup and name!')
            return
| StarcoderdataPython |
51460 | # This file is Public Domain and may be used without restrictions.
import _jpype
import jpype
from jpype.types import *
from jpype import java
import jpype.dbapi2 as dbapi2
import common
import time
try:
import zlib
except ImportError:
zlib = None
class SQLModuleTestCase(common.JPypeTestCase):
    """PEP 249 (DB-API 2.0) conformance checks for the jpype.dbapi2 module:
    module constants, exception hierarchy, constructors, and type objects."""

    def setUp(self):
        common.JPypeTestCase.setUp(self)

    def assertIsSubclass(self, a, b):
        # Helper with a clearer failure message than a bare assertTrue.
        self.assertTrue(issubclass(a, b), "`%s` is not a subclass of `%s`" % (a.__name__, b.__name__))

    def testConstants(self):
        # Module-level globals required by PEP 249.
        self.assertEqual(dbapi2.apilevel, "2.0")
        self.assertEqual(dbapi2.threadsafety, 2)
        self.assertEqual(dbapi2.paramstyle, "qmark")

    def testExceptions(self):
        # PEP 249 exception inheritance layout.
        self.assertIsSubclass(dbapi2.Warning, Exception)
        self.assertIsSubclass(dbapi2.Error, Exception)
        self.assertIsSubclass(dbapi2.InterfaceError, dbapi2.Error)
        self.assertIsSubclass(dbapi2.DatabaseError, dbapi2.Error)
        self.assertIsSubclass(dbapi2._SQLException, dbapi2.Error)
        self.assertIsSubclass(dbapi2.DataError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.OperationalError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.IntegrityError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.InternalError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.InternalError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.ProgrammingError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.NotSupportedError, dbapi2.DatabaseError)

    def testConnectionExceptions(self):
        # The Connection class must re-expose the module exceptions
        # (optional PEP 249 extension).
        cx = dbapi2.Connection
        self.assertEqual(cx.Warning, dbapi2.Warning)
        self.assertEqual(cx.Error, dbapi2.Error)
        self.assertEqual(cx.InterfaceError, dbapi2.InterfaceError)
        self.assertEqual(cx.DatabaseError, dbapi2.DatabaseError)
        self.assertEqual(cx.DataError, dbapi2.DataError)
        self.assertEqual(cx.OperationalError, dbapi2.OperationalError)
        self.assertEqual(cx.IntegrityError, dbapi2.IntegrityError)
        self.assertEqual(cx.InternalError, dbapi2.InternalError)
        self.assertEqual(cx.InternalError, dbapi2.InternalError)
        self.assertEqual(cx.ProgrammingError, dbapi2.ProgrammingError)
        self.assertEqual(cx.NotSupportedError, dbapi2.NotSupportedError)

    def test_Date(self):
        # Smoke test: both Date constructors must accept valid arguments.
        d1 = dbapi2.Date(2002, 12, 25)  # noqa F841
        d2 = dbapi2.DateFromTicks(  # noqa F841
            time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))
        )
        # Can we assume this?  API doesn't specify, but it seems implied
        # self.assertEqual(str(d1),str(d2))

    def test_Time(self):
        # Smoke test: both Time constructors must accept valid arguments.
        t1 = dbapi2.Time(13, 45, 30)  # noqa F841
        t2 = dbapi2.TimeFromTicks(  # noqa F841
            time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))
        )
        # Can we assume this?  API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Timestamp(self):
        # Smoke test: both Timestamp constructors must accept valid arguments.
        t1 = dbapi2.Timestamp(2002, 12, 25, 13, 45, 30)  # noqa F841
        t2 = dbapi2.TimestampFromTicks(  # noqa F841
            time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
        )
        # Can we assume this?  API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Binary(self):
        # Binary must accept both non-empty and empty byte strings.
        b = dbapi2.Binary(b"Something")
        b = dbapi2.Binary(b"")  # noqa F841

    def test_STRING(self):
        self.assertTrue(hasattr(dbapi2, "STRING"), "module.STRING must be defined")

    def test_BINARY(self):
        self.assertTrue(
            hasattr(dbapi2, "BINARY"), "module.BINARY must be defined."
        )

    def test_NUMBER(self):
        self.assertTrue(
            hasattr(dbapi2, "NUMBER"), "module.NUMBER must be defined."
        )

    def test_DATETIME(self):
        self.assertTrue(
            hasattr(dbapi2, "DATETIME"), "module.DATETIME must be defined."
        )

    def test_ROWID(self):
        self.assertTrue(hasattr(dbapi2, "ROWID"), "module.ROWID must be defined.")
class SQLTablesTestCase(common.JPypeTestCase):
    """Smoke tests: every dbapi2 type object must stringify cleanly."""

    def setUp(self):
        common.JPypeTestCase.setUp(self)

    def testStr(self):
        for type_obj in dbapi2._types:
            self.assertIsInstance(str(type_obj), str)

    def testRepr(self):
        for type_obj in dbapi2._types:
            self.assertIsInstance(repr(type_obj), str)
141863 | <reponame>PatDaoust/MiscellaneousExercises<filename>Khan Academy Algorithms.py
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 16:30:42 2021
@author: catal
"""
import pdb
import unittest
# 1Let min = 0 and max = n-1.
# 2If max < min, then stop: target is not present in array. Return -1.
# 3Compute guess as the average of max and min, rounded down (so that it is an integer).
# 4If array[guess] equals target, then stop. You found it! Return guess.
# 5If the guess was too low, that is, array[guess] < target, then set min = guess + 1.
# 6Otherwise, the guess was too high. Set max = guess - 1.
# 7Go back to step 2.
def binarySearch(array, target_value):
    '''
    assumes array is a list of ints sorted in ascending order
    assumes target_value is an int, the int to search for
    Returns an int, the index of target_value in array,
    or -1 if the array does not contain target_value
    '''
    # BUG FIX: the original checked "max_i <= min_i: return -1" *after*
    # computing the guess, so it gave up before testing the final
    # candidate (e.g. binarySearch([5], 5) returned -1).  Per the
    # algorithm's step 2, the search only fails once max < min.
    min_i = 0
    max_i = len(array) - 1
    while min_i <= max_i:
        guess_i = (min_i + max_i) // 2
        if array[guess_i] == target_value:
            return guess_i
        if array[guess_i] < target_value:
            # guess too low: discard the left half
            min_i = guess_i + 1
        else:
            # guess too high: discard the right half
            max_i = guess_i - 1
    # Range exhausted without a match.
    return -1
# primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97];
# print(binarySearch(primes, 13))
def swap(a_list, index1, index2):
    """Return a copy of ``a_list`` with the items at ``index1`` and
    ``index2`` exchanged; ``a_list`` itself is left untouched.
    e.g. swap(['a','b','c'], 0, 2) -> ['c','b','a']
    """
    swapped = list(a_list)
    swapped[index1], swapped[index2] = swapped[index2], swapped[index1]
    return swapped
# print(swap(["a","b","c"], 0, 2))
def findMinIndex(a_list, start_index, verbose=False):
    """assumes a_list is a non-empty list of numbers
    assumes start_index is an int, the index where to start looking
    returns an int, the index of the smallest number found between
    start_index and the end of the list, inclusive
    (DOC FIX: the original docstring claimed a (index, value) tuple was
    returned, but only the index ever was)
    if there is a tie for smallest value, returns the 1st occurrence
    if verbose is True, also prints the result
    """
    min_index = start_index
    min_value = a_list[start_index]
    for index in range(start_index, len(a_list)):
        if a_list[index] < min_value:
            min_index = index
            min_value = a_list[index]
    if verbose:
        print("The index of the minimum value of the subarray starting at index " + str(start_index)
              + " is " + str(min_index) + ".")
    return min_index
# some_list = [18, 6, 66, 44, 9, 22, 14]
# findMinIndex(some_list,2)
def selectionSort(a_list):
    """Return a new list with the numbers of ``a_list`` sorted in
    ascending order; the input list is not modified.
    Classic selection sort: repeatedly pick the smallest remaining
    element and place it at the front of the unsorted region.
    """
    result = a_list.copy()
    for position in range(len(result)):
        smallest = findMinIndex(result, position)
        result = swap(result, position, smallest)
    return result
# some_list = [22, 11, 99, 88, 9, 7, 42]
# print(selectionSort(some_list))
def insert(a_list, value):
    """Insert ``value`` into ``a_list`` (a list of numbers already
    sorted in ascending order), keeping it sorted.  The list is
    modified in place and also returned.  Equal values are placed
    after the existing ones.
    """
    pos = len(a_list)
    a_list.append(None)
    # Shift larger elements one slot right until the gap is where
    # value belongs.
    while pos > 0 and a_list[pos - 1] > value:
        a_list[pos] = a_list[pos - 1]
        pos -= 1
    a_list[pos] = value
    return a_list
# print(insert([1,2,3], 0))
# some_list = [2, 3, 5, 6, 7, 9, 11, 13]
# print(insert(some_list, 1))
def insertionSort(a_list):
    """Return a sorted (ascending) version of ``a_list`` built by
    inserting each element into a growing sorted prefix.
    The input list is not modified.
    """
    sorted_part = a_list[:1]
    for value in a_list[1:]:
        sorted_part = insert(sorted_part, value)
    return sorted_part
# print(insertionSort([22, 11, 99, 88, 9, 7, 42]))
def iterativeFactorial(num):
    """Return num! (the factorial of num) for a non-negative int,
    computed with a simple loop; 0! == 1."""
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
# num = 6
# iterativeFactorial(num)
# print(num)
def recursiveFactorial(num):
    """Return num! (the factorial of num) for a non-negative int,
    computed recursively."""
    return 1 if num == 0 else num * recursiveFactorial(num - 1)
# print(recursiveFactorial(10))
def isPalindrome(a_string):
    """Return True when ``a_string`` reads the same forwards and
    backwards; strings of length 0 or 1 count as palindromes."""
    if len(a_string) <= 1:
        return True
    # Outer characters must match, and the inner substring must itself
    # be a palindrome.
    return a_string[0] == a_string[-1] and isPalindrome(a_string[1:-1])
# print(isPalindrome("cat"))
# print(isPalindrome(""))
# print(isPalindrome("c"))
# print(isPalindrome("cac"))
# print(isPalindrome("tacocat"))
def powers(x, n):
    """Return x**n by recursive fast exponentiation.
    n may be zero, negative (reciprocal of the positive power) or
    positive; even exponents are halved with true division, so
    intermediate exponents may be floats, exactly as before.
    """
    if n == 0:
        # base case: anything to the zeroth power is 1
        return 1
    elif n < 0:
        # negative exponent: invert the positive power
        return 1 / powers(x, -n)
    elif n % 2 != 0:
        # odd exponent: peel one factor off
        return x * powers(x, n - 1)
    else:
        # even exponent: square the half power
        half = powers(x, n / 2)
        return half * half
# print(powers(2,0))
# print(powers(2,4))
# print(powers(2,3))
# print(powers(2,-7))
def partition(a_list):
    """Partition ``a_list`` in place around its last element (the
    pivot): afterwards every element greater than the pivot sits to
    its right and every element less than or equal to it sits to its
    left.
    returns an int, the index of the pivot
    """
    pivot = a_list[-1]
    # Elements <= pivot keep their relative order; elements > pivot end
    # up after them in reverse of their original order -- exactly the
    # arrangement the reverse-scan pop/append formulation produced.
    kept = [value for value in a_list if value <= pivot]
    moved = [value for value in a_list if value > pivot]
    moved.reverse()
    a_list[:] = kept + moved
    return a_list.index(pivot)
# print(partition([9, 7, 5, 11, 12, 2, 14, 3, 10, 4, 6]))
def quickSort(a_list, index1=0, index2=None):
    """Sort the section a_list[index1..index2] (inclusive bounds) of a
    list of numbers in place, ascending, leaving the rest unchanged.
    assumes a_list is a list of numbers
    index1: int, starting index of the section (default 0)
    index2: int, ending index of the section (default: last index);
            a caller passing len(a_list) (exclusive-style) is tolerated
    returns a_list
    BUG FIX: the original recursed on the *whole* list, ignoring
    index1/index2 (the "#fix overriding" TODOs), so its base case never
    shrank and any list of length >= 2 recursed forever.
    """
    if index2 is None or index2 > len(a_list) - 1:
        index2 = len(a_list) - 1
    if index1 < index2:
        pivot_index = _partition_between(a_list, index1, index2)
        quickSort(a_list, index1, pivot_index - 1)
        quickSort(a_list, pivot_index + 1, index2)
    return a_list


def _partition_between(a_list, low, high):
    """Lomuto partition of a_list[low..high] around a_list[high];
    returns the pivot's final index."""
    pivot = a_list[high]
    boundary = low
    for i in range(low, high):
        if a_list[i] <= pivot:
            a_list[boundary], a_list[i] = a_list[i], a_list[boundary]
            boundary += 1
    a_list[boundary], a_list[high] = a_list[high], a_list[boundary]
    return boundary
# quickSort([3,4,1,2])
# quickSort([3,4,5,1,2],0,4)
# quickSort([9, 7, 5, 11, 12, 2, 14, 3, 10, 4, 6])
| StarcoderdataPython |
163390 | """ Tests dataset views methods """
from __future__ import unicode_literals
from __future__ import absolute_import
import copy
import datetime
import json
import django
from django.utils.timezone import now
from rest_framework import status
from rest_framework.test import APITestCase
from data.data.json.data_v6 import DataV6
from data.dataset.json.dataset_v6 import DataSetDefinitionV6
from util import rest
from data.models import DataSet
import data.test.utils as dataset_test_utils
import storage.test.utils as storage_utils
from storage.models import Workspace
"""Tests the v6/datasets/ endpoint"""
class TestDatasetViews(APITestCase):
    """Exercises GET /v6/datasets/ listing, time/id/keyword filters and
    ordering."""
    api = 'v6'
    def setUp(self):
        """Creates two datasets: self.dataset (6 member files, backdated to
        yesterday) and self.dataset2 (no files, postdated to tomorrow), so
        time filters see one dataset on each side of today."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # create a workspace and files
        self.workspace = storage_utils.create_workspace(name='Test Workspace', is_active=True)
        self.file1 = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        self.file2 = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        self.file3 = storage_utils.create_file(file_name='input_f2.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        self.file4 = storage_utils.create_file(file_name='input_eb.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        self.file5 = storage_utils.create_file(file_name='input_fb.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        self.file6 = storage_utils.create_file(file_name='input_fb2.json', file_type='SOURCE', media_type='application/json',
                                               file_size=10, data_type_tags=['type'], file_path='the_path',
                                               workspace=self.workspace)
        today = now()
        yesterday = today - datetime.timedelta(days=1)
        tomorrow = today + datetime.timedelta(days=1)
        self.dataset = dataset_test_utils.create_dataset(definition=copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
                                                         title="Test Dataset 1", description="Key Test Dataset Number one")
        # Adjust created timestamps directly in the DB so the started/ended
        # filter tests below get deterministic results.
        DataSet.objects.filter(pk=self.dataset.pk).update(created=yesterday)
        self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2", description="Test Dataset Number two")
        DataSet.objects.filter(pk=self.dataset2.pk).update(created=tomorrow)
        # create dataset members
        data1 = copy.deepcopy(dataset_test_utils.DATA_DEFINITION)
        data1['files']['input_e'] = [self.file1.id]
        data1['files']['input_f'] = [self.file2.id, self.file3.id]
        self.member1_1 = dataset_test_utils.create_dataset_members(dataset=self.dataset, data_list=[data1])[0]
        data2 = copy.deepcopy(dataset_test_utils.DATA_DEFINITION)
        data2['files']['input_e'] = [self.file4.id]
        data2['files']['input_f'] = [self.file5.id, self.file6.id]
        # NOTE(review): unlike the other members this keeps the full list
        # returned by create_dataset_members (no [0]) -- confirm intended.
        self.member1_1_2 = dataset_test_utils.create_dataset_members(dataset=self.dataset, data_list=[data2])
        self.member2_1 = dataset_test_utils.create_dataset_members(dataset=self.dataset2)[0]
        self.member2_2 = dataset_test_utils.create_dataset_members(dataset=self.dataset2)[0]
    def test_successful(self):
        """Tests successfully calling the v6/datasets/ view.
        """
        url = '/%s/datasets/' % self.api
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Test response contains specific dataset
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 2)
        # Each listed dataset should report its title and member-file count.
        for entry in result['results']:
            expected = None
            expectedFiles = 0
            if entry['id'] == self.dataset.id:
                expected = self.dataset
                expectedFiles = 6
            elif entry['id'] == self.dataset2.id:
                expected = self.dataset2
                expectedFiles = 0
            else:
                self.fail('Found unexpected result: %s' % entry['id'])
            self.assertEqual(entry['title'], expected.title)
            self.assertEqual(entry['files'], expectedFiles)
    def test_dataset_time_successful(self):
        """Tests successfully calling the v6/datasets api with time filters
        """
        # NOTE(review): yesterday/tomorrow are computed but only today is
        # used below -- presumably left over from an earlier version.
        yesterday = now().date() - datetime.timedelta(days=1)
        yesterday = yesterday.isoformat() + 'T00:00:00Z'
        today = now().date()
        today = today.isoformat() + 'T00:00:00Z'
        tomorrow = now().date() + datetime.timedelta(days=1)
        tomorrow = tomorrow.isoformat() + 'T00:00:00Z'
        url = '/%s/datasets/?started=%s' % (self.api, today)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify one result
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        url = '/%s/datasets/?ended=%s' % (self.api, today)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify one result
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
    def test_dataset_id_successful(self):
        """Tests successfully calling the v6/datasets/?id= api call
        """
        url = '/%s/datasets/?id=%s' % (self.api, self.dataset.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify one result
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        # Multiple id parameters should OR together.
        url = '/%s/datasets/?id=%s&id=%s' % (self.api, self.dataset.id, self.dataset2.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify two results
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 2)
    def test_dataset_keyword_successful(self):
        """Tests successfully calling the v6/datasets/?keyword= api call
        """
        # 'key' appears only in dataset 1's description ("Key Test Dataset...").
        url = '/%s/datasets/?keyword=%s' % (self.api, 'key')
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify one result
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        # Multiple keyword parameters should OR together.
        url = '/%s/datasets/?keyword=%s&keyword=%s' % (self.api, 'one', 'two')
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify 2 results
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 2)
    def test_order_by(self):
        """Tests successfully calling the datasets view with sorting."""
        url = '/%s/datasets/?order=-id' % self.api
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Verify 2 results
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 2)
        # Descending id order puts the later-created dataset2 first.
        self.assertEqual(result['results'][0]['id'], self.dataset2.id)
"""Tests the v6/datasets POST calls """
class TestDataSetPostView(APITestCase):
    """Tests the v6/dataset/ POST API call"""
    api = 'v6'
    def setUp(self):
        """Sets up Django and an authenticated staff client."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
    def test_invalid_definition(self):
        """Tests successfully calling POST with an invalid definition."""
        # An empty body has no definition at all.
        json_data = {}
        url = '/%s/datasets/' % self.api
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        # A definition missing a declared global json value is also rejected.
        definition = copy.deepcopy(dataset_test_utils.DATASET_DEFINITION)
        del definition['global_data']['json']['input_c']
        json_data = {
            'title': 'My Dataset',
            'description': 'A test dataset',
            'definition': definition,
        }
        url = '/%s/datasets/' % self.api
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_add_dataset(self):
        """Tests adding a new dataset"""
        url = '/%s/datasets/' % self.api
        json_data = {
            'title': 'My Dataset',
            'description': 'A test dataset',
            'definition': copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        # The Location header should point at the new dataset's detail URL.
        result = json.loads(response.content)
        new_dataset_id = result['id']
        self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
        self.assertEqual(result['title'], json_data['title'])
        self.assertEqual(result['description'], json_data['description'])
        # create another dataset
        json_data_2 = {
            'title': 'My Dataset 2',
            'description': 'Another test dataset',
            'definition': copy.deepcopy(dataset_test_utils.DATASET_DEFINITION),
        }
        response = self.client.generic('POST', url, json.dumps(json_data_2), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        new_dataset_id = result['id']
        self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
        self.assertEqual(result['title'], json_data_2['title'])
        self.assertEqual(result['description'], json_data_2['description'])
    def test_create_dataset_with_members(self):
        """Tests creating a dataset along with a bunch of members"""
        title = 'Test Dataset'
        description = 'Test DataSet description'
        file1 = storage_utils.create_file()
        file2 = storage_utils.create_file()
        file3 = storage_utils.create_file()
        file4 = storage_utils.create_file()
        # call test
        parameters = {'version': '6',
                      'files': [
                          {'name': 'input_a',
                           'media_types': ['application/json'],
                           'required': True},
                          {'name': 'input_b',
                           'media_types': ['application/json'],
                           'multiple': True,
                           'required': True},
                          {'name': 'input_c',
                           'media_types': ['application/json'],
                           'required': True}
                      ],
                      'json': []}
        definition = {'version': '6', 'parameters': parameters}
        json_data = {
            'title': title,
            'description': description,
            'definition': definition,
            'data': {
                'version': '7',
                'files': {
                    'input_a': [file1.id],
                    'input_b': [file2.id, file3.id],
                    'input_c': [file4.id],
                },
                'json': {}
            },
        }
        url = '/%s/datasets/' % self.api
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        new_dataset_id = result['id']
        self.assertTrue('/%s/datasets/%d/' % (self.api, new_dataset_id) in response['location'])
        # The created dataset should carry the parameters and member files.
        self.assertTrue(len(result['definition']['parameters']['files']), 3)
        self.assertTrue(len(result['files']), 4)
"""Tests the v6/datasets/<dataset_id> endpoint"""
class TestDatasetDetailsView(APITestCase):
    """Tests GET/POST on the v6/datasets/<dataset_id>/ endpoint: detail
    retrieval, adding members directly, and adding members via a
    data-template filter (with and without dry_run)."""
    api = 'v6'
    def setUp(self):
        """Creates two datasets with members, plus 1000 extra files split
        across two source collections for the template-filter tests."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # Create workspace
        self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
                                                  last_modified=now())
        # Create files
        self.country = storage_utils.create_country()
        self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path', countries=[self.country],
                                                    workspace=self.workspace)
        self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
                                                     file_size=10, data_type_tags=['type'], file_path='the_path',
                                                     workspace=self.workspace)
        self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        # 500 files in each of two source collections for filter tests.
        for i in range(0, 500):
            storage_utils.create_file(source_collection='12345')
        for i in range(0, 500):
            storage_utils.create_file(source_collection='123456')
        # Create datasets
        parameters = {'version': '6',
                      'files': [
                          {'name': 'input_a',
                           'media_types': ['application/json'],
                           'required': True}
                      ],
                      'json': []}
        definition = {'version': '6', 'parameters': parameters}
        self.dataset = dataset_test_utils.create_dataset(title="Test Dataset 1",
                                                         description="Test Dataset Number 1",
                                                         definition=definition)
        parameters2 = {'version': '6',
                       'files': [
                           {'name': 'input_b',
                            'media_types': ['application/json'],
                            'required': True,
                            'multiple': True},
                           {'name': 'input_c',
                            'media_types': ['application/json'],
                            'required': False}
                       ],
                       'json': []}
        definition2 = {'version': '6', 'parameters': parameters2}
        self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
                                                          description="Test Dataset Number 2",
                                                          definition=definition2)
        # Create members
        data_dict = {
            'version': '6',
            'files': {'input_a': [self.src_file_a.id]},
            'json': {}
        }
        data = DataV6(data=data_dict).get_dict()
        self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
                                                                  data_list=[data])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
            'json': {}
        }
        data2 = DataV6(data=data_dict).get_dict()
        self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                  data_list=[data2])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
            'json': {}
        }
        data3 = DataV6(data=data_dict).get_dict()
        self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                  data_list=[data3])[0]
    def test_dataset_details_successful(self):
        """Tests successfully calling the v6/datasets/<dataset_id>/ view.
        """
        url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset details
        result = json.loads(response.content)
        self.assertEqual(result['id'], self.dataset.id)
        self.assertEqual(result['title'], self.dataset.title)
        self.assertEqual(result['description'], self.dataset.description)
        # The serialized definition omits the version key.
        dsdict = DataSetDefinitionV6(definition=self.dataset.definition).get_dict()
        del dsdict['version']
        self.assertDictEqual(result['definition'], dsdict)
        self.assertEqual(len(result['files']), 1)
        self.assertIsNotNone(result['files'][0]['scale_file']["countries"])
        url = '/%s/datasets/%d/' % (self.api, self.dataset2.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset details
        result = json.loads(response.content)
        self.assertEqual(result['id'], self.dataset2.id)
        self.assertEqual(result['title'], self.dataset2.title)
        self.assertEqual(result['description'], self.dataset2.description)
        self.maxDiff = None
        dsdict = DataSetDefinitionV6(definition=self.dataset2.definition).get_dict()
        del dsdict['version']
        # BUG FIX: this previously compared against the raw
        # self.dataset2.definition, leaving the version-stripped dsdict
        # computed above unused; compare against dsdict to mirror the
        # dataset 1 check.
        self.assertDictEqual(result['definition'], dsdict)
        self.assertEqual(len(result['files']), 3)
    def test_add_dataset_member(self):
        """Tests adding a new dataset member"""
        url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
        data_dict = {
            'version': '6',
            'files': {'input_a': [self.src_file_a.id]},
            'json': {}
        }
        json_data = {
            'data': [data_dict],
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result), 1)
    def test_add_filter_dataset_members(self):
        """Tests adding new dataset members based on a filter"""
        url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
        # FILE_VALUE is a placeholder replaced per matching source file.
        template = {
            'version': '6',
            'files': {'input_a': 'FILE_VALUE'},
            'json': {}
        }
        json_data = {
            'data_template': template,
            'source_collection': '12345'
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        # One member per file in the collection.
        result = json.loads(response.content)
        self.assertEqual(len(result), 500)
        json_data = {
            'data_template': template,
            'source_collection': ['12345', '123456']
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result), 1000)
    def test_add_filter_dataset_members_dry_run(self):
        """Tests the dry_run flag: members are reported but not created"""
        url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
        template = {
            'version': '6',
            'files': {'input_a': 'FILE_VALUE'},
            'json': {}
        }
        json_data = {
            'data_template': template,
            'source_collection': '12345',
            'dry_run': True
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        # Dry run returns 200 (nothing created) rather than 201.
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result), 500)
    def test_add_invalid_dataset_member(self):
        """Tests adding an invalid new dataset member"""
        url = '/%s/datasets/%d/' % (self.api, self.dataset.id)
        # input_b is not a parameter of this dataset's definition.
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_a.id]},
            'json': {}
        }
        json_data = {
            'data': [data_dict],
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
class TestDataSetValidationView(APITestCase):
    """Tests POST on the v6/datasets/validation/ endpoint."""
    api = 'v6'
    def setUp(self):
        """Sets up Django and an authenticated staff client."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
    def test_validate_successful(self):
        """Tests successfully validating a new dataset using the v6/datasets/validation API
        """
        url = '/%s/datasets/validation/' % self.api
        json_data = {
            'title': 'Test Dataset',
            'description': 'My Test Dataset',
            'definition': dataset_test_utils.DATASET_DEFINITION,
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # A valid definition yields is_valid with no warnings or errors.
        results = json.loads(response.content)
        self.assertTrue(results['is_valid'])
        self.assertEqual(len(results['warnings']), 0)
        self.assertEqual(len(results['errors']), 0)
    def test_validate_missing_definition(self):
        """Tests that omitting the definition field is a 400 error."""
        url = '/%s/datasets/validation/' % self.api
        json_data = {
            'title': 'Test Dataset',
            'description': 'My Test Dataset',
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        results = json.loads(response.content)
        self.assertEqual(results['detail'], "Missing required parameter: \"definition\"")
    def test_invalid_definition(self):
        """Validates an invalid dataset definition
        """
        url = '/%s/datasets/validation/' % self.api
        # 'parameters' must be an object, not a list -- this should fail
        # validation but still return HTTP 200 with error details.
        json_data = {
            'title': 'Test Dataset',
            'description': 'My Test Dataset',
            'definition': {
                'version': '6',
                'parameters': [
                    {
                        'name': 'global-param',
                    },
                    {
                        'name': 'member-param',
                    },
                ],
            },
        }
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        results = json.loads(response.content)
        self.assertFalse(results['is_valid'])
        self.assertEqual(len(results['errors']), 1)
        self.assertEqual(results['errors'][0]['name'], 'INVALID_DATASET_DEFINITION')
"""Tests the v6/datasets/%d/members/ endpoint"""
class TestDatasetMembersView(APITestCase):
    """Tests GET on the v6/datasets/<dataset_id>/members/ endpoint."""
    api = 'v6'
    def setUp(self):
        """Creates two datasets: dataset (one member) and dataset2 (two
        members), plus the workspace and source files they reference."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # Create workspace
        self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
                                                  last_modified=now())
        # Create files
        self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
                                                     file_size=10, data_type_tags=['type'], file_path='the_path',
                                                     workspace=self.workspace)
        self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        # Create datasets
        parameters = {'version': '6',
                      'files': [
                          {'name': 'input_a',
                           'media_types': ['application/json'],
                           'required': True}
                      ],
                      'json': []}
        definition = {'version': '6', 'parameters': parameters}
        self.dataset = dataset_test_utils.create_dataset(title="Test Dataset 1",
                                                         description="Test Dataset Number 1",
                                                         definition=definition)
        parameters2 = {'version': '6',
                       'files': [
                           {'name': 'input_b',
                            'media_types': ['application/json'],
                            'required': True,
                            'multiple': True},
                           {'name': 'input_c',
                            'media_types': ['application/json'],
                            'required': False}
                       ],
                       'json': []}
        definition2 = {'version': '6', 'parameters': parameters2}
        self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
                                                          description="Test Dataset Number 2",
                                                          definition=definition2)
        # Create members
        data_dict = {
            'version': '6',
            'files': {'input_a': [self.src_file_a.id]},
            'json': {}
        }
        data = DataV6(data=data_dict).get_dict()
        self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
                                                                  data_list=[data])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
            'json': {}
        }
        data2 = DataV6(data=data_dict).get_dict()
        self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                  data_list=[data2])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
            'json': {}
        }
        data3 = DataV6(data=data_dict).get_dict()
        self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                   data_list=[data3])[0]
    def test_dataset_members_successful(self):
        """Tests successfully calling the v6/datasets/members/<id>/ view.
        """
        url = '/%s/datasets/%d/members/' % (self.api, self.dataset.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset members
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        url = '/%s/datasets/%d/members/' % (self.api, self.dataset2.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset members
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 2)
"""Tests the v6/datasets/members/<datasetmember_id> endpoint"""
class TestDatasetMemberDetailsView(APITestCase):
    """Tests GET on the v6/datasets/members/<datasetmember_id>/ endpoint."""
    api = 'v6'
    def setUp(self):
        """Creates two datasets with members (member_a on dataset;
        member_b and member_bc on dataset2) plus their source files."""
        django.setup()
        rest.login_client(self.client, is_staff=True)
        # Create workspace
        self.workspace = Workspace.objects.create(name='Test Workspace', is_active=True, created=now(),
                                                  last_modified=now())
        # Create files
        self.src_file_a = storage_utils.create_file(file_name='input_a.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_b = storage_utils.create_file(file_name='input_b.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_c = storage_utils.create_file(file_name='input_c.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_b2 = storage_utils.create_file(file_name='input_b2.json', file_type='SOURCE', media_type='application/json',
                                                     file_size=10, data_type_tags=['type'], file_path='the_path',
                                                     workspace=self.workspace)
        self.src_file_e = storage_utils.create_file(file_name='input_e.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        self.src_file_f = storage_utils.create_file(file_name='input_f.json', file_type='SOURCE', media_type='application/json',
                                                    file_size=10, data_type_tags=['type'], file_path='the_path',
                                                    workspace=self.workspace)
        # Create datasets
        parameters = {'version': '6',
                      'files': [
                          {'name': 'input_a',
                           'media_types': ['application/json'],
                           'required': True}
                      ],
                      'json': []}
        definition = {'version': '6', 'parameters': parameters}
        self.dataset = dataset_test_utils.create_dataset(title="Test Dataset 1",
                                                         description="Test Dataset Number 1",
                                                         definition=definition)
        parameters2 = {'version': '6',
                       'files': [
                           {'name': 'input_b',
                            'media_types': ['application/json'],
                            'required': True,
                            'multiple': True},
                           {'name': 'input_c',
                            'media_types': ['application/json'],
                            'required': False}
                       ],
                       'json': []}
        definition2 = {'version': '6', 'parameters': parameters2}
        self.dataset2 = dataset_test_utils.create_dataset(title="Test Dataset 2",
                                                          description="Test Dataset Number 2",
                                                          definition=definition2)
        # Create members
        data_dict = {
            'version': '6',
            'files': {'input_a': [self.src_file_a.id]},
            'json': {}
        }
        data = DataV6(data=data_dict).get_dict()
        self.member_a = dataset_test_utils.create_dataset_members(dataset=self.dataset,
                                                                  data_list=[data])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id]},
            'json': {}
        }
        data2 = DataV6(data=data_dict).get_dict()
        self.member_b = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                  data_list=[data2])[0]
        data_dict = {
            'version': '6',
            'files': {'input_b': [self.src_file_b.id, self.src_file_b2.id], 'input_c': [self.src_file_c.id]},
            'json': {}
        }
        data3 = DataV6(data=data_dict).get_dict()
        self.member_bc = dataset_test_utils.create_dataset_members(dataset=self.dataset2,
                                                                   data_list=[data3])[0]
    def test_dataset_member_details_successful(self):
        """Tests successfully calling the v6/datasets/members/<id>/ view.
        """
        url = '/%s/datasets/members/%d/' % (self.api, self.member_a.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset details
        result = json.loads(response.content)
        self.assertEqual(result['id'], self.member_a.id)
        self.assertEqual(result['dataset']['id'], self.dataset.id)
        # Serialized member data omits the version key.
        versionless = copy.deepcopy(self.member_a.data)
        del versionless['version']
        self.assertDictEqual(result['data'], versionless)
        url = '/%s/datasets/members/%d/' % (self.api, self.member_b.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset details
        result = json.loads(response.content)
        self.assertEqual(result['id'], self.member_b.id)
        self.assertEqual(result['dataset']['id'], self.dataset2.id)
        versionless = copy.deepcopy(self.member_b.data)
        del versionless['version']
        self.assertDictEqual(result['data'], versionless)
        url = '/%s/datasets/members/%d/' % (self.api, self.member_bc.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        # Check response for dataset details
        result = json.loads(response.content)
        self.assertEqual(result['id'], self.member_bc.id)
        self.assertEqual(result['dataset']['id'], self.dataset2.id)
        versionless = copy.deepcopy(self.member_bc.data)
        del versionless['version']
        self.assertDictEqual(result['data'], versionless)
1608151 | <reponame>michael-christen/toolbox
#!/usr/bin/env python3
import csv
import json
import sys
import re
# Matches "Recipe: <url>" / "recipe here: <url>" in a video description and
# captures the URL in the named group 'url'.  The URL alternation allows
# letters, digits, common URL punctuation, and percent-encoded bytes.
# BUG FIX: the original alternative '(?:%0-9a-fA-F][0-9a-fA-F])' was missing
# the '[' after '%', so percent-encoded URLs (e.g. '%20') were cut off at
# the '%' instead of being captured in full.
RECIPE_RE = re.compile(
    r'[Rr]ecipe( here)?: (?P<url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
def main():
    """Read playlist JSON from stdin and emit a CSV summary on stdout."""
    write_rows(get_rows())
def write_rows(rows):
    """Write *rows* (dicts) to stdout as CSV, header line first."""
    columns = ['video_id', 'image_url', 'title', 'recipe_url', 'description']
    writer = csv.DictWriter(sys.stdout, fieldnames=columns)
    writer.writeheader()
    for record in rows:
        writer.writerow(record)
def get_recipe_url_from_description(description):
    """Return the first recipe URL found in *description*, or '' if none."""
    match = RECIPE_RE.search(description)
    return match.group('url') if match else ''
def get_rows():
    """Parse YouTube playlist-item JSON from stdin into a list of row dicts.

    Each row carries the video id, the best available thumbnail URL, the
    title, the full description, and any recipe URL extracted from it.
    Raises KeyError if an item has no thumbnail at any known resolution.
    """
    items = json.load(sys.stdin)
    rows = []
    for item in items:
        video_id = item['contentDetails']['videoId']
        # Try thumbnail resolutions best-first; the first one present wins.
        image_url = ''
        for res in ['maxres', 'high', 'standard', 'medium', 'default']:
            try:
                image_url = item['snippet']['thumbnails'][res]['url']
            except KeyError:
                continue
            else:
                break
        if not image_url:
            raise KeyError(f'No thumbnail found in {item}')
        snippet = item['snippet']
        description = snippet['description']
        rows.append({
            'video_id': video_id,
            'image_url': image_url,
            'title': snippet['title'],
            'description': description,
            'recipe_url': get_recipe_url_from_description(description),
        })
    return rows
# Allow use both as a standalone script and as an importable module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
116219 | #!coding:utf-8
from sys import path as sys_path
from os import path as os_path
import subprocess
import pytest
sys_path.append(os_path.abspath(os_path.join(os_path.dirname(__file__), "../")))
import autoargparse
@pytest.mark.example
@pytest.mark.parametrize(
    "input,expected",
    [
        ("python ./example/calculator.py 1", ["!!!"]),
        ("python ./example/calculator.py 2", ["Great !!!"]),
        ("python ./example/calculator.py 1 -m 2 3", ["!!!", "6"]),
        ("python ./example/calculator.py 1 -m 2 3 -v", ["!!!", "a * b = 6"]),
        (
            "python ./example/calculator.py 1 -m 2 3 -vv",
            ["!!!", "func = mul args = 2 3 2", "a * b = 6"],
        ),
        ("python ./example/calculator.py 1 -s 1 2 3 4", ["!!!", "10"]),
        ("python ./example/calculator.py 1 -s 1 2 3 4 --max", ["!!!", "4"]),
        ("python ./example/calculator.py 1 -s 1 2 3 4 --negative", ["!!!", "-10"]),
    ],
)
def test_example(input, expected):
    """Run the example CLI and compare its stdout lines with *expected*.

    subprocess.run() replaces the original Popen/communicate pair: it always
    waits for the child and captures both streams, so the process cannot be
    leaked if an exception occurs between spawn and communicate.
    """
    result = subprocess.run(
        input.split(),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert result.stdout.decode('utf-8').splitlines() == expected
| StarcoderdataPython |
1698957 | from torch.nn import functional as F
from torch import nn
import torch
class _Scorer(nn.Module):
def __init__(self, n_classes, soft=False, apply_softmax=True, skip_first_class=True, smooth=1e-7):
super(_Scorer, self).__init__()
self.register_buffer('eye', torch.eye(n_classes))
self.soft = soft
self.n_classes = n_classes
self.apply_softmax = apply_softmax
self.skip_first_class = skip_first_class
self.smooth = smooth
def one_hot(self, x):
# squeeze channels and convert to one hot, then move classes to second dimension
x = self.eye[x.long()].permute(0, 3, 1, 2)
if self.skip_first_class:
x = x[:, 1-self.n_classes:, :, :] # skip background (class 0)
return x
def transform_inputs(self, inputs: torch.Tensor, truth: torch.Tensor):
truth = self.one_hot(truth)
if self.apply_softmax:
inputs = F.softmax(inputs, dim=1)
if not self.soft:
inputs = torch.argmax(inputs, dim=1)
inputs = self.one_hot(inputs)
elif self.skip_first_class:
inputs = inputs[:, 1:, :, :] # skip background
return inputs, truth
class DiceScore(_Scorer):
    r"""Sørensen–Dice Score

    The Dice coefficient, or Dice-Sørensen coefficient, is a common metric for
    pixel segmentation that can also be modified to act as a loss function:

    .. math::
        DSC(X, Y) = \frac{2 \left| X + Y \right|}{\left| X \right| + \left| Y \right|}

    Inspired by https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
    """

    def __init__(self, n_classes, soft=False, apply_softmax=True, skip_first_class=True, smooth=1e-7):
        super(DiceScore, self).__init__(n_classes, soft, apply_softmax, skip_first_class, smooth)

    def forward(self, inputs: torch.Tensor, truth: torch.Tensor):
        preds, target = self.transform_inputs(inputs, truth)
        # Reduce over batch and spatial dims; classes stay separate until the
        # final mean, giving an unweighted average of per-class Dice scores.
        reduce_dims = (0, 2, 3)
        overlap = torch.sum(preds * target, dim=reduce_dims)
        denom = torch.sum(preds ** 2 + target ** 2, dim=reduce_dims)
        per_class_dice = 2. * overlap / (denom + self.smooth)
        return per_class_dice.mean()
class TverskyIndex(_Scorer):
    r"""Tversky Index

    The Tversky Index (TI) is an asymmetric similarity measure that is a
    generalisation of the dice coefficient and the Jaccard index.

    .. math::
        TI = \frac{TP}{TP + \alpha FN + \beta FP}
    """

    def __init__(self, n_classes, alpha=0.5, beta=0.5, soft=False, apply_softmax=True, skip_first_class=True, smooth=1e-7):
        super(TverskyIndex, self).__init__(n_classes, soft, apply_softmax, skip_first_class, smooth)
        self.alpha = alpha
        self.beta = beta

    def forward(self, inputs: torch.Tensor, truth: torch.Tensor):
        preds, target = self.transform_inputs(inputs, truth)
        reduce_dims = (0, 2, 3)
        tp = torch.sum(preds * target, dim=reduce_dims)
        fp = torch.sum(preds * (1 - target), dim=reduce_dims)
        fn = torch.sum((1 - preds) * target, dim=reduce_dims)
        # NOTE(review): the docstring formula applies alpha to FN, but the
        # computation (preserved from the original) applies alpha to FP —
        # confirm which convention is intended.
        tversky = tp / (tp + (self.alpha * fp) + (self.beta * fn) + self.smooth)
        return tversky.mean()
class JaccardIndex(_Scorer):
    r"""The Jaccard index, also known as the Jaccard similarity coefficient or Intersection Over Union

    .. math::
        J(A,B) = \frac{|A \cap B|}{|A \cup B|} = \frac{|A \cap B|}{|A| + |B| - |A \cap B|}.
    """

    def __init__(self, n_classes, soft=False, apply_softmax=True, skip_first_class=True, smooth=1e-7):
        super(JaccardIndex, self).__init__(n_classes, soft, apply_softmax, skip_first_class, smooth)

    def forward(self, inputs: torch.Tensor, truth: torch.Tensor):
        preds, target = self.transform_inputs(inputs, truth)
        reduce_dims = (0, 2, 3)
        overlap = torch.sum(preds * target, dim=reduce_dims)
        # |A| + |B| - |A ∩ B|
        union = torch.sum(preds + target, dim=reduce_dims) - overlap
        return (overlap / (union + self.smooth)).mean()
class Metrics(nn.Module):
    """Accumulates per-batch loss and segmentation scores in fixed-size buffers.

    One slot per batch index: call one of the ``collect*`` methods every
    batch, then read the epoch averages from the properties or get_metrics().
    """

    def __init__(self, buffer_size, num_classes, loss, device=None):
        super(Metrics, self).__init__()
        # One value per batch; buffers so they move with the module's device.
        self.register_buffer("_losses", torch.zeros(buffer_size, dtype=torch.float32, device=device))
        self.register_buffer("_scores_iou", torch.zeros(buffer_size, dtype=torch.float32, device=device))
        self.register_buffer("_scores_dice", torch.zeros(buffer_size, dtype=torch.float32, device=device))
        self.register_buffer("_scores_soft_dice", torch.zeros(buffer_size, dtype=torch.float32, device=device))
        self._loss = loss
        self._dice = DiceScore(num_classes)
        self._soft_dice = DiceScore(num_classes, soft=True)
        self._iou = JaccardIndex(num_classes)

    def collect_metrics_only(self, batch_index, net_predictions, segmentation_classes):
        # detach() so stored scores do not keep the autograd graph alive.
        self._scores_iou[batch_index] = self._iou(net_predictions, segmentation_classes).detach()
        self._scores_dice[batch_index] = self._dice(net_predictions, segmentation_classes).detach()
        self._scores_soft_dice[batch_index] = self._soft_dice(net_predictions, segmentation_classes).detach()

    def collect_and_get_loss(self, batch_index, net_predictions, segmentation_classes):
        """Record all metrics and return the differentiable loss tensor."""
        self.collect_metrics_only(batch_index, net_predictions, segmentation_classes)
        loss_value = self._loss(net_predictions, segmentation_classes)
        self._losses[batch_index] = loss_value.detach()
        return loss_value

    def collect(self, batch_index, net_predictions, segmentation_classes):
        """Record all metrics, discarding the loss value."""
        self.collect_metrics_only(batch_index, net_predictions, segmentation_classes)
        self._losses[batch_index] = self._loss(net_predictions, segmentation_classes).detach()

    def get_loss(self, net_predictions, segmentation_classes):
        return self._loss(net_predictions, segmentation_classes)

    @property
    def loss(self):
        return self._losses.mean().item()

    @property
    def iou(self):
        return self._scores_iou.mean().item()

    @property
    def dice(self):
        return self._scores_dice.mean().item()

    @property
    def soft_dice(self):
        return self._scores_soft_dice.mean().item()

    def get_metrics(self):
        """Return the (loss, iou, dice, soft_dice) epoch averages as floats."""
        return self.loss, self.iou, self.dice, self.soft_dice
# Ad-hoc smoke test: builds a tiny 2x2 prediction / ground-truth pair and
# prints the intermediate one-hot tensors for manual inspection.
if __name__ == '__main__':
    from torch import Tensor
    dc = DiceScore(3)
    # Ground-truth label map: classes 0..2 on a 2x2 grid (batch of 1).
    gt = Tensor([[[1, 0], [0, 2]]])
    # Per-class probabilities, shape (1, 3, 2, 2).
    pred = Tensor([[
        [[.1, .8],
        [.8, .1]],
        [[.8, .1],
        [.1, .1]],
        [[.1, .1],
        [.1, .8]]
    ]])
    print(pred)
    # Collapse probabilities to a hard label map, then one-hot it again.
    pred = torch.argmax(pred, dim=1)
    pred = torch.eye(3)[pred.long()]
    pred = pred.permute(0, 3, 1, 2) # move classes to second dimension
    print(pred)
    # print(dc(pred, gt))
| StarcoderdataPython |
18975 | import collections
import functools
import json
import logging
import multiprocessing
import os
import time
from collections import OrderedDict
from queue import PriorityQueue, Empty
from typing import List, Tuple, Any
from itertools import cycle, islice
import minerl.herobraine.env_spec
from minerl.herobraine.hero import spaces
import cv2
import os
import numpy as np
import gym
logger = logging.getLogger(__name__)
from minerl.data.version import assert_version, assert_prefix
import copy
import tqdm
import queue
import minerl.data.util
from minerl.data.util import forever, minibatch_gen
import concurrent
from IPython import embed
# On non-Windows platforms the WindowsError builtin does not exist, so define
# a stand-in subclass of OSError that the pipeline's except clauses can
# reference without a NameError.
if os.name != "nt":
    class WindowsError(OSError):
        pass
def tree_slice(tree, slc):
    """Apply *slc* to every leaf of a (possibly nested) OrderedDict tree.

    Leaves (anything that is not an OrderedDict) are indexed directly with
    *slc*; the dict structure and key order are preserved.
    """
    if not isinstance(tree, OrderedDict):
        return tree[slc]
    return OrderedDict((key, tree_slice(value, slc)) for key, value in tree.items())
class DataPipeline:
    """
    Creates a data pipeline object used to iterate through the MineRL-v0 dataset.

    Wraps a data directory of recorded trajectories (video + npz + metadata)
    and exposes per-trajectory iteration (load_data) and batched sequence
    sampling (batch_iter) in the action/observation spaces of a registered
    MineRL gym environment.
    """
    def __init__(self,
                 data_directory: os.path,
                 environment: str,
                 num_workers: int,
                 worker_batch_size: int,
                 min_size_to_dequeue: int,
                 random_seed=42):
        """
        Sets up a tensorflow dataset to load videos from a given data directory.
        :param data_directory: root directory containing recorded trajectories
        :param environment: registered gym environment id (e.g. a MineRL env)
        :param num_workers: size of the multiprocessing pool
        :param worker_batch_size: number of samples per worker batch
        :param min_size_to_dequeue: minimum queue fill before dequeueing
        :param random_seed: seed stored for shuffling behaviour
        """
        self.seed = random_seed
        self.data_dir = data_directory
        self.environment = environment
        self.number_of_workers = num_workers
        self.worker_batch_size = worker_batch_size
        self.size_to_dequeue = min_size_to_dequeue
        # NOTE(review): this pool is created eagerly but no use of it is
        # visible in this class — confirm it is still needed.
        self.processing_pool = multiprocessing.Pool(self.number_of_workers)
        # Pull the env's spec and spaces out of the gym registration kwargs.
        self._env_spec = gym.envs.registration.spec(self.environment)._kwargs['env_spec']
        self._action_space = gym.envs.registration.spec(self.environment)._kwargs['action_space']
        self._observation_space = gym.envs.registration.spec(self.environment)._kwargs['observation_space']
    @property
    def spec(self) -> minerl.herobraine.env_spec.EnvSpec:
        # EnvSpec describing the environment this data was recorded for.
        return self._env_spec
    @property
    def action_space(self):
        """
        Returns: action space of current MineRL environment
        """
        return self._action_space
    @property
    def observation_space(self):
        """
        Returns: observation space of current MineRL environment
        """
        return self._observation_space
    # return result
    def load_data(self, stream_name: str, skip_interval=0, include_metadata=False, video_name='recording.mp4'):
        """Iterates over an individual trajectory named stream_name.

        Args:
            stream_name (str): The stream name desired to be iterated through.
            skip_interval (int, optional): How many slices should be skipped. Defaults to 0.
                NOTE(review): this value is forwarded to _load_data_pyfunc,
                which does not visibly use it — confirm it has any effect.
            include_metadata (bool, optional): Whether or not metadata about the loaded trajectory should be included. Defaults to False.

        Yields:
            A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal).
            These tuples are yielded in order of the episode.
        """
        # Accept either a bare stream name (resolved under data_dir) or a path.
        if '/' in stream_name:
            file_dir = stream_name
        else:
            file_dir = os.path.join(self.data_dir, stream_name)
        if DataPipeline._is_blacklisted(stream_name):
            raise RuntimeError("This stream is corrupted (and will be removed in the next version of the data!)")
        # max_seq_len=-1 and data_queue=None make the loader return the whole
        # trajectory as arrays instead of enqueueing chunks.
        seq = DataPipeline._load_data_pyfunc(file_dir, -1, None, self.environment, skip_interval=skip_interval,
                                             include_metadata=include_metadata, video_name=video_name)
        if include_metadata:
            observation_seq, action_seq, reward_seq, next_observation_seq, done_seq, meta = seq
        else:
            observation_seq, action_seq, reward_seq, next_observation_seq, done_seq = seq
        # make a copy
        gym_spec = gym.envs.registration.spec(self.environment)
        target_space = copy.deepcopy(gym_spec._kwargs['observation_space'])
        # Sort space keys alphabetically but force 'pov' to the end (as 'z').
        # NOTE(review): `is not 'pov'` compares identity with a str literal
        # (SyntaxWarning on modern Python); it works only by interning —
        # should be `!=`.
        x = list(target_space.spaces.items())
        target_space.spaces = collections.OrderedDict(
            sorted(x, key=lambda x:
            x[0] if x[0] is not 'pov' else 'z')
        )
        # Now we just need to slice the dict.
        for idx in tqdm.tqdm(range(len(reward_seq))):
            # Wrap in dict
            action_dict = tree_slice(action_seq, idx)
            observation_dict = tree_slice(observation_seq, idx)
            next_observation_dict = tree_slice(next_observation_seq, idx)
            yield_list = [observation_dict, action_dict, reward_seq[idx], next_observation_dict, done_seq[idx]]
            # Parses as: yield (yield_list + [meta]) if include_metadata else yield_list
            yield yield_list + [meta] if include_metadata else yield_list
    def get_trajectory_names(self):
        """Gets all the trajectory names

        Returns:
            A list of experiment (trajectory directory) basenames.
        """
        return [os.path.basename(x) for x in self._get_all_valid_recordings(self.data_dir)]
    ############################
    #     PRIVATE METHODS      #
    ############################
    @staticmethod
    def read_frame(cap):
        # Read one frame from an open cv2.VideoCapture, converting BGR->RGB
        # in place and clamping to uint8. Returns (ret, frame); frame may be
        # None when ret is False (end of stream).
        try:
            ret, frame = cap.read()
            if ret:
                cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
                frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
            return ret, frame
        except Exception as err:
            # NOTE(review): logger.error with a positional non-format arg
            # never renders `err`; should be logger.error("...: %s", err).
            logger.error("error reading capture device:", err)
            raise err
    @staticmethod
    def _roundrobin(*iterables):
        "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
        # Recipe credited to <NAME>
        # NOTE(review): `iter(it).next` is Python 2 syntax; on Python 3 this
        # raises AttributeError (should be `iter(it).__next__`). This helper
        # appears unused — confirm before relying on it.
        pending = len(iterables)
        nexts = cycle(iter(it).next for it in iterables)
        while pending:
            try:
                for next in nexts:
                    yield next()
            except StopIteration:
                pending -= 1
                nexts = cycle(islice(nexts, pending))
    # Todo: Make data pipeline split files per push.
    @staticmethod
    def _load_data_pyfunc(file_dir: str, max_seq_len: int, data_queue, env_str="", skip_interval=0,
                          include_metadata=False, video_name='recording.mp4'):
        """
        Enqueueing mechanism for loading a trajectory from a file onto the data_queue
        :param file_dir: file path to data directory
        :param skip_interval: Number of time steps to skip between each sample
        :param max_seq_len: Number of time steps in each enqueued batch, or -1
            to load the whole trajectory at once
        :param data_queue: multiprocessing data queue, or None to return streams directly
        :param include_metadata: whether or not to return an additional tuple containing metadata
        :return: the batch list when data_queue is None, otherwise None
        """
        logger.debug("Loading from file {}".format(file_dir))
        video_path = str(os.path.join(file_dir, video_name))
        numpy_path = str(os.path.join(file_dir, 'rendered.npz'))
        meta_path = str(os.path.join(file_dir, 'metadata.json'))
        try:
            # Start video decompression
            cap = cv2.VideoCapture(video_path)
            # Load numpy file
            state = np.load(numpy_path, allow_pickle=True)
            # Load metadata file
            with open(meta_path) as file:
                meta = json.load(file)
                if 'stream_name' not in meta:
                    meta['stream_name'] = file_dir
            # Split npz keys into flattened action and observation streams.
            action_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('action$')])
            reward_vec = state['reward']
            info_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('observation$')])
            # Recursively sorts nested dicts
            def recursive_sort(dct):
                for key in list(dct.keys()):
                    if isinstance(dct[key], OrderedDict):
                        dct[key] = recursive_sort(dct[key])
                        dct[key] = OrderedDict(sorted(dct[key].items()))
                return dct
            # Rebuild nested dicts from '$'-joined flat keys, e.g.
            # 'action$camera' -> {'action': {'camera': ...}}.
            def unflatten(dct, sep='$'):
                out_dict = OrderedDict({})
                for k, v in dct.items():
                    keys = k.split(sep)
                    cur_dict = out_dict
                    for key in keys[:-1]:
                        if key not in cur_dict:
                            cur_dict[key] = OrderedDict({})
                        cur_dict = cur_dict[key]
                    cur_dict[keys[-1]] = v
                # Sort dict recursively
                recursive_sort(out_dict)
                return out_dict
            # There is no action or reward for the terminal state of an episode.
            # Hence in Publish.py we shorten the action and reward vector to reflect this.
            # We know FOR SURE that the last video frame corresponds to the last state (from Universal.json).
            num_states = len(reward_vec) + 1
            max_frame_num = meta['true_video_frame_count']
            frames = []
            frame_num, stop_idx = 0, 0
            # Advance video capture past first i-frame to start of experiment
            # NOTE(review): this re-opens the capture; the handle opened above
            # is never released (minor resource leak).
            cap = cv2.VideoCapture(video_path)
            # for _ in range(max_frame_num - num_states):
            #     ret, _ = DataPipeline.read_frame(cap)
            #     frame_num += 1
            #     if not ret:
            #         raise RuntimeError()
            # Rendered Frames
            # Loop through the video and construct frames
            # of observations to be sent via the multiprocessing queue
            # in chunks of worker_batch_size to the batch_iter loop.
            while True:
                ret = True
                start_idx = stop_idx
                # Collect up to worker_batch_size number of frames
                try:
                    # Go until max_seq_len +1 for S_t, A_t, -> R_t, S_{t+1}, D_{t+1}
                    while ret and frame_num < max_frame_num and (len(frames) < max_seq_len + 1 or max_seq_len == -1):
                        ret, frame = DataPipeline.read_frame(cap)
                        frames.append(frame)
                        frame_num += 1
                except Exception as err:
                    logger.error("error reading capture device:", err)
                    raise err
                if len(frames) <= 1:
                    break
                # The final read past the end yields no frame; duplicate the
                # last real frame so the terminal state has an observation.
                if frame_num == max_frame_num:
                    frames[-1] = frames[-2]
                # Next sarsd pair index
                stop_idx = start_idx + len(frames) - 1
                # print('Num frames in batch:', stop_idx - start_idx)
                # Load non-image data from npz
                current_observation_data = OrderedDict()
                action_data = OrderedDict()
                next_observation_data = OrderedDict()
                try:
                    for key in list(info_dict.keys()) + ['observation$pov']:
                        if 'pov' in key:
                            current_observation_data[key] = np.asanyarray(frames[:-1])
                            next_observation_data[key] = np.asanyarray(frames[1:])
                        else:
                            current_observation_data[key] = np.asanyarray(info_dict[key][start_idx:stop_idx])
                            next_observation_data[key] = np.asanyarray(info_dict[key][start_idx + 1:stop_idx + 1])
                    # We are getting (S_t, A_t -> R_t), S_{t+1}, D_{t+1} so there are less actions and rewards
                    for key in action_dict:
                        action_data[key] = np.asanyarray(action_dict[key][start_idx: stop_idx])
                    reward_data = np.asanyarray(reward_vec[start_idx:stop_idx], dtype=np.float32)
                    done_data = [False for _ in range(len(reward_data))]
                    if frame_num == max_frame_num:
                        done_data[-1] = True
                except Exception as err:
                    logger.error("error drawing batch from npz file:", err)
                    raise err
                # unflatten these dictionaries.
                current_observation_data = unflatten(current_observation_data)['observation']
                action_data = unflatten(action_data)['action']
                next_observation_data = unflatten(next_observation_data)['observation']
                # NOTE(review): np.bool is removed in NumPy >= 1.24; this
                # should become plain `bool` when dependencies are updated.
                batches = [current_observation_data, action_data, reward_data, next_observation_data,
                           np.array(done_data, dtype=np.bool)]
                if include_metadata:
                    batches += [meta]
                if data_queue is None:
                    return batches
                else:
                    data_queue.put(batches)
                    logger.debug("Enqueued from file {}".format(file_dir))
                if not ret:
                    break
                else:
                    # Keep the last frame so consecutive chunks overlap by one
                    # state (S_{t+1} of one chunk is S_t of the next).
                    frames = [frames[-1]]
            logger.error("Finished")
            return None
        except WindowsError as e:
            logger.debug("Caught windows error {} - this is expected when closing the data pool".format(e))
            return None
        except FileNotFoundError as e:
            print("File not found!")
            raise e
        except Exception as e:
            logger.error("Exception caught on file \"{}\" by a worker of the data pipeline.".format(file_dir))
            logger.error(repr(e))
            return None
    def batch_iter(self,
                   batch_size: int,
                   seq_len: int,
                   num_epochs: int = -1,
                   preload_buffer_size: int = 2,
                   seed: int = None,
                   include_metadata: bool = False):
        """Returns batches of sequences length SEQ_LEN of the data of size BATCH_SIZE.

        The iterator produces batches sequentially. If an element of a batch reaches the
        end of its trajectory, a new trajectory takes its slot.

        Args:
            batch_size (int): The batch size.
            seq_len (int): The size of sequences to produce.
            num_epochs (int, optional): The number of epochs to iterate over the data. Defaults to -1 (forever).
            preload_buffer_size (int, optional): Increase to IMPROVE PERFORMANCE. The data iterator uses a queue to prevent blocking, the queue size is the number of trajectories to load into the buffer. Adjust based on memory constraints. Defaults to 2.
            seed (int, optional): NOT IMPLEMENTED. Defaults to None.
            include_metadata (bool, optional): NOT USED here — metadata is not attached to batches. Defaults to False.

        Returns:
            Generator: A generator that yields (sarsd) batches
        """
        # Todo: Not implemented/
        for epoch in (range(num_epochs) if num_epochs > 0 else forever()):
            trajectory_queue = queue.Queue(maxsize=preload_buffer_size)
            # Drains one loaded trajectory per scheduled job from the queue,
            # re-shaping each into the dict form minibatch_gen expects.
            def traj_iter():
                for _ in jobs:
                    s, a, r, sp1, d = trajectory_queue.get()
                    yield dict(
                        obs=s,
                        act=a,
                        reward=r,
                        next_obs=sp1,
                        done=d
                    )
            # Each job tuple is (file_dir, max_seq_len=-1, data_queue=None)
            # for the module-level `job` shim.
            jobs = [(f, -1, None) for f in self._get_all_valid_recordings(self.data_dir)]
            np.random.shuffle(jobs)
            trajectory_loader = minerl.data.util.OrderedJobStreamer(
                job,
                jobs,
                trajectory_queue,
                # executor=concurrent.futures.ThreadPoolExecutor,
                max_workers=preload_buffer_size
            )
            trajectory_loader.start()
            for seg_batch in minibatch_gen(traj_iter(), batch_size=batch_size, nsteps=seq_len):
                yield seg_batch['obs'], seg_batch['act'], seg_batch['reward'], seg_batch['next_obs'], seg_batch['done']
            trajectory_loader.shutdown()
    @staticmethod
    def _is_blacklisted(path):
        # Known-corrupted streams that should never be loaded.
        for p in [
            'tempting_capers_shapeshifter-14'
        ]:
            if p in path:
                return True
        return False
    @staticmethod
    def _get_all_valid_recordings(path):
        # Recursively collect directories that contain both a video (.mp4)
        # and a rendered data file (.npz); result order is shuffled.
        directoryList = []
        # return nothing if path is a file
        if os.path.isfile(path):
            return []
        # Skip this file.
        if DataPipeline._is_blacklisted(path):
            return []
        # add dir to directory list if it contains both .mp4 and .npz files
        if len([f for f in os.listdir(path) if f.endswith('.mp4')]) > 0:
            if len([f for f in os.listdir(path) if f.endswith('.npz')]) > 0:
                assert_prefix(path)
                directoryList.append(path)
        for d in os.listdir(path):
            new_path = os.path.join(path, d)
            if os.path.isdir(new_path):
                directoryList += DataPipeline._get_all_valid_recordings(new_path)
        directoryList = np.array(directoryList)
        np.random.shuffle(directoryList)
        return directoryList.tolist()
    ###
    # DEPRECATED API
    ###
    def seq_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
        """DEPRECATED METHOD FOR SAMPLING DATA FROM THE MINERL DATASET.

        This function is now :code:`DataPipeline.batch_iter()`
        """
        # NOTE(review): DeprecationWarning is raised as an exception here
        # (warnings are exception subclasses), deliberately hard-failing
        # callers of the removed API.
        raise DeprecationWarning(
            "The `DataPipeline.seq_iter` method is deprecated! Please use DataPipeline.batch_iter()."
            "\nNOTE: The new method `DataPipeline.batch_iter` has a different return signature! "
            "\n\t Please see how to use it @ http://www.minerl.io/docs/tutorials/data_sampling.html")
    def sarsd_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
        """
        DEPRECATED. Returns a generator for iterating through (state, action, reward, next_state, is_terminal)
        tuples in the dataset.

        Loads num_workers files at once as defined in minerl.data.make() and return up to
        max_sequence_len consecutive samples wrapped in a dict observation space

        Args:
            num_epochs (int, optional): number of epochs to iterate over or -1
                to loop forever. Defaults to -1
            max_sequence_len (int, optional): maximum number of consecutive samples - may be less. Defaults to 32
            seed (int, optional): seed for random directory walk - note, specifying seed as well as a finite num_epochs
                will cause the ordering of examples to be the same after every call to seq_iter
            queue_size (int, optional): maximum number of elements to buffer at a time, each worker may hold an
                additional item while waiting to enqueue. Defaults to 16*self.number_of_workers or 2*
                self.number_of_workers if max_sequence_len == -1
            include_metadata (bool, optional): adds an additional member to the tuple containing metadata about the
                stream the data was loaded from. Defaults to False

        Yields:
            A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal, (metadata)).
            Each element is in the format of the environment action/state/reward space and contains as many
            samples are requested.
        """
        raise DeprecationWarning(
            "The `DataPipeline.sarsd_iter` method is deprecated! Please use DataPipeline.batch_iter().")
def job(arg):
    # Module-level shim so the loader target is picklable by worker pools:
    # unpacks an (file_dir, max_seq_len, data_queue) tuple into the loader.
    return DataPipeline._load_data_pyfunc(*arg)
| StarcoderdataPython |
108133 | """
Module containing flask init and several
"""
from flask import request, render_template, redirect, url_for
import hashlib
import os
from application.factories import make_flask_app
# from celery_app import celery_app
environment = os.environ.get('ENVIRONMENT')
app = make_flask_app("log-parser", environment)
def get_hash(file):
    """
    Computes the MD5 hash of a file on disk, used to detect duplicate uploads.
    :param file: path to the uploaded file
    :return: hex digest string
    """
    digest = hashlib.md5()
    with open(file, 'rb') as handle:
        # Stream in fixed-size chunks so large files never load fully into memory.
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
@app.route('/', methods=['POST', 'GET'])
def main():
    """
    Renders the start page.

    The POST branch that persisted a submitted URL was entirely commented
    out; that dead code has been removed so the view simply serves the
    landing template. Re-introduce URL persistence deliberately if the
    feature returns (the route still accepts POST for compatibility).
    :return: rendered start page
    """
    return render_template('main.html')
@app.route('/logs', methods=['GET', 'POST'])
def upload_log():
    """
    GET: renders the upload form.
    POST: saves the uploaded log file into UPLOAD_FOLDER and redirects back.
    :return: rendered form (GET / empty POST) or a redirect response
    """
    if request.method == 'POST':
        file = request.files['upload_log']
        if file:
            # Never trust a client-supplied filename: strip any directory
            # components so an attacker cannot write outside UPLOAD_FOLDER
            # (path traversal via names like '../../etc/passwd').
            filename = os.path.basename(file.filename)
            path_name = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            # Both original branches saved the file identically; the
            # extension check is kept only to mark archives for extraction.
            file.save(path_name)
            ext = os.path.splitext(filename)[-1]
            if ext in app.config['COMPRESSION_EXTENSIONS']:
                # Compressed upload: async extraction hook goes here.
                # celery_app.extract.delay(path_name)
                pass
            return redirect(url_for('upload_log'))
    return render_template('logs_form.html')
@app.route('/logs/files')
def uploaded_file():
    """
    Lists the files currently present in the upload folder.

    BUG FIX: the original only print()-ed the names to the server console
    and implicitly returned None, which makes Flask raise
    "view function did not return a valid response" (HTTP 500).
    :return: plain-text listing, one filename per line
    """
    names = sorted(os.listdir(app.config['UPLOAD_FOLDER']))
    return '\n'.join(names)
@app.route('/fin')
def financial_controller():
    # Static page; all content lives in the template.
    return render_template('financial_controller.html')
| StarcoderdataPython |
89623 | <reponame>basicpail/core
"""Test different accessory types: Triggers (Programmable Switches)."""
from unittest.mock import MagicMock
from homeassistant.components.homekit.type_triggers import DeviceTriggerAccessory
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, async_get_device_automations
async def test_programmable_switch_button_fires_on_trigger(
    hass, hk_driver, events, demo_cleanup, device_reg, entity_reg
):
    """Test that DeviceTriggerAccessory fires the programmable switch event on trigger."""
    # Replace the HAP driver's publish with a mock so we can count events.
    hk_driver.publish = MagicMock()
    # Set up the demo integration so a real light entity (and its device
    # registry entry) exists to attach triggers to.
    demo_config_entry = MockConfigEntry(domain="domain")
    demo_config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, "demo", {"demo": {}})
    await hass.async_block_till_done()
    hass.states.async_set("light.ceiling_lights", STATE_OFF)
    await hass.async_block_till_done()
    entry = entity_reg.async_get("light.ceiling_lights")
    assert entry is not None
    device_id = entry.device_id
    # Build the accessory from the device's available triggers.
    device_triggers = await async_get_device_automations(hass, "trigger", device_id)
    acc = DeviceTriggerAccessory(
        hass,
        hk_driver,
        "DeviceTriggerAccessory",
        None,
        1,
        None,
        device_id=device_id,
        device_triggers=device_triggers,
    )
    await acc.run()
    await hass.async_block_till_done()
    # Device-backed accessory: bound to a device, not a single entity.
    assert acc.entity_id is None
    assert acc.device_id is device_id
    assert acc.available is True
    # Turning the light on should fire exactly one programmable-switch event.
    hk_driver.publish.reset_mock()
    hass.states.async_set("light.ceiling_lights", STATE_ON)
    await hass.async_block_till_done()
    hk_driver.publish.assert_called_once()
    # ...and turning it off again should fire exactly one more.
    hk_driver.publish.reset_mock()
    hass.states.async_set("light.ceiling_lights", STATE_OFF)
    await hass.async_block_till_done()
    hk_driver.publish.assert_called_once()
    await acc.stop()
| StarcoderdataPython |
3254946 | <gh_stars>0
from .boids import Flock, Boid
from matplotlib import pyplot as plt
from matplotlib import animation
from argparse import ArgumentParser
import yaml
import os
def parse_args():
    """Parse command-line options and return the boid-simulation config dict."""
    parser = ArgumentParser(description="Runs the program.")
    parser.add_argument('--file', type=str,
                        help='YAML file to load data from')
    parser.add_argument('--size', default=50, type=int,
                        help='Number of boids in flock.')
    parser.add_argument('--dist', default=100, type=float,
                        help='Distance over which boids try to match speed.')
    parser.add_argument('--strength', default=0.125, type=float,
                        help='How strongly boids try and match speed.')
    parser.add_argument('--avoid_dist', default=10, type=float,
                        help='Distance in which boids avoid each other.')
    parser.add_argument('--mid_strength', default=0.01, type=float,
                        help='How strongly boids try and flock together.')
    options = parser.parse_args()
    # Fixed simulation geometry: two opposite corners each for the initial
    # position box, the initial velocity box, and the plotting window.
    return {
        "input_file": options.file,
        "number_of_boids": options.size,
        "group_flying_dist": options.dist,
        "group_flying_strength": options.strength,
        "alert_distance": options.avoid_dist,
        "mid_strength": options.mid_strength,
        "initial_position": [[-450, 50], [300, 600]],
        "initial_velocity": [[0, 10], [-20, 20]],
        "axis_limits": [[-500, 1500], [-500, 1500]],
    }
def load_config(config_filename):
    """Load a YAML config file and return its contents as a dict.

    Records the originating filename under 'input_file' so downstream code
    can report where the settings came from.
    """
    # safe_load avoids arbitrary-object construction: yaml.load() without an
    # explicit Loader is unsafe on untrusted input and has been deprecated
    # since PyYAML 5.1. The context manager also closes the file even if
    # parsing raises (the original leaked the handle on error).
    with open(config_filename, 'r') as cfgfile:
        cfgdata = yaml.safe_load(cfgfile)
    cfgdata['input_file'] = config_filename
    return cfgdata
def run_boids(cfgdata):
    """Build a Flock from *cfgdata* and run the animated matplotlib display."""
    number_of_boids = cfgdata["number_of_boids"]
    group_flying_dist = cfgdata["group_flying_dist"]
    group_flying_strength = cfgdata["group_flying_strength"]
    alert_distance = cfgdata["alert_distance"]
    mid_strength = cfgdata["mid_strength"]
    initial_position = cfgdata["initial_position"]
    initial_velocity = cfgdata["initial_velocity"]
    axis_limits = cfgdata["axis_limits"]
    flock = Flock(number_of_boids,
                  group_flying_dist,
                  group_flying_strength,
                  alert_distance,
                  mid_strength,
                  initial_position,
                  initial_velocity)
    figure = plt.figure()
    # BUG FIX: the original referenced an undefined name 'plot_axis_limits'
    # here, which raised NameError at runtime; the local variable read from
    # the config above is 'axis_limits'.
    axes = plt.axes(xlim=(axis_limits[0][0],
                          axis_limits[0][1]),
                    ylim=(axis_limits[1][0],
                          axis_limits[1][1]))
    x_vals = [boid.position[0] for boid in flock.boids]
    y_vals = [boid.position[1] for boid in flock.boids]
    # NOTE(review): this list of position objects is captured once; the
    # animation only shows movement if Boid mutates .position in place
    # (rather than rebinding it) — confirm against the Boid implementation.
    boid_scatter = [boid.position for boid in flock.boids]
    scatter = axes.scatter(x_vals, y_vals)

    def animate(frame):
        # Advance the simulation one step and refresh the scatter points.
        flock.new_boids()
        scatter.set_offsets(boid_scatter)

    # Keep a reference so the animation is not garbage-collected mid-show.
    anim = animation.FuncAnimation(figure, animate,
                                   frames=50, interval=50)
    plt.title('Boids')
    plt.show()
| StarcoderdataPython |
3270916 | <reponame>djokester/autokeras
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import timeit
class Experiment(object):
    """Benchmark harness: train an AutoModel and time the fit.

    Subclasses must implement :meth:`get_auto_model` and :meth:`load_data`.
    """

    def __init__(self, name, tmp_dir="tmp_dir"):
        # name: label for the experiment run.
        # tmp_dir: scratch directory removed by tear_down() after each run.
        self.name = name
        self.tmp_dir = tmp_dir

    def get_auto_model(self):
        """Return the AutoModel under test. Must be overridden."""
        raise NotImplementedError

    @staticmethod
    def load_data():
        """Return ((x_train, y_train), (x_test, y_test)). Must be overridden."""
        raise NotImplementedError

    def run_once(self):
        """Train and evaluate one model.

        Returns
        -------
        (total_time, accuracy): wall-clock fit time in seconds and the
        metric from evaluate() (index 1; index 0 is the loss).
        """
        (x_train, y_train), (x_test, y_test) = self.load_data()
        auto_model = self.get_auto_model()
        start_time = timeit.default_timer()
        auto_model.fit(x_train, y_train)
        stop_time = timeit.default_timer()
        accuracy = auto_model.evaluate(x_test, y_test)[1]
        total_time = stop_time - start_time
        return total_time, accuracy

    def run(self, repeat_times=1):
        """Run the experiment repeatedly, cleaning scratch space between runs.

        Returns
        -------
        (total_times, metric_values): one entry per repetition.
        """
        total_times = []
        metric_values = []
        for _ in range(repeat_times):  # loop index was unused
            total_time, metric = self.run_once()
            total_times.append(total_time)
            metric_values.append(metric)
            self.tear_down()
        return total_times, metric_values

    def tear_down(self):
        """Delete the scratch directory.

        ignore_errors=True so a missing directory (e.g. a run that never
        created it) does not abort the whole benchmark.
        """
        shutil.rmtree(self.tmp_dir, ignore_errors=True)
| StarcoderdataPython |
1742231 | <reponame>DavidsDvm/Dev_Exercises
"""
Programa que busque el area y el perimetro de un circulo
"""
import math  # local import: math.pi replaces the hand-rolled 3.1416 constant

# Simple interactive script: read a radius, print the circle's area and
# perimeter. float() (instead of int()) also accepts fractional radii.
print("~=======================================~")
radio = float(input("Ingrese el radio del circulo"))
print("~=======================================~")
area = math.pi * (radio ** 2)
perimetro = (radio * 2) * math.pi
print("El area de su circulo es igual a", area, "el perimetro de su ciruculo es", perimetro)
print("~=======================================~")
3277509 | <reponame>emily-gordy/Decadal-SST-prediction<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 28 11:40:40 2021
@author: emgordy
"""
# Train NNs
# this script was run on a cluster and takes days. Proceed with caution
import xarray as xr
import glob
import numpy as np
import math
import tensorflow as tf
import random
import tensorflow_probability as tfp
from scipy.stats import norm
# Forecast lead-year pair: used both to locate the input/output NetCDF files
# (via the glob patterns below) and to name the trained models.
ly1 = 3 # lead years (1 and 5, or 3 and 7)
ly2 = 7
# Selects the spinup-related tag in the OHC predictor filename
# (presumably months/years of model run-in excluded -- confirm against the
# data files).
runin = 60
#%% define some funcs
# function outputs model object
def loadmodel(HIDDENS,random_seed,ridgepenL2,lr,drate):
    """Build and compile a dense network predicting (mean, log-sigma).

    Parameters
    ----------
    HIDDENS : list[int]
        Nodes per hidden layer; its length sets the network depth.
    random_seed : int
        Seed for every weight/bias initializer and the dropout mask.
    ridgepenL2 : float
        L2 (ridge) penalty applied to the first hidden layer's kernel only.
    lr : float
        SGD learning rate.
    drate : float
        Dropout rate applied to the inputs before the first hidden layer.

    Returns
    -------
    Compiled ``tf.keras.Sequential``. Relies on the module-level globals
    ``output_nodes`` (2: mean and log-sigma) and ``loss_function``
    (RegressLossExpSigma).
    """
    n_layers = len(HIDDENS)  # idiomatic; was np.shape(HIDDENS)[0]
    model = tf.keras.models.Sequential()
    # Input dropout acts as the main regularizer in this experiment.
    model.add(tf.keras.layers.Dropout(rate=drate, seed=random_seed))
    # First hidden layer carries the (optional) ridge penalty.
    model.add(tf.keras.layers.Dense(HIDDENS[0], activation='relu',
              bias_initializer=tf.keras.initializers.RandomNormal(seed=random_seed),
              kernel_initializer=tf.keras.initializers.RandomNormal(seed=random_seed),
              kernel_regularizer=tf.keras.regularizers.L2(l2=ridgepenL2)))
    # Remaining hidden layers are unregularized.
    for layer in range(1, n_layers):
        model.add(tf.keras.layers.Dense(HIDDENS[layer], activation='relu',
                  bias_initializer=tf.keras.initializers.RandomNormal(seed=random_seed),
                  kernel_initializer=tf.keras.initializers.RandomNormal(seed=random_seed)))
    # Linear output layer: one node for the mean, one for log-sigma.
    model.add(tf.keras.layers.Dense(output_nodes, activation=None,
              bias_initializer=tf.keras.initializers.RandomNormal(seed=random_seed),
              kernel_initializer=tf.keras.initializers.RandomNormal(seed=random_seed)))
    model.compile(optimizer=tf.keras.optimizers.SGD(lr),  # optimizer
                  loss=loss_function,  # module global: RegressLossExpSigma
                  metrics=[tf.keras.metrics.MeanAbsolutePercentageError(),
                           tf.keras.metrics.MeanAbsoluteError(),
                           tf.keras.metrics.MeanSquaredError()])
    return model
# function to generate model string to save under
def modelstrfunc(folderstr,lat1,lon1,ly1,ly2,HIDDENS,ridgepenL2,lr,random_seed,drate):
    """Build the .h5 weight-file path encoding a model's hyperparameters.

    The filename encodes grid location, lead years, architecture, ridge
    penalty, dropout rate, learning rate and seed.

    BUGFIX: the 1- and 2-layer branches previously passed ``lr`` and
    ``drate`` in swapped order, so the ``drate`` slot of the filename held
    the learning rate and vice versa. All branches now pass
    (ridge, drate, lr) to match the format-string labels (the 3- and
    4-layer branches already did).
    """
    n_layers = len(HIDDENS)  # idiomatic; was np.shape(HIDDENS)[0]
    if n_layers == 1:
        modelstrout = "models/"+ folderstr +"/polydetrendlat%d_lon%d_ly%d-%d_layers%d_%d_ridge%f_drate%f_lr%f_seed%d.h5" %(
            lat1,lon1,ly1,ly2,n_layers,HIDDENS[0],ridgepenL2,drate,lr,random_seed)
    elif n_layers == 2:
        # NOTE: the double underscore before 'seed' is kept byte-for-byte so
        # previously saved 2-layer models are still found by glob.
        modelstrout = "models/"+ folderstr +"/polydetrendlat%d_lon%d_ly%d-%d_layers%d_%d%d_ridge%f_drate%f_lr%f__seed%d.h5" %(
            lat1,lon1,ly1,ly2,n_layers,HIDDENS[0],HIDDENS[1],ridgepenL2,drate,lr,random_seed)
    elif n_layers == 3:
        modelstrout = "models/"+ folderstr +"/polydetrendlat%d_lon%d_ly%d-%d_layers%d_%d%d%d_ridge%f_drate%f_lr%f_seed%d.h5" %(
            lat1,lon1,ly1,ly2,n_layers,HIDDENS[0],HIDDENS[1],HIDDENS[2],ridgepenL2,drate,lr,random_seed)
    else:
        modelstrout = "models/"+ folderstr +"/polydetrendlat%d_lon%d_ly%d-%d_layers%d_%d%d%d%d_ridge%f_drate%f_lr%f_seed%d.h5" %(
            lat1,lon1,ly1,ly2,n_layers,HIDDENS[0],HIDDENS[1],HIDDENS[2],HIDDENS[3],ridgepenL2,drate,lr,random_seed)
        # Only the first four layer sizes are encoded beyond this point.
        print('warning layer limit reached in string')
    return modelstrout
# grab specific point from SST map
def makeYdata(Y_full, ilat, ilon, output_nodes):
    """Extract one grid point's time series and pad with zero columns.

    The network predicts `output_nodes` values per sample but only the
    first column is a real target; the remaining columns are zeros so the
    array matches the prediction shape expected by the loss.
    """
    n_samples = Y_full.shape[0]
    point_series = Y_full[:, ilat, ilon][:, np.newaxis]
    padding = np.zeros((n_samples, output_nodes - 1))
    return np.concatenate((point_series, padding), axis=1)
# custom loss function
def RegressLossExpSigma(y_true, y_pred):
    """Negative log-likelihood loss for a Normal with predicted mean/log-sigma.

    Column 0 of y_pred is the mean; column 1 is log(sigma), exponentiated
    here so sigma stays positive. Only column 0 of y_true is a real target.
    """
    predicted_mean = tf.cast(y_pred[:, 0], tf.float64)
    predicted_sigma = tf.math.exp(tf.cast(y_pred[:, 1], tf.float64))
    gaussian = tfp.distributions.Normal(predicted_mean, predicted_sigma)
    nll = -gaussian.log_prob(tf.cast(y_true[:, 0], tf.float64))
    nll = tf.cast(nll, tf.float64)
    return tf.reduce_mean(nll, axis=-1)
# split SST to train,val,testing data, then standardize by the training data
def makedata_heatx3_SSTonly(ly1=1, ly2=5):
    """Load the detrended SST target field and split 70/15/15.

    All three splits are standardized (per grid point) by the mean and
    standard deviation of the *training* split only.

    Returns
    -------
    (Y_train, Y_val, Y_test, lat, lon) as numpy arrays.
    """
    sststr = glob.glob("sst-output_detrended*spinup*%d-%d*heatx3.nc" %(ly1,ly2))[0]
    dataset = xr.open_dataset(sststr)
    sst_all = np.asarray(dataset.sst)
    latsel = np.asarray(dataset.lat)
    lonsel = np.asarray(dataset.lon)
    n_samples = sst_all.shape[0]
    fractions = [0, 0.7, 0.85, 1]
    # slice out the three chronological splits
    splits = [sst_all[int(n_samples * fractions[k]):int(n_samples * fractions[k + 1]), :, :]
              for k in range(3)]
    Y_train, Y_val, Y_test = splits
    # standardize by training-set statistics only (no test leakage)
    train_mean = np.nanmean(Y_train, axis=0)
    train_std = np.nanstd(Y_train, axis=0)
    Y_train = np.divide(Y_train - train_mean, train_std)
    Y_val = np.divide(Y_val - train_mean, train_std)
    Y_test = np.divide(Y_test - train_mean, train_std)
    return Y_train, Y_val, Y_test, latsel, lonsel
# split OHC into train,val,test, standardize by the training set
def makedata_heatx3_OHConly(ly1=1, ly2=5, runin=60):
    """Load the OHC predictor field, drop land points, split 70/15/15.

    Each split is divided (per feature) by the training-set standard
    deviation; any remaining NaNs are zeroed so they carry no signal.

    Returns
    -------
    (X_train, X_val, X_test) as 2-D numpy arrays (samples x ocean points).
    """
    ohcstr = glob.glob("ohc-i*heatx3*detrended*spinup*%d*%d-%d*.nc" %(runin,ly1,ly2))[0]
    ohc = np.asarray(xr.open_dataset(ohcstr).ohc)
    # land points are NaN in the first sample; x/x is NaN exactly where x is
    ocean_mask = ~np.isnan(ohc[0, :] / ohc[0, :])
    ohc = ohc[:, ocean_mask]
    n_samples = ohc.shape[0]
    fractions = [0, 0.7, 0.85, 1]
    splits = [ohc[int(n_samples * fractions[k]):int(n_samples * fractions[k + 1]), :]
              for k in range(3)]
    X_train, X_val, X_test = splits
    # scale by training-set spread only, then zero any NaNs left over
    train_std = np.nanstd(X_train, axis=0)
    X_train = np.divide(X_train, train_std)
    X_train[np.isnan(X_train)] = 0
    X_val = np.divide(X_val, train_std)
    X_val[np.isnan(X_val)] = 0
    X_test = np.divide(X_test, train_std)
    X_test[np.isnan(X_test)] = 0
    return X_train, X_val, X_test
#%% experiment data
# 70/15/15 chronological splits, standardized by training-set statistics.
X_train,X_val,X_test = makedata_heatx3_OHConly(ly1,ly2,runin)
Y_train_full,Y_val_full,Y_test_full,latsel,lonsel = makedata_heatx3_SSTonly(ly1,ly2)
#%% experiment variables
patience = 100 # early stopping patience
n_epochs = 1000 # training epochs
batchsize = 64
lr = 1e-4 # learning rate
ridgepenL2 = 0 #l2/ridge penalty, zero in this experiment
drate=0.8 # dropout rate in training
hiddens = [60,4] # hidden layer architecture, 2 layers, 60 nodes then 4 nodes
seeds = np.arange(0,10) # random seeds
es_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True) #early stopping
# NOTE(review): this list is never used -- model.fit below is passed
# es_callback directly.
callbacks = [es_callback,]
loss_function = RegressLossExpSigma  # read by loadmodel() via module scope
#%% output specifics
nlat = latsel.shape[0]
nlon = lonsel.shape[0]
nseeds = seeds.shape[0]
latsel = np.asarray(latsel)
lonsel = np.asarray(lonsel)
folderstr = 'SSTfromOHC_regression' # folder to save models
#%% train 10 models at each point on the globe
# Two output nodes: predicted mean and predicted log-sigma, matching
# RegressLossExpSigma. Read by loadmodel() via module scope.
output_nodes = 2
for ilat,lat in enumerate(latsel):
    for ilon,lon in enumerate(lonsel):
        # land points are NaN in the SST field -- nothing to predict there
        if np.isnan(Y_train_full[0,ilat,ilon]):
            print("lat="+str(lat)+ " lon=" +str(lon)+" over land")
        else:
            Y_train = makeYdata(Y_train_full,ilat,ilon,output_nodes)
            Y_val = makeYdata(Y_val_full,ilat,ilon,output_nodes)
            for iseed,seed in enumerate(seeds):
                modelstrout = modelstrfunc(folderstr,lat,lon,ly1,ly2,hiddens,ridgepenL2,lr,seed,drate)
                # skip (point, seed) combos whose weight file already exists,
                # so an interrupted cluster job can be resumed
                strcheck = glob.glob(modelstrout)
                if len(strcheck) == 0:
                    print(modelstrout)
                    print("lat="+str(lat)+ " lon=" +str(lon)+"seed="+str(seed))
                    # seed every RNG in play for reproducibility
                    np.random.seed(seed)
                    tf.random.set_seed(seed)
                    random.seed(int(seed))
                    # load and train
                    model = loadmodel(hiddens,seed,ridgepenL2,lr,drate)
                    print('training model')
                    # history is unused; fit() trains in place
                    history = model.fit(X_train, Y_train, epochs=n_epochs, batch_size=batchsize, validation_data=(X_val, Y_val),
                            shuffle=True, verbose=0, callbacks=es_callback)
                    print('done training')
                    # only weights are saved; loadmodel() rebuilds the architecture
                    model.save_weights(modelstrout)
                else:
                    print('model exists')
| StarcoderdataPython |
107878 | # PROBABILISTIC ROBOT ACTION CORES
#
# (C) 2014 by <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from dnutils import logs
from pracmln import MLN, Database
from pracmln.mln.base import parse_mln
from pracmln.mln.methods import LearningMethods
from pracmln.utils.project import MLNProject
from prac.core import locations as pracloc
from prac.core.base import PRACModule, PRACPIPE
from prac.core.inference import PRACInferenceStep
from prac.pracutils.utils import prac_heading
# Module-level logger for this PRAC module.
logger = logs.getlogger(__name__, logs.DEBUG)
# Object attributes accepted as evidence predicates when learning/inferring
# an object's identity (the commented-out ones are currently disabled).
possibleProps = ['color', 'size', 'shape', 'hypernym', 'hasa'] # , 'dimension', 'consistency', 'material']
class NLObjectRecognition(PRACModule):
    '''
    PRACModule used to infer an objects' identity given its attributes.
    '''
    @PRACPIPE
    def __call__(self, pracinference, **params):
        """Run MLN inference: enrich the incoming DBs with WordNet
        similarities, query the module's default MLN project, and append
        the result databases to a new inference step."""
        logger.info('Running {}'.format(self.name))
        print prac_heading('Recognizing Objects')
        # load default project
        projectpath = os.path.join(pracloc.pracmodules, self.name, self.defproject)
        project = MLNProject.open(projectpath)
        inf_step = PRACInferenceStep(pracinference, self)
        # evidence comes from the previous step of the PRAC pipeline
        dbs = pracinference.inference_steps[-1].output_dbs
        mlntext = project.mlns.get(project.queryconf['mln'], None)
        mln = parse_mln(mlntext,
                        searchpaths=[self.module_path],
                        projectpath=projectpath,
                        logic=project.queryconf.get('logic', 'FuzzyLogic'),
                        grammar=project.queryconf.get('grammar',
                                                      'PRACGrammar'))
        wordnet_module = self.prac.module('wn_senses')
        # adding evidence properties to new query db
        for db in dbs:
            # find properties and add word similarities
            # NOTE(review): these look like debug dumps logged at ERROR
            # level -- presumably should be logger.debug; confirm.
            logger.error(db.domains)
            logger.error(mln.domains)
            output_db = wordnet_module.add_similarities(db, mln)
            output_db.write()
            # infer and update output dbs
            infer = self.mlnquery(config=project.queryconf,
                                  db=output_db, mln=mln)
            result_db = infer.resultdb
            inf_step.outdbs.append(result_db)
        return inf_step
    @PRACPIPE
    def train(self, praclearning):
        """Learn MLN weights for object recognition from training DBs.

        Training data comes either from .db files, from pre-split xfold
        DBs, or from a previous inference result; the learned MLN is
        written to '<mln-name>_trained.mln' and returned."""
        print prac_heading('Training knowledgebase')
        mlnName = praclearning.otherParams.get('mln', None)
        mlnLogic = praclearning.otherParams.get('logic', None)
        objName = praclearning.otherParams.get('concept', None)
        # onTheFly is read but not used below
        onTheFly = praclearning.otherParams.get('onthefly', False)
        mln = MLN(mlnfile=os.path.abspath(mlnName), logic=mlnLogic,
                  grammar='PRACGrammar')
        pracTrainingDBS = praclearning.training_dbs
        trainingDBS = []
        if len(pracTrainingDBS) >= 1 and type(
                pracTrainingDBS[0]) is str:  # db from file
            logger.info('Learning from db files...')
            inputdbs = Database.load(mln, dbfile=pracTrainingDBS,
                                     ignore_unknown_preds=True)
            trainingDBS += inputdbs
        elif len(pracTrainingDBS) > 1:
            logger.info('Learning from db files (xfold)...')
            trainingDBS = pracTrainingDBS
        else:  # db from inference result
            logger.info('Learning from inference result...')
            inputdbs = pracTrainingDBS
            for db in inputdbs:
                # tag the cluster evidence with the concept being learned
                db << 'object(cluster, {})'.format(objName)
                trainingDBS.append(db)
        outputfile = '{}_trained.mln'.format(mlnName.split('.')[0])
        # learning mln
        trainedMLN = mln.learnWeights(trainingDBS, LearningMethods.DCLL,
                                      evidencePreds=possibleProps, partSize=1,
                                      gaussianPriorSigma=10, useMultiCPU=0,
                                      optimizer='cg', learningRate=0.9)
        print prac_heading('Learnt Formulas')
        trainedMLN.printFormulas()
        # NOTE(review): Python-2 file() handle is never closed explicitly
        trainedMLN.write(file(outputfile, "w"))
        return trainedMLN
| StarcoderdataPython |
4836686 | <gh_stars>1-10
# Copyright (c) 2016, Manito Networks, LLC
# All rights reserved.
import time, datetime, socket, struct, sys, json, socket, logging, logging.handlers
from struct import *
from socket import inet_ntoa
from IPy import IP
import dns_base
import site_category
from netflow_options import *
def dns_add_address(ip):
    """Resolve *ip* to an FQDN, derive its domain and category, and cache it.

    Returns False for IPv4 broadcast or (when lookup_internal is False)
    private addresses; otherwise returns the cached record dict for *ip*.
    Relies on module globals: `lookup_internal` (from netflow_options) and
    the shared caches in `dns_base` / `site_category`.
    """
    # Check if IPv4
    if IP(ip).version() == 4:
        # Add a /32 mask to make the address usable
        v4_ip = IP(str(ip)+"/32")
        # Check if broadcast address
        if ip == '255.255.255.255':
            return False
        # Check if it's a local address that we're not looking up
        elif lookup_internal is False and v4_ip.iptype() == 'PRIVATE':
            return False
        else:
            pass
    # IPv6 doesn't need this treatment
    else:
        pass
    # Haven't already resolved the IP - do the lookup and cache the result
    if ip not in dns_base.dns_cache["Records"]:
        # Reverse-lookup the new IP using the configured DNS server
        fqdn_lookup = str(socket.getfqdn(ip))
        # Give the new record a home; entries live for 30 minutes
        dns_base.dns_cache["Records"][ip] = {}
        dns_base.dns_cache["Records"][ip]['Expires'] = int(time.time())+1800
        # If a name was successfully resolved (getfqdn echoes the IP on failure)
        if ip != fqdn_lookup:
            # Update the local cache
            dns_base.dns_cache["Records"][ip]["FQDN"] = fqdn_lookup
            # Parse the FQDN for Domain information
            if "." in fqdn_lookup:
                fqdns_exploded = fqdn_lookup.split('.') # Blow it up
                domain = str(fqdns_exploded[-2]) + "." + str(fqdns_exploded[-1]) # Grab TLD and second-level domain
                # Check for .co.uk, .com.jp, etc...
                if domain in dns_base.second_level_domains:
                    domain = str(fqdns_exploded[-3]) + "." + str(fqdns_exploded[-2]) + "." + str(fqdns_exploded[-1])
                # Not a .co.uk or .com.jp type domain
                else:
                    pass
                dns_base.dns_cache["Records"][ip]["Domain"] = domain
                # Tag the domain with a category if possible
                if domain in site_category.site_categories:
                    dns_base.dns_cache["Records"][ip]["Category"] = site_category.site_categories[domain]
                # For graph normalization
                else:
                    dns_base.dns_cache["Records"][ip]["Category"] = "Uncategorized"
            # Internal hostname without a domain
            else:
                dns_base.dns_cache["Records"][ip]["Domain"] = "None"
                dns_base.dns_cache["Records"][ip]["Category"] = "Uncategorized"
        # No DNS record, lookup returned original IP for the domain
        else:
            dns_base.dns_cache["Records"][ip]["FQDN"] = "No record"
            dns_base.dns_cache["Records"][ip]["Domain"] = "No record"
            dns_base.dns_cache["Records"][ip]["Category"] = "Uncategorized"
    # Already have the lookup in the cache and it hasn't been pruned yet
    else:
        pass
    return dns_base.dns_cache["Records"][ip]
# Prune resolved DNS names after 30min so we don't keep using stale domain names for tagging
def dns_prune():
    """Evict DNS cache entries older than 30 minutes.

    Only runs once the module-wide "Prune" deadline has passed; after
    pruning, the deadline is pushed 30 minutes into the future.
    """
    now = time.time()
    if dns_base.dns_cache["Prune"] < now:
        # collect first, then pop -- never mutate a dict while iterating it
        expired = [record for record in dns_base.dns_cache["Records"]
                   if dns_base.dns_cache["Records"][record]["Expires"] < now]
        for record in expired:
            dns_base.dns_cache["Records"].pop(record)
        dns_base.dns_cache["Prune"] = int(now + 1800)
    return
3281549 | """
Implements custom ufunc dispatch mechanism for non-CPU devices.
"""
from __future__ import print_function, absolute_import
import operator
import warnings
from functools import reduce
import numpy as np
from numba.utils import longint, OrderedDict
from numba.utils import IS_PY3
from numba.npyufunc.ufuncbuilder import _BaseUFuncBuilder, parse_identity
from numba import sigutils, types
from numba.typing import signature
from numba.npyufunc.sigparse import parse_signature
# Version-portable exec shim: `exec` is a statement in Python 2 and a
# function in Python 3, so the Py2 variant must be compiled from a string
# (a literal `exec codestr in glbls` would be a SyntaxError under Py3).
if IS_PY3:
    def _exec(codestr, glbls):
        exec(codestr, glbls)
else:
    eval(compile("""
def _exec(codestr, glbls):
    exec codestr in glbls
""",
"<_exec>", "exec"))
def _broadcast_axis(a, b):
"""
Raises
------
ValueError if broadcast fails
"""
if a == b:
return a
elif a == 1:
return b
elif b == 1:
return a
else:
raise ValueError("failed to broadcast {0} and {1}".format(a, b))
def _pairwise_broadcast(shape1, shape2):
    """Broadcast two shapes: left-pad the shorter with 1s, then combine
    axis lengths pairwise via _broadcast_axis.

    Raises
    ------
    ValueError if broadcast fails
    """
    shape1 = tuple(shape1)
    shape2 = tuple(shape2)
    ndiff = len(shape1) - len(shape2)
    if ndiff < 0:
        shape1 = (1,) * (-ndiff) + shape1
    elif ndiff > 0:
        shape2 = (1,) * ndiff + shape2
    return tuple(_broadcast_axis(a, b) for a, b in zip(shape1, shape2))
def _multi_broadcast(*shapelist):
    """Fold _pairwise_broadcast over any number of shapes.

    Raises
    ------
    ValueError if broadcast fails (message names the offending argument,
    counted from 1)
    """
    assert shapelist
    result = shapelist[0]
    for i, each in enumerate(shapelist[1:], start=1):
        try:
            result = _pairwise_broadcast(result, each)
        except ValueError:
            raise ValueError("failed to broadcast argument #{0}".format(i))
    return result
class UFuncMechanism(object):
    """
    Prepare ufunc arguments for vectorize.

    Base class for device-targeted ufunc dispatch: it handles argument
    collection, signature resolution and numpy-style broadcasting, while
    subclasses override the transfer/allocation/launch hooks at the bottom
    for their device. The full flow is driven by the ``call()`` classmethod.
    """
    # Stream used when the caller does not pass ``stream=``.
    DEFAULT_STREAM = None
    # Whether the subclass can slice/ravel device arrays in place.
    SUPPORT_DEVICE_SLICING = False

    def __init__(self, typemap, args):
        """Never used directly by user. Invoke by UFuncMechanism.call().
        """
        self.typemap = typemap
        self.args = args
        nargs = len(self.args)
        self.argtypes = [None] * nargs
        self.scalarpos = []          # indices of scalar (non-array) arguments
        self.signature = None
        self.arrays = [None] * nargs

    def _fill_arrays(self):
        """
        Get all arguments in array form
        """
        for i, arg in enumerate(self.args):
            if isinstance(arg, np.ndarray):
                self.arrays[i] = arg
            elif self.is_device_array(arg):
                self.arrays[i] = arg
            elif isinstance(arg, (int, longint, float, complex, np.number)):
                # Is scalar
                self.scalarpos.append(i)
            else:
                raise TypeError("argument #%d has invalid type" % (i + 1,))

    def _fill_argtypes(self):
        """
        Get dtypes
        """
        # scalar positions are left as None; resolved in _resolve_signature
        for i, ary in enumerate(self.arrays):
            if ary is not None:
                self.argtypes[i] = ary.dtype

    def _resolve_signature(self):
        """Resolve signature.
        May have ambiguous case.
        """
        matches = []
        # Resolve scalar args exact match first
        if self.scalarpos:
            # Try resolve scalar arguments
            for formaltys in self.typemap:
                match_map = []
                for i, (formal, actual) in enumerate(zip(formaltys,
                                                         self.argtypes)):
                    if actual is None:
                        actual = np.asarray(self.args[i]).dtype
                    match_map.append(actual == formal)
                if all(match_map):
                    matches.append(formaltys)
        # No matching with exact match; try coercing the scalar arguments
        if not matches:
            matches = []
            for formaltys in self.typemap:
                all_matches = all(actual is None or formal == actual
                                  for formal, actual in
                                  zip(formaltys, self.argtypes))
                if all_matches:
                    matches.append(formaltys)
        if not matches:
            raise TypeError("No matching version. GPU ufunc requires array "
                            "arguments to have the exact types. This behaves "
                            "like regular ufunc with casting='no'.")
        if len(matches) > 1:
            raise TypeError("Failed to resolve ufunc due to ambiguous "
                            "signature. Too many untyped scalars. "
                            "Use numpy dtype object to type tag.")
        # Try scalar arguments
        self.argtypes = matches[0]

    def _get_actual_args(self):
        """Return the actual arguments
        Casts scalar arguments to numpy.array.
        """
        for i in self.scalarpos:
            self.arrays[i] = np.array([self.args[i]], dtype=self.argtypes[i])
        return self.arrays

    def _broadcast(self, arys):
        """Perform numpy ufunc broadcasting
        """
        shapelist = [a.shape for a in arys]
        shape = _multi_broadcast(*shapelist)
        for i, ary in enumerate(arys):
            if ary.shape == shape:
                pass
            else:
                if self.is_device_array(ary):
                    arys[i] = self.broadcast_device(ary, shape)
                else:
                    # host array: emulate broadcasting with zero strides on
                    # the axes being expanded, then fix the layout
                    ax_differs = [ax for ax in range(len(shape))
                                  if ax >= ary.ndim
                                  or ary.shape[ax] != shape[ax]]
                    missingdim = len(shape) - len(ary.shape)
                    strides = [0] * missingdim + list(ary.strides)
                    for ax in ax_differs:
                        strides[ax] = 0
                    strided = np.lib.stride_tricks.as_strided(ary,
                                                              shape=shape,
                                                              strides=strides)
                    arys[i] = self.force_array_layout(strided)
        return arys

    def get_arguments(self):
        """Prepare and return the arguments for the ufunc.
        Does not call to_device().
        """
        self._fill_arrays()
        self._fill_argtypes()
        self._resolve_signature()
        arys = self._get_actual_args()
        return self._broadcast(arys)

    def get_function(self):
        """Returns (result_dtype, function)
        """
        return self.typemap[self.argtypes]

    def is_device_array(self, obj):
        """Is the `obj` a device array?
        Override in subclass
        """
        return False

    def broadcast_device(self, ary, shape):
        """Handles ondevice broadcasting

        Override in subclass to add support.
        """
        raise NotImplementedError("broadcasting on device is not supported")

    def force_array_layout(self, ary):
        """Ensures array layout met device requirement.
        Override in sublcass
        """
        return ary

    @classmethod
    def call(cls, typemap, args, kws):
        """Perform the entire ufunc call mechanism.
        """
        # Handle keywords
        stream = kws.pop('stream', cls.DEFAULT_STREAM)
        out = kws.pop('out', None)
        if kws:
            warnings.warn("unrecognized keywords: %s" % ', '.join(kws))
        # Begin call resolution
        cr = cls(typemap, args)
        args = cr.get_arguments()
        resty, func = cr.get_function()
        outshape = args[0].shape
        def attempt_ravel(a):
            if cr.SUPPORT_DEVICE_SLICING:
                raise NotImplementedError
            try:
                # Call the `.ravel()` method
                return a.ravel()
            except NotImplementedError:
                # If it is not a device array
                if not cr.is_device_array(a):
                    raise
                # For device array, retry ravel on the host by first
                # copying it back.
                else:
                    hostary = cr.to_host(a, stream).ravel()
                    return cr.to_device(hostary, stream)
        # kernels are launched 1-D, so flatten multi-dimensional inputs
        if args[0].ndim > 1:
            args = [attempt_ravel(a) for a in args]
        # Prepare argument on the device
        devarys = []
        any_device = False
        for a in args:
            if cr.is_device_array(a):
                devarys.append(a)
                any_device = True
            else:
                dev_a = cr.to_device(a, stream=stream)
                devarys.append(dev_a)
        # Launch
        shape = args[0].shape
        if out is None:
            # No output is provided
            devout = cr.device_array(shape, resty, stream=stream)
            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)
            if any_device:
                # If any of the arguments are on device,
                # Keep output on the device
                return devout.reshape(outshape)
            else:
                # Otherwise, transfer output back to host
                return devout.copy_to_host().reshape(outshape)
        elif cr.is_device_array(out):
            # If output is provided and it is a device array,
            # Return device array
            if out.ndim > 1:
                out = attempt_ravel(out)
            devout = out
            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)
            return devout.reshape(outshape)
        else:
            # If output is provided and it is a host array,
            # Return host array
            assert out.shape == shape
            assert out.dtype == resty
            devout = cr.device_array(shape, resty, stream=stream)
            devarys.extend([devout])
            cr.launch(func, shape[0], stream, devarys)
            return devout.copy_to_host(out, stream=stream).reshape(outshape)

    def to_device(self, hostary, stream):
        """Implement to device transfer
        Override in subclass
        """
        raise NotImplementedError

    def to_host(self, devary, stream):
        """Implement to host transfer
        Override in subclass
        """
        raise NotImplementedError

    def device_array(self, shape, dtype, stream):
        """Implements device allocation
        Override in subclass
        """
        raise NotImplementedError

    def launch(self, func, count, stream, args):
        """Implements device function invocation
        Override in subclass
        """
        raise NotImplementedError
def to_dtype(ty):
    """Map a type object to a numpy dtype via its string representation."""
    type_name = str(ty)
    return np.dtype(type_name)
class DeviceVectorize(_BaseUFuncBuilder):
    """Base builder for device-targeted @vectorize ufuncs.

    Subclasses supply ``_kernel_template`` plus the compile hooks that
    raise NotImplementedError below.
    """
    def __init__(self, func, identity=None, targetoptions={}):
        # no target options are supported for device vectorize
        assert not targetoptions
        self.py_func = func
        self.identity = parse_identity(identity)
        # { arg_dtype: (return_dtype), cudakernel }
        self.kernelmap = OrderedDict()

    @property
    def pyfunc(self):
        return self.py_func

    def add(self, sig=None, argtypes=None, restype=None):
        """Compile and register a kernel for one concrete signature."""
        # Handle argtypes (deprecated keyword form)
        if argtypes is not None:
            warnings.warn("Keyword argument argtypes is deprecated",
                          DeprecationWarning)
            assert sig is None
            if restype is None:
                sig = tuple(argtypes)
            else:
                sig = restype(*argtypes)
        del argtypes
        del restype
        # compile core as device function
        args, return_type = sigutils.normalize_signature(sig)
        devfnsig = signature(return_type, *args)
        funcname = self.pyfunc.__name__
        kernelsource = self._get_kernel_source(self._kernel_template,
                                               devfnsig, funcname)
        corefn, return_type = self._compile_core(devfnsig)
        glbl = self._get_globals(corefn)
        # kernel takes array versions of each arg plus the result array
        sig = signature(types.void, *([a[:] for a in args] + [return_type[:]]))
        _exec(kernelsource, glbl)
        stager = glbl['__vectorized_%s' % funcname]
        kernel = self._compile_kernel(stager, sig)
        argdtypes = tuple(to_dtype(t) for t in devfnsig.args)
        resdtype = to_dtype(return_type)
        self.kernelmap[tuple(argdtypes)] = resdtype, kernel

    def build_ufunc(self):
        raise NotImplementedError

    def _get_kernel_source(self, template, sig, funcname):
        """Fill the stager template with argument names and per-thread
        element indexing (``a<i>[__tid__]``)."""
        args = ['a%d' % i for i in range(len(sig.args))]
        fmts = dict(name=funcname,
                    args=', '.join(args),
                    argitems=', '.join('%s[__tid__]' % i for i in args))
        return template.format(**fmts)

    def _compile_core(self, sig):
        raise NotImplementedError

    def _get_globals(self, corefn):
        raise NotImplementedError

    def _compile_kernel(self, fnobj, sig):
        raise NotImplementedError
class DeviceGUFuncVectorize(_BaseUFuncBuilder):
    """Base builder for device-targeted @guvectorize ufuncs.

    Parses the gufunc layout signature once; subclasses provide
    ``_kernel_template`` and the compile hooks below.
    """
    def __init__(self, func, sig, identity=None, targetoptions={}):
        # Allow nopython flag to be set.
        if not targetoptions.pop('nopython', True):
            raise TypeError("nopython flag must be True")
        # Are there any more target options?
        if targetoptions:
            opts = ', '.join([repr(k) for k in targetoptions.keys()])
            fmt = "The following target options are not supported: {0}"
            raise TypeError(fmt.format(opts))
        self.py_func = func
        self.identity = parse_identity(identity)
        self.signature = sig
        self.inputsig, self.outputsig = parse_signature(self.signature)
        assert len(self.outputsig) == 1, "only support 1 output"
        # { arg_dtype: (return_dtype), cudakernel }
        self.kernelmap = OrderedDict()

    @property
    def pyfunc(self):
        return self.py_func

    def add(self, sig=None, argtypes=None, restype=None):
        """Compile and register a gufunc kernel for one concrete signature."""
        # Handle argtypes (deprecated keyword form)
        if argtypes is not None:
            warnings.warn("Keyword argument argtypes is deprecated",
                          DeprecationWarning)
            assert sig is None
            if restype is None:
                sig = tuple(argtypes)
            else:
                sig = restype(*argtypes)
        del argtypes
        del restype
        # core dimensionalities from the layout signature
        indims = [len(x) for x in self.inputsig]
        outdims = [len(x) for x in self.outputsig]
        funcname = self.py_func.__name__
        src = expand_gufunc_template(self._kernel_template, indims,
                                     outdims, funcname)
        glbls = self._get_globals(sig)
        _exec(src, glbls)
        fnobj = glbls['__gufunc_{name}'.format(name=funcname)]
        args, return_type = sigutils.normalize_signature(sig)
        # each argument gains one looping dimension on the outside
        outertys = list(_determine_gufunc_outer_types(args, indims + outdims))
        kernel = self._compile_kernel(fnobj, sig=tuple(outertys))
        dtypes = tuple(np.dtype(str(t.dtype)) for t in outertys)
        self.kernelmap[tuple(dtypes[:-1])] = dtypes[-1], kernel

    def _compile_kernel(self, fnobj, sig):
        raise NotImplementedError

    def _get_globals(self, sig):
        raise NotImplementedError
def _determine_gufunc_outer_types(argtys, dims):
    """Yield each argument's outer type: arrays gain one looping dimension;
    scalars (which must have zero core dims) become 1-D arrays."""
    for argtype, core_ndim in zip(argtys, dims):
        if isinstance(argtype, types.Array):
            yield argtype.copy(ndim=core_ndim + 1)
            continue
        if core_ndim > 0:
            raise ValueError("gufunc signature mismatch: ndim>0 for scalar")
        yield types.Array(dtype=argtype, ndim=1, layout='A')
def expand_gufunc_template(template, indims, outdims, funcname):
    """Fill the gufunc stager source *template* for the given core
    dimensionalities and the name of the wrapped function."""
    arg_count = len(indims) + len(outdims)
    argnames = ["arg{0}".format(i) for i in range(arg_count)]
    # iterate over the smallest leading (outer) dimension of all arguments
    checkedarg = "min({0})".format(
        ', '.join("{0}.shape[0]".format(name) for name in argnames))
    input_exprs = [_gen_src_for_indexing(name, nd, _gen_src_for_input_indexing)
                   for name, nd in zip(argnames, indims)]
    output_exprs = [_gen_src_for_indexing(name, nd, _gen_src_for_output_indexing)
                    for name, nd in zip(argnames[len(indims):], outdims)]
    return template.format(name=funcname,
                           args=', '.join(argnames),
                           checkedarg=checkedarg,
                           argitems=', '.join(input_exprs + output_exprs))
def _gen_src_for_indexing(aref, adims, gen_sliced):
return "{aref}[{sliced}]".format(aref=aref, sliced=gen_sliced(adims))
def _gen_src_for_input_indexing(adims):
    """Index text for an input argument: a sliced row if it has core
    dimensions, otherwise the scalar element at __tid__."""
    if adims <= 0:
        return '__tid__'
    return _gen_src_for_array_indexing(adims)
def _gen_src_for_output_indexing(adims):
    """Index text for an output argument: scalar outputs get a length-1
    slice so in-place assignment works inside the kernel."""
    if adims <= 0:
        return '__tid__:(__tid__ + 1)'
    return _gen_src_for_array_indexing(adims)
def _gen_src_for_array_indexing(adims):
return ','.join(['__tid__'] + [':'] * adims)
class GUFuncEngine(object):
    '''Determine how to broadcast and execute a gufunc
    base on input shape and signature
    '''
    @classmethod
    def from_signature(cls, signature):
        # parse "(m,n),(n)->(m)" style layout into input/output symbol lists
        return cls(*parse_signature(signature))

    def __init__(self, inputsig, outputsig):
        # signatures
        self.sin = inputsig
        self.sout = outputsig
        # argument count
        self.nin = len(self.sin)
        self.nout = len(self.sout)

    def schedule(self, ishapes):
        """Match concrete input shapes against the signature and return a
        GUFuncSchedule (inner shapes, output shapes, loop dims, pinning)."""
        if len(ishapes) != self.nin:
            raise TypeError('invalid number of input argument')
        # associate symbol values for input signature
        symbolmap = {}
        outer_shapes = []
        inner_shapes = []
        for argn, (shape, symbols) in enumerate(zip(ishapes, self.sin)):
            argn += 1  # start from 1 for human
            inner_ndim = len(symbols)
            if len(shape) < inner_ndim:
                fmt = "arg #%d: insufficient inner dimension"
                raise ValueError(fmt % (argn,))
            # trailing dims are the core; anything in front is looped over
            if inner_ndim:
                inner_shape = shape[-inner_ndim:]
                outer_shape = shape[:-inner_ndim]
            else:
                inner_shape = ()
                outer_shape = shape
            # every occurrence of a symbol must bind to the same length
            for axis, (dim, sym) in enumerate(zip(inner_shape, symbols)):
                axis += len(outer_shape)
                if sym in symbolmap:
                    if symbolmap[sym] != dim:
                        fmt = "arg #%d: shape[%d] mismatch argument"
                        raise ValueError(fmt % (argn, axis))
                symbolmap[sym] = dim
            outer_shapes.append(outer_shape)
            inner_shapes.append(inner_shape)
        # solve output shape
        oshapes = []
        for outsig in self.sout:
            oshape = []
            for sym in outsig:
                oshape.append(symbolmap[sym])
            oshapes.append(tuple(oshape))
        # find the biggest outershape as looping dimension
        sizes = [reduce(operator.mul, s, 1) for s in outer_shapes]
        largest_i = np.argmax(sizes)
        loopdims = outer_shapes[largest_i]
        pinned = [False] * self.nin  # same argument for each iteration
        for i, d in enumerate(outer_shapes):
            if d != loopdims:
                # shapes () and (1,) are reused ("pinned") every iteration
                if d == (1,) or d == ():
                    pinned[i] = True
                else:
                    fmt = "arg #%d: outer dimension mismatch"
                    raise ValueError(fmt % (i + 1,))
        return GUFuncSchedule(self, inner_shapes, oshapes, loopdims, pinned)
class GUFuncSchedule(object):
    """Concrete execution plan produced by GUFuncEngine.schedule():
    core shapes per argument, the outer looping dimensions, and which
    arguments are pinned (reused unchanged on every iteration)."""
    def __init__(self, parent, ishapes, oshapes, loopdims, pinned):
        self.parent = parent
        # core (inner) shapes per input / output
        self.ishapes = ishapes
        self.oshapes = oshapes
        # outer looping dimensions and the total iteration count
        self.loopdims = loopdims
        self.loopn = reduce(operator.mul, loopdims, 1)
        # per-input flag: argument reused unchanged on every iteration
        self.pinned = pinned
        # full shape of each output: loop dims prepended to its core shape
        self.output_shapes = [loopdims + s for s in oshapes]

    def __str__(self):
        import pprint
        names = ('ishapes', 'oshapes', 'loopdims', 'loopn', 'pinned')
        return pprint.pformat({name: getattr(self, name) for name in names})
class GenerializedUFunc(object):
    """Generalized-ufunc dispatcher: builds a schedule from the input shapes,
    selects a kernel from `kernelmap` by input dtypes, broadcasts the device
    arguments to the looping dimension, and launches the kernel.

    NOTE(review): relies on `self._call_steps`, which is not defined in this
    class — presumably supplied by a backend-specific subclass; confirm.
    """
    def __init__(self, kernelmap, engine):
        # Mapping of input-dtype tuples -> (output dtype, compiled kernel).
        self.kernelmap = kernelmap
        # Scheduling engine describing the gufunc signature.
        self.engine = engine
        self.max_blocksize = 2 ** 30
        assert self.engine.nout == 1, "only support single output"
    def __call__(self, *args, **kws):
        # Full call pipeline: normalize inputs, pick schedule + kernel,
        # cast inputs, allocate output, broadcast, launch, post-process.
        callsteps = self._call_steps(args, kws)
        callsteps.prepare_inputs()
        indtypes, schedule, outdtype, kernel = self._schedule(
            callsteps.norm_inputs, callsteps.output)
        callsteps.adjust_input_types(indtypes)
        callsteps.allocate_outputs(schedule, outdtype)
        callsteps.prepare_kernel_parameters()
        newparams, newretval = self._broadcast(schedule,
                                               callsteps.kernel_parameters,
                                               callsteps.kernel_returnvalue)
        callsteps.launch_kernel(kernel, schedule.loopn, newparams + [newretval])
        return callsteps.post_process_result()
    def _schedule(self, inputs, out):
        """Build a schedule for `inputs` and select a kernel by their dtypes.

        Returns a tuple (dispatch dtypes, schedule, output dtype, kernel).
        Raises ValueError when `out` is provided with a mismatched shape.
        """
        input_shapes = [a.shape for a in inputs]
        schedule = self.engine.schedule(input_shapes)
        # find kernel
        idtypes = tuple(i.dtype for i in inputs)
        try:
            outdtype, kernel = self.kernelmap[idtypes]
        except KeyError:
            # No exact match, then use the first compatible.
            # This does not match the numpy dispatching exactly.
            # Later, we may just jit a new version for the missing signature.
            idtypes = self._search_matching_signature(idtypes)
            # Select kernel
            outdtype, kernel = self.kernelmap[idtypes]
        # check output
        if out is not None and schedule.output_shapes[0] != out.shape:
            raise ValueError('output shape mismatch')
        return idtypes, schedule, outdtype, kernel
    def _search_matching_signature(self, idtypes):
        """
        Given the input types in `idtypes`, return a compatible sequence of
        types that is defined in `kernelmap`.
        Note: Ordering is guaranteed by `kernelmap` being a OrderedDict
        """
        # for/else: raise only when no signature is castable.
        for sig in self.kernelmap.keys():
            if all(np.can_cast(actual, desired)
                   for actual, desired in zip(sig, idtypes)):
                return sig
        else:
            raise TypeError("no matching signature")
    def _broadcast(self, schedule, params, retval):
        """Reshape device params and the return array so that their leading
        axis is the flattened looping dimension of the schedule."""
        assert schedule.loopn > 0, "zero looping dimension"
        # Scalar (no loop dims) calls still get a leading axis of length 1.
        odim = 1 if not schedule.loopdims else schedule.loopn
        newparams = []
        for p, cs in zip(params, schedule.ishapes):
            if not cs and p.size == 1:
                # Broadcast scalar input
                devary = self._broadcast_scalar_input(p, odim)
                newparams.append(devary)
            else:
                # Broadcast vector input
                newparams.append(self._broadcast_array(p, odim, cs))
        newretval = retval.reshape(odim, *schedule.oshapes[0])
        return newparams, newretval
    def _broadcast_array(self, ary, newdim, innerdim):
        """Reshape `ary` to (newdim,) + innerdim, delegating to the backend
        hook when a new leading axis must be created."""
        newshape = (newdim,) + innerdim
        # No change in shape
        if ary.shape == newshape:
            return ary
        # Creating new dimension
        elif len(ary.shape) < len(newshape):
            assert newshape[-len(ary.shape):] == ary.shape, \
                "cannot add dim and reshape at the same time"
            return self._broadcast_add_axis(ary, newshape)
        # Collapsing dimension
        else:
            return ary.reshape(*newshape)
    def _broadcast_add_axis(self, ary, newshape):
        # Backend hook: broadcast `ary` by prepending axes up to `newshape`.
        raise NotImplementedError("cannot add new axis")
    def _broadcast_scalar_input(self, ary, shape):
        # Backend hook: replicate a scalar input across the loop dimension.
        raise NotImplementedError
class GUFuncCallSteps(object):
    """Template describing the steps of a gufunc call; backend subclasses
    supply the device-array hooks (`is_device_array`, `to_device`, ...)."""
    __slots__ = [
        'args',
        'kwargs',
        'output',
        'norm_inputs',
        'kernel_returnvalue',
        'kernel_parameters',
        '_is_device_array',
        '_need_device_conversion',
    ]

    def __init__(self, args, kwargs):
        self.args = args
        self.kwargs = kwargs
        self.output = self.kwargs.get('out')
        self._is_device_array = [self.is_device_array(a) for a in self.args]
        self._need_device_conversion = not any(self._is_device_array)
        # Normalize: device arrays pass through, host values become ndarrays.
        self.norm_inputs = [a if on_device else np.array(a)
                            for a, on_device in zip(self.args,
                                                    self._is_device_array)]

    def adjust_input_types(self, indtypes):
        """Cast inputs to the dtypes required by the selected kernel.

        Mutates only the entries of `norm_inputs` whose dtype differs from
        the requested one; raises TypeError for values lacking `.astype()`.
        """
        for idx, (required, val) in enumerate(zip(indtypes, self.norm_inputs)):
            if required == val.dtype:
                continue
            if not hasattr(val, 'astype'):
                msg = ("compatible signature is possible by casting but "
                       "{0} does not support .astype()").format(type(val))
                raise TypeError(msg)
            # Cast to the kernel's expected dtype.
            self.norm_inputs[idx] = val.astype(required)

    def allocate_outputs(self, schedule, outdtype):
        """Select or allocate the device array receiving the result."""
        if self._need_device_conversion or self.output is None:
            self.kernel_returnvalue = self.device_array(
                shape=schedule.output_shapes[0], dtype=outdtype)
        else:
            self.kernel_returnvalue = self.output

    def prepare_kernel_parameters(self):
        """Ensure every input resides on the device before the launch."""
        params = [inp if on_device else self.to_device(inp)
                  for inp, on_device in zip(self.norm_inputs,
                                            self._is_device_array)]
        assert all(self.is_device_array(p) for p in params)
        self.kernel_parameters = params

    def post_process_result(self):
        """Return the result, copying it back to the host when the caller
        supplied no device arrays."""
        if self._need_device_conversion:
            return self.to_host(self.kernel_returnvalue, self.output)
        if self.output is None:
            return self.kernel_returnvalue
        return self.output

    def prepare_inputs(self):
        # Optional backend hook; the default is a no-op.
        pass

    def launch_kernel(self, kernel, nelem, args):
        raise NotImplementedError

    def is_device_array(self, obj):
        raise NotImplementedError

    def to_device(self, hostary):
        raise NotImplementedError

    def device_array(self, shape, dtype):
        raise NotImplementedError
| StarcoderdataPython |
112159 | """Tests for privacy.schema.embed"""
from privacy.schema import embeds
def test_embed_request():
    """EmbedRequest keeps its token and css and defaults expiration to None."""
    request = embeds.EmbedRequest(token="<PASSWORD>", css="https://www.not_a.url")
    assert request.token == "<PASSWORD>"
    assert request.css == "https://www.not_a.url"
    assert request.expiration is None
| StarcoderdataPython |
3395302 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# import subfolders here
from utilsJ.Slack.hooks import msger, whisp
# should remove above import, bad practice
# from utilsJ.Behavior import glm2afc, plotting, models, ComPipe
# import sys
# import os
# import numpy as np
# import pandas as pd
# #import swifter
# import matplotlib.pyplot as plt
# import seaborn as sns
# import pickle
# #import statsmodels.api as sm
# #from statsmodels.stats.proportion import proportion_confint
# import tqdm
# #if notebook:
# from IPython.core.display import display, HTML
# display(HTML("<style>.container { width:90% !important; }</style>"))
| StarcoderdataPython |
47826 | <filename>pip_update.py<gh_stars>10-100
import re
import os
import sys
import shutil
import subprocess
def pip_update():
    """Upgrade every package listed in requirements.txt via pip.

    On Ubuntu (detected from /etc/os-release) the wxPython requirement is
    rewritten to install a prebuilt wheel from the wxPython extras index
    matching the detected release.
    """
    # Detect Ubuntu and extract the <major>.<minor> release for wxPython extras.
    ubuntu_release = ''
    os_release_file = '/etc/os-release'
    if os.path.exists(os_release_file):
        with open(os_release_file) as release_info:
            for line in release_info:
                line = line.strip()
                if line.startswith('VERSION_ID'):
                    digits_only = re.sub('[^0-9.]', '', line)
                    ubuntu_release = '.'.join(digits_only.split('.')[:2])
                    break
    # Upgrade each requirement individually.
    with open('requirements.txt') as requirements:
        for requirement in requirements:
            requirement = requirement.strip()
            if ubuntu_release and 'wxPython' in requirement:
                # Pull the Ubuntu-specific prebuilt wheel instead of sdist.
                requirement = '-f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-{} wxPython'.format( ubuntu_release )
            cmd = [sys.executable, '-m', 'pip', 'install', '--upgrade', *requirement.split()]
            print( ' '.join( cmd ) )
            subprocess.check_call( cmd )
def process_dependencies():
    """Copy shared modules into every subproject that declares them.

    Each subdirectory of the current directory may contain a Dependencies.py
    consisting of plain ``import <module>`` lines; every referenced
    ``<module>.py`` is copied from the current directory into that
    subdirectory.
    """
    with os.scandir('.') as entries:
        for entry in entries:
            if not entry.is_dir():
                continue
            dep_path = os.path.join(entry.path, 'Dependencies.py')
            if not os.path.exists(dep_path):
                continue
            with open(dep_path) as dep_file:
                for line in dep_file:
                    parts = line.strip().split()
                    # Only plain "import <name>" lines are honoured.
                    if len(parts) != 2 or parts[0] != 'import':
                        continue
                    module_file = parts[1] + '.py'
                    cpy_args = [module_file, os.path.join(entry.path, module_file)]
                    print( 'cp', *cpy_args )
                    shutil.copyfile( *cpy_args )
if __name__ == '__main__':
    # Entry point: the pip upgrade pass is currently disabled; only the
    # dependency copy step runs. Re-enable pip_update() to upgrade packages.
    #pip_update()
    process_dependencies()
| StarcoderdataPython |
3291136 | from torch.nn import functional as F
import torch
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
import gflags
FLAGS = gflags.FLAGS
gflags.DEFINE_string('filename', None, '')
palette = ['83E76B', 'CDE584', 'DEDB6A', 'F39D58', 'C44028']
#palette = ['EA1B20', '121111']
#palette = ['1E5652']
def save_images(images, name, nrow, ncol):
    """Tile `images` into an nrow x ncol grid, upsample each 32x with
    nearest-neighbour interpolation, and save the resulting figure to `name`.

    NOTE(review): assumes each image is an H x W x C array-like — confirm.
    NOTE(review): a 1xN or Nx1 grid gives a 1-D axes array, so axes[i][j]
    would fail there; callers appear to use only 1x1 grids — confirm.
    """
    fig, axes = plt.subplots(nrow, ncol)
    nplot = len(images)
    assert nrow * ncol == nplot, f'{nrow}, {ncol}, {nplot}'
    for i in range(nrow):
        for j in range(ncol):
            c = i * ncol + j
            image = images[c]
            # plt.subplots returns a bare Axes (not an array) for a 1x1 grid.
            if nplot == 1 and i == 0 and j == 0:
                ax = axes
            else:
                ax = axes[i][j]
            # clr = np.array(colors.to_rgb(f'#{palette[c % len(palette)]}'))
            #spot = 1. * (image < 0.95)
            #clr_image = np.ones_like(image) * clr
            #spot_image = np.ones_like(image)
            #image = (1 - spot) * spot_image + spot * clr_image
            image_tensor = torch.tensor(image)
            # Add a batch axis and move channels first (NCHW) for interpolate.
            image_tensor = image_tensor.unsqueeze(0)
            image_tensor = image_tensor.permute(0, 3, 1, 2)
            upsampled = F.interpolate(
                image_tensor, scale_factor=32, mode='nearest')
            # Back to HWC for imshow.
            upsampled = upsampled.permute(0, 2, 3, 1).squeeze(0)
            ax.imshow(upsampled.numpy())
            #ax.imshow(image, norm=None)
            # ax.axis('off')
    plt.subplots_adjust(wspace=0, hspace=0)
    fig.savefig(name)
    plt.close('all')
def load(filename):
    """Return the array stored under ``arr_0`` in the ``.npz`` file at
    *filename*."""
    assert filename.endswith('.npz'), filename
    archive = np.load(filename)
    return archive['arr_0']
def main():
    """Load the array named by --filename and save its first image to
    replot.png as a single-cell grid."""
    data = load(FLAGS.filename)
    save_images(data[:1], 'replot.png', 1, 1)
if __name__ == '__main__':
    # Parse gflags-style command-line arguments before running.
    FLAGS(sys.argv)
    main()
| StarcoderdataPython |
1713206 | from exchange_cash.exchange import Exchange
class Cash:
    """An amount of money in USD cents, convertible via an Exchange."""

    def __init__(self, cents: int, exchange: Exchange):
        self._exchange = exchange
        self._cents = cents

    @property
    def cents(self):
        """The raw amount in cents."""
        return self._cents

    def in_(self, currency: str):
        """Return a new Cash holding this amount converted from USD into
        `currency` at the exchange's current rate."""
        rate = self._exchange.rate('USD', currency)
        return Cash(self._cents * rate, self._exchange)

    def to_string(self):
        """Render the cent amount as a plain string."""
        return str(self._cents)
| StarcoderdataPython |
3218849 | import sqlite3
from Terminal import Terminal
from LoginScreen import LoginScreen
from RegisterScreen import RegisterScreen
from WelcomeScreen import WelcomeScreen
from MainMenuScreen import MainMenuScreen
from PostScreen import PostScreen
from SearchForPostsScreen import SearchForPostsScreen
from PostQuery import QuestionQuery
from PostQuery import AnswerQuery
from Vote import Vote
from PostEditScreen import PostEditScreen
from BadgeScreen import BadgeScreen
from TagScreen import TagScreen
from MarkAccepted import AcceptedAnswer
#dbName = 'Miniproject_1.db'
def check_priv(dbName, uid):
    """
    Check the database to see if the user has the title of privileged.
    Parameters:
        dbName: String. Path to the SQLite database file.
        uid: 4 character string. Unique ID of the user.
    Returns:
        bool: True when `uid` matches a row in the `privileged` table,
        False otherwise.
    """
    # Connect to the database; close it even if the query raises so the
    # file handle is never leaked (the original leaked on error).
    db = sqlite3.connect(dbName)
    try:
        cur = db.cursor()
        # Parameterized query; fetchone() is None when no row matches.
        cur.execute("SELECT uid FROM privileged WHERE ? = uid", (uid,))
        row = cur.fetchone()
    finally:
        db.close()
    return row is not None
if __name__ == "__main__":
    # Program entry point: drives the welcome/login flow, then the main menu
    # (post, search, logout, exit) until the user quits.
    terminal = Terminal()
    #Main program loop, includes the welcome screen and login sequence. This loop handles cases such as users logging out and back into another account.
    exit = False
    while not exit:
        #prints the welcome screen, which lets users login, register, or exit the program
        welcomeScreen = WelcomeScreen(terminal)
        isUser = welcomeScreen.printScreen()
        #open the login screen and log the user in. They can return back if they want to too.
        if isUser:
            #log the user in
            uid = LoginScreen(terminal).log_in()
            if uid is None:
                continue
            #checks if the user is a privileged user
            priv = check_priv(terminal.getDBName(), uid)
            #testing below
            #print(priv)
        #funny tidbit, the statement "not isUser" returns true if isUser is not True, even if isUser is NoneType.
        elif isUser == False:
            #register, then log in
            uid = RegisterScreen(terminal, terminal.getDBName()).printScreen()
            if uid is None:
                continue
            priv = check_priv(terminal.getDBName(), uid)
        else:
            #Quitting the program, leads to a goodbye message outside of loop.
            break
        #Input loop for command choice.
        while True:
            #prints the menue and returns the choice the user makes as an int. error handling and processing takes place in menu.py, so
            #there's no need to worry about it here
            menu = MainMenuScreen(terminal).printScreen()
            #post question
            if menu == 0:
                PostScreen(terminal, uid).printQuestionScreen()
            #search for posts
            elif menu == 1:
                #PseudoCode for how searchforposts works
                #Grab the post to perform the action on,
                #Returned as a tuple in the form (title, votecount, answer count, body, pid) for questions
                #Returned as a tuple in the form (title, votecount, body, pid) for answers
                #difference is isinstance(x, QuestionQuery)
                post = SearchForPostsScreen(terminal).printScreen()
                #while the user is still using the menue
                while True:
                    #clean terminal and print title of post
                    if post is None:
                        break
                    terminal.clear()
                    terminal.printCenter("Title: "+ post[0])
                    #if its a question
                    if isinstance(post, QuestionQuery):
                        #Options are:
                        #reply to post
                        #vote on post
                        #exit to menue
                        #if the user is privileged, also includes:
                        #edit post
                        #give badge
                        #add tag
                        #special line for printing the body, since the location of the body in the tuple is [3], not [2]
                        terminal.printCenter("Body: " + post[3])
                        print("\n")
                        #print the options for the user
                        print("1) Reply to the post\n2) Upvote the post")
                        #privileged user actions only
                        if priv:
                            print("3) Edit Post\n4) Give Badge\n5) Add Tag\n6) Exit")
                            choice = input("Please select which action to take: ")
                            #reply to post
                            if choice == '1':
                                terminal.clear()
                                PostScreen(terminal, uid).printAnswerScreen(post[4])
                            #upvote post. Checks to see if user has voted on the post before adding vote
                            #this simplifies error handling by not allowing errors
                            elif choice == '2':
                                v = Vote(terminal.getDBName())
                                if v.check_vote(post[4], uid) == False:
                                    v.vote(post[4], uid)
                                    input("Vote placed. Press enter to continue:")
                                else:
                                    input("You have already voted on this post. Press enter to continue:")
                            #TODO: edits posts (needs fix)
                            elif choice == '3':
                                newPost = PostEditScreen(terminal, post).printScreen()
                                if newPost is not None:
                                    post = newPost
                            #TODO: adds badge (needs fix)
                            elif choice == '4':
                                BadgeScreen(terminal, post).printScreen()
                            #TODO: adds badge (needs fix)
                            elif choice == '5':
                                TagScreen(terminal, post).printScreen()
                            #returns back to main menu
                            elif choice == '6':
                                break
                            else:
                                input("Invalid input. Press enter to continue:")
                        else:
                            print("3) Exit")
                            choice = input("Please select which action to take: ").strip()
                            #reply to post
                            if choice == '1':
                                terminal.clear()
                                PostScreen(terminal, uid).printAnswerScreen(post[4])
                            #upvote post. Checks to see if user has voted on the post before adding vote
                            #this simplifies error handling by not allowing errors
                            elif choice == '2':
                                v = Vote(terminal.getDBName())
                                if v.check_vote(post[4], uid) == False:
                                    v.vote(post[4], uid)
                                    input("Vote placed. Press enter to continue:")
                                else:
                                    input("You have already voted on this post. Press enter to continue:")
                            #exit back to main menu
                            elif choice == '3':
                                break
                            else:
                                input("Invalid input. Press enter to continue:")
                    else:
                        #Options are:
                        #vote on the answer
                        #exit to menu
                        #If the user is privileged, also includes:
                        #edit post
                        #give badge
                        #add tag
                        #mark as the accepted answer
                        #print body of post, special due to differences in QuestionQuery and AnswerQuery return
                        terminal.printCenter("Body: " + post[2])
                        print("\n")
                        #print the options for the user
                        print("1) Upvote the post")
                        #for privileged users:
                        if priv:
                            #additional commands
                            print("2) Edit Post\n3) Give Badge\n4) Add Tag\n5) Mark Accepted Answer\n6) Exit")
                            #user choice of commands
                            choice = input("Please select which action to take: ").strip()
                            #upvote post. Checks to see if user has voted on the post before adding vote
                            #this simplifies error handling by not allowing errors
                            if choice == '1':
                                v = Vote(terminal.getDBName())
                                if v.check_vote(post[3], uid) == False:
                                    v.vote(post[3], uid)
                                    input("Vote placed. Press enter to continue:")
                                else:
                                    input("You have already voted on this post. Press enter to continue:")
                            #TODO: edits posts (needs fix)
                            elif choice == '2':
                                newPost = PostEditScreen(terminal, post).printScreen()
                                if(newPost is not None):
                                    post = newPost
                            #TODO: add badge (needs fix)
                            elif choice == '3':
                                BadgeScreen(terminal, post).printScreen()
                            #TODO: add tags (needs fix)
                            elif choice == '4':
                                TagScreen(terminal, post).printScreen()
                            #TODO: Mark accepted answer
                            elif choice == '5':
                                #TODO: mark accepted answer
                                AcceptedAnswer(terminal, post[3]).acceptAnswer()
                                input("Successfully set the answer as the accepted answer. Press enter to continue:")
                            #exit back to main menu
                            elif choice == '6':
                                break
                            else:
                                input("Invalid input. Press enter to continue:")
                        #non-privileged users
                        else:
                            #print options, only ones are add vote and exit
                            print("2) Exit")
                            #user choice of commands
                            choice = input("Please select which action to take: ").strip()
                            #upvote post. Checks to see if user has voted on the post before adding vote
                            #this simplifies error handling by not allowing errors
                            if choice == '1':
                                v = Vote(terminal.getDBName())
                                if v.check_vote(post[3], uid) == False:
                                    v.vote(post[3], uid)
                                    input("Vote placed. Press enter to continue:")
                                else:
                                    input("You have already voted on this post. Press enter to continue:")
                            #return back to main menu
                            elif choice == '2':
                                break
                            else:
                                input("Invalid input. Press enter to continue:")
            #end of search posts, including all of the options for selected post
            #log out of account
            elif menu == 2:
                break
            #exit program
            elif menu == 3:
                exit = True
                break
        #end of input loop
    #end of main program loop
    #When the user quits the program.
    print("Goodbye!")
35270 | import glob,os,sys
class Path():
    '''
    Iterate over the full paths of files under `source` matching the glob
    `pattern`.

    >>> paths = Path(source, "*.txt")
    >>> for path in paths:
    ...     lines = Stream(path)
    ...     for line in lines:
    ...         print(line)
    '''
    def __init__(self, source, pattern):
        self.source = source
        self.pattern = pattern
    def __getpaths__(self):
        # glob.glob already returns paths including the `source` component.
        # The original re-joined each result onto the pattern-containing
        # string, yielding nonexistent paths like "src/*.txt/src/a.txt".
        pattern = os.path.join(self.source, self.pattern)
        for filename in glob.glob(pattern):
            yield filename
    def __iter__(self):
        return self.__getpaths__()
class Stream():
    '''
    Callable file reader that yields lines, or sentences when a sentencizer
    is configured.

    >>> lines = Stream()
    >>> for line in lines(path):
    ...     print(line)
    '''
    def __init__(self,
                 encoding=None,
                 sentencizer=None,
                 text_filters=None
                 ):
        # `encoding` may be a codec name or a callable mapping path -> codec.
        self.encoding = encoding
        self.__sentencizer = sentencizer
        # `None` default avoids the original mutable-default-argument pitfall.
        self.__text_filters = text_filters if text_filters is not None else []
    def __call__(self, path):
        """Yield sentences (when a sentencizer is set) or raw lines from
        the file at `path`."""
        with open(path, 'r',
                  encoding=(
                      self.encoding(path)
                      if callable(self.encoding)
                      else self.encoding)
                  ) as fd:
            # either process sentence by sentence ...
            if self.__sentencizer:
                text = self.preprocess_text(fd.read())
                for sentence in self.__sentencizer(text):
                    yield sentence
            # ... or line by line
            else:
                for line in fd:
                    yield line
    def preprocess_text(self, text):
        """Apply each configured text filter to `text`, in order."""
        for text_filter in self.__text_filters:
            text = text_filter(text)
        return text
class Lemmatizer():
    """Wrap a lemmatizer callable and optionally filter lemmas by POS tag."""
    def __init__(self, lemmatizer=None,
                 allowed_tags=set(), disallowed_tags=set()):
        self.lemmatize = lemmatizer
        # Effective whitelist: requested tags minus explicitly banned ones.
        self.allowed_tags = set(allowed_tags) - set(disallowed_tags)
    def __call__(self, data):
        """Yield lemmas for `data` (a string or an iterable of strings).

        When the effective whitelist is empty, no filtering is applied and
        every lemma is yielded (original behavior, preserved).
        """
        if isinstance(data, (str)):
            data = [data]
        # (removed a stray no-op `self.allowed_tags` expression statement)
        for lemma, pos in self.lemmatize(data, pos=True):
            # An empty whitelist disables filtering entirely.
            if not self.allowed_tags or pos in self.allowed_tags:
                yield lemma
class Tokenizer():
    """Thin callable wrapper around a tokenize function."""
    def __init__(self, tokenizer=None):
        self.tokenize = tokenizer
    def __call__(self, data):
        """Delegate tokenization of `data` to the wrapped function."""
        return self.tokenize(data)
class CharCleaner():
    """Apply a pipeline of character-level cleaning functions in order."""
    def __init__(self, cleaners=None):
        self.cleaners = cleaners
    def __call__(self, data):
        """Run `data` through every cleaner, feeding each the previous result."""
        result = data
        for clean in self.cleaners:
            result = clean(result)
        return result
class TokenCleaner():
    """Apply a pipeline of token-level cleaning functions in order."""
    def __init__(self, cleaners=None):
        self.cleaners = cleaners
    def __call__(self, data):
        """Run `data` through every cleaner, feeding each the previous result."""
        result = data
        for clean in self.cleaners:
            result = clean(result)
        return result
class LemmaCleaner():
    """Apply a pipeline of lemma-level cleaning functions in order."""
    def __init__(self, cleaners=None):
        self.cleaners = cleaners
    def __call__(self, data):
        """Run `data` through every cleaner, feeding each the previous result."""
        result = data
        for clean in self.cleaners:
            result = clean(result)
        return result
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.