id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1690587 | <filename>RL/algorithms/random.py<gh_stars>1-10
import wandb
from RL import argparser as p
from RL import register_algo
from RL.agents.console_print_agent import ConsolePrintAgent
from RL.agents.random_play_agent import RandomPlayAgent
from RL.agents.reward_scaling_agent import RewardScalingAgent
from RL.agents.seeding_agent import SeedingAgent
from RL.agents.simple_render_agent import SimpleRenderAgent
from RL.agents.stats_recording_agent import StatsRecordingAgent
from .standard_wrap_algo import StandardEnvWrapAlgo
# Command-line options for the Random baseline algorithm, registered on the
# shared global parser imported from the RL package.
p.add_argument('--seed', default=None, type=int)
p.add_argument('--reward_scaling', default=1, type=float)
p.add_argument('--cost_scaling', default=1, type=float)
p.add_argument('--record_unscaled', action='store_true')
p.add_argument('--gamma', default=0.99, type=float)
p.add_argument('--cost_gamma', default=1.0, type=float)
p.add_argument('--record_discounted', action='store_true')
p.add_argument('--frameskip', type=int, default=1)
p.add_argument('--no_render', action='store_true')
class Random(StandardEnvWrapAlgo):
    """Baseline algorithm that plays uniformly random actions.

    setup() wires together the agent pipeline: seeding, reward/cost scaling,
    random play, statistics recording, periodic console printing and,
    unless --no_render is given, simple rendering.
    """

    def setup(self):
        # Parse the shared global parser; options registered on `p` anywhere
        # in the package are available here.
        args = p.parse_args()
        self.register_agent(SeedingAgent("SeedingAgent", self, args.seed))
        self.register_agent(RewardScalingAgent(
            "RewardScaler", self, reward_scaling=args.reward_scaling, cost_scaling=args.cost_scaling))
        # episode_type 1: presumably marks these as training-type episodes —
        # TODO confirm against the manager's definition.
        self.manager.episode_type = 1
        self.register_agent(RandomPlayAgent(
            "RandomAgent", self, play_for_steps=None))
        # NOTE(review): args.RPE_av_over / args.RPS_av_over are not added in
        # this file; presumably registered elsewhere on the shared parser —
        # confirm, otherwise parse_args fails with AttributeError.
        self.register_agent(StatsRecordingAgent("StatsRecorder", self, reward_scaling=args.reward_scaling, cost_scaling=args.cost_scaling, record_unscaled=args.record_unscaled,
                                                gamma=args.gamma, cost_gamma=args.cost_gamma, record_undiscounted=not args.record_discounted, frameskip=self.frameskip, RPE_av_over=args.RPE_av_over, RPS_av_over=args.RPS_av_over))  # type: StatsRecordingAgent
        # The console printer reads back metrics wandb has already recorded.
        # NOTE(review): wandb.run.history._data is a private wandb API and may
        # break across wandb versions.
        self.register_agent(ConsolePrintAgent("ConsolePrinter", self, lambda: {
            'Steps': self.manager.num_steps,
            'Episodes': self.manager.num_episodes,
            'Len': self.manager.num_episode_steps,
            'R': wandb.run.history._data['Episode/Reward'],
            f'R({args.RPE_av_over})': wandb.run.history._data[f'Average/RPE (Last {args.RPE_av_over})'],
            'C': wandb.run.history._data['Episode/Cost']
        }, lambda: {
            'Total Steps': self.manager.num_steps,
            'Total Episodes': self.manager.num_episodes,
            'Average RPE': wandb.run.history._data['Average/RPE'],
            'Average CPE': wandb.run.history._data['Average/CPE'],
            'Average RPS': wandb.run.history._data['Average/RPS'],
            'Average CPS': wandb.run.history._data['Average/CPS']
        }))
        if not args.no_render:
            self.register_agent(SimpleRenderAgent("SimpleRenderAgent", self))


# Make the algorithm available under the name 'Random'.
register_algo('Random', Random)
| StarcoderdataPython |
# Compute and print the area of a circle of radius 5.
pi = 3.14
raio = 5
# Bug fix: the original computed `pi * raio` (which is neither the area nor
# the circumference); the area of a circle is pi * r**2.
area = pi * raio ** 2
print(area)
| StarcoderdataPython |
3346265 | from flask import render_template, Blueprint
# Blueprint grouping the spelling app's routes.
spelling_blueprint = Blueprint('spelling',__name__)


@spelling_blueprint.route('/')
@spelling_blueprint.route('/spelling')
def index():
    # Both "/" and "/spelling" serve the same landing page.
    return render_template("index.html")
| StarcoderdataPython |
149141 | from flask import make_response, g
from .....common.corpora_orm import CollectionVisibility
from .....common.entities import Collection
from .....common.utils.exceptions import ConflictException
from .....api_server.db import dbconnect
from .....common.utils.exceptions import ForbiddenHTTPException
from backend.corpora.lambdas.api.v1.collection import _owner_or_allowed
@dbconnect
def post(collection_uuid: str, body: object, user: str):
    """Publish the private collection ``collection_uuid`` on behalf of *user*.

    Raises:
        ForbiddenHTTPException: the collection does not exist in PRIVATE
            visibility or the user may not act on it.
        ConflictException: the collection has no live (non-tombstoned)
            datasets.

    Returns a 202 response carrying the collection id and its visibility.
    """
    db_session = g.db_session
    collection = Collection.get_collection(
        db_session,
        collection_uuid,
        CollectionVisibility.PRIVATE,
        owner=_owner_or_allowed(user),
    )
    if not collection:
        raise ForbiddenHTTPException()
    # Generator instead of a throwaway list.  Note all() of an empty iterable
    # is True, so a collection with zero datasets is also rejected here.
    if all(dataset.tombstone for dataset in collection.datasets):
        # Bug fix: the message read "a least" instead of "at least".
        raise ConflictException(detail="The collection must have at least one dataset.")
    data_submission_policy_version = body["data_submission_policy_version"]
    collection.publish(data_submission_policy_version=data_submission_policy_version)
    return make_response({"collection_uuid": collection.id, "visibility": collection.visibility}, 202)
| StarcoderdataPython |
3242637 | """
The basic processing unit in a :class:`~.pipeline.Pipeline`.
"""
from abc import ABCMeta, abstractmethod
class Stage(metaclass=ABCMeta):
    """Abstract base class for one processing step of a
    :class:`~.pipeline.Pipeline`.
    """

    @abstractmethod
    def execute(self, context):
        """Run this stage's work.

        All input is read from, and all output written to, the shared
        ``context`` manager.

        Args:
            context (:class:`~.pipeline_context.PipelineContext`): State
                shared between every :class:`~.stage.Stage` of the
                :class:`~.pipeline.Pipeline`.
        """
| StarcoderdataPython |
4819924 | <gh_stars>0
from django.shortcuts import render
from django.shortcuts import redirect
from InvManage.forms import *
from InvManage.models import *
from InvManage.filters import VendorFilter
from django.http import JsonResponse
from InvManage.serializers import VendorSerializer
from InvManage.scripts.filters import *
from InvManage.scripts.helpers import create_event
from InvManage.scripts.helpers import generate_form_parameter_string
from django.http import HttpResponse, JsonResponse
def create_vendor_view(request):
    """
    Creates a vendor on ``POST`` request, and returns a vendor creation form on ``GET`` request.

    .. http:get:: /vendor

        Gets the vendor creation form.

        **Example request**:

        .. sourcecode:: http

            GET /vendor/ HTTP/1.1
            Host: localhost:8000
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/html; charset=utf-8

        :reqheader Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
        :statuscode 200: Vendor creation form received successfully.

    .. http:post:: /vendor

        Creates a vendor.

        **Example request**:

        .. sourcecode:: http

            POST /vendor/ HTTP/1.1
            Host: localhost:8000
            Content-Type: multipart/form-data;

        :form vend-name: ``Lug Vendor``
        :form vend-identifier: ``TBPN-02692``
        :form vend-gstin: ``89AAC254254F2``
        :form ship-title: ``AKATSUKI``
        :form ship-name: ``<NAME>``
        :form ship-phone: ``679 166-3127``
        :form ship-address: ``Nonummy Avenue``
        :form ship-city: ``Chung Cheong``
        :form ship-state: ``Guanacaste``
        :form ship-country: ``tellusidnunc.net``
        :form ship-website: ``<EMAIL>``
        :form ship-post: ``8949``
        :form pdform-currency: ``DEM``
        :form pdform-minorder: ``2000``
        :form pdform-contactperson: ``<NAME>``
        :form pdform-refcode: ``CUST000124``
        :form pdform-transportmode: ``Express``
        :form com-language: ``German``
        :form com-phone: ``936 651-4817``
        :form com-email: ``<EMAIL>``
        :form com-fax: ``323 555 1234``
        :form bank-name: ``FIRST FLORIDA INTEGRITY BANK``
        :form bank-branch: ``Bavaria``
        :form bank-region: ``Bayem``
        :form bank-route: ``67016325``
        :form bank-number: ``42543251393``
        :form bank-acctype: ``Current``
        :form bank-iban: ``DE6233542``
        :form bank-code: ``BA54354354``
        :form bank-branchcode: ``BA35435823``
        :resheader Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryLTR88aZAnBUSE7mv
        :statuscode 302: Redirects to ``/vendor``.
    """
    if request.method == 'GET':
        # Create a list of vendors for the sidebar/listing of the form page.
        # NOTE(review): enumerate index `i` is unused.
        vendors = []
        for i, vend in enumerate(Vendor.objects.all()):
            vendors.append(
                {'id': vend.id, 'name': vend.name, 'code': vend.identifier})
        return render(request, 'vendor.html', { 'vendor_form': VendorForm(),
                                                'address_form': ShippingAddressForm(),
                                                'com_form': CommunicationForm(),
                                                'purchasing_form': PurchaseDataForm(),
                                                'account_form': BankAccountForm(),
                                                'vendors': vendors,
                                                'requested_view_type': 'create'})
    if request.method == 'POST':
        data = {}
        # Each sub-form is bound with its own prefix matching the field names
        # documented above (vend-, ship-, com-, pdform-, bank-).
        vendor_form = VendorForm(request.POST, prefix='vend')
        address_form = ShippingAddressForm(request.POST, prefix='ship')
        com_form = CommunicationForm(request.POST, prefix='com')
        purchasing_form = PurchaseDataForm(request.POST, prefix='pdform')
        account_form = BankAccountForm(request.POST, prefix='bank')
        print(request.POST)
        if vendor_form.is_valid():
            data.update(vendor_form.cleaned_data)
        # Create address instance
        if address_form.is_valid():
            add = ShippingAddress.objects.create(**address_form.cleaned_data)
        # Create communication instance
        if com_form.is_valid():
            print(com_form.is_valid())
            com = Communication.objects.create(**com_form.cleaned_data)
        # Create purchase data instance
        if purchasing_form.is_valid():
            pur = PurchaseData.objects.create(**purchasing_form.cleaned_data)
        # Create account instance
        if account_form.is_valid():
            acc = BankAccount.objects.create(**account_form.cleaned_data)
        # NOTE(review): if any form above is invalid, the corresponding local
        # (add/com/pur/acc) or data['name'] is never bound and the create call
        # below raises NameError/KeyError — confirm validity is guaranteed
        # upstream or add error handling.
        # NOTE(review): unlike update_vendor_view, gstin from the form is not
        # stored here — confirm whether that is intentional.
        new_vendor = Vendor.objects.create( name=data['name'],
                                            identifier=data['identifier'],
                                            address=add,
                                            communication=com,
                                            bankaccount=acc,
                                            purchasedata=pur
                                            )
        create_event(new_vendor,'Created')
        return redirect('vendor')
def update_vendor_view(request):
    """
    Updates a vendor on ``POST`` request and returns the vendor update form for ``GET`` request.

    .. http:get:: /vendor/update

        Gets the vendor update form whose primary key matches the query parameter ``pk``.

        **Example request**:

        .. sourcecode:: http

            GET /vendor/update HTTP/1.1
            Host: localhost:8000
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9

        :query pk: The primary key of the vendor.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/html; charset=utf-8

        :reqheader Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
        :statuscode 200: Vendor update form received successfully.

    .. http:post:: /vendor/update

        Updates the vendor.

        **Example request**:

        .. sourcecode:: http

            POST /vendor/update HTTP/1.1
            Host: localhost:8000
            Content-Type: multipart/form-data;

        :form vend-name: ``Lug Vendor``
        :form vend-identifier: ``TBPN-02692``
        :form vend-gstin: ``89AAC254254F2``
        :form ship-title: ``AKATSUKI``
        :form ship-name: ``<NAME>``
        :form ship-phone: ``679 166-3127``
        :form ship-address: ``Nonummy Avenue``
        :form ship-city: ``Chung Cheong``
        :form ship-state: ``Guanacaste``
        :form ship-country: ``tellusidnunc.net``
        :form ship-website: ``<EMAIL>``
        :form ship-post: ``8949``
        :form pdform-currency: ``DEM``
        :form pdform-minorder: ``1000``
        :form pdform-contactperson: ``<NAME>``
        :form pdform-refcode: ``CUST000124``
        :form pdform-transportmode: ``Express``
        :form com-language: ``German``
        :form com-phone: ``936 651-4817``
        :form com-email: ``<EMAIL>``
        :form com-fax: ``323 555 1234``
        :form bank-name: ``FIRST FLORIDA INTEGRITY BANK``
        :form bank-branch: ``Bavaria``
        :form bank-region: ``Bayem``
        :form bank-route: ``67016325``
        :form bank-number: ``42543251393``
        :form bank-acctype: ``Current``
        :form bank-iban: ``DE6233542``
        :form bank-code: ``BA54354354``
        :form bank-branchcode: ``BA35435823``
        :resheader Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryLTR88aZAnBUSE7mv
        :statuscode 302: Redirects to ``/vendor``.
    """
    if request.method == 'GET':
        pk = request.GET.get('pk')
        vendor = Vendor.objects.get(id=pk)
        # Pre-populate every sub-form from the vendor and its related rows.
        return render(request, 'vendor/update_vendor.html', { 'vendor_form': VendorForm(initial=vendor.__dict__),
                                                              'address_form': ShippingAddressForm(initial=vendor.address.__dict__),
                                                              'com_form': CommunicationForm(initial=vendor.communication.__dict__),
                                                              'purchasing_form': PurchaseDataForm(initial=vendor.purchasedata.__dict__),
                                                              'account_form': BankAccountForm(initial=vendor.bankaccount.__dict__),
                                                              'requested_view_type': 'update',
                                                              'pk':pk
                                                              })
    if request.method == 'POST':
        pk = request.POST.get('pk')
        print(pk)
        data = {}
        vendor_form = VendorForm(request.POST, prefix='vend')
        address_form = ShippingAddressForm(request.POST, prefix='ship')
        com_form = CommunicationForm(request.POST, prefix='com')
        purchasing_form = PurchaseDataForm(request.POST, prefix='pdform')
        account_form = BankAccountForm(request.POST, prefix='bank')
        print(request.POST)
        if vendor_form.is_valid():
            data.update(vendor_form.cleaned_data)
        # Update address instance
        if address_form.is_valid():
            add = ShippingAddress.objects.create(**address_form.cleaned_data)
        # Update communication instance
        if com_form.is_valid():
            print(com_form.is_valid())
            com = Communication.objects.create(**com_form.cleaned_data)
        # Update purchase data instance
        if purchasing_form.is_valid():
            pur = PurchaseData.objects.create(**purchasing_form.cleaned_data)
        # Update account instance
        if account_form.is_valid():
            acc = BankAccount.objects.create(**account_form.cleaned_data)
        # NOTE(review): despite the "Update ..." comments, each branch above
        # creates a brand-new related row and repoints the vendor at it; the
        # previously linked rows are left orphaned — confirm intended.
        # NOTE(review): invalid forms leave add/com/pur/acc or data[...] unbound
        # and the .update() below raises NameError/KeyError.
        Vendor.objects.filter(id=pk).update( name=data['name'],
                                             identifier=data['identifier'],
                                             gstin=data['gstin'],
                                             address=add,
                                             communication=com,
                                             bankaccount=acc,
                                             purchasedata=pur
                                             )
        create_event(Vendor.objects.get(id=pk),'Updated')
        return redirect('vendor')
def delete_vendor_view(request, pk):
    """
    Deletes the vendor with primary key ``pk`` on ``POST`` request.

    .. http:post:: /vendor/<str:object_id>/delete

        Deletes the vendor represented by the primary key ``object_id``,
        recording a 'Deleted' event for it first.

        :param object_id: Vendor primary key.
        :statuscode 302: Redirects to ``/vendors``.
        :statuscode 500: Vendor matching query does not exist.
    """
    if request.method == 'POST':
        doomed = Vendor.objects.get(id=pk)
        create_event(doomed,'Deleted')
        doomed.delete()
        return redirect('vendor')
def display_vendors_view(request):
    """
    Retrieves the list of vendors on ``GET`` request.

    .. http:get:: /vendors/

        Gets the list of all vendors.

        **Example request**:

        .. sourcecode:: http

            GET /vendors/ HTTP/1.1
            Host: localhost:8000
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9

        :form page: The page number of the vendors list.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/html; charset=utf-8

        :reqheader Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
        :statuscode 200: List of vendors received successfully.
    """
    if request.method == 'GET':
        vendors = Vendor.objects.all()
        # Persisted column ordering for the basic vendors table.
        state = FilterState.objects.get(name='Vendors_basic')
        column_list = change_column_position(request, state)
        myFilter = VendorFilter(request.GET, queryset=vendors)
        queryset = myFilter.qs
        number_of_objects = len(queryset)
        page_number = request.GET.get('page')
        # NOTE(review): `data` returned by paginate() is unused here.
        page_obj, data = paginate(queryset, myFilter, page_number)
        # Flatten each vendor (plus related communication/address fields)
        # into a plain dict for the template.
        dictionaries = []
        for obj in page_obj:
            objdata = { 'id': obj.pk,
                        'name': obj.name,
                        'identifier': obj.identifier,
                        'phone': obj.communication.phone,
                        'email': obj.communication.email,
                        'location': obj.address.city
                        }
            dictionaries.append(objdata)
        return render(request, 'vendor/vendor_contents.html', {'page_obj': page_obj,
                                                               'myFilter': myFilter,
                                                               'n_prod': number_of_objects,
                                                               'columns': column_list,
                                                               'dicts': dictionaries,
                                                               'url': request.build_absolute_uri('/vendors/')})
def get_vendor(request):
    """
    Returns the ``JSON`` serialized data of the requested vendor on ``GET`` request.

    .. http:get:: /get_vendor/

        Gets the JSON serialized data of the requested vendor.

        **Example request**:

        .. sourcecode:: http

            GET /get_vendor/ HTTP/1.1
            Host: localhost:8000
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9

        :param vendor_id: Vendor primary key.

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: application/json; charset=utf-8

            [
                {
                    "name": "<NAME>",
                    "identifier": "TBPN-02692",
                    "gstin": "89AAC4683897343",
                    "address": {
                        "name": "<NAME>",
                        "address": "Nonummy Avenue",
                        "city": "Chung Cheong",
                        "phone": "679 166-3127",
                        "state": "Guanacaste",
                        "country": "tellusidnunc.net",
                        "post": "8949"
                    }
                }
            ]

        :resheader Content-Type: application/json
        :statuscode 200: List of vendors received successfully.
        :statuscode 400: Bad request version
        :statuscode 500: Vendor matching query does not exist.
    """
    if request.method == 'GET':
        # vendor_id arrives as a query parameter, e.g. /get_vendor/?vendor_id=5
        vendor_id = request.GET.get('vendor_id')
        vendor = VendorSerializer(Vendor.objects.get(id=vendor_id))
        return JsonResponse(vendor.data) | StarcoderdataPython |
92650 | from armulator.armv6.opcodes.abstract_opcodes.bkpt import Bkpt
from armulator.armv6.opcodes.opcode import Opcode
from bitstring import BitArray
class BkptA1(Bkpt, Opcode):
    """ARM encoding A1 of the BKPT (breakpoint) instruction."""

    def __init__(self, instruction):
        Opcode.__init__(self, instruction)
        Bkpt.__init__(self)

    def is_pc_changing_opcode(self):
        # BKPT itself never writes the PC.
        return False

    @staticmethod
    def from_bitarray(instr, processor):
        # imm16 = imm12:imm4, zero-extended to 32 bits.
        # NOTE(review): imm32 is computed but not passed to the constructor —
        # confirm whether Bkpt is expected to re-decode it.
        imm32 = BitArray(bin="0000000000000000" + instr.bin[12:24] + instr.bin[-4:])
        # The condition field must be AL (0b1110); anything else is UNPREDICTABLE.
        if instr.bin[0:4] != "1110":
            # Bug fix: was a Python 2 print statement; the call form works on
            # both Python 2 and 3.  Returns None (implicitly) in this case.
            print("unpredictable")
        else:
            return BkptA1(instr)
| StarcoderdataPython |
187290 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import optparse
from proton import Message
from proton.handlers import MessagingHandler, Handler
from proton.reactor import Container
from random import randint
from time import time
class TaskHandler(Handler):
    """Timer task that grants more credit to a receiver each time it fires."""

    def __init__(self, receiver, credit):
        self.receiver = receiver  # receiver link to replenish
        self.credit = credit      # amount of credit granted per firing

    def on_timer_task(self, event):
        # Invoked by the reactor when the scheduled timer elapses.
        self.receiver.flow(self.credit)
class FortuneCookieService(MessagingHandler):
    """AMQP request/reply service (Python 2 code: print statements, unicode()).

    NOTE(review): despite the class name and loading fortune-cookie.txt,
    on_message replies with 'HELLO ' + request uppercased; self.cookies and
    self.upper are never used — this looks copy-pasted from a hello service.
    """

    def __init__(self, url):
        # prefetch=0: credit is managed manually, one message at a time.
        super(FortuneCookieService, self).__init__(prefetch=0)
        self.url = url
        with open("fortune-cookie.txt") as f:
            self.cookies = f.readlines()
        self.upper = len(self.cookies)-1  # unused (see class note)

    def on_start(self, event):
        # Start listening for incoming connections on the configured address.
        self.container = event.container
        self.acceptor = event.container.listen(self.url)

    def on_link_opened(self, event):
        # Grant one credit so the client may send exactly one request.
        if(event.link.is_receiver):
            event.receiver.flow(1)

    def on_message(self, event):
        request = event.message.body
        print "Hello-Service-AMQP received request [%s]" % request
        print "reply-to %s" % event.message.reply_to
        self.accept(event.delivery)
        # Reply on a dynamically created sender towards the reply-to address.
        sender = self.container.create_sender(event.message.reply_to)
        greeting = 'HELLO ' + request.upper()
        delivery = sender.send(Message(body=unicode(greeting)))
        # Remember the request link so we can re-credit it once the reply
        # is accepted (see on_accepted).
        delivery.context = event.link

    def on_accepted(self, event):
        # Reply settled: tear down the temporary sender and allow the next request.
        event.delivery.link.close()
        event.delivery.context.flow(1)
# CLI setup and entry point.
# NOTE(review): the description ("Appends Hello ...") matches a hello service,
# not a fortune-cookie service — confirm which behavior is intended.
parser = optparse.OptionParser(usage="usage: %prog",
                               description="Appends Hello to the request and sends the response in uppercase")
parser.add_option("-a","--address", default="localhost:5672/hello-service-amqp",
                  help="address for listening on client requests (default %default)")
opts, args = parser.parse_args()
# Run the reactor until interrupted.
Container(FortuneCookieService(opts.address)).run()
| StarcoderdataPython |
1709353 | # SPDX-License-Identifier: MIT
# Copyright (C) 2021 <NAME>
from rapidfuzz.cpp_string_metric import (
levenshtein,
normalized_levenshtein,
hamming,
normalized_hamming
)
| StarcoderdataPython |
3383072 |
# coding: utf-8
# In[1]:
# Exported from a Jupyter notebook (the "In[n]" markers are kept).
from sklearn import tree

# In[2]:
import pandas as pd
import numpy as np

# In[3]:
# Training data: expects a local CSV "a.csv" containing a "TQ" target column.
dataset = pd.read_csv("a.csv")

# In[4]:
X = dataset.drop(['TQ'] ,1)
Y = dataset[['TQ']]

# In[5]:
clf = tree.DecisionTreeClassifier()

# In[6]:
# Fit the classifier once at import time; pred() below reuses it.
clf = clf.fit(X,Y)
# In[125]:
def pred(arr):
    """Return the element of *arr* closest to the classifier's prediction.

    Predicts a value for the feature vector ``arr`` with the module-level
    ``clf``, then returns the entry of ``arr`` nearest to that prediction —
    unless that entry is 0, in which case ``max(arr)`` is returned.
    """
    prediction = clf.predict([arr])
    c = prediction[0]
    print(c)
    # Bug fix: the original did `arr - c`, which raises TypeError for the
    # plain Python list the caller passes; compute per-element distances
    # explicitly (this also still works for numpy arrays).
    distances = [abs(a - c) for a in arr]
    idx = distances.index(min(distances))
    if arr[idx] == 0:
        return max(arr)
    return arr[idx]
# In[126]:
def wait():
    """Busy-poll sema.txt until its first byte becomes 'p' (semaphore P op)."""
    while True:
        with open("sema.txt", "r") as fh:
            flag = fh.read(1)
        if flag == 'p':
            return
# In[127]:
def signal():
    """Release the file-based semaphore by overwriting sema.txt with 'c'."""
    with open("sema.txt", "w") as fh:
        fh.write("c")
# In[128]:
# Main service loop: wait for the file semaphore, read one comma-separated
# integer vector from s.txt, overwrite s.txt with the prediction, then
# release the semaphore.  Runs forever.
while(1):
    wait()
    f=open("s.txt","r")
    p=f.readline()
    f.close()
    p=list(map(int,p.split(',')))
    i=pred(p)
    f=open("s.txt","w")
    f.write(str(i))
    f.close()
    signal()
# In[114]:
# In[116]:
# In[117]:
| StarcoderdataPython |
49891 | <filename>molecule/resources/tests/all/test_common.py
import re
debian_os = ['debian', 'ubuntu']
rhel_os = ['redhat', 'centos']
def test_distribution(host):
    # The role only supports Debian- and RHEL-family distributions.
    assert host.system_info.distribution.lower() in debian_os + rhel_os
def test_repo_pinning_file(host):
    """APT pinning file for dnsdist exists with the expected content
    (Debian-family hosts only)."""
    if host.system_info.distribution.lower() in debian_os:
        f = host.file('/etc/apt/preferences.d/dnsdist')
        assert f.exists
        assert f.user == 'root'
        assert f.group == 'root'
        # Bug fix: the original called f.contains() without asserting the
        # result, so these content checks could never fail.
        assert f.contains('Package: dnsdist*')
        assert f.contains('Pin: origin repo.powerdns.com')
        assert f.contains('Pin-Priority: 600')
def test_package(host):
    # The dnsdist package must be installed.
    p = host.package('dnsdist')
    assert p.is_installed


def test_configuration(host):
    # Main configuration file deployed by the role.
    f = host.file('/etc/dnsdist/dnsdist.conf')
    assert f.exists


def test_service(host):
    # Using Ansible to mitigate some issues with the service test on debian-8
    s = host.ansible('service', 'name=dnsdist state=started enabled=yes')
    # changed == False proves the service was already started and enabled.
    assert s["changed"] is False


def test_tcp(host):
    # dnsdist must listen on 127.0.0.1:5300 over TCP ...
    tcp = host.socket('tcp://127.0.0.1:5300')
    assert tcp.is_listening


def test_udp(host):
    # ... and on the same address/port over UDP.
    udp = host.socket('udp://127.0.0.1:5300')
    assert udp.is_listening
def test_service_overrides(host):
    """Validate the systemd drop-in override for the dnsdist unit."""
    smgr = host.ansible("setup")["ansible_facts"]["ansible_service_mgr"]
    if smgr == 'systemd':
        fname = '/etc/systemd/system/dnsdist.service.d/override.conf'
        f = host.file(fname)
        assert f.exists
        f_string = f.content.decode()
        assert re.search(r'^LimitCORE=infinity$', f_string, re.MULTILINE) is not None
        # # Ensure a ExecStart override is preceded by a 'ExecStart=' reset instruction
        if re.search(r'^ExecStart=.+$', f_string, re.MULTILINE) is not None:
            assert re.search(r'^ExecStart=$(\r?\n)^ExecStart=.+$', f_string, re.MULTILINE) is not None
        # # Ensure a ExecStartPre override is preceded by a 'ExecStartPre=' reset instruction
        if re.search(r'^ExecStartPre=.+$', f_string, re.MULTILINE) is not None:
            assert re.search(r'^ExecStartPre=$(\r?\n)^ExecStartPre=.+$', f_string, re.MULTILINE) is not None | StarcoderdataPython |
3349013 | <reponame>michaeldayreads/f5-cccl<filename>f5_cccl/resource/ltm/test/test_pool_member.py
#!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from pprint import pprint as pp
from f5_cccl.resource.ltm.pool_member import IcrPoolMember
from f5_cccl.resource.ltm.pool_member import PoolMember
from mock import MagicMock
# import pdb
import pytest
@pytest.fixture
def pool_member_ipv6():
    # Placeholder fixture — not implemented yet; yields None.
    pass


@pytest.fixture
def pool_member_with_rd():
    # IPv4 member name carrying a %0 route-domain suffix before the port.
    member = {"name": "192.168.100.101%0:80"}
    return member


@pytest.fixture
def pool_member_with_rd_ipv6():
    # IPv6 member with route domain; note the '.' port separator for IPv6.
    member = {"name": "2001:0db8:3c4d:0015:0000:0000:abcd:ef12%0.80"}
    return member


@pytest.fixture
def bigip():
    # Mocked BIG-IP connection object.
    bigip = MagicMock()
    return bigip


@pytest.fixture
def pool():
    # Mocked parent pool for the members under test.
    return MagicMock()


@pytest.fixture
def bigip_members():
    # Canned member definitions loaded from bigip-members.json next to this file.
    members_filename = (
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     './bigip-members.json'))
    with open(members_filename) as fp:
        json_data = fp.read()
    json_data = json.loads(json_data)
    members = [m for m in json_data['members']]
    pp(json_data)
    return members
def test_create_bigip_member(pool, bigip_members):
    """Test the creation of IcrPoolMember from BIG-IP (iControl REST) data."""
    member_cfg = bigip_members[0]
    pp(bigip_members)
    pp(member_cfg)
    # pdb.set_trace()
    member = IcrPoolMember(
        pool=pool,
        **member_cfg
    )
    assert member
    # Test data: fields normalized from the raw BIG-IP payload.
    assert member.data
    assert member.data['name'] == "192.168.200.2:80"
    assert member.data['ratio'] == 1
    assert member.data['connectionLimit'] == 0
    assert member.data['priorityGroup'] == 0
    assert member.data['session'] == "user-enabled"
    assert not member.data['description']
def test_create_pool_member(pool, bigip_members):
    """Test the creation of PoolMember from BIG-IP data."""
    member_cfg = bigip_members[0]

    member = PoolMember(
        pool=pool,
        **member_cfg
    )
    assert member
    assert member._pool

    # Test data
    assert member.data
    assert member.data['name'] == "192.168.200.2:80"
    assert member.data['ratio'] == 1
    assert member.data['connectionLimit'] == 0
    assert member.data['priorityGroup'] == 0
    assert member.data['session'] == "user-enabled"
    assert not member.data['description']


def test_create_pool_member_with_rd(pool, pool_member_with_rd):
    """PoolMember keeps the %0 route-domain suffix in an IPv4 member name."""
    member = PoolMember(
        partition="Common",
        pool=pool,
        **pool_member_with_rd
    )
    assert member
    assert member._pool

    # Test data
    assert member.data
    assert member.data['name'] == "192.168.100.101%0:80"


def test_create_pool_member_with_rd_ipv6(pool, pool_member_with_rd_ipv6):
    """IPv6 names keep the route domain and use '.' before the port."""
    member = PoolMember(
        partition="Common",
        pool=pool,
        **pool_member_with_rd_ipv6
    )
    assert member
    assert member._pool

    # Test data
    assert member.data
    assert member.data['name'] == "2001:0db8:3c4d:0015:0000:0000:abcd:ef12%0.80"
| StarcoderdataPython |
95924 | import pytest
from traitlets import Any
from sepal_ui import sepalwidgets as sw
from sepal_ui.model import Model
class TestDatePicker:
    """Unit tests for sepal_ui.sepalwidgets.DatePicker."""

    def test_init(self):
        # default init
        datepicker = sw.DatePicker()
        assert isinstance(datepicker, sw.DatePicker)

        # exhaustive: a label argument is also accepted
        datepicker = sw.DatePicker("toto")
        assert isinstance(datepicker, sw.DatePicker)

        return

    def test_bind(self, datepicker):
        # Model exposing one Any trait to receive the picker's value.
        class Test_io(Model):
            out = Any(None).tag(sync=True)

        test_io = Test_io()

        test_io.bind(datepicker, "out")

        date = "2020-06-12"
        datepicker.v_model = date

        # Setting v_model must propagate to the bound trait and close the menu.
        assert test_io.out == date
        assert datepicker.menu.v_model is False

        return

    @pytest.fixture
    def datepicker(self):
        """create a default datepicker"""
        return sw.DatePicker()
| StarcoderdataPython |
3348997 | <filename>mmtbx/regression/model/tst_model_biomt_mtrix.py
from __future__ import absolute_import, division, print_function
import iotbx.pdb
import mmtbx.model
import time
"""
Test multiplication of hierarchy and SS annotations in different combinations
of MTRIX and BIOMT records presence.
"""
single_mtrix_txt = """
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.479787 -0.038259 -0.876550 0.00000
MTRIX2 2 -0.530698 0.782918 -0.324654 0.00000
MTRIX3 2 0.698688 0.620947 0.355330 0.00000
"""
mtrix_txt = """
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.479787 -0.038259 -0.876550 0.00000
MTRIX2 2 -0.530698 0.782918 -0.324654 0.00000
MTRIX3 2 0.698688 0.620947 0.355330 0.00000
MTRIX1 3 -0.361936 -0.592602 -0.719600 0.00000
MTRIX2 3 -0.896947 0.431671 0.095646 0.00000
MTRIX3 3 0.253950 0.680060 -0.687769 0.00000
"""
biomt_txt = """
REMARK 350 BIOMT1 1 1.000000 0.000000 0.000000 0.00000
REMARK 350 BIOMT2 1 0.000000 1.000000 0.000000 0.00000
REMARK 350 BIOMT3 1 0.000000 0.000000 1.000000 0.00000
REMARK 350 BIOMT1 2 0.500000 -0.809017 0.309017 0.00000
REMARK 350 BIOMT2 2 0.809017 0.309017 -0.500000 0.00000
REMARK 350 BIOMT3 2 0.309017 0.500000 0.809017 0.00000
REMARK 350 BIOMT1 3 -0.309017 -0.500000 0.809017 0.00000
REMARK 350 BIOMT2 3 0.500000 -0.809017 -0.309017 0.00000
REMARK 350 BIOMT3 3 0.809017 0.309017 0.500000 0.00000
"""
ss_txt = """
HELIX 6 6 ARG A 316 LEU A 318 5 3
HELIX 7 7 SER A 335 ASN A 341 1 7
SHEET 1 E 2 TYR A 305 SER A 308 0
SHEET 2 E 2 GLN A 311 GLU A 314 -1 O ARG A 313 N PHE A 306
"""
# 300 atoms
atoms_txt = """
ATOM 2065 N GLY A 304 3.950 -35.449 102.015 1.00 21.30 N
ATOM 2066 CA GLY A 304 4.631 -35.764 103.257 1.00 19.87 C
ATOM 2067 C GLY A 304 6.074 -36.196 103.097 1.00 19.37 C
ATOM 2068 O GLY A 304 6.642 -36.823 103.994 1.00 18.69 O
ATOM 2069 N TYR A 305 6.673 -35.863 101.957 1.00 19.21 N
ATOM 2070 CA TYR A 305 8.065 -36.210 101.688 1.00 18.15 C
ATOM 2071 C TYR A 305 8.975 -35.002 101.819 1.00 18.33 C
ATOM 2072 O TYR A 305 8.673 -33.920 101.314 1.00 19.14 O
ATOM 2073 CB TYR A 305 8.202 -36.803 100.289 1.00 17.45 C
ATOM 2074 CG TYR A 305 7.826 -38.258 100.228 1.00 18.23 C
ATOM 2075 CD1 TYR A 305 8.761 -39.250 100.515 1.00 18.89 C
ATOM 2076 CD2 TYR A 305 6.526 -38.647 99.917 1.00 17.60 C
ATOM 2077 CE1 TYR A 305 8.411 -40.595 100.493 1.00 18.65 C
ATOM 2078 CE2 TYR A 305 6.166 -39.982 99.895 1.00 18.16 C
ATOM 2079 CZ TYR A 305 7.112 -40.954 100.182 1.00 18.93 C
ATOM 2080 OH TYR A 305 6.756 -42.284 100.149 1.00 20.67 O
ATOM 2081 N PHE A 306 10.097 -35.200 102.498 1.00 18.08 N
ATOM 2082 CA PHE A 306 11.061 -34.133 102.707 1.00 18.50 C
ATOM 2083 C PHE A 306 12.458 -34.585 102.325 1.00 19.27 C
ATOM 2084 O PHE A 306 12.737 -35.780 102.239 1.00 19.20 O
ATOM 2085 CB PHE A 306 11.071 -33.720 104.181 1.00 18.35 C
ATOM 2086 CG PHE A 306 9.784 -33.115 104.650 1.00 18.59 C
ATOM 2087 CD1 PHE A 306 9.582 -31.744 104.580 1.00 18.76 C
ATOM 2088 CD2 PHE A 306 8.765 -33.919 105.141 1.00 18.71 C
ATOM 2089 CE1 PHE A 306 8.379 -31.180 104.994 1.00 19.56 C
ATOM 2090 CE2 PHE A 306 7.558 -33.366 105.557 1.00 20.09 C
ATOM 2091 CZ PHE A 306 7.365 -31.993 105.483 1.00 20.22 C
ATOM 2092 N MET A 307 13.330 -33.615 102.088 1.00 20.68 N
ATOM 2093 CA MET A 307 14.717 -33.899 101.773 1.00 21.16 C
ATOM 2094 C MET A 307 15.423 -33.667 103.103 1.00 22.68 C
ATOM 2095 O MET A 307 15.811 -32.544 103.417 1.00 23.48 O
ATOM 2096 CB MET A 307 15.243 -32.920 100.729 1.00 20.61 C
ATOM 2097 CG MET A 307 16.666 -33.197 100.265 1.00 21.45 C
ATOM 2098 SD MET A 307 16.835 -34.726 99.306 1.00 21.53 S
ATOM 2099 CE MET A 307 18.018 -35.567 100.270 1.00 21.98 C
ATOM 2100 N SER A 308 15.551 -34.726 103.900 1.00 23.90 N
ATOM 2101 CA SER A 308 16.196 -34.629 105.204 1.00 25.50 C
ATOM 2102 C SER A 308 17.711 -34.736 105.044 1.00 26.51 C
ATOM 2103 O SER A 308 18.287 -35.817 105.177 1.00 26.25 O
ATOM 2104 CB SER A 308 15.681 -35.736 106.127 1.00 25.33 C
ATOM 2105 OG SER A 308 16.066 -35.503 107.471 1.00 28.15 O
ATOM 2106 N ASN A 309 18.344 -33.602 104.754 1.00 27.44 N
ATOM 2107 CA ASN A 309 19.787 -33.533 104.556 1.00 28.19 C
ATOM 2108 C ASN A 309 20.235 -34.192 103.260 1.00 28.98 C
ATOM 2109 O ASN A 309 20.106 -33.612 102.183 1.00 30.58 O
ATOM 2110 CB ASN A 309 20.522 -34.170 105.737 1.00 28.18 C
ATOM 2111 CG ASN A 309 20.631 -33.238 106.923 1.00 28.56 C
ATOM 2112 OD1 ASN A 309 21.308 -32.212 106.855 1.00 28.58 O
ATOM 2113 ND2 ASN A 309 19.963 -33.585 108.017 1.00 27.94 N
ATOM 2114 N ASP A 310 20.753 -35.410 103.369 1.00 28.99 N
ATOM 2115 CA ASP A 310 21.249 -36.144 102.212 1.00 28.59 C
ATOM 2116 C ASP A 310 20.246 -37.125 101.612 1.00 27.76 C
ATOM 2117 O ASP A 310 20.386 -37.526 100.457 1.00 28.39 O
ATOM 2118 CB ASP A 310 22.514 -36.904 102.598 1.00 30.83 C
ATOM 2119 CG ASP A 310 22.271 -37.900 103.717 1.00 33.72 C
ATOM 2120 OD1 ASP A 310 21.917 -37.469 104.838 1.00 32.95 O
ATOM 2121 OD2 ASP A 310 22.426 -39.118 103.473 1.00 34.80 O
ATOM 2122 N GLN A 311 19.238 -37.512 102.388 1.00 26.40 N
ATOM 2123 CA GLN A 311 18.239 -38.461 101.910 1.00 24.95 C
ATOM 2124 C GLN A 311 16.812 -37.956 101.941 1.00 23.86 C
ATOM 2125 O GLN A 311 16.499 -36.941 102.561 1.00 23.90 O
ATOM 2126 CB GLN A 311 18.268 -39.739 102.741 1.00 27.17 C
ATOM 2127 CG GLN A 311 19.513 -40.558 102.642 1.00 31.00 C
ATOM 2128 CD GLN A 311 19.343 -41.880 103.340 1.00 32.73 C
ATOM 2129 OE1 GLN A 311 18.986 -41.928 104.519 1.00 32.73 O
ATOM 2130 NE2 GLN A 311 19.591 -42.969 102.618 1.00 35.40 N
ATOM 2131 N ILE A 312 15.948 -38.706 101.272 1.00 22.64 N
ATOM 2132 CA ILE A 312 14.529 -38.409 101.221 1.00 20.51 C
ATOM 2133 C ILE A 312 13.903 -39.134 102.404 1.00 21.02 C
ATOM 2134 O ILE A 312 14.209 -40.300 102.653 1.00 21.21 O
ATOM 2135 CB ILE A 312 13.886 -38.956 99.933 1.00 19.13 C
ATOM 2136 CG1 ILE A 312 14.452 -38.229 98.715 1.00 18.32 C
ATOM 2137 CG2 ILE A 312 12.375 -38.824 100.008 1.00 17.77 C
ATOM 2138 CD1 ILE A 312 13.918 -38.745 97.395 1.00 17.16 C
ATOM 2139 N ARG A 313 13.042 -38.442 103.140 1.00 21.29 N
ATOM 2140 CA ARG A 313 12.363 -39.049 104.275 1.00 21.69 C
ATOM 2141 C ARG A 313 10.872 -38.764 104.223 1.00 22.24 C
ATOM 2142 O ARG A 313 10.445 -37.705 103.766 1.00 22.50 O
ATOM 2143 CB ARG A 313 12.932 -38.539 105.601 1.00 21.74 C
ATOM 2144 CG ARG A 313 14.216 -39.223 106.028 1.00 23.27 C
ATOM 2145 CD ARG A 313 14.488 -39.007 107.511 1.00 24.27 C
ATOM 2146 NE ARG A 313 15.647 -39.768 107.970 1.00 26.04 N
ATOM 2147 CZ ARG A 313 16.906 -39.483 107.652 1.00 26.93 C
ATOM 2148 NH1 ARG A 313 17.177 -38.443 106.873 1.00 26.76 N
ATOM 2149 NH2 ARG A 313 17.895 -40.244 108.103 1.00 27.18 N
ATOM 2150 N GLU A 314 10.085 -39.729 104.686 1.00 23.21 N
ATOM 2151 CA GLU A 314 8.637 -39.591 104.709 1.00 23.33 C
ATOM 2152 C GLU A 314 8.256 -39.096 106.107 1.00 23.56 C
ATOM 2153 O GLU A 314 8.950 -39.370 107.084 1.00 23.95 O
ATOM 2154 CB GLU A 314 7.990 -40.946 104.405 1.00 23.60 C
ATOM 2155 CG GLU A 314 6.517 -40.906 104.006 1.00 25.54 C
ATOM 2156 CD GLU A 314 5.571 -40.837 105.196 1.00 27.64 C
ATOM 2157 OE1 GLU A 314 5.803 -41.568 106.184 1.00 27.37 O
ATOM 2158 OE2 GLU A 314 4.586 -40.068 105.137 1.00 27.35 O
ATOM 2159 N ARG A 315 7.162 -38.349 106.187 1.00 23.65 N
ATOM 2160 CA ARG A 315 6.672 -37.790 107.443 1.00 22.98 C
ATOM 2161 C ARG A 315 6.921 -38.651 108.687 1.00 22.60 C
ATOM 2162 O ARG A 315 7.532 -38.196 109.653 1.00 22.85 O
ATOM 2163 CB ARG A 315 5.173 -37.516 107.323 1.00 23.23 C
ATOM 2164 CG ARG A 315 4.735 -36.167 107.842 1.00 24.91 C
ATOM 2165 CD ARG A 315 3.231 -36.143 108.070 1.00 28.73 C
ATOM 2166 NE ARG A 315 2.444 -36.339 106.853 1.00 31.17 N
ATOM 2167 CZ ARG A 315 2.358 -35.454 105.863 1.00 34.22 C
ATOM 2168 NH1 ARG A 315 3.018 -34.301 105.937 1.00 36.07 N
ATOM 2169 NH2 ARG A 315 1.596 -35.715 104.805 1.00 32.99 N
ATOM 2170 N ARG A 316 6.447 -39.893 108.657 1.00 22.00 N
ATOM 2171 CA ARG A 316 6.578 -40.806 109.792 1.00 21.99 C
ATOM 2172 C ARG A 316 7.984 -41.091 110.302 1.00 23.09 C
ATOM 2173 O ARG A 316 8.149 -41.529 111.439 1.00 23.65 O
ATOM 2174 CB ARG A 316 5.886 -42.136 109.475 1.00 21.22 C
ATOM 2175 CG ARG A 316 4.373 -42.042 109.402 1.00 23.38 C
ATOM 2176 CD ARG A 316 3.836 -42.668 108.123 1.00 26.73 C
ATOM 2177 NE ARG A 316 3.995 -44.121 108.085 1.00 28.42 N
ATOM 2178 CZ ARG A 316 4.479 -44.789 107.040 1.00 28.61 C
ATOM 2179 NH1 ARG A 316 4.857 -44.132 105.954 1.00 29.34 N
ATOM 2180 NH2 ARG A 316 4.571 -46.113 107.074 1.00 27.85 N
ATOM 2181 N ASP A 317 9.000 -40.846 109.485 1.00 25.17 N
ATOM 2182 CA ASP A 317 10.363 -41.127 109.913 1.00 26.92 C
ATOM 2183 C ASP A 317 11.076 -39.946 110.566 1.00 26.95 C
ATOM 2184 O ASP A 317 12.094 -40.123 111.234 1.00 27.91 O
ATOM 2185 CB ASP A 317 11.184 -41.652 108.733 1.00 31.20 C
ATOM 2186 CG ASP A 317 12.583 -42.075 109.139 1.00 35.93 C
ATOM 2187 OD1 ASP A 317 13.435 -41.187 109.360 1.00 40.17 O
ATOM 2188 OD2 ASP A 317 12.831 -43.295 109.250 1.00 37.90 O
ATOM 2189 N LEU A 318 10.550 -38.741 110.378 1.00 26.49 N
ATOM 2190 CA LEU A 318 11.155 -37.560 110.985 1.00 26.24 C
ATOM 2191 C LEU A 318 10.717 -37.537 112.448 1.00 26.95 C
ATOM 2192 O LEU A 318 9.800 -36.810 112.828 1.00 27.99 O
ATOM 2193 CB LEU A 318 10.693 -36.301 110.252 1.00 24.34 C
ATOM 2194 CG LEU A 318 11.071 -36.288 108.768 1.00 23.72 C
ATOM 2195 CD1 LEU A 318 10.394 -35.132 108.065 1.00 24.49 C
ATOM 2196 CD2 LEU A 318 12.580 -36.193 108.629 1.00 24.02 C
ATOM 2197 N THR A 319 11.392 -38.343 113.259 1.00 27.73 N
ATOM 2198 CA THR A 319 11.076 -38.486 114.676 1.00 27.93 C
ATOM 2199 C THR A 319 11.764 -37.518 115.637 1.00 28.67 C
ATOM 2200 O THR A 319 11.364 -37.407 116.795 1.00 29.01 O
ATOM 2201 CB THR A 319 11.379 -39.919 115.137 1.00 27.93 C
ATOM 2202 OG1 THR A 319 12.746 -40.235 114.841 1.00 27.70 O
ATOM 2203 CG2 THR A 319 10.472 -40.911 114.417 1.00 26.51 C
ATOM 2204 N THR A 320 12.797 -36.826 115.169 1.00 29.46 N
ATOM 2205 CA THR A 320 13.513 -35.872 116.011 1.00 29.47 C
ATOM 2206 C THR A 320 12.739 -34.564 116.150 1.00 30.76 C
ATOM 2207 O THR A 320 12.574 -34.041 117.251 1.00 31.05 O
ATOM 2208 CB THR A 320 14.900 -35.571 115.435 1.00 29.84 C
ATOM 2209 OG1 THR A 320 15.722 -36.738 115.556 1.00 28.30 O
ATOM 2210 CG2 THR A 320 15.548 -34.398 116.168 1.00 30.39 C
ATOM 2211 N SER A 321 12.274 -34.035 115.023 1.00 31.46 N
ATOM 2212 CA SER A 321 11.505 -32.799 115.014 1.00 31.46 C
ATOM 2213 C SER A 321 10.184 -33.067 114.316 1.00 30.56 C
ATOM 2214 O SER A 321 10.104 -33.934 113.448 1.00 32.00 O
ATOM 2215 CB SER A 321 12.270 -31.700 114.277 1.00 33.39 C
ATOM 2216 OG SER A 321 13.478 -31.389 114.950 1.00 37.51 O
ATOM 2217 N VAL A 322 9.149 -32.325 114.694 1.00 28.32 N
ATOM 2218 CA VAL A 322 7.829 -32.503 114.103 1.00 25.27 C
ATOM 2219 C VAL A 322 7.760 -31.974 112.671 1.00 24.50 C
ATOM 2220 O VAL A 322 8.001 -30.794 112.424 1.00 24.31 O
ATOM 2221 CB VAL A 322 6.756 -31.795 114.945 1.00 24.16 C
ATOM 2222 CG1 VAL A 322 5.380 -32.058 114.362 1.00 24.15 C
ATOM 2223 CG2 VAL A 322 6.830 -32.275 116.380 1.00 23.55 C
ATOM 2224 N PRO A 323 7.429 -32.850 111.708 1.00 23.60 N
ATOM 2225 CA PRO A 323 7.325 -32.477 110.294 1.00 22.62 C
ATOM 2226 C PRO A 323 6.166 -31.512 110.044 1.00 22.04 C
ATOM 2227 O PRO A 323 5.076 -31.683 110.594 1.00 21.04 O
ATOM 2228 CB PRO A 323 7.097 -33.816 109.596 1.00 22.18 C
ATOM 2229 CG PRO A 323 7.725 -34.803 110.523 1.00 23.36 C
ATOM 2230 CD PRO A 323 7.282 -34.306 111.868 1.00 23.35 C
ATOM 2231 N PRO A 324 6.389 -30.483 109.214 1.00 21.26 N
ATOM 2232 CA PRO A 324 5.345 -29.504 108.905 1.00 20.28 C
ATOM 2233 C PRO A 324 4.265 -30.149 108.052 1.00 20.15 C
ATOM 2234 O PRO A 324 4.394 -31.297 107.620 1.00 21.43 O
ATOM 2235 CB PRO A 324 6.093 -28.424 108.128 1.00 20.19 C
ATOM 2236 CG PRO A 324 7.499 -28.565 108.593 1.00 22.05 C
ATOM 2237 CD PRO A 324 7.676 -30.054 108.644 1.00 21.65 C
ATOM 2238 N VAL A 325 3.207 -29.394 107.797 1.00 18.93 N
ATOM 2239 CA VAL A 325 2.106 -29.878 106.986 1.00 17.74 C
ATOM 2240 C VAL A 325 1.533 -28.648 106.265 1.00 18.27 C
ATOM 2241 O VAL A 325 1.462 -27.564 106.845 1.00 19.21 O
ATOM 2242 CB VAL A 325 1.065 -30.589 107.891 1.00 15.23 C
ATOM 2243 CG1 VAL A 325 0.310 -29.581 108.721 1.00 16.64 C
ATOM 2244 CG2 VAL A 325 0.144 -31.438 107.066 1.00 16.39 C
ATOM 2245 N ALA A 326 1.160 -28.802 104.998 1.00 18.66 N
ATOM 2246 CA ALA A 326 0.636 -27.680 104.211 1.00 19.12 C
ATOM 2247 C ALA A 326 -0.783 -27.267 104.588 1.00 19.46 C
ATOM 2248 O ALA A 326 -1.755 -27.780 104.035 1.00 22.10 O
ATOM 2249 CB ALA A 326 0.699 -28.016 102.723 1.00 17.55 C
ATOM 2250 N LEU A 327 -0.898 -26.324 105.516 1.00 17.63 N
ATOM 2251 CA LEU A 327 -2.205 -25.855 105.961 1.00 16.12 C
ATOM 2252 C LEU A 327 -2.471 -24.440 105.467 1.00 16.88 C
ATOM 2253 O LEU A 327 -1.590 -23.795 104.901 1.00 18.47 O
ATOM 2254 CB LEU A 327 -2.279 -25.897 107.487 1.00 13.74 C
ATOM 2255 CG LEU A 327 -1.897 -27.240 108.115 1.00 11.17 C
ATOM 2256 CD1 LEU A 327 -1.930 -27.126 109.624 1.00 9.68 C
ATOM 2257 CD2 LEU A 327 -2.840 -28.330 107.633 1.00 9.69 C
ATOM 2258 N THR A 328 -3.692 -23.962 105.683 1.00 16.51 N
ATOM 2259 CA THR A 328 -4.076 -22.623 105.254 1.00 15.88 C
ATOM 2260 C THR A 328 -3.155 -21.559 105.881 1.00 15.66 C
ATOM 2261 O THR A 328 -3.011 -21.475 107.101 1.00 14.78 O
ATOM 2262 CB THR A 328 -5.577 -22.379 105.580 1.00 14.98 C
ATOM 2263 OG1 THR A 328 -5.835 -20.975 105.690 1.00 15.31 O
ATOM 2264 CG2 THR A 328 -5.968 -23.098 106.862 1.00 16.50 C
ATOM 2265 N ALA A 329 -2.535 -20.758 105.015 1.00 15.68 N
ATOM 2266 CA ALA A 329 -1.570 -19.718 105.391 1.00 15.70 C
ATOM 2267 C ALA A 329 -2.002 -18.573 106.305 1.00 16.13 C
ATOM 2268 O ALA A 329 -3.165 -18.169 106.317 1.00 15.82 O
ATOM 2269 CB ALA A 329 -0.949 -19.136 104.123 1.00 15.02 C
ATOM 2270 N THR A 330 -1.028 -18.050 107.054 1.00 17.52 N
ATOM 2271 CA THR A 330 -1.220 -16.930 107.984 1.00 18.76 C
ATOM 2272 C THR A 330 0.039 -16.068 108.050 1.00 18.94 C
ATOM 2273 O THR A 330 1.134 -16.520 107.713 1.00 18.98 O
ATOM 2274 CB THR A 330 -1.486 -17.400 109.427 1.00 18.55 C
ATOM 2275 OG1 THR A 330 -2.582 -18.314 109.441 1.00 25.08 O
ATOM 2276 CG2 THR A 330 -1.826 -16.215 110.316 1.00 17.33 C
ATOM 2277 N LYS A 331 -0.130 -14.826 108.494 1.00 18.76 N
ATOM 2278 CA LYS A 331 0.981 -13.893 108.648 1.00 18.12 C
ATOM 2279 C LYS A 331 1.276 -13.801 110.140 1.00 18.19 C
ATOM 2280 O LYS A 331 2.396 -13.505 110.551 1.00 19.51 O
ATOM 2281 CB LYS A 331 0.595 -12.500 108.149 1.00 17.57 C
ATOM 2282 CG LYS A 331 0.218 -12.405 106.686 1.00 18.42 C
ATOM 2283 CD LYS A 331 -0.306 -11.007 106.356 1.00 18.16 C
ATOM 2284 CE LYS A 331 0.701 -9.919 106.725 1.00 16.46 C
ATOM 2285 NZ LYS A 331 0.187 -8.552 106.431 1.00 15.24 N
ATOM 2286 N LEU A 332 0.247 -14.064 110.937 1.00 18.16 N
ATOM 2287 CA LEU A 332 0.315 -14.001 112.393 1.00 18.94 C
ATOM 2288 C LEU A 332 1.112 -15.152 113.006 1.00 20.72 C
ATOM 2289 O LEU A 332 1.034 -15.408 114.207 1.00 21.12 O
ATOM 2290 CB LEU A 332 -1.111 -13.996 112.944 1.00 17.77 C
ATOM 2291 CG LEU A 332 -2.055 -13.055 112.181 1.00 15.45 C
ATOM 2292 CD1 LEU A 332 -3.501 -13.375 112.509 1.00 14.04 C
ATOM 2293 CD2 LEU A 332 -1.723 -11.617 112.522 1.00 12.75 C
ATOM 2294 N ASN A 333 1.880 -15.838 112.166 1.00 23.44 N
ATOM 2295 CA ASN A 333 2.703 -16.971 112.578 1.00 23.93 C
ATOM 2296 C ASN A 333 4.150 -16.545 112.728 1.00 24.70 C
ATOM 2297 O ASN A 333 4.853 -16.992 113.632 1.00 24.54 O
ATOM 2298 CB ASN A 333 2.646 -18.062 111.515 1.00 26.61 C
ATOM 2299 CG ASN A 333 1.858 -19.250 111.956 1.00 29.55 C
ATOM 2300 OD1 ASN A 333 2.146 -19.843 112.991 1.00 33.22 O
ATOM 2301 ND2 ASN A 333 0.853 -19.617 111.173 1.00 34.06 N
ATOM 2302 N GLN A 334 4.586 -15.696 111.803 1.00 24.83 N
ATOM 2303 CA GLN A 334 5.948 -15.190 111.768 1.00 24.33 C
ATOM 2304 C GLN A 334 6.154 -14.072 112.782 1.00 23.29 C
ATOM 2305 O GLN A 334 5.292 -13.209 112.949 1.00 23.17 O
ATOM 2306 CB GLN A 334 6.257 -14.671 110.362 1.00 26.56 C
ATOM 2307 CG GLN A 334 7.356 -15.429 109.623 1.00 29.28 C
ATOM 2308 CD GLN A 334 7.077 -16.915 109.515 1.00 28.54 C
ATOM 2309 OE1 GLN A 334 6.021 -17.330 109.038 1.00 28.36 O
ATOM 2310 NE2 GLN A 334 8.031 -17.725 109.954 1.00 29.34 N
ATOM 2311 N SER A 335 7.298 -14.094 113.459 1.00 21.22 N
ATOM 2312 CA SER A 335 7.616 -13.065 114.439 1.00 19.84 C
ATOM 2313 C SER A 335 8.243 -11.891 113.700 1.00 19.90 C
ATOM 2314 O SER A 335 8.584 -12.005 112.522 1.00 19.68 O
ATOM 2315 CB SER A 335 8.600 -13.597 115.479 1.00 19.73 C
ATOM 2316 OG SER A 335 9.870 -13.836 114.897 1.00 20.23 O
ATOM 2317 N ALA A 336 8.401 -10.767 114.393 1.00 19.82 N
ATOM 2318 CA ALA A 336 8.988 -9.579 113.787 1.00 18.90 C
ATOM 2319 C ALA A 336 10.408 -9.856 113.309 1.00 19.44 C
ATOM 2320 O ALA A 336 10.807 -9.413 112.232 1.00 18.92 O
ATOM 2321 CB ALA A 336 8.987 -8.436 114.780 1.00 18.18 C
ATOM 2322 N SER A 337 11.169 -10.592 114.112 1.00 20.65 N
ATOM 2323 CA SER A 337 12.543 -10.918 113.756 1.00 21.77 C
ATOM 2324 C SER A 337 12.601 -11.896 112.588 1.00 21.99 C
ATOM 2325 O SER A 337 13.536 -11.852 111.790 1.00 22.49 O
ATOM 2326 CB SER A 337 13.280 -11.495 114.963 1.00 21.52 C
ATOM 2327 OG SER A 337 12.584 -12.609 115.487 1.00 27.59 O
ATOM 2328 N ASN A 338 11.610 -12.779 112.484 1.00 22.03 N
ATOM 2329 CA ASN A 338 11.584 -13.729 111.378 1.00 22.50 C
ATOM 2330 C ASN A 338 11.427 -12.965 110.068 1.00 22.06 C
ATOM 2331 O ASN A 338 12.155 -13.213 109.108 1.00 21.25 O
ATOM 2332 CB ASN A 338 10.430 -14.727 111.517 1.00 25.28 C
ATOM 2333 CG ASN A 338 10.682 -15.778 112.586 1.00 27.64 C
ATOM 2334 OD1 ASN A 338 11.812 -16.237 112.773 1.00 28.04 O
ATOM 2335 ND2 ASN A 338 9.620 -16.181 113.280 1.00 28.11 N
ATOM 2336 N ASN A 339 10.477 -12.032 110.038 1.00 21.49 N
ATOM 2337 CA ASN A 339 10.232 -11.229 108.844 1.00 20.40 C
ATOM 2338 C ASN A 339 11.511 -10.562 108.357 1.00 19.57 C
ATOM 2339 O ASN A 339 11.764 -10.496 107.156 1.00 20.36 O
ATOM 2340 CB ASN A 339 9.186 -10.147 109.115 1.00 20.32 C
ATOM 2341 CG ASN A 339 7.869 -10.714 109.585 1.00 20.71 C
ATOM 2342 OD1 ASN A 339 7.471 -11.806 109.178 1.00 22.61 O
ATOM 2343 ND2 ASN A 339 7.174 -9.969 110.435 1.00 19.61 N
ATOM 2344 N LEU A 340 12.312 -10.065 109.294 1.00 17.69 N
ATOM 2345 CA LEU A 340 13.561 -9.405 108.942 1.00 16.22 C
ATOM 2346 C LEU A 340 14.507 -10.326 108.185 1.00 16.58 C
ATOM 2347 O LEU A 340 15.286 -9.866 107.349 1.00 18.23 O
ATOM 2348 CB LEU A 340 14.251 -8.872 110.198 1.00 13.20 C
ATOM 2349 CG LEU A 340 13.589 -7.648 110.832 1.00 10.55 C
ATOM 2350 CD1 LEU A 340 14.276 -7.322 112.136 1.00 9.92 C
ATOM 2351 CD2 LEU A 340 13.663 -6.465 109.877 1.00 9.35 C
ATOM 2352 N ASN A 341 14.435 -11.624 108.468 1.00 16.36 N
ATOM 2353 CA ASN A 341 15.295 -12.597 107.798 1.00 15.90 C
ATOM 2354 C ASN A 341 14.594 -13.269 106.626 1.00 15.59 C
ATOM 2355 O ASN A 341 14.917 -14.403 106.269 1.00 15.34 O
ATOM 2356 CB ASN A 341 15.754 -13.670 108.782 1.00 15.76 C
ATOM 2357 CG ASN A 341 16.460 -13.089 109.977 1.00 16.78 C
ATOM 2358 OD1 ASN A 341 17.302 -12.200 109.841 1.00 19.51 O
ATOM 2359 ND2 ASN A 341 16.132 -13.593 111.161 1.00 16.09 N
ATOM 2360 N ALA A 342 13.640 -12.566 106.027 1.00 14.50 N
ATOM 2361 CA ALA A 342 12.894 -13.105 104.900 1.00 13.77 C
ATOM 2362 C ALA A 342 13.788 -13.386 103.695 1.00 14.02 C
ATOM 2363 O ALA A 342 14.703 -12.618 103.392 1.00 14.49 O
ATOM 2364 CB ALA A 342 11.782 -12.143 104.511 1.00 12.29 C
"""
def exercise_single_mtrix():
    """Two MTRIX operators (one of them identity) double atoms and SS records."""
    pdb_inp = iotbx.pdb.input(
        lines=single_mtrix_txt + ss_txt + atoms_txt, source_info=None)
    model = mmtbx.model.manager(model_input=pdb_inp)
    # print (model.model_as_pdb())
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 600, n_atoms
    assert model.get_hierarchy().atoms_size() == 600
    assert model.get_xray_structure().scatterers().size() == 600
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 4
    assert annotation.get_n_sheets() == 2
def exercise_mtrix():
    """Three MTRIX operators (one identity) triple atoms and SS records."""
    pdb_inp = iotbx.pdb.input(
        lines=mtrix_txt + ss_txt + atoms_txt, source_info=None)
    model = mmtbx.model.manager(model_input=pdb_inp)
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 900, n_atoms
    assert model.get_hierarchy().atoms_size() == 900
    assert model.get_xray_structure().scatterers().size() == 900
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 6
    assert annotation.get_n_sheets() == 3
def exercise_biomt():
    """BIOMT records are not applied on read; explicit expansion triples the model."""
    pdb_inp = iotbx.pdb.input(
        lines=biomt_txt + ss_txt + atoms_txt, source_info=None)
    model = mmtbx.model.manager(model_input=pdb_inp)

    # Before expansion: only the single deposited copy is present.
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 300, n_atoms
    assert model.get_hierarchy().atoms_size() == 300
    assert model.get_xray_structure().scatterers().size() == 300
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 2
    assert annotation.get_n_sheets() == 1

    # After expansion: all three BIOMT operators are applied.
    model.expand_with_BIOMT_records()
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 900, n_atoms
    assert model.get_hierarchy().atoms_size() == 900
    n_scatterers = model.get_xray_structure().scatterers().size()
    assert n_scatterers == 900, n_scatterers
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 6
    assert annotation.get_n_sheets() == 3
def exercise_both():
    """MTRIX applies on read (x3); BIOMT expansion multiplies again (x3 = x9)."""
    pdb_inp = iotbx.pdb.input(
        lines=mtrix_txt + biomt_txt + ss_txt + atoms_txt, source_info=None)
    model = mmtbx.model.manager(model_input=pdb_inp)

    # MTRIX already applied on reading: 3 copies.
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 900, n_atoms
    assert model.get_hierarchy().atoms_size() == 900
    assert model.get_xray_structure().scatterers().size() == 900
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 6
    assert annotation.get_n_sheets() == 3

    # BIOMT expansion multiplies the MTRIX-expanded model: 9 copies total.
    model.expand_with_BIOMT_records()
    n_atoms = model.get_number_of_atoms()
    assert n_atoms == 2700, n_atoms
    assert model.get_hierarchy().atoms_size() == 2700
    n_scatterers = model.get_xray_structure().scatterers().size()
    assert n_scatterers == 2700, n_scatterers
    annotation = model.get_ss_annotation()
    assert annotation.get_n_helices() == 18
    assert annotation.get_n_sheets() == 9
if __name__ == "__main__":
    start = time.time()
    # Run every exercise in order; each one asserts internally.
    for exercise in (exercise_single_mtrix, exercise_mtrix,
                     exercise_biomt, exercise_both):
        exercise()
    print("Total time: %8.3f" % (time.time() - start))
    print("OK.")
| StarcoderdataPython |
3325415 | from typing import Iterator, List, Tuple, Union
import random
import nltk # type: ignore
from nltk.grammar import ProbabilisticProduction # type: ignore
from nltk.grammar import Nonterminal # type: ignore
Symbol = Union[str, Nonterminal]
class PCFG(nltk.grammar.PCFG):
    """A probabilistic CFG extended with random sentence generation.

    Sentences are produced by repeatedly sampling a production for each
    non-terminal, weighted by the productions' probabilities.
    """

    def generate(self, n: int) -> Iterator[str]:
        """Probabilistically, recursively reduce the start symbol `n` times,
        yielding a valid sentence each time.

        Args:
            n: The number of sentences to generate.

        Yields:
            The next generated sentence.
        """
        for _ in range(n):
            yield self._generate_derivation(self.start())

    def _generate_derivation(self, nonterminal: Nonterminal) -> str:
        """Probabilistically, recursively reduce `nonterminal` to generate a
        derivation of `nonterminal`.

        Args:
            nonterminal: The non-terminal symbol to reduce.

        Returns:
            The derived sentence (terminals joined by single spaces).
        """
        sentence: List[str] = []
        symbol: Symbol
        derivation: str

        for symbol in self._reduce_once(nonterminal):
            if isinstance(symbol, str):
                # Terminal symbol: use it verbatim.
                derivation = symbol
            else:
                # Non-terminal: recurse until only terminals remain.
                derivation = self._generate_derivation(symbol)

            # Skip empty derivations so joining does not produce double spaces.
            if derivation != "":
                sentence.append(derivation)

        return " ".join(sentence)

    def _reduce_once(self, nonterminal: Nonterminal) -> Tuple[Symbol, ...]:
        """Probabilistically choose a production to reduce `nonterminal`, then
        return the right-hand side.

        Args:
            nonterminal: The non-terminal symbol to derive.

        Returns:
            The right-hand side of the chosen production.
        """
        return self._choose_production_reducing(nonterminal).rhs()

    def _choose_production_reducing(
        self, nonterminal: Nonterminal
    ) -> ProbabilisticProduction:
        """Probabilistically choose a production that reduces `nonterminal`.

        Args:
            nonterminal: The non-terminal symbol for which to choose a production.

        Returns:
            The chosen production.
        """
        # NOTE(review): relies on nltk's internal `_lhs_index` mapping from
        # left-hand sides to productions -- confirm against the pinned nltk version.
        productions: List[ProbabilisticProduction] = self._lhs_index[nonterminal]
        probabilities: List[float] = [production.prob() for production in productions]
        return random.choices(productions, weights=probabilities)[0]
| StarcoderdataPython |
1750545 |
someSongs = [("a large song", 60), ("a little song", 10), ("a bigger song", 20), ("a very short song", 2), ("a tiny song", 1)]
def CDSolve(songsToUse, space):
    """Choose a subset of songs that fills a CD as completely as possible.

    This is a 0/1-knapsack search: for every song we either skip it or, if it
    fits, add it, keeping whichever choice leaves the least unused space.
    The original plain recursion re-solved identical subproblems exponentially
    many times; memoizing on (index, remaining space) makes each subproblem
    solved once while preserving the exact same tie-breaking (skip wins ties)
    and the same result ordering.

    Args:
        songsToUse: list of (title, length) tuples.
        space: capacity of the CD in the same units as the song lengths.

    Returns:
        (best_songs, leftover): the chosen songs (as a list) and the unused
        space. An empty song list or non-positive space yields ([], space).
    """
    from functools import lru_cache

    songs = tuple(songsToUse)  # immutable, indexable copy for the cached helper

    @lru_cache(maxsize=None)
    def solve(i, remaining):
        # Best packing of songs[i:] into `remaining` space, as (chosen, leftover).
        if i == len(songs) or remaining <= 0:
            return (), remaining
        # Option 1: skip song i.
        skipped, skip_left = solve(i + 1, remaining)
        length = songs[i][1]
        if remaining >= length:
            # Option 2: take song i (only considered when it fits).
            taken, take_left = solve(i + 1, remaining - length)
            # Strict '>' keeps the original tie-break: on equal leftover, skip.
            if skip_left > take_left:
                return taken + (songs[i],), take_left
        return skipped, skip_left

    chosen, leftover = solve(0, space)
    return list(chosen), leftover
# Program test statements: exercise the solver at several CD capacities.
for capacity in (22, 68, 12, 0, 70):
    print(CDSolve(someSongs, capacity))
1624369 | """Subset to List
.. helpdoc::
This widget splits a data table into a list structure in which each element of the list holds the rows of the table sharing one value of the selected index column.
For example, the data frame;
a b c
'a' 7 8
'a' 8 9
'b' 8 9
'b' 30 49
becomes:
$a
a b c
'a' 7 8
'a' 8 9
$b
a b c
'b' 8 9
'b' 30 49
"""
"""<widgetXML>
<name>
Subset to List
</name>
<icon>
default.png
</icon>
<summary>
Subset a data table into several tables in a list.
</summary>
<tags>
<tag priority="10">
Advanced Stats
</tag>
</tags>
<author>
<authorname><NAME></authorname>
<authorcontact><EMAIL></authorcontact>
</author>
</widgetXML>
"""
from OWRpy import *
import redRGUI, signals
import redRGUI
class subtolist(OWRpy):
    """Red-R widget: split an R data frame into a list keyed by one column.

    Receives a data frame, lets the user pick an index column, and emits an
    R list with one data frame per level of that column.
    """

    # Widget settings persisted across sessions.
    globalSettingsList = ['commit']

    def __init__(self, **kwargs):
        OWRpy.__init__(self, **kwargs)
        # Name of the R data-frame variable received on input ('' until data arrives).
        self.RFunctionParam_object = ''
        self.setRvariableNames(["sublist"])
        """.. rrsignals::
        :description: `A data table read in by the widget`"""
        self.inputs.addInput('id0', 'Data Table', signals.base.RDataFrame, self.processobject)
        self.outputs.addOutput('id1', 'Data List', signals.base.RList)
        # Column whose factor levels define the subsets.
        # NOTE(review): this combo box is never repopulated with the incoming
        # table's column names here -- confirm that happens elsewhere (e.g. in
        # comboBox/currentId machinery) or the widget cannot select an index.
        self.selectBox = redRGUI.base.comboBox(self.controlArea, label = "Subset Column", callback = self.commitFunction)
        #box = redRGUI.base.groupBox(self.controlArea, "Output")
        """.. rrgui::
        :description: `Run the subsetting.`
        """
        self.commit = redRGUI.base.commitButton(self.bottomAreaRight, "Commit", callback = self.commitFunction,
            processOnInput=True)
        #self.RoutputWindow = redRGUI.base.textEdit(box,label='R Output', displayLabel=False)
        # Define the R helper function once at widget construction time.
        self.R(
            """subtolist<-function(data, index){
            # purpose is to turn the data.frame into a list with levels the same as the levels of the indicated index.
            if(class(data) != "data.frame"){ return(NULL) }
            if(! index %in% names(data)){ return(NULL) }
            newdata <- list()
            for (i in levels(as.factor(data[,index]))){
            newdata[[i]]<-data[data[,index] == i,]
            }
            return(newdata)
            }
            """, wantType = redR.NOCONVERSION)

    def processobject(self, data):
        """Input handler: remember the incoming R variable and maybe auto-commit."""
        if data:
            self.RFunctionParam_object=unicode(data.getData())
            if self.commit.processOnInput():
                self.commitFunction()
        else: self.RFunctionParam_object = ''

    def commitFunction(self):
        """Run the R subsetting and send the resulting list downstream."""
        # No input connected yet; nothing to subset.
        if self.RFunctionParam_object == '': return
        self.R('%(new)s<-subtolist(%(data)s, %(index)s)' % {'new':self.Rvariables['sublist'], 'data':self.RFunctionParam_object, 'index':self.selectBox.currentId()}, wantType = redR.NOCONVERSION)
        # subtolist() returns NULL on bad input; treat that as a user-visible failure.
        if not self.R(self.Rvariables['sublist']):
            self.status.setText('Subsetting failed, please check selections')
            return
        newdata = signals.base.RList(self, data = self.Rvariables['sublist'])
        self.rSend('id1', newdata)
| StarcoderdataPython |
4811983 | <reponame>Echeverrias/IE
from django import template
from django.forms.models import model_to_dict as m_to_d
register = template.Library()
@register.filter(name='model_to_dict')
def model_to_dict(model_instance):
    """Template filter: return `model_instance` as a plain field/value dict."""
    return m_to_d(model_instance)
@register.filter(name='model_list_to_dict_list')
def model_list_to_dict_list(model_instances_list):
    """Template filter: convert each model instance in a list to a dict."""
    return list(map(model_to_dict, model_instances_list))
137611 | <gh_stars>0
from tkinter import Tk, StringVar, Frame, Label, Button
from main import Qobuz
from threading import Thread
class Window:
    """Tk front-end that launches the Qobuz job on a background thread."""

    # Handle to the Start button so it can be disabled once a run begins.
    button_main = None

    def __init__(self):
        # Initial status text shown next to the "status:" label
        # (Russian for "program ready").
        self.message = "программа готова"

    def starting_main(self, string_msg):
        # Disable the button to prevent concurrent runs, then start the
        # Qobuz job in a thread so the Tk event loop is not blocked.
        self.button_main['state'] = "disabled"
        # NOTE(review): the local is named `spotify` but this is a Qobuz client;
        # also, `string_msg` is actually the StringVar's bound `set` method
        # (see `data = string_var.set` below), presumably so the worker can
        # push status text back to the label -- confirm against Qobuz.__init__.
        spotify = Qobuz(string_msg)
        Thread(target=spotify.main).start()

    def app(self):
        """Build the main window and enter the Tk event loop (blocks)."""
        windows = Tk()
        windows.geometry("500x250")
        windows.configure(background='#313131')
        windows.title('qobuz')
        string_var = StringVar()
        string_var.set(self.message)
        frame = Frame(windows, background='#313131', pady=30)
        # `data` is the setter callable handed to the worker (see note above).
        data = string_var.set
        button = Button(frame, text='Start', command=lambda data = data :self.starting_main(data),
            fg="black",font=("Tahoma",10), width = 20, background='#807175',
            highlightbackground="black")
        # Keep a reference so starting_main can disable the button.
        self.button_main = button
        button.grid(row=0, columnspan=2, pady=(0,10))
        status_label = Label(frame, text='status: ',fg="#b98694",font=("Tahoma",10),background='#313131')
        status_label.grid(row=1, column=0)
        # Label tracks `string_var`, so updates via `data(...)` show up live.
        message_lable = Label(frame, textvariable=string_var, fg="#a89da0",font=("Tahoma",10),background='#313131')
        message_lable.grid(row=1, column=1)
        frame.pack(pady=(60, 0))
        windows.mainloop()
if __name__ == '__main__':
    # Build the GUI and block in the Tk event loop.
    Window().app()
3347547 | import numpy as np
from pupper.ServoCalibration import MICROS_PER_RAD, NEUTRAL_ANGLE_DEGREES
from pupper.HardwareConfig import PS4_COLOR, PS4_DEACTIVATED_COLOR
from enum import Enum
# TODO: put these somewhere else
class PWMParams:
    """GPIO pin assignments and PWM timing parameters for the servo driver."""

    def __init__(self):
        # 3x4 pin map -- presumably rows are joint channels and columns are
        # legs, mirroring the 3x4 arrays used elsewhere; TODO confirm mapping.
        self.pins = np.array(
            [
                [2, 14, 18, 23],
                [3, 15, 27, 24],
                [4, 17, 22, 25],
            ]
        )
        self.range = 4000  # PWM counts per period
        self.freq = 250  # PWM frequency [Hz]
class ServoParams:
    """Servo PWM calibration constants and per-joint sign conventions."""

    def __init__(self):
        # Pulse width at the servo midpoint, in microseconds.
        self.neutral_position_pwm = 1500
        # Calibrated pulse-width change per radian of joint rotation.
        self.micros_per_rad = MICROS_PER_RAD
        # Neutral angle of each joint relative to the modeled zero, in degrees.
        self.neutral_angle_degrees = NEUTRAL_ANGLE_DEGREES
        # Direction multipliers (3x4) so mirrored joints rotate consistently.
        self.servo_multipliers = np.array(
            [
                [1, 1, 1, 1],
                [-1, 1, -1, 1],
                [1, -1, 1, -1],
            ]
        )

    @property
    def neutral_angles(self):
        """Neutral angles converted from degrees to radians."""
        return self.neutral_angle_degrees * np.pi / 180.0
class Configuration:
def __init__(self):
################# CONTROLLER BASE COLOR ##############
self.ps4_color = PS4_COLOR
self.ps4_deactivated_color = PS4_DEACTIVATED_COLOR
#################### COMMANDS ####################
self.max_x_velocity = 0.4
self.max_y_velocity = 0.3
self.max_yaw_rate = 2.0
self.max_pitch = 30.0 * np.pi / 180.0
#################### MOVEMENT PARAMS ####################
self.z_time_constant = 0.02
self.z_speed = 0.03 # maximum speed [m/s]
self.pitch_deadband = 0.02
self.pitch_time_constant = 0.25
self.max_pitch_rate = 0.15
self.roll_speed = 0.16 # maximum roll rate [rad/s]
self.yaw_time_constant = 0.3
self.max_stance_yaw = 1.2
self.max_stance_yaw_rate = 2.0
#################### STANCE ####################
self.delta_x = 0.1
self.delta_y = 0.09
self.x_shift = 0.0
self.default_z_ref = -0.16
#################### SWING ######################
self.z_coeffs = None
self.z_clearance = 0.07
self.alpha = (
0.5 # Ratio between touchdown distance and total horizontal stance movement
)
self.beta = (
0.5 # Ratio between touchdown distance and total horizontal stance movement
)
#################### GAIT #######################
self.dt = 0.01
self.num_phases = 4
self.contact_phases = np.array(
[[1, 1, 1, 0], [1, 0, 1, 1], [1, 0, 1, 1], [1, 1, 1, 0]]
)
self.overlap_time = (
0.10 # duration of the phase where all four feet are on the ground
)
self.swing_time = (
0.15 # duration of the phase when only two feet are on the ground
)
######################## GEOMETRY ######################
self.LEG_FB = 0.10 # front-back distance from center line to leg axis
self.LEG_LR = 0.04 # left-right distance from center line to leg plane
self.LEG_L2 = 0.115
self.LEG_L1 = 0.1235
self.ABDUCTION_OFFSET = 0.03 # distance from abduction axis to leg
self.FOOT_RADIUS = 0.01
self.HIP_L = 0.0394
self.HIP_W = 0.0744
self.HIP_T = 0.0214
self.HIP_OFFSET = 0.0132
self.L = 0.276
self.W = 0.100
self.T = 0.050
self.LEG_ORIGINS = np.array(
[
[self.LEG_FB, self.LEG_FB, -self.LEG_FB, -self.LEG_FB],
[-self.LEG_LR, self.LEG_LR, -self.LEG_LR, self.LEG_LR],
[0, 0, 0, 0],
]
)
self.ABDUCTION_OFFSETS = np.array(
[
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
]
)
################### INERTIAL ####################
self.FRAME_MASS = 0.560 # kg
self.MODULE_MASS = 0.080 # kg
self.LEG_MASS = 0.030 # kg
self.MASS = self.FRAME_MASS + (self.MODULE_MASS + self.LEG_MASS) * 4
# Compensation factor of 3 because the inertia measurement was just
# of the carbon fiber and plastic parts of the frame and did not
# include the hip servos and electronics
self.FRAME_INERTIA = tuple(
map(lambda x: 3.0 * x, (1.844e-4, 1.254e-3, 1.337e-3))
)
self.MODULE_INERTIA = (3.698e-5, 7.127e-6, 4.075e-5)
leg_z = 1e-6
leg_mass = 0.010
leg_x = 1 / 12 * self.LEG_L1 ** 2 * leg_mass
leg_y = leg_x
self.LEG_INERTIA = (leg_x, leg_y, leg_z)
@property
def default_stance(self):
return np.array(
[
[
self.delta_x + self.x_shift,
self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
],
[-self.delta_y, self.delta_y, -self.delta_y, self.delta_y],
[0, 0, 0, 0],
]
)
################## SWING ###########################
    @property
    def z_clearance(self):
        # Swing-phase foot lift height [m]; stored name-mangled by the setter.
        return self.__z_clearance
    @z_clearance.setter
    def z_clearance(self, z):
        self.__z_clearance = z
        # NOTE(review): the quintic swing-curve coefficient solve below is
        # disabled, so self.z_coeffs keeps its initial value; confirm whether
        # the swing controller still needs these coefficients.
        # b_z = np.array([0, 0, 0, 0, self.__z_clearance])
        # A_z = np.array(
        #     [
        #         [0, 0, 0, 0, 1],
        #         [1, 1, 1, 1, 1],
        #         [0, 0, 0, 1, 0],
        #         [4, 3, 2, 1, 0],
        #         [0.5 ** 4, 0.5 ** 3, 0.5 ** 2, 0.5 ** 1, 0.5 ** 0],
        #     ]
        # )
        # self.z_coeffs = solve(A_z, b_z)
########################### GAIT ####################
@property
def overlap_ticks(self):
return int(self.overlap_time / self.dt)
@property
def swing_ticks(self):
return int(self.swing_time / self.dt)
@property
def stance_ticks(self):
return 2 * self.overlap_ticks + self.swing_ticks
@property
def phase_ticks(self):
return np.array(
[self.overlap_ticks, self.swing_ticks, self.overlap_ticks, self.swing_ticks]
)
@property
def phase_length(self):
return 2 * self.overlap_ticks + 2 * self.swing_ticks
class SimulationConfig:
    """Constants used to generate and run the pupper MuJoCo simulation."""

    def __init__(self):
        self.XML_IN = "pupper.xml"
        self.XML_OUT = "pupper_out.xml"
        self.START_HEIGHT = 0.3
        self.MU = 1.5  # coeff friction
        self.DT = 0.001  # seconds between simulation steps

        # Constraint-solver parameters (time constant / damping, impedances).
        self.JOINT_SOLREF = "0.001 1"  # time constant and damping ratio for joints
        self.JOINT_SOLIMP = "0.9 0.95 0.001"  # joint constraint parameters
        self.GEOM_SOLREF = "0.01 1"  # time constant and damping ratio for geom contacts
        self.GEOM_SOLIMP = "0.9 0.95 0.001"  # geometry contact parameters

        # Joint params: rotor inertia reflected through the gearbox.
        gear_ratio = 220  # servo gear ratio
        rotor_mass = 0.016  # servo rotor mass [kg]
        rotor_radius = 0.005  # rotor radius [m]
        self.ARMATURE = gear_ratio ** 2 * rotor_mass * rotor_radius ** 2  # inertia of rotational joints
        # print("Servo armature", self.ARMATURE)

        friction_damping = 1.0  # damping resulting from friction
        back_emf_damping = 0.049  # damping resulting from back-EMF
        # Total damping torque on the revolute joints.
        self.REV_DAMPING = friction_damping + back_emf_damping

        # Servo params
        self.SERVO_REV_KP = 300  # position gain [Nm/rad]
        # Force limits
        self.MAX_JOINT_TORQUE = 3.0
        self.REVOLUTE_RANGE = 1.57
| StarcoderdataPython |
from .. import fields, model, namespace
# Namespace grouping all /user endpoints.
ns_user = namespace('user')
#MODEL
# Serialization model for a single user, including a nested list of
# music styles.
user_model = model('model_user',{
    'name': fields.String,
    'id': fields.Integer,
    'email': fields.String,
    'music': fields.Nested(model('user styles', {
        'styles':fields.List(fields.String)
    }))
})
#RESPONSE
# Envelope returned on GET: a list of users under the "users" key.
get_response = model('user', {
    'users':fields.List(fields.Nested(user_model))
})
#QUERY
# Request parser for POST: name optional but non-null, email required.
user_post_query = ns_user.parser()
user_post_query.add_argument('name', type=str, nullable=False)
user_post_query.add_argument('email', type=str, nullable=False, required=True)
3357229 | <reponame>qixinbo/imjoy-rpc
"""Test the hypha server."""
import pytest
from imjoy_rpc import connect_to_server
from . import SIO_SERVER_URL
import numpy as np
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
class ImJoyPlugin:
    """Minimal plugin used to exercise the RPC round trip in tests."""

    def __init__(self, ws):
        """Remember the workspace handle for later logging calls."""
        self._ws = ws

    async def setup(self):
        """Log that the plugin finished initialising."""
        await self._ws.log("initialized")

    async def run(self, ctx):
        """Log a greeting when the plugin is executed."""
        await self._ws.log("hello world")

    async def add(self, data):
        """Return *data* incremented by one (works on scalars and arrays)."""
        return data + 1.0
async def test_connect_to_server(socketio_server):
    """Test connecting to the server."""
    # test workspace is an exception, so it can pass directly
    ws = await connect_to_server(
        {"name": "my plugin", "workspace": "public", "server_url": SIO_SERVER_URL}
    )
    # Connecting to a non-existent workspace must be rejected by the server.
    with pytest.raises(Exception, match=r".*Workspace test does not exist.*"):
        ws = await connect_to_server(
            {"name": "my plugin", "workspace": "test", "server_url": SIO_SERVER_URL}
        )
    # Omitting the workspace still connects (a default workspace is assigned).
    ws = await connect_to_server({"name": "my plugin", "server_url": SIO_SERVER_URL})
    await ws.export(ImJoyPlugin(ws))
    # Omitting the client name too: a generated 36-char (UUID-length) name is used.
    ws = await connect_to_server({"server_url": SIO_SERVER_URL})
    assert len(ws.config.name) == 36
async def test_numpy_array(socketio_server):
    """Test numpy array."""
    ws = await connect_to_server(
        {"name": "test-plugin", "workspace": "public", "server_url": SIO_SERVER_URL}
    )
    await ws.export(ImJoyPlugin(ws))
    # A second client in the same workspace talks to the plugin exported above.
    api = await connect_to_server(
        {"name": "client", "workspace": "public", "server_url": SIO_SERVER_URL}
    )
    plugin = await api.get_plugin("test-plugin")
    # Scalars survive the RPC round trip.
    result = await plugin.add(2.1)
    assert result == 2.1 + 1.0
    # Large ndarrays (2048*2048*4 float32 = 64 MB) are encoded/decoded intact.
    large_array = np.zeros([2048, 2048, 4], dtype="float32")
    result = await plugin.add(large_array)
    np.testing.assert_array_equal(result, large_array + 1.0)
| StarcoderdataPython |
31052 | """add class of delete
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2019-03-04 17:50:54.573744
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the actual revision hashes were redacted ('<PASSWORD>'
# placeholders); restore the real identifiers before running migrations.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the ``subscribers.username`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('subscribers', 'username')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: re-create ``subscribers.username``.

    The original column data is not recoverable; the column comes back
    empty (nullable VARCHAR(255)).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('subscribers', sa.Column('username', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
| StarcoderdataPython |
3248312 | # -*- coding: utf-8 -*-
"""
Utilities
=========
The module ``utils`` provides a handful of useful tools used in the audio analysis framework.
Visualization
-------------
.. autosummary::
:toctree: generated/
rand_cmap
crop_image
save_figlist
plot1d
plot_wave
plot_spectrum
plot2d
plot_spectrogram
overlay_rois
overlay_centroid
plot_features_map
plot_features
plot_correlation_map
plot_shape
false_Color_Spectro
Mathematical
------------
.. autosummary::
:toctree: generated/
running_mean
get_unimode
entropy
rms
kurtosis
skewness
moments
Parser
------
.. autosummary::
:toctree: generated/
read_audacity_annot
write_audacity_annot
date_parser
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
index_bw
into_bins
shift_bit_length
rle
linear_scale
amplitude2dB
power2dB
dB2amplitude
dB2power
mean_dB
add_dB
nearest_idx
get_df_single_row
format_features
crossfade
crossfade_list
"""
from .miscellaneous import (index_bw,
into_bins,
shift_bit_length,
rle,
linear_scale,
amplitude2dB,
power2dB,
dB2amplitude,
dB2power,
mean_dB,
add_dB,
nearest_idx,
get_df_single_row,
format_features,
crossfade,
crossfade_list)
from .visualization import (rand_cmap,
crop_image,
save_figlist,
plot1d,
plot_wave,
plot_spectrum,
plot2d,
plot_spectrogram,
overlay_rois,
overlay_centroid,
plot_features_map,
plot_features,
plot_correlation_map,
plot_shape,
false_Color_Spectro)
from .math_func import (running_mean,
get_unimode,
entropy,
rms,
kurtosis,
skewness,
moments)
from .parser import (read_audacity_annot,
write_audacity_annot,
date_parser)
# Public API of the ``utils`` package; keep in sync with the imports above.
__all__ = [
    # miscellaneous
    'index_bw',
    'into_bins',
    'shift_bit_length',
    'rle',
    'linear_scale',
    'amplitude2dB',
    'power2dB',
    'dB2amplitude',
    'dB2power',
    'mean_dB',
    'add_dB',
    'nearest_idx',
    'get_df_single_row',
    'format_features',
    'crossfade',
    'crossfade_list',
    # visualization
    'rand_cmap',
    'crop_image',
    'save_figlist',
    'plot1d',
    'plot_wave',
    'plot_spectrum',
    'plot2d',
    'plot_spectrogram',
    'overlay_rois',
    'overlay_centroid',
    'plot_features_map',
    'plot_features',
    'plot_correlation_map',
    'plot_shape',
    'false_Color_Spectro',
    # math_func
    'running_mean',
    'get_unimode',
    'entropy',
    'rms',
    'kurtosis',
    'skewness',
    'moments',
    # parser
    'read_audacity_annot',
    'write_audacity_annot',
    'date_parser'
    ]
| StarcoderdataPython |
3300572 | from flask import Flask, render_template, request, jsonify
import requests
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template("home.html")
@app.route('/provincias')
def provincias():
    """List all provinces fetched from the local JSON API."""
    payload = requests.get("http://127.0.0.1:5000/api/provincias").json()
    return render_template('provincias.html', data=payload)
@app.route('/cantones')
def cantones():
    """List all cantons fetched from the local JSON API."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('cantones.html', data=payload)
@app.route('/distritos')
def distritos():
    """List all districts fetched from the local JSON API."""
    payload = requests.get("http://127.0.0.1:5000/api/distritos").json()
    return render_template('distritos.html', data=payload)
@app.route('/SAN JOSE')
def sanjose():
    """Render the San Jose province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('sanjose.html', data=payload)
@app.route('/ALAJUELA')
def alajuela():
    """Render the Alajuela province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('alajuela.html', data=payload)
@app.route('/CARTAGO')
def cartago():
    """Render the Cartago province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('cartago.html', data=payload)
@app.route('/HEREDIA')
def heredia():
    """Render the Heredia province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('heredia.html', data=payload)
@app.route('/GUANACASTE')
def guanacaste():
    """Render the Guanacaste province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('guanacaste.html', data=payload)
@app.route('/PUNTARENAS')
def puntarenas():
    """Render the Puntarenas province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('puntarenas.html', data=payload)
@app.route('/LIMON')
def limon():
    """Render the Limon province page with the canton data."""
    payload = requests.get("http://127.0.0.1:5000/api/cantones").json()
    return render_template('limon.html', data=payload)
@app.route('/CONSULADO')
def consulado():
    """Render the consulate page with the canton data.

    Bug fix: the API host was the mangled string "12172.16.31.10", which is
    not a valid IPv4 address; every sibling route targets the local API at
    127.0.0.1, so this route now does the same.
    """
    response = requests.get("http://127.0.0.1:5000/api/cantones")
    return render_template('consulado.html', data=response.json())
@app.route('/CENTRAL')
def central():
    """Render the central canton page with the district data."""
    payload = requests.get("http://127.0.0.1:5000/api/distritos").json()
    return render_template('central.html', data=payload)
if __name__ == '__main__':
    # Development entry point; debug mode enables the reloader and debugger.
    app.debug = True
    app.run(port=2000)
1751507 | import numpy as np
import matplotlib.pylab as plt
import matplotlib.patches as patches
import os
import warnings
from scipy.interpolate import interp1d
import time
warnings.filterwarnings("ignore", category=FutureWarning)
def get_image_intensity(image):
    """Sum all pixel values of *image*.

    Vectorized replacement for the original per-pixel Python loop (which is
    O(H*W) interpreter iterations). Summing over the first two axes keeps the
    original semantics for both grayscale (H x W -> scalar) and multi-channel
    (H x W x C -> per-channel totals) inputs, and also accepts plain nested
    lists via ``np.asarray``.
    """
    return np.asarray(image).sum(axis=(0, 1))
def get_roi_image(filepath, roi):
    """Load *filepath* and crop it to *roi* = [[x0, x1], [y0, y1]] (pixels)."""
    (x_lo, x_hi), (y_lo, y_hi) = roi
    full = plt.imread(filepath)
    return full[y_lo:y_hi, x_lo:x_hi]
def get_folder_intensities(folder, roi, extension=".tif", show_images=False, bg_roi=None):
    """Compute ROI intensity over a folder of time-stamped images.

    File names are expected to end in ``_<milliseconds>`` before the
    extension; that number becomes the time axis (in seconds). When
    *bg_roi* is given, the signal is normalised per pixel and rescaled by
    the drift of the background region relative to the first frame.
    Returns (times, intensities), sorted by time.
    """
    files = os.listdir(folder)
    values = []
    bg_pixels = 0
    bg_int = 1  # neutral background value when bg_roi is None
    sps_pixels = (roi[0][1]-roi[0][0])*(roi[1][1]-roi[1][0])
    if bg_roi is not None:
        bg_pixels = (bg_roi[0][1]-bg_roi[0][0])*(bg_roi[1][1]-bg_roi[1][0])
    i=0
    length = len(files)
    for file in files:
        if file.endswith(extension):
            if show_images:
                show_image_with_roi(os.path.join(folder,file), roi, pause=True)
            sps_int = get_image_intensity(get_roi_image(os.path.join(folder,file), roi))
            if bg_roi is not None:
                # Per-pixel averages so signal and background are comparable.
                sps_int = sps_int/sps_pixels
                bg_int = get_image_intensity(get_roi_image(os.path.join(folder,file), bg_roi))/bg_pixels
            # Timestamp (ms -> s) parsed from the file-name suffix.
            values.append([int(os.path.splitext(file)[0].split('_')[-1])/1000, sps_int, bg_int])
            # NOTE(review): i counts only matching files but length counts all
            # entries, so the progress readout can stop short of length.
            i += 1
            print("Processed File " + str(i) + "/" + str(length))
    values.sort()
    # Rescale by background drift relative to the first (earliest) frame;
    # this is a no-op when bg_roi is None since every bg_int is 1.
    bg_int_start = values[0][2]
    return [val[0] for val in values], [val[1]*(bg_int_start/val[2]) for val in values]
def show_image_with_roi(filepath, roi, pause=False):
    """Display an image next to its cropped ROI, outlining the ROI in red.

    Bug fix: the rectangle previously used the module-level globals
    ``roi_x``/``roi_y`` (defined only under ``__main__``) instead of the
    *roi* argument, so the outline was wrong -- or raised NameError -- for
    any caller passing a different ROI.

    :param filepath: path of the image to load.
    :param roi: [[x0, x1], [y0, y1]] crop bounds in pixels.
    :param pause: if True, draw non-blocking (for animation loops);
        otherwise block in ``plt.show()``.
    """
    (x_lo, x_hi), (y_lo, y_hi) = roi
    im = plt.imread(filepath)
    im_roi = get_roi_image(filepath, roi)
    ax = plt.subplot(121)
    plt.imshow(im, interpolation="none", cmap='gray')
    rect = patches.Rectangle((x_lo, y_lo), x_hi - x_lo, y_hi - y_lo,
                             linewidth=1, edgecolor='r', facecolor='none')
    ax.add_patch(rect)
    ax = plt.subplot(122)
    plt.imshow(im_roi, interpolation="none", cmap='gray')
    if pause:
        plt.pause(0.01)
    else:
        plt.show()
if __name__ == "__main__":
FOLDER = "/home/luke/Documents/RecordTest9/RHEED/record/"
roi_x = [610, 650]
roi_y = [315, 355]
X, Y = get_folder_intensities(FOLDER, [roi_x, roi_y], show_images=False)
interp = interp1d(X,Y)
plt.subplot(211)
plt.plot(X, Y, 'o')
x = np.linspace(0.1, 19.0, 10000)
plt.plot(x, interp(x))
plt.subplot(212)
sp = np.fft.rfft(interp(x))
freq = np.fft.rfftfreq(x.shape[-1])
plt.plot(freq, sp.real, freq, sp.imag)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
1713111 | """oilandrope URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.i18n import JavaScriptCatalog
urlpatterns = [
    # JavaScript translations
    path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
]
# Language-prefixed routes (e.g. /es/admin/); the default language carries
# no prefix because prefix_default_language=False below.
urlpatterns += i18n_patterns(
    # Main site
    path('', include('core.urls')),
    # Admin site
    path('admin/', admin.site.urls),
    # API
    path('api/', include('api.urls')),
    # Common
    path('common/', include('common.urls')),
    # Auth system
    path('accounts/', include('registration.urls')),
    # Bot
    path('bot/', include('bot.urls')),
    # Dynamic Menu
    path('dynamic_menu/', include('dynamic_menu.urls')),
    # React FrontEnd
    path('frontend/', include('frontend.urls')),
    # Roleplay
    path('roleplay/', include('roleplay.urls')),
    prefix_default_language=False,
)
# Serve user-uploaded media from Django itself in development only.
if settings.DEBUG:  # pragma: no cover
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
1716338 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import scriptcontext as sc
import compas_rhino
import IGS_edges_table_cmd
import IGS_edge_information_cmd
import IGS_form_inspector_control_cmd
import IGS_form_constraint_edge_inspect_cmd
import IGS_form_constraint_vertex_inspect_cmd
import IGS_constraint_table_cmd
__commandname__ = "IGS_toolbar_inspector"
def RunCommand(is_interactive):
    """Prompt for an inspection mode and delegate to the matching command."""
    if 'IGS' not in sc.sticky:
        compas_rhino.display_message('IGS has not been initialised yet.')
        return

    # Map each menu entry to the command module that implements it;
    # insertion order determines the order shown in the Rhino prompt.
    dispatch = {
        "EdgesTable": IGS_edges_table_cmd,
        "EdgeInformation": IGS_edge_information_cmd,
        "ForcePolygons": IGS_form_inspector_control_cmd,
        "EdgeConstraints": IGS_form_constraint_edge_inspect_cmd,
        "VertexConstraints": IGS_form_constraint_vertex_inspect_cmd,
        "ConstraintsTable": IGS_constraint_table_cmd,
    }
    option = compas_rhino.rs.GetString("Select Inspection Mode:", strings=list(dispatch))
    if not option:
        return
    command = dispatch.get(option)
    if command is not None:
        command.RunCommand(True)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    # Allow running the command directly from the Rhino Python editor.
    RunCommand(True)
| StarcoderdataPython |
def bruteGen(tweet):
    """Normalise a raw tweet: expand hashtags and contractions, map
    COVID-19 slang to plain words, translate a few Hindi terms and strip
    smart quotes.

    The substitutions are applied strictly in order, because several later
    patterns only match the output of earlier ones.
    """
    substitutions = (
        ("indiavscorona", "india versus coronavirus"),
        ("outbreakindia", "outbreak india"),
        ("real”", "real"),
        ("mutra", "urine"),
        ("fakenews", "fake news"),
        ("“omg", "oh my god"),
        ("“damn", "damn"),
        ("god’s", "gods"),
        ("lockdownextension", "lockdown extension"),
        ("कोरोना", "coronavirus"),
        ("indiathanks", "india thanks"),
        ("coronacoronavirus", "coronavirus"),
        ("coronavirusinsa", "coronavirus in south africa"),
        ("coronaviruscanada", "coronavirus canada"),
        ("coronavirusau", "coronavirus australia"),
        ("coronavirusaus", "coronavirus australia"),
        ("cuomoprimetime", "new york governor prime time"),
        ("letsfightcoronavirus", "let us fight coronavirus"),
        ("covid19", "coronavirus"),
        ("covid", "coronavirus"),
        ("aprilfoolsday", "april fools day"),
        ("covidー19", "coronavirus"),
        ("stayathome", "stay at home"),
        ("“april", "april"),
        ("“i", "i"),
        ("aprilfools", "april fools"),
        ("coronavirusoutbreak", "coronavirus outbreak"),
        ("virusー19", "coronavirus"),
        ("fool’s", "fools"),
        ("what’s", "what is"),
        ("coronavirus”", "coronavirus"),
        ("fools”", "fools"),
        ("stayhome", "stay home"),
        ("quarantinelife", "quarantine life"),
        ("tablighijamaat", "muslims"),
        ("corona”", "coronavirus"),
        ("fauci", "physician"),
        ("april’s", "april"),
        ("pmkcallscurfewextension", "prime minister calls for curfew extension"),
        ("“virus", "coronavirus"),
        ("virus”", "coronavirus"),
        ("“corona", "coronavirus"),
        ("coronavirustruth", "coronavirus truth"),
        ("socialdistancing", "social distancing"),
        ("homestaysafe", "home stay safe"),
        ("“coronavirus", "coronavirus"),
        ("coronavirusupdate", "coronavirus update"),
        ("virusvirus", "coronavirus"),
        ("coronaviruspandemic", "coronavirus pandemic"),
        ("thelockdown", "the lockdown"),
        ("nizamuddin", "delhi"),
        ("trump’s", "donald trump"),
        ("“the", "the"),
        ("virus2019", "coronavirus"),
        ("indiafightscorona", "india fights coronavirus"),
        ("homesavelives", "home save lives"),
        ("everyone’s", "everyone"),
        ("coronariskforprisoners", "coronavirus risk for prisoners"),
        ("coronavirususa", "coronavirus usa"),
        ("tablighi", "mosque"),
        ("delhimarkaz", "<NAME>"),
        ("coronajihad", "coronavirus struggle"),
        ("coronajihaad", "coronavirus struggle"),
        ("aprilfool", "april fool"),
        ("trumppressconference", "trump press conference"),
        ("i’m", "i am"),
        ("tigerking", "tiger king"),
        ("it’s", "it is"),
        ("trumpvirus", "trump virus"),
        ("today’s", "today is"),
        ("“you", "you"),
        ("“a", "a"),
        ("fools’", "fools"),
        ("rtgnews", "news"),
        ("19india", "india"),
        ("coronavirusindia", "coronavirus india"),
        ("y’all", "you all"),
        ("मीडिया", "media"),
        ("here’s", "here is"),
        ("“we", "we"),
        ("“fuck", "fuck"),
        ("flattenthecurve", "flatten the curve"),
        ("jammuandkashmir", "jammu and kashmir"),
        ("chriscuomo", "new york governor"),
        ("‘april", "april"),
        ("dranbumani", "doctor"),
        ("tndemandsmasstesting", "tamil nadu demands mass testing"),
        ("tabligi", "muslims"),
        ("don’t", "do not"),
        ("वायरस", "virus"),
        ("letsfightvirus", "let us fight virus"),
    )
    for old, new in substitutions:
        tweet = tweet.replace(old, new)
    return tweet
| StarcoderdataPython |
1676413 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# https://github.com/pytorch/fairseq. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from modules.utils import fill_with_neg_inf, get_incremental_state, set_incremental_state
class MultiheadAttention(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """
    def __init__(self, embed_dim, num_heads, dropout=0., bias=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        # 1/sqrt(head_dim) scaling applied to queries before the dot product.
        self.scaling = self.head_dim**-0.5
        # Lazily-built causal mask cache, grown on demand in buffered_mask().
        self._mask = None
        # Single packed projection matrix for q, k and v (3 * embed_dim rows).
        self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.reset_parameters()
    def reset_parameters(self):
        # Xavier init for projections; zero biases.
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
    def forward(self, query, key, value, mask_future_timesteps=False,
                key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False):
        """Input shape: Time x Batch x Channel
        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """
        # Detect self-attention vs. encoder-decoder attention by tensor identity.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None
        if qkv_same:
            # self-attention: one matmul yields q, k and v
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                # this will allow us to concat it with previous value and get
                # just get the previous value
                k = v = q.new(0)
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        # Pre-scale queries instead of dividing the logits later.
        q *= self.scaling
        if saved_state is not None:
            # Append the new timestep(s) to the cached keys/values and re-save.
            if 'prev_key' in saved_state:
                k = torch.cat((saved_state['prev_key'], k), dim=0)
            if 'prev_value' in saved_state:
                v = torch.cat((saved_state['prev_value'], v), dim=0)
            saved_state['prev_key'] = k
            saved_state['prev_value'] = v
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(0)
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        # Fold heads into the batch dimension: (bsz*num_heads, len, head_dim).
        q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        # only apply masking at training time (when incremental state is None)
        if mask_future_timesteps and incremental_state is None:
            assert query.size() == key.size(), \
                'mask_future_timesteps only applies to self-attention'
            attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.float().masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            ).type_as(attn_weights)  # FP16 support: cast to float and back
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        # Softmax in fp32 for numerical stability, then cast back.
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        # Un-fold heads and project back to embed_dim.
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        # average attention weights over heads
        attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.sum(dim=1) / self.num_heads
        return attn, attn_weights
    def in_proj_qkv(self, query):
        # Self-attention fast path: one projection, split into three chunks.
        return self._in_proj(query).chunk(3, dim=-1)
    def in_proj_kv(self, key):
        # Rows [embed_dim:] of the packed matrix hold the k and v projections.
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)
    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim)
    def in_proj_v(self, value):
        return self._in_proj(value, start=2*self.embed_dim)
    def _in_proj(self, input, start=None, end=None):
        # Apply rows [start:end) of the packed q/k/v projection matrix.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        if end is not None:
            weight = weight[:end, :]
            if bias is not None:
                bias = bias[:end]
        if start is not None:
            weight = weight[start:, :]
            if bias is not None:
                bias = bias[start:]
        return F.linear(input, weight, bias)
    def buffered_mask(self, tensor):
        # Lazily build (and grow) an upper-triangular -inf mask for causal
        # (future-timestep) masking; sliced to the current size on return.
        dim = tensor.size(-1)
        if self._mask is None:
            self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._mask.size(0) < dim:
            self._mask = torch.triu(fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)
        return self._mask[:dim, :dim]
    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                # dim 1 of the cached tensors is the batch dimension.
                input_buffer[k] = input_buffer[k].index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)
    def _get_input_buffer(self, incremental_state):
        # Returns {} when no state has been saved yet.
        return get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
| StarcoderdataPython |
3371621 | <reponame>Infosecurity-LLC/unicon_v2<filename>connector.py
#!/usr/bin/env python3
import logging
import time
from logging.handlers import TimedRotatingFileHandler
import threading
import os
from raven.handlers.logging import SentryHandler
from raven.conf import setup_logging
import pymongo
from pymongo import errors
import pymssql
from datetime import datetime, timedelta
from dateutil.parser import parse
from pytz import timezone
import decimal
import re
import sys
import ipaddress
from bson.objectid import ObjectId
from modules.configer import configer
from modules.pre_transformer import PreTransformer
from modules.post_transformer import PostTransformer
from urllib3.exceptions import InsecureRequestWarning
import urllib3
import socket
from modules.db_connectors import SelectorMSSQL
from modules.db_connectors import SelectorPostgreSQL
urllib3.disable_warnings(InsecureRequestWarning)
setting = configer()
tz = timezone(setting['tz'])
logger = logging.getLogger(__name__)
logger.setLevel(setting['logging']['basic_level'])
# Make all relative paths (logs, configs) resolve against the script's dir.
current_dir = os.path.abspath(os.path.dirname(__file__))
os.chdir(current_dir)
# Optional rotating file log: daily rotation, 7 backups kept.
# NOTE(review): the guard reads setting['logging']['log'] but the handler
# path uses setting['log'] -- confirm which key actually holds the path.
if setting['logging'].get('log'):
    file_handler = TimedRotatingFileHandler(filename=setting['log'], when='D', backupCount=7, encoding='utf-8')
    file_handler.setLevel(setting['logging']['file_level'])
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)-10s - [in %(pathname)s:%(lineno)d]: - %(message)s'))
    logger.addHandler(file_handler)
class ContextFilter(logging.Filter):
    """Inject the machine hostname into every log record as %(hostname)s."""

    # Resolved once at import time; shared by all records.
    hostname = socket.gethostname()

    def filter(self, record):
        setattr(record, "hostname", ContextFilter.hostname)
        return True  # never suppress a record
# Console logging; ContextFilter supplies the %(hostname)s field.
stream_handler = logging.StreamHandler()
stream_handler.setLevel(setting['logging']['term_level'])
stream_handler.addFilter(ContextFilter())
stream_handler.setFormatter(
    logging.Formatter('%(asctime)s - %(levelname)-10s - %(hostname)s - [in %(pathname)s:%(lineno)d]: - %(message)s'))
logger.addHandler(stream_handler)
# Optional Sentry error reporting, enabled only when a DSN is configured.
sentry_url = setting.get('sentry_url')
if sentry_url:
    handler = SentryHandler(sentry_url)
    handler.setLevel(setting['logging']['sentry_level'])
    setup_logging(handler)
# MONGO ###########
mongo_host, mongo_port = setting['mongodb']['host'], setting['mongodb']['port']
mongo_lock = threading.Lock()
# Connector's own parameters are mandatory; fail fast if missing.
self_params = setting.get('self')
if not self_params:
    raise Exception('Setting error. Self params not found')
class MongoException(Exception):
    """Connector error category for Mongo-related failures."""
    pass
class TransformationException(Exception):
    """Connector error category for event-transformation failures."""
    pass
class DetermineException(Exception):
    """Connector error category for event-type determination failures."""
    pass
class DbaseException(Exception):
    """Raised when interaction with the external sensor database fails."""
    pass
class Collector:
    def __init__(self, conn_type, environment, env_alias):
        """Bind the collector to one sensor type and one environment.

        :param conn_type: sensor/connector name (key into setting['sensors']).
        :param environment: per-environment config (external DB, org mappings).
        :param env_alias: short alias of the environment.
        """
        self.env_alias = env_alias
        self.conn_type = conn_type
        self.environment = environment
        # Raw SQL templates configured for this sensor type.
        self.rq = setting['sensors'][self.conn_type]['raw_query']
        self.mongo = pymongo.MongoClient(host=mongo_host, port=mongo_port)
        self.filters = setting['sensors'][self.conn_type]['filters']
    def select_events(self, last_position=None, event_id=None):
        """Fetch events from the external DB and yield them as raw dicts.

        Exactly one of *last_position* (incremental pull of everything after
        that position) or *event_id* (single-event pull) must be provided.
        :raises DbaseException: when the DB selector cannot be created.
        """
        try:
            # Pick the DB driver by environment; MSSQL is the default.
            if "db_type" in self.environment and self.environment['db_type'] == "postgresql":
                eg = SelectorPostgreSQL(self.conn_type, self.environment['external_db'])
            else:
                eg = SelectorMSSQL(self.conn_type, self.environment['external_db'])
            # eg = Selector(self.conn_type, self.environment['external_db'])
        except Exception as err:
            raise DbaseException(f'[{self.conn_type}] ошибка при взаимодействии с БД : {err}')
        if last_position:
            cur = eg.raw_query(
                self.rq['all'].format(lastposition=last_position, sensor=self.environment['external_db']['server']))
        elif event_id:
            cur = eg.raw_query(
                self.rq['by_event_id'].format(event_id=event_id, sensor=self.environment['external_db']['server']))
        else:
            logger.critical(f'[{self.conn_type}] OMG!.. what we sent in the parameters??')
            sys.exit(0)  # should never happen in theory
        vals = cur.fetchall()
        for val in vals:
            # Zip row values with the configured column names for this sensor.
            event = dict(zip(setting['sensors'][self.conn_type]['select_keys'], val))
            for k, v in event.items():
                # Make values JSON/Mongo friendly before yielding.
                if isinstance(v, datetime):
                    event[k] = event[k].isoformat()
                if isinstance(v, decimal.Decimal):
                    event[k] = int(event[k])
            yield event
    def define_organisation(self, event):
        """
        Resolve which organisation an event belongs to.
        Tries the managing-server mapping, then the domain mapping; falls
        back to the environment default, and finally to 'other'.
        """
        def organization_mapping_by_vsname(event, pre_org=None):
            """
            Determine the organisation from the managing (virtual) server name.
            :param event:
            :param pre_org: organisation name possibly mapped earlier.
                If mapping fails, pre_org is passed through as the result.
            :return:
            """
            if not self.environment.get('organization_mapping_by_vsName') \
                    or 'organization_mapping_by_vsName' not in self.environment:
                # No mapping data in the config - skip this mapping.
                return pre_org
            vsname = event.get('vserver')
            if vsname in self.environment.get('organization_mapping_by_vsName'):
                if self.environment['organization_mapping_by_vsName'].get(vsname) == "not_map":
                    logger.warning(
                        f'[{self.conn_type}] Обнаружены события с организацией [{vsname}] которую отказались мапить')
                    return pre_org
                logger.debug(f"[{self.conn_type}] По имени управляющего сервера событие относится к "
                             f"{vsname} - {self.environment['organization_mapping_by_vsName'].get(vsname)}")
                return self.environment['organization_mapping_by_vsName'].get(vsname)
            logger.warning(f"[{self.conn_type}] Событие по незарегистрированной организации {vsname}")
            # The organisation could not be mapped by virtual server name.
            return pre_org
        def organization_domain_mapping(event, pre_org=None):
            """
            Determine the organisation from the computer domain.
            :param event:
            :param pre_org: organisation name possibly mapped earlier.
                If mapping fails, pre_org is passed through as the result.
            :return:
            """
            if not setting.get('organization_mapping_by_domain') \
                    or 'organization_mapping_by_domain' not in setting:
                return pre_org
            domain = event.get('comp_dom')
            if domain in setting['organization_mapping_by_domain']:
                logger.debug(f"[{self.conn_type}] По домену событие относится к "
                             f"{domain} - {setting['organization_mapping_by_domain'][domain]}")
                return setting['organization_mapping_by_domain'][domain]
            return pre_org  # not present in the map
        org = None
        if event.get('vserver'):
            org = organization_mapping_by_vsname(event, org)
        if event.get('comp_dom') and not org:
            org = organization_domain_mapping(event, org)
        if not org and self.environment.get('organization'):
            # Nothing mapped: use the default organisation from the config.
            org = self.environment.get('organization')
        if not org and not self.environment.get('organization'):
            # No default in the config either: fall back to 'other'.
            logger.error(f'[{self.conn_type}] В конфиге не указана организация и коннектор не смог определить её! '
                         f'Выставляю other')
            org = 'other'
        return org
@staticmethod
def cast(event: dict):
"""приведение типов"""
for k, v in event.items():
if isinstance(v, datetime):
event[k] = event[k].isoformat()
if isinstance(v, decimal.Decimal):
event[k] = int(event[k])
if isinstance(v, ObjectId):
event[k] = str(event[k])
try:
event[k] = int(event[k])
except (ValueError, TypeError):
pass
if k in ['username']:
event[k] = str(event[k])
return event
    def basically_transformation(self, event):
        """
        Map event fields to the standard form, driven by the sensor config.

        Looks up the transform rule for the event's (event_type, common_type)
        pair in ``setting['sensors'][conn_type]['transform_rules']`` and builds
        a new normalised event from it.

        :param event: raw event dict (values are cast first via :meth:`cast`)
        :return: normalised event dict
        :raises TransformationException: when no matching rule exists
        :raises DetermineException: when a type-detection regex is broken
        """
        def determine_common_type(event):
            """Determine the main event type: http, network, file, email etc."""
            def validate(regex, field):
                # A missing regex or missing field can never match.
                if not regex:
                    return False
                if not field:
                    return False
                cwr = re.compile(regex)
                try:
                    res = cwr.match(field)
                except Exception as err:
                    raise DetermineException(f'[{self.conn_type}] Ошибка {err} в обработке регулярки. '
                                             f'Регулярка: {regex}, строка: {field}, событие целиком: {event}')
                return res
            # config shape: {common_type: {field_name: regex, ...}, ...}
            determine_common_type = setting['sensors'][self.conn_type]['determine_common_type']
            for common_types, type_conditions in determine_common_type.items():
                for field_name, regex_str in type_conditions.items():
                    res = validate(regex_str, event.get(field_name))
                    if res:
                        return common_types
            # default type when no condition matched
            return 'file'
        def get_transform_rule(event):
            """Fetch the rule the event will be rendered with, plus its common type."""
            transform_rules = setting['sensors'][self.conn_type].get('transform_rules')
            if not transform_rules:
                raise TransformationException(f'[{self.conn_type}] Конфиг не содержит transform_rules')
            etype = event.get('event_type')
            ctype = event.get('common_type')
            if not ctype:
                # common_type was not generated upstream -- detect it now
                ctype = determine_common_type(event)
            try:
                cwr = transform_rules[etype][ctype]  # current working rule
            except KeyError:
                raise TransformationException(
                    f'[{self.conn_type}] Не удалось найти правило для обработки {etype}->{ctype}. Событие: {event}')
            except Exception as err:
                raise TransformationException(
                    f'[{self.conn_type}] Случилось что-то страшное... {etype}->{ctype}. ERROR: {err} Событие: {event}')
            return cwr, ctype
        def regex_find(regex, field):
            """Run ``re.findall``; on a broken regex log and return the raw field."""
            if not field or not regex:
                return False
            cwr = re.compile(regex)
            try:
                cwr.findall(field)
            except Exception as err:
                logger.critical(f'[{self.conn_type}] Ошибка {err} в обработке регулярки. '
                                f'Регулярка: {regex}, строка: {field}, событие целиком: {event}')
                return field
            return cwr.findall(field)
        def normalize_ip(ip):
            """Normalise an ip given as int, bytes or 0x-hex string to dotted-quad."""
            def hex2ip(hex_ip):
                """0xXXXXXXXX -> dotted-quad ip string."""
                hex_data = hex_ip[2:]
                if len(hex_data) < 8:
                    # left-pad a short hex value to 8 digits
                    hex_data = ''.join(('0', hex_data))
                ipaddr = "%i.%i.%i.%i" % (
                    int(hex_data[0:2], 16), int(hex_data[2:4], 16), int(hex_data[4:6], 16), int(hex_data[6:8], 16))
                return ipaddr
            def int2ip(int_ip):
                if int_ip < 0:  # some databases store negative values
                    # NOTE(review): this uses the outer `ip`, not the parameter
                    # `int_ip`; the two only coincide on the current call sites
                    # (bytes never yield a negative int) -- confirm before reuse
                    normal_int_ip = ip * -1
                else:
                    normal_int_ip = int_ip
                return str(ipaddress.IPv4Address(normal_int_ip))
            if not ip:
                return None
            if isinstance(ip, int):
                return int2ip(ip)
            if isinstance(ip, bytes):
                bytes_ip = int.from_bytes(ip, byteorder='big')
                return int2ip(bytes_ip)
            if '0x' in ip:
                return hex2ip(ip)
            else:
                logger.error(f'[{self.conn_type}] Не удалось конвертировать {ip} в ip адрес')
                return None
        event = self.cast(event)
        trans_rule, common_type = get_transform_rule(event)
        norm_event = {'common_type': common_type}  # the normalised event being built
        # each rule value looks like "<custom_type>--><expression>"
        for k, v in trans_rule.items():
            custom_type, expression = v.split('-->')
            if custom_type == 'bool':
                # constant boolean: empty expression means "unknown"
                if not expression:
                    norm_event.update({k: None})
                else:
                    norm_event.update({k: bool(int(expression))})
            if custom_type == 'int':
                # constant integer
                norm_event.update({k: int(expression)})
            if custom_type == 'str':
                # constant string
                norm_event.update({k: str(expression)})
            if custom_type == 'key':
                # copy a field from the source event verbatim
                norm_event.update({k: event[expression]})
            if custom_type == 'regex':
                # expression is "<source_field><--'<regex>'"
                field_key, regex = expression.split("<--'")
                regex = regex[:-1]
                res = regex_find(regex, event.get(field_key))
                if not res:
                    norm_event.update({k: None})
                elif len(res) == 1:
                    norm_event.update({k: res[0]})
                else:
                    # multiple matches: fall back to the raw string field
                    if isinstance(event.get(field_key), str):
                        norm_event.update({k: event.get(field_key)})
                    else:
                        norm_event.update({k: None})
                        logger.warning(f'[{self.conn_type}] Неожиданный результат от регулярки {res} regex=[{regex}] '
                                       f'field_key=[{event.get(field_key)}]')
            if custom_type == 'int2ip':
                # convert an int/bytes/hex field into a dotted-quad ip
                try:
                    norm_event.update({k: normalize_ip(event[expression])})
                except ipaddress.AddressValueError:
                    logger.warning(
                        f'[{self.conn_type}] Не удалось конвертировать int поле в ip адрес: {event[expression]}')
                    norm_event.update({k: None})
                except Exception as err:
                    logger.warning(f'[{self.conn_type}] Не удалось конвертировать {event[expression]} поле в ip адрес:')
                    logger.exception(err)
                    norm_event.update({k: None})
        return norm_event
def transformation(self, event):
"""Привести поля к стандартному виду"""
norm_event = {}
pre_transformer = PreTransformer()
def add_require(e):
e.update({'segment': self.environment.get('segment')})
e.update({'av': setting['sensors'][self.conn_type]['device']})
e.update({'env_alias': self.env_alias})
e.update({'organization': self.define_organisation(e)})
return e
# if not setting['sensors'][self.conn_type].get('determine_common_type'):
# # сли в конфиге нет описания типов событий - запускаем маппинг и дальнейшую трансформацию для СИЕМа
# event.update({'av': setting['sensors'][self.conn_type]['device']}) # for pre_transformer
# event = pre_transformer.pre_transform_event(event)
event.update({'av': setting['sensors'][self.conn_type]['device']}) # for pre_transformer
event = pre_transformer.pre_transform_event(event)
if "transform_rules" in setting['sensors'][self.conn_type]:
norm_event = self.basically_transformation(event)
else:
norm_event = event
norm_event = add_require(norm_event)
return norm_event
def is_identical_events(self, event, full_events_list):
"""
Проверяем, не было ли раньше такого события
:param event:
:param full_events_list:
:return:
"""
def get_search_dict(event, abort_rules):
"""Сформировать два словаря для поиска в монге событий, которые задублированы"""
include_identical_keys = abort_rules.get('include_identical_keys')
exclude_identical_keys = abort_rules.get('exclude_identical_keys')
search_dict_includes = {}
search_dict_excludes = dict(event)
if isinstance(exclude_identical_keys, list):
for key in exclude_identical_keys:
search_dict_excludes.pop(key)
if isinstance(include_identical_keys, list):
for key in include_identical_keys:
search_dict_includes.update({key: event.get(key)})
return search_dict_includes, search_dict_excludes
def abort_identical_db(event):
"""
Не записывать в базу, если это повтор
Поиск по базе
:param event:
:return:
"""
abort_rules = setting['sensors'][self.conn_type]['abort_rules']
if not abort_rules:
return False
search_dict1, search_dict2 = get_search_dict(event, abort_rules)
# проверка по коллекции, в которую собрались писать
# organization = self.define_organisation(event)
if event.get('organization'):
organization = event.get('organization')
else:
organization = self.environment['organization']
collection = self.mongo[f'unicon_{self.conn_type}'][organization]
if search_dict1 and len(list(collection.find(search_dict1))) == 0:
return False
elif search_dict2 and len(list(collection.find(search_dict2))) == 0:
return False
return True
def abort_identical_local(new_event, events_list):
"""
Не записывать в базу, если это повтор
Поиск повторов в словаре, до записи в базу
:param new_event:
:param events_list:
:return:
"""
identical = False
abort_rules = setting['sensors'][self.conn_type]['abort_rules']
normal_new_event = get_search_dict(new_event, abort_rules)
for event in events_list:
if get_search_dict(event, abort_rules) == normal_new_event:
return True
return identical
if not abort_identical_local(event, full_events_list):
if not abort_identical_db(event):
return False
else:
logger.debug(f"[{self.conn_type}] --Event already exist in db [{event.get('event_id')}]: {event}")
return True
else:
logger.debug(f"[{self.conn_type}] "
f"-Event already exist in local temp full_events_list [{event.get('event_id')}]: {event}")
return True
return True
def insert2mongo(self, event_list):
"""
Преобразуем список всех событий в словарь списков событий по каждой организации.
Записываем события по каждой организации в свой collection
:param event_list:
:return:
"""
def make_event_dict():
"""
Собираем словарь списоков:
key - organization
value - event_list
"""
tmp_event_dict = {}
for event in event_list:
if 'event_type' in event and event['event_type'] and self.filters['exclude_types'] \
and event['event_type'] in self.filters['exclude_types']:
logger.debug(f"[{self.conn_type}] По {event['event_type']} реагирование временно не ведётся, "
f"событие в базу заноситься не будет {event}")
else:
# organization = self.define_organisation(event)
# event.update({'organization': organization})
if event.get('organization'):
organization = event.get('organization')
else:
organization = self.environment['organization']
# Добавим время и статус. Статус 0, значит событие еще не отправлялось в бота.
event.update({'_receive_dt': datetime.utcnow()})
event.update({'_status': 0})
if organization in tmp_event_dict:
tmp_event_list = tmp_event_dict[organization]
tmp_event_list.append(event)
tmp_event_dict.update({organization: tmp_event_list})
else:
tmp_event_list = [event]
tmp_event_dict.update({organization: tmp_event_list})
logger.debug(f"[{self.conn_type}] В базу записано новое событие [{event.get('event_id')}]: {event}")
return tmp_event_dict
if event_list:
event_dict = make_event_dict()
if event_dict:
for key in event_dict:
collection = self.mongo[f'unicon_{self.conn_type}'][key]
collection.insert_many(event_dict[key])
def collect_events(self, last_position=False, event_id=False):
""" Забираем события из базы АВ"""
full_events_list = []
for e in self.select_events(last_position=last_position, event_id=event_id):
try:
ce = self.transformation(e)
except TransformationException as err:
logger.error(err)
continue
if not ce:
logger.error(f"Не удалось разобрать событие {e.get('event_id')}: {e}")
continue
if not event_id:
if not self.is_identical_events(ce, full_events_list):
full_events_list.append(ce)
else:
full_events_list.append(ce)
self.insert2mongo(full_events_list)
class Processor:
    """Sends collected events to the bot via the configured transport (messila_api / kafka / nxlog)."""
    def __init__(self, device, send_setting):
        # send_setting: the sensor's 'send2bot' config section
        self.__sender_settings = send_setting
        self.__sender_client = self.get_sender_client()
        self.device = device
    @staticmethod
    def delete_service_fields(_event):
        """
        Remove service fields (keys starting with "_") from the event, in place.
        :return: the same event dict
        """
        empty_keys = [k for k, v in _event.items() if k[0] == "_"]
        for k in empty_keys:
            del _event[k]
        return _event
    def get_sender_client(self):
        """
        Initialise the client matching the configured send method.
        :return: client instance, or False when no known method is configured
        """
        if self.__sender_settings['method'] == 'messila_api':
            from modules.messila_api import MessilaApiClient
            logging.debug('loading messila_api_client')
            messila_api_client = MessilaApiClient(api_url=self.__sender_settings['credentials']['messila_api']['host'],
                                                  login=self.__sender_settings['credentials']['messila_api']['login'],
                                                  password=self.__sender_settings['credentials']['messila_api'][
                                                      'password'],
                                                  verify=self.__sender_settings['credentials']['messila_api']['verify'])
            logging.debug('Messila_api_client loaded')
            return messila_api_client
        if self.__sender_settings['method'] == 'kafka':
            from modules.kafka_con import Producer
            logging.debug('loading producer')
            producer = Producer(auth=self.__sender_settings['credentials']['kafka']['auth_type'],
                                servers=self.__sender_settings['credentials']['kafka']['servers'],
                                **self.__sender_settings['credentials']['kafka']['auth_params'])
            logging.debug('Producer loaded')
            return producer
        if self.__sender_settings['method'] == 'nxlog':
            from socutils import NXLogSender
            logging.debug('loading NxlogSender')
            # nxlog keeps an open connection for the Processor's lifetime
            nxlog = NXLogSender(self.__sender_settings['credentials']['nxlog']['host'],
                                self.__sender_settings['credentials']['nxlog']['port'])
            nxlog.connect()
            logging.debug('NxlogSender loaded')
            return nxlog
        return False
    def sender_close(self):
        """Close the client session for transports that need explicit teardown."""
        if self.__sender_settings['method'] == 'nxlog':
            self.__sender_client.close()
    def send2bot(self, event):
        """
        Dispatch the event via the configured send method.
        :param event:
        :return: truthy on success, False when no method is configured
        """
        if self.__sender_settings['method'] == 'messila_api':
            return self.__send2messila_api(event)
        if self.__sender_settings['method'] == 'kafka':
            return self.__send2kafka(event)
        if self.__sender_settings['method'] == 'nxlog':
            return self.__send2nxlog(event)
        logger.error(f'[{self.device}] В конфиге не определён метод отправки событий в бота!')
        return False
    def __send2nxlog(self, event):
        """
        Send the event to nxlog.
        :param event:
        :return: send result, or False when formatting failed
        """
        post_transformer = PostTransformer()
        def nxlog_formater(event, devtype):
            """Wrap the event into the envelope expected by nxlog."""
            def md5_from_raw(raw):
                # md5 of the stringified raw event, used as a dedup key downstream
                import hashlib
                hash_t = hashlib.md5()
                hash_t.update(str(raw).encode('utf8'))
                return hash_t.hexdigest()
            def transform_time(_time: str):
                """Parse a time string and localise it; return the input unchanged on parse failure."""
                def pars_event_time(t: str):
                    try:
                        _event_time = parse(t)
                    except Exception as err:
                        logger.error(err)
                        return None
                    return _event_time
                event_time = pars_event_time(_time)
                if not event_time:
                    return _time
                try:
                    _new_time = tz.localize(event_time)
                except Exception as err:
                    # NOTE(review): both branches below return the same value;
                    # the message comparison has no effect
                    if err.args[0] == "Not naive datetime (tzinfo is already set)":
                        return str(event_time)
                    return str(event_time)
                new_time = _new_time.isoformat()
                return new_time
            try:
                event["EventTime"] = transform_time(event["EventTime"])
                event["DetectionTime"] = transform_time(event["DetectionTime"])
                # static envelope attributes come from the nxlog config section
                new_event = {
                    "EventTime": event["EventTime"],
                    "DetectionTime": event["DetectionTime"],
                    "Hostname": socket.gethostname(),
                    "SeverityValue": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get(
                        'SeverityValue'),
                    "Severity": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('Severity'),
                    "Organization": event['Organization'],
                    "OrgID": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('OrgID'),
                    "DevCat": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevCat'),
                    "DevSubCat": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevSubCat'),
                    "DevType": devtype,
                    "DevVendor": self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevVendor'),
                    "raw": event,
                    "md5": md5_from_raw(event)
                }
            except Exception as err:
                logger.error(f'Не удается отформатировать событие. error {err}')
                return False
            return new_event
        siem_event = post_transformer.post_transform_event(event)
        # DevType may be a single value or a list; a list fans the event out once per type
        if not isinstance(self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevType'), list):
            event = nxlog_formater(siem_event,
                                   self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevType'))
            if not event:
                return False
            return self.__sender_client.send_event(message=event)
        # NOTE(review): reached only when DevType IS a list, so this isinstance
        # check is redundant (a plain else would do)
        if isinstance(self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevType'), list):
            res = False
            for devtype in self.__sender_settings['credentials']['nxlog']['nxlog_attributes'].get('DevType'):
                event = nxlog_formater(siem_event, devtype)
                if not event:
                    return False
                res = self.__sender_client.send_event(message=event)
            return res
    def __send2kafka(self, event):
        """
        Send the event to kafka.
        :param event:
        :return: True
        """
        event = self.delete_service_fields(event)
        self.__sender_client.send(topic=self.__sender_settings['credentials']['kafka']['siem_topic'], data=event)
        return True
    def __send2messila_api(self, event):
        """
        Send the event to the Messila API.
        :param event:
        :return: the API client's send result
        """
        def clear_none(_event):
            """Drop keys whose value is None, in place."""
            empty_keys = [k for k, v in _event.items() if v is None]
            for k in empty_keys:
                del _event[k]
            return _event
        event = clear_none(event)
        event = self.delete_service_fields(event)
        # the API expects event_id as a string
        if event.get('event_id'):
            if isinstance(event['event_id'], int):
                event.update({"event_id": str(event['event_id'])})
        return self.__sender_client.send_event(event)
class WorkTime:
    """
    Collection-window bookkeeping: tracks the position events were last
    collected from, per device, in the Mongo ``work_time`` collection.
    """
    def __init__(self):
        self.mongo = pymongo.MongoClient(host=mongo_host, port=mongo_port)
    def get_last_position(self, device):
        """
        Get the date to collect new detections from.

        Collection runs from some past date up to "now", so a new (not yet
        confirmed) last_position of "now" is inserted for the next run.

        :param device: database to take the time from
        :return: tuple (last_dt, new_lp_id) where
            last_dt  -- date string to collect from
            new_lp_id -- Mongo id of the freshly inserted last_position
        """
        last_dt = None
        new_lp_id = None
        collection = self.mongo[f'unicon_{device}']['work_time']
        try:
            # most recent CONFIRMED (status == 1) position
            lp = collection.find({"status": 1}).sort('_id', -1).limit(1)[:1][0]
            last_dt = lp.get('last_position')
            logger.info(f'[{device}] Last position {last_dt}')
            last_dt = datetime.strptime(last_dt, '%Y-%m-%d %H:%M:%S') - timedelta(
                hours=setting['time_indent'])  # str -> datetime; minus N hours of overlap
            last_dt = last_dt.strftime('%Y-%m-%d %H:%M:%S')
            logger.info(f'[{device}] Собираем с даты {last_dt}')
            # record "now" as the next run's candidate position (unconfirmed)
            new_lp_id = collection.insert_one(
                {"status": 0, 'last_position': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}).inserted_id
            logger.info(f'[{device}] Новая дата {new_lp_id}')
        except IndexError:
            # empty work_time collection -> first ever run
            logger.info("Похоже это первый запуск, так как коллекция work_time чиста")
        except Exception as err:
            logger.error(err)
        if not last_dt:
            # no confirmed position: default to the configured stat_range window
            last_dt = datetime.now() - timedelta(hours=setting['stat_range'])
            last_dt = last_dt.strftime('%Y-%m-%d %H:%M:%S')
            new_lp_id = collection.insert_one({"status": 0, 'last_position': last_dt}).inserted_id
            logger.warning(
                f"[{device}] Не найдено время начала последнего сбора событий. "
                f"Зададим по дефолту (-{setting['stat_range']}h)")
        if device == "drweb":
            # drweb expects a compact timestamp with milliseconds
            last_dt = datetime.strptime(last_dt, '%Y-%m-%d %H:%M:%S').strftime("%Y%m%d%H%M%S%f")[:-3]
        return last_dt, new_lp_id
    def update_last_position(self, device, new_lp_id):
        """
        Confirm the new last_position so the next run may use it, called after
        a successful collection.

        :param device: database to take the time from
        :param new_lp_id: id of the new last_position to mark as confirmed
        :return: None
        """
        collection = self.mongo[f'unicon_{device}']['work_time']
        collection.update_one({'_id': new_lp_id}, {"$set": {"status": 1}})
        logger.info(f'[{device}] Новая дата начала последнего сбора событий {new_lp_id} подтверждена')
class StartTime:
    """
    Start-time bookkeeping v2, for the scheduler: tracks when the collector
    was last started, per device, in the Mongo ``start_time`` collection.
    """
    def __init__(self):
        self.mongo = pymongo.MongoClient(host=mongo_host, port=mongo_port)
    def get_last_start(self, device):
        """
        Get the time the collector was last started.

        :param device: database to take the time from
        :return: datetime of the last confirmed start (inserted as "now" on
            the very first run)
        """
        last_start_dt = None
        # NOTE(review): new_ls_id is never used in this method
        new_ls_id = None
        collection = self.mongo[f'unicon_{device}']['start_time']
        try:
            # most recent SUCCESSFUL start (status == 1)
            ls = collection.find({"status": 1}).sort('_id', -1).limit(1)[:1][0]
            last_start_dt = datetime.strptime(ls.get("last_start_dt"), '%Y-%m-%d %H:%M:%S')
        except pymongo.errors.ServerSelectionTimeoutError:
            logger.critical('MongoDB не запущена, или у неё проблемы')
            sys.exit(1)
        except IndexError as err:
            # empty collection -> first ever run
            logger.warning(err)
        if not last_start_dt:
            # no previous start recorded: create one for "now"
            last_start_dt = datetime.now()
            last_start_dt = last_start_dt.strftime('%Y-%m-%d %H:%M:%S')
            collection.insert_one({"status": 1, 'last_start_dt': last_start_dt})
            last_start_dt = datetime.strptime(last_start_dt, '%Y-%m-%d %H:%M:%S')
            logger.warning(f'[{device}] Сбор данных по этому сенсору ранее не происходил. '
                           f'Сейчас первый запуск {last_start_dt}')
        return last_start_dt
    def create_new_last_start(self, device):
        """Insert a new, unconfirmed (status 0) start record and return its id."""
        collection = self.mongo[f'unicon_{device}']['start_time']
        new_last_start_dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        new_ls_id = collection.insert_one(
            {"status": 0, 'last_start_dt': new_last_start_dt}
        ).inserted_id
        logger.info(f'[{device}] Новая дата последнего старта {new_last_start_dt} {new_ls_id}')
        return new_ls_id
    def confirm_new_last_start(self, device, new_ls_id):
        """
        Confirm the new last_start so the next run may use it, called after a
        successful collection.

        :param device: database to take the time from
        :param new_ls_id: id of the new last_start to mark as confirmed
        :return: None
        """
        collection = self.mongo[f'unicon_{device}']['start_time']
        collection.update_one({'_id': new_ls_id},
                              {"$set": {"status": 1, "last_end_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')}})
        logger.info(f'[{device}] Новая дата последнего старта {new_ls_id} подтверждена')
def collect(device, last_position=None, event_id=None, env=None):
    """
    Event collector entry point.

    Either collects all events since ``last_position`` from every configured
    environment of the sensor, or collects a single event by ``event_id`` from
    the given environment.

    :param device: sensor name (key in setting['sensors'])
    :param last_position: position/date to collect from
    :param event_id: single event id to collect
    :param env: environment alias (required together with event_id)
    :return: True on success, False when any environment failed, None when
        called with neither last_position nor (event_id and env)
    """
    if last_position:
        try:
            environments = setting['sensors'][device]['environments']
        except KeyError:
            logger.error(f'Для сенсора {device} нет соответствующего конфига')
            sys.exit(1)
        # BUGFIX: the original returned from inside this loop, so only the
        # first environment was ever processed; now every environment is
        # visited and the overall success is reported.
        success = True
        for _env, params in environments.items():
            logger.info('Search in environments [{}]'.format(_env))
            # make sure the DB server is reachable before creating a collector
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                try:
                    connect_result = s.connect_ex((environments[_env]['external_db']['server'],
                                                   environments[_env]['external_db']['port']))
                except Exception as err:
                    logger.error(f"[{device}] Проблемы с доступом к серверу БД "
                                 f"{environments[_env]['external_db']['server']}: {err}")
                    success = False
                    continue
                if connect_result:
                    # connect_ex returns a non-zero errno when the port is unreachable
                    logger.error(f"[{device}] Нет доступа до сервера {environments[_env]['external_db']['server']}")
                    success = False
                    continue
                iv = Collector(conn_type=device, environment=environments[_env], env_alias=_env)
                try:
                    iv.collect_events(last_position=last_position)
                except Exception as err:
                    logger.exception(err)
                    logger.error(f"[{device}] Проблемы со сбором данных с БД {environments[_env]['external_db']['server']} "
                                 f"под УЗ {environments[_env]['external_db']['user']}")
                    success = False
        return success
    elif event_id and env:
        environments = setting['sensors'][device]['environments']
        logger.info(f'[{device}] Search in environments [{env}]')
        iv = Collector(conn_type=device, environment=environments[env], env_alias=env)
        iv.collect_events(event_id=event_id)
        return True
def process(device):
    """
    Event dispatcher: send every unsent (_status == 0) event of the device's
    Mongo database to the bot, mark sent events, and purge old ones.
    """
    def get_collections_list(db_name):
        """Return the database's collection names, skipping system collections."""
        collections_list = []
        db = mongo[f'unicon_{db_name}']
        for _collection in db.list_collection_names():
            if 'system.' not in _collection:
                collections_list.append(_collection)
        return collections_list
    def delete_old_events(_collection):
        """Delete already-sent events older than the configured storage_time (days)."""
        storage_time = 14  # default retention, days
        if setting.get('storage_time'):
            storage_time = setting.get('storage_time')
        res = _collection.delete_many(
            {
                "_status": 1,
                "_receive_dt": {
                    "$lt": datetime.utcnow() - timedelta(days=storage_time)
                }
            })
        if res.raw_result.get('n'):
            logger.info(f"Из хранилища удалено {res.raw_result.get('n')} событий старше {storage_time} дней")
    proc = Processor(device, setting['sensors'][device]['send2bot'])
    mongo = pymongo.MongoClient(host=mongo_host, port=mongo_port)
    collections_list = get_collections_list(device)
    for collection_name in collections_list:
        # bookkeeping collections hold no events
        if collection_name in ['start_time', 'work_time']:
            continue
        logger.debug(f'[{device}] Поиск новых событий в коллекции {collection_name}')
        collection = mongo[f'unicon_{device}'][collection_name]
        new_events = collection.find({'_status': 0})
        # NOTE(review): cursor.count() is deprecated in modern pymongo --
        # consider collection.count_documents({'_status': 0})
        if new_events.count() == 0:
            logger.debug(f'[{device}] Новые события для отправки в брокер не найдены')
        sending_events = 0
        len_events = new_events.count()
        for event in new_events:
            Collector.cast(event)
            event_id = event['_id']
            if proc.send2bot(event):
                sending_events = sending_events + 1
                # mark as sent so it is not dispatched again
                collection.update_one({'_id': ObjectId(event_id)}, {"$set": {"_status": 1}})
                logger.debug(f'[{device}] Статус события {event_id} изменен')
        logger.debug(f'[{device}] Sending {sending_events}/{len_events} events to bot')
        delete_old_events(collection)
    proc.sender_close()
| StarcoderdataPython |
3332675 | <reponame>arturca/RasMAT<gh_stars>0
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from data.default_function_and_settings import *
import numpy as np
class Clock:
    """Drives an LED-matrix display: shows the current time and scrolls song titles."""

    def __init__(self, previosus_time, previous_line, strip):
        """
        :param previosus_time: last displayed "HH:MM" string (original
            parameter name kept for backward compatibility)
        :param previous_line: last processed line of the player log file
        :param strip: LED strip driver instance
        """
        self.previous_time = previosus_time
        self.previous_line = previous_line
        self.strip = strip
        self.queue = np.array([])

    def clock(self):
        """Redraw the clock when the displayed minute has changed."""
        current_time = strftime("%H:%M", time.localtime())
        position = 0
        if current_time != self.previous_time:
            self.previous_time = current_time
            # randomly pick one of the two wipe animations
            how_to_wipe = randint(0, 2)
            if how_to_wipe == 0:
                color_wipe_bar(self.strip, Color(randint(0, 255), randint(0, 255), randint(0, 255)))
            else:
                colorWipe(self.strip, Color(0, 0, 0), 0)
            for char in current_time:
                if char != ':':
                    set_number(self.strip, position, int(char))
                # the colon still occupies a display position
                position += 1

    def paint_scrolling_name(self):
        """Scroll the queued 6xN pixel matrix leftwards across the 6x10 display."""
        while len(self.queue[0]) > 10:
            colorWipe(self.strip, Color(0, 0, 0), 0)
            self.strip.show()
            for row in range(6):
                for col in range(10):
                    if self.queue[row][col]:
                        self.strip.setPixelColor(helper_list[row + 2][col], Color(255, 0, 0))
            self.strip.show()
            # drop the leftmost column to shift the text one pixel left
            self.queue = self.queue[:, 1:]
            time.sleep(0.15)
        # force the clock to repaint once scrolling has finished
        self.previous_time = 'x'

    def get_rid_of_polish_sign(self, title):
        """Replace UTF-8 byte sequences of Polish letters with ASCII equivalents."""
        polskie = {"\xc4\x84": "A", "\xc4\x86": "C",
                   "\xc4\x98": "E", "\xc5\x81": "L", "\xc5\x83": "N", "\xc3\x93": "O",
                   "\xc5\x9a": "S", "\xc5\xb9": "Z", "\xc5\xbb": "Z", "\xc4\x85": "a",
                   "\xc4\x87": "c", "\xc4\x99": "e", "\xc5\x82": "l", "\xc5\x84": "n",
                   "\xc3\xB3": "o", "\xc5\x9b": "s", "\xc5\xba": "z", "\xc5\xbc": "z"}
        for src, repl in polskie.items():
            title = title.replace(src, repl)
        return title

    def print_song_name(self, name):
        """Render *name* into the pixel queue (one 6-row column set per letter) and scroll it."""
        colorWipe(self.strip, Color(0, 0, 0), 0)
        name_2 = self.get_rid_of_polish_sign(name)
        print(name_2)
        name_2 = name_2.upper()
        # start with a blank 6x10 screen so the text scrolls in from the right
        self.queue = np.array(6 * [10 * [False]])
        for letter in name_2:
            if letter == ' ':
                # a space is two blank columns
                self.queue = np.column_stack((self.queue, np.array(6 * [2 * [False]])))
            if 0 <= ord(letter) - 65 < len(digits_and_letters.letters_list):
                self.queue = np.column_stack((self.queue, np.array(digits_and_letters.letters_list[ord(letter) - 65])))
                # wide letters get one blank spacer column
                if letter != 'I' and letter != 'Y' and letter != 'L' and letter != 'E' and letter != 'T' \
                        and letter != 'F':
                    self.queue = np.column_stack((self.queue,
                                                  np.array(6 * [[False]])))
        self.queue = np.column_stack((self.queue, np.array(6 * [10 * [False]])))
        # show the title twice
        self.queue = np.column_stack((self.queue, self.queue))
        self.paint_scrolling_name()

    def check_logs_file(self, filename):
        """Scroll the song title when the player log reports a newly loaded track."""
        with open(filename) as logs_file:
            lines_of_file = logs_file.readlines()
        if not lines_of_file:
            # nothing logged yet
            return
        last_line = lines_of_file[-1]
        if "loaded" in last_line and last_line != self.previous_line:
            name_of_song = re.search(r"<(.*)\>", last_line).group(1)
            self.previous_line = last_line
            self.print_song_name(name_of_song)
        return
| StarcoderdataPython |
3298842 | <reponame>Xamaneone/SoftUni-Intro<filename>Python-Advanced/lists_as_stacks_and_queues_exercise/crossroads.py
# BUGFIX: import from the public `collections` module, not the private
# CPython implementation module `_collections`.
from collections import deque

# Crossroads simulation: cars queue up and pass while the light is green.
# A car needs one second per character of its name to cross; after the green
# light ends it gets `free_window` extra seconds, otherwise it crashes.
green_light = int(input())   # seconds of green light per cycle
free_window = int(input())   # extra seconds to finish crossing
cars = deque()
crash = False
passed = 0
command = input()
while command != "END":
    current_car = 0
    current_car_name = ""
    crash_chr = 0
    if command == "green":
        for _ in range(green_light):
            if cars:
                if current_car < 1:
                    # previous car finished; the next one enters the crossing
                    car = cars.popleft()
                    current_car = len(car)
                    current_car_name = car
                    passed += 1
                    crash_chr = 0
                current_car -= 1
                crash_chr += 1
        # grace period for the car still on the crossing
        for _ in range(free_window):
            current_car -= 1
            crash_chr += 1
    else:
        cars.append(command)
    if current_car > 0:
        # the car did not clear the crossing in time
        crash = True
        print("A crash happened!")
        print(f"{current_car_name} was hit at {current_car_name[crash_chr]}.")
        break
    command = input()
if not crash:
    print(f"Everyone is safe.")
    print(f"{passed} total cars passed the crossroads.")
3375070 | <reponame>mauriziokovacic/ACME<gh_stars>1-10
from .isscalar import *
from .size import *
from .prod import *
def numel(A):
    """
    Returns the number of elements contained in the given Tensor

    Parameters
    ----------
    A : Tensor
        a tensor/matrix

    Returns
    -------
    int
        the number of elements in the given tensor
    """
    shape = size(A)
    # a scalar shape means a 0-dimensional input with exactly one element
    return 1 if isscalar(shape) else prod(shape)
153993 | # -*- coding: utf-8 -*-
"""
Sphinx configuration file for clik-wtforms.
:author: <NAME> <<EMAIL>>
:copyright: Copyright (c) <NAME> and contributors, 2017-2019.
:license: BSD
"""
import os
import sys
root_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
src_path = os.path.join(root_path, 'src')
sys.path.insert(0, src_path)
# =============================================================================
# -- General configuration ----------------------------------------------------
# =============================================================================
# Basic project information
project = u'clik-wtforms'
copyright = u'2017-2019, <NAME> and contributors'
author = u'<NAME> and contributors'
version = u'0.90'
release = u'0.90.2'
# Paths
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
source_suffix = '.rst'
templates_path = []
# Miscellaneous
language = None
pygments_style = 'sphinx'
# Extensions.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# =============================================================================
# -- HTML ---------------------------------------------------------------------
# =============================================================================
html_theme = 'sphinx_rtd_theme'
html_static_path = []
htmlhelp_basename = '%s-doc' % project
# =============================================================================
# -- LaTeX --------------------------------------------------------------------
# =============================================================================
# Basic config
latex_elements = {}
latex_documents = [
(
master_doc,
'%s.tex' % project,
project,
author,
'manual',
),
]
# References
latex_show_pagerefs = True
latex_show_urls = 'footnote'
# =============================================================================
# -- Man page -----------------------------------------------------------------
# =============================================================================
man_pages = [
(
master_doc,
project,
project,
[author],
7,
),
]
man_show_urls = True
# =============================================================================
# -- Texinfo ------------------------------------------------------------------
# =============================================================================
setup_path = os.path.join(root_path, 'setup.py')
with open(setup_path) as f:
for line in f:
line = line.strip()
if line.startswith('description='):
texinfo_description = line[13:-2]
break
texinfo_documents = [
(
master_doc,
project,
project,
author,
project,
texinfo_description,
'Miscellaneous',
),
]
texinfo_show_urls = 'footnote'
# =============================================================================
# -- sphinx.ext.intersphinx ---------------------------------------------------
# =============================================================================
intersphinx_mapping = {
'https://docs.python.org/3/': None,
'https://clik.readthedocs.io/en/0.92.4/': None,
'https://wtforms.readthedocs.io/en/2.2.1/': None,
}
# =============================================================================
# -- sphinx.ext.todo ----------------------------------------------------------
# =============================================================================
todo_include_todos = True
| StarcoderdataPython |
15443 | from typing import List
import matplotlib.pyplot as plt
class Mortgage:
    """
    A mortgage overview of the total burden (incl. interest) and the monthly fees per fixed period
    """
    def __init__(self, mortgage_amount, burden, periods, monthly_fees, name):
        # Truncate money amounts to whole euros and copy the incoming lists so
        # the caller's inputs cannot be mutated through this instance.
        self.mortgage_amount = int(mortgage_amount)
        self.burden = int(burden)
        self.periods = periods.copy()
        self.monthly_fees = [int(fee) for fee in monthly_fees]
        self.name = name
    def __add__(self, other):
        # Combine two mortgages into one overview; the falsy-`other` guard
        # makes `sum(...)` (which starts from 0) work, together with __radd__.
        if not other:
            return self
        mortgage_amount = self.mortgage_amount + other.mortgage_amount
        burden = self.burden + other.burden
        # Align the two fee schedules so each aligned period carries the sum
        # of both fees.
        periods, monthly_fees = _align_mortgages(periods_a=self.periods,
                                                 periods_b=other.periods,
                                                 fees_a=self.monthly_fees,
                                                 fees_b=other.monthly_fees)
        name = self.name
        if other.name != self.name:
            name += ' & ' + other.name
        return Mortgage(mortgage_amount=mortgage_amount,
                        burden=burden,
                        periods=periods,
                        monthly_fees=monthly_fees,
                        name=name)
    def __radd__(self, other):
        # Support `0 + mortgage`, i.e. sum() over a list of mortgages.
        return self + other
    def __repr__(self):
        # Human-readable summary: amounts plus one line per fee period.
        text = (f'{self.name}: {format(self.mortgage_amount, ",d")} euro\n'
                f'Total burden: {format(self.burden, ",d")} euro\n'
                'Monthly fees:\n')
        for period, fee in zip(self.periods, self.monthly_fees):
            text += f'- {period} months: {fee} euro\'s\n'
        return text
    def plot(self, axes=None) -> plt.axes:
        # Draw two stacked charts: per-period monthly fees (top) and mortgage
        # amount vs. total burden (bottom).  `axes`, when given, must be an
        # indexable pair of matplotlib axes (as produced by compare()).
        if axes is None:
            fig, axes = plt.subplots(2, 1, figsize=(5, 8))
        nr_periods = len(self.periods)
        axes[0].bar(x=range(nr_periods), height=self.monthly_fees, tick_label=self.periods,
                    color='darkblue')
        axes[0].set_xlabel('Period (months)')
        axes[0].set_ylabel('Monthly fee\n', color='darkblue')
        axes[0].set_title(f'Subsequent monthly fees\nover the specified periods\n\n{self}\n')
        axes[1].bar(x=[0, 1], height=[self.mortgage_amount, self.burden], color='purple')
        axes[1].set_ylabel('\nAmount (euro)', color='purple')
        axes[1].set_xlabel('')
        axes[1].set_xticks([0, 1])
        axes[1].set_xticklabels([f'Mortgage\n{format(self.mortgage_amount, ",d")}',
                                 f'Total burden\n{format(self.burden, ",d")}'])
        plt.tight_layout()
        return axes
    def compare(self, others: list) -> plt.axes:
        # Plot this mortgage next to `others`, one column per mortgage, with a
        # shared y-scale per row.
        # NOTE(review): with an empty `others` list, subplots(2, 1) returns a
        # 1-D axes array and iterating `axes.T` would hand single axes (not
        # column pairs) to plot(); callers appear to always pass at least one
        # other mortgage — confirm.
        mortgages = [self] + others
        nr_mortgages = len(mortgages)
        fig, axes = plt.subplots(2, nr_mortgages, figsize=(nr_mortgages * 3, 8), sharey='row')
        for col_axes, mortgage in zip(axes.T, mortgages):
            mortgage.plot(axes=col_axes)
        plt.tight_layout()
        return axes
def _align_mortgages(periods_a: List[int],
periods_b: List[int],
fees_a: List[int],
fees_b: List[int]) -> (List[int], List[int]):
""" Align periods and fees of two mortgages and compute the exact fee for each period.
:param periods_a: periods for Mortgage a
:param periods_b: periods for Mortgage b
:param fees_a: monthly fees for Mortgage a
:param fees_b: monthly fees for Mortgage b
:return: tuple of aligned periods and fees for the combined Mortgages a and b
"""
periods_a, periods_b, fees_a, fees_b = \
periods_a.copy(), periods_b.copy(), fees_a.copy(), fees_b.copy()
if not periods_a:
if not periods_b:
return [], []
else:
return periods_b, fees_b
elif not periods_b:
return periods_a, fees_a
if periods_b[0] < periods_a[0]:
periods_a, periods_b = periods_b, periods_a
fees_a, fees_b = fees_b, fees_a
first_period_fee = ([periods_a[0]], [fees_a[0] + fees_b[0]])
if periods_a[0] == periods_b[0]:
recursive_result = _align_mortgages(periods_a[1:], periods_b[1:], fees_a[1:], fees_b[1:])
else:
periods_b[0] -= periods_a[0]
recursive_result = _align_mortgages(periods_a[1:], periods_b, fees_a[1:], fees_b)
return tuple(a + b for a, b in zip(first_period_fee, recursive_result))
| StarcoderdataPython |
129842 | <filename>TEE_faster_RCNN.py
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
from typing import Dict
from torch import nn
import warnings
from torchvision.ops import misc as misc_nn_ops
from torchvision.models import resnet
from collections import OrderedDict
from torchvision.models.utils import load_state_dict_from_url
import torch
from scipy.stats import entropy
import torch.nn.functional as F
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection._utils import overwrite_eps
from torchvision.models.detection.anchor_utils import AnchorGenerator
from torchvision.models.detection.rpn import RPNHead, RegionProposalNetwork
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.models.detection.backbone_utils import _validate_trainable_layers
from torchvision.models.detection.faster_rcnn import TwoMLPHead,FastRCNNPredictor
class TEE_GeneralizedRCNN(nn.Module):
    """
    Main class for Generalized R-CNN with a TEEM early-exit backbone.
    Args:
        backbone (nn.Module): feature extractor; returns (features, TEEM
            features) and signals an early exit by returning a None TEEM dict.
        rpn (nn.Module): region proposal network.
        roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
            detections / masks from it.
        transform (nn.Module): performs the data transformation from the inputs to feed into
            the model
    """
    def __init__(self, backbone, rpn, roi_heads, transform):
        super(TEE_GeneralizedRCNN, self).__init__()
        self.transform = transform
        self.backbone = backbone
        self.rpn = rpn
        self.roi_heads = roi_heads
        # used only on torchscript mode
        self._has_warned = False
    @torch.jit.unused
    def eager_outputs(self, losses, detections, features):
        # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]]
        # Training returns only the loss dict; eval returns the post-processed
        # detections together with the backbone's TEEM feature dict.
        if self.training:
            return losses
        return detections, features
    def forward(self, images, TEEM_features=None, targets=None):
        """Run the detector on `images`.

        `TEEM_features` is forwarded to the backbone's early-exit heads; when
        one of them fires, (None, None, None) is returned instead of results.
        In training, `targets` (with "boxes" tensors of shape [N, 4]) are
        required and losses are computed.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        if self.training:
            assert targets is not None
            # Validate target box tensors up front: shape must be [N, 4].
            for target in targets:
                boxes = target["boxes"]
                if isinstance(boxes, torch.Tensor):
                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
                        raise ValueError("Expected target boxes to be a tensor"
                                         "of shape [N, 4], got {:}.".format(
                                             boxes.shape))
                else:
                    raise ValueError("Expected target boxes to be of type "
                                     "Tensor, got {:}.".format(type(boxes)))
        # Remember the pre-transform sizes so detections can be mapped back.
        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]
            assert len(val) == 2
            original_image_sizes.append((val[0], val[1]))
        images, targets = self.transform(images, targets)
        # Check for degenerate boxes
        # TODO: Move this to a function
        if targets is not None:
            for target_idx, target in enumerate(targets):
                boxes = target["boxes"]
                degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                if degenerate_boxes.any():
                    # print the first degenerate box
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
                    raise ValueError("All bounding boxes should have positive height and width."
                                     " Found invalid box {} for target at index {}."
                                     .format(degen_bb, target_idx))
        features, TEEMs_features = self.backbone(images.tensors, TEEM_features)
        # A None TEEM dict means an early-exit head aborted the forward pass.
        # (Identity comparison `is None` instead of `== None`.)
        if TEEMs_features is None:
            return None, None, None
        if not features:
            return [0]
        if isinstance(features, torch.Tensor):
            features = OrderedDict([('0', features)])
        proposals, proposal_losses = self.rpn(images, features, targets)
        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        if torch.jit.is_scripting():
            if not self._has_warned:
                warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
                self._has_warned = True
            return losses, detections, TEEMs_features
        else:
            return self.eager_outputs(losses, detections, TEEMs_features)
class TEE_IntermediateLayerGetter(nn.ModuleDict):
    """Runs a backbone's children in order, collecting the activations named
    in `return_layers`, and consults a TEEM early-exit head after each of
    layer1..layer4.

    Early-exit protocol (per layer, only while `TEEM_features` is given and no
    earlier head has disabled exiting): the head classifies the current
    activation; prediction 1 with entropy below the abort threshold aborts the
    whole pass (forward returns None), prediction 0 with entropy below the
    disable threshold switches all later exit checks off.
    NOTE(review): the entropy is computed from logits[0] only, i.e. this
    assumes batch size 1 — confirm with callers.
    """
    _version = 2
    __annotations__ = {
        "return_layers": Dict[str, str],
    }
    # Per-layer early-exit configuration:
    #   layer name -> (exit-head attribute, TEEM_features index,
    #                  abort-entropy threshold, disable-entropy threshold).
    # Thresholds are preserved exactly from the original per-layer branches.
    _EXIT_CFG = {
        'layer1': ('EarlyExit1', 0, 0.8, 0.8),
        'layer2': ('EarlyExit2', 1, 0.8, 0.8),
        'layer3': ('EarlyExit3', 2, 0.8, 0.90),
        'layer4': ('EarlyExit4', 3, 0.97, 0.97),
    }
    def __init__(self, model: nn.Module, return_layers: Dict[str, str], Early_Exits=[]) -> None:
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError("return_layers are not present in model")
        orig_return_layers = return_layers
        return_layers = {str(k): str(v) for k, v in return_layers.items()}
        # Keep children up to (and including) the last requested layer.
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            if not return_layers:
                break
        super(TEE_IntermediateLayerGetter, self).__init__(layers)
        # load pretrained TEEM modules
        # NOTE: Early_Exits is never mutated, so the mutable default is
        # benign; callers are expected to pass four exit heads.
        self.EarlyExit1 = Early_Exits[0]
        self.EarlyExit2 = Early_Exits[1]
        self.EarlyExit3 = Early_Exits[2]
        self.EarlyExit4 = Early_Exits[3]
        self.return_layers = orig_return_layers
        self.state = True
    def forward(self, x, TEEM_features=None):
        out = OrderedDict()
        # `state` gates the early-exit heads for the rest of this pass.
        self.state = True
        for name, module in self.items():
            if name in ('EarlyExit1', 'EarlyExit2', 'EarlyExit3', 'EarlyExit4'):
                # The exit heads are registered submodules; skip them in the
                # sequential backbone pass.
                continue
            x = module(x)
            if name in self._EXIT_CFG and TEEM_features is not None and self.state:
                attr, idx, abort_thresh, disable_thresh = self._EXIT_CFG[name]
                logits = getattr(self, attr)(TEEM_features[idx], x)
                entrop = entropy(logits.detach().cpu().numpy()[0], base=2)
                _, preds = torch.max(logits.data, 1)
                if preds == 1 and entrop < abort_thresh:
                    # Confident "exit" decision: abort the whole forward pass.
                    return None
                elif preds == 0 and entrop < disable_thresh:
                    # Confident "continue" decision: stop consulting heads.
                    self.state = False
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out
class TEE_BackboneWithFPN(nn.Module):
    """Backbone + FPN wrapper whose body supports TEEM early exits.

    forward() returns (fpn_features, body_features); when the body's
    early-exit heads abort the pass it returns (None, None) instead.
    """
    def __init__(self, backbone, return_layers, in_channels_list, out_channels, Early_Exits, extra_blocks=None):
        super(TEE_BackboneWithFPN, self).__init__()
        if extra_blocks is None:
            extra_blocks = LastLevelMaxPool()
        self.body = TEE_IntermediateLayerGetter(backbone, return_layers=return_layers, Early_Exits=Early_Exits)
        self.fpn = FeaturePyramidNetwork(
            in_channels_list=in_channels_list,
            out_channels=out_channels,
            extra_blocks=extra_blocks,
        )
        self.out_channels = out_channels
    def forward(self, x, TEEM_features=None):
        y = self.body(x, TEEM_features)
        # Early exit fired inside the body: propagate the abort.
        # (Identity comparison `is None` instead of `== None`.)
        if y is None:
            return None, None
        x = self.fpn(y)
        return x, y
def TEE_resnet_fpn_backbone(
    backbone_name,
    pretrained,
    Early_Exits,
    norm_layer=misc_nn_ops.FrozenBatchNorm2d,
    trainable_layers=3,
    returned_layers=None,
    extra_blocks=None
):
    """Build a ResNet-FPN backbone carrying the given TEEM early-exit heads.

    `trainable_layers` (0..5) picks how many top-most ResNet stages stay
    trainable; all other parameters are frozen.
    """
    backbone = getattr(resnet, backbone_name)(
        pretrained=pretrained,
        norm_layer=norm_layer)
    # select layers that wont be frozen
    assert 0 <= trainable_layers <= 5
    unfrozen_prefixes = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
    if trainable_layers == 5:
        unfrozen_prefixes.append('bn1')
    for param_name, param in backbone.named_parameters():
        if not any(param_name.startswith(prefix) for prefix in unfrozen_prefixes):
            param.requires_grad_(False)
    if extra_blocks is None:
        extra_blocks = LastLevelMaxPool()
    if returned_layers is None:
        returned_layers = [1, 2, 3, 4]
    assert min(returned_layers) > 0 and max(returned_layers) < 5
    # Map e.g. layer1 -> '0', layer2 -> '1', ... for the FPN input dict.
    return_layers = {f'layer{layer_idx}': str(pos) for pos, layer_idx in enumerate(returned_layers)}
    stage2_channels = backbone.inplanes // 8
    in_channels_list = [stage2_channels * 2 ** (layer_idx - 1) for layer_idx in returned_layers]
    out_channels = 256
    return TEE_BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, Early_Exits, extra_blocks=extra_blocks)
# Download URLs for the official torchvision Faster R-CNN COCO checkpoints;
# only 'fasterrcnn_resnet50_fpn_coco' is referenced in this module.
model_urls = {
    'fasterrcnn_resnet50_fpn_coco':
        'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth',
    'fasterrcnn_mobilenet_v3_large_320_fpn_coco':
        'https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth',
    'fasterrcnn_mobilenet_v3_large_fpn_coco':
        'https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth'
}
class TEE_FasterRCNN(TEE_GeneralizedRCNN):
    """Faster R-CNN whose backbone may carry TEEM early-exit heads.

    Assembles default transform, RPN and RoI-head components (mirroring
    torchvision's FasterRCNN constructor) around the given backbone and hands
    them to TEE_GeneralizedRCNN.  Either `num_classes` or `box_predictor`
    must be given, not both.
    """
    def __init__(self, backbone ,num_classes=None,
                 # transform parameters
                 min_size=800, max_size=1333,
                 image_mean=None, image_std=None,
                 # RPN parameters
                 rpn_anchor_generator=None, rpn_head=None,
                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
                 rpn_nms_thresh=0.7,
                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
                 rpn_score_thresh=0.0,
                 # Box parameters
                 box_roi_pool=None, box_head=None, box_predictor=None,
                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                 box_batch_size_per_image=512, box_positive_fraction=0.25,
                 bbox_reg_weights=None):
        if not hasattr(backbone, "out_channels"):
            raise ValueError(
                "backbone should contain an attribute out_channels "
                "specifying the number of output channels (assumed to be the "
                "same for all the levels)")
        assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))
        assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))
        # num_classes and box_predictor are mutually exclusive ways to size
        # the classification head.
        if num_classes is not None:
            if box_predictor is not None:
                raise ValueError("num_classes should be None when box_predictor is specified")
        else:
            if box_predictor is None:
                raise ValueError("num_classes should not be None when box_predictor "
                                 "is not specified")
        out_channels = backbone.out_channels
        # Default anchors: one size per FPN level, three aspect ratios each.
        if rpn_anchor_generator is None:
            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
            aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
            rpn_anchor_generator = AnchorGenerator(
                anchor_sizes, aspect_ratios
            )
        if rpn_head is None:
            rpn_head = RPNHead(
                out_channels, rpn_anchor_generator.num_anchors_per_location()[0]
            )
        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
        rpn = RegionProposalNetwork(
            rpn_anchor_generator, rpn_head,
            rpn_fg_iou_thresh, rpn_bg_iou_thresh,
            rpn_batch_size_per_image, rpn_positive_fraction,
            rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh,
            score_thresh=rpn_score_thresh)
        if box_roi_pool is None:
            box_roi_pool = MultiScaleRoIAlign(
                featmap_names=['0', '1', '2', '3'],
                output_size=7,
                sampling_ratio=2)
        if box_head is None:
            resolution = box_roi_pool.output_size[0]
            representation_size = 1024
            box_head = TwoMLPHead(
                out_channels * resolution ** 2,
                representation_size)
        if box_predictor is None:
            representation_size = 1024
            box_predictor = FastRCNNPredictor(
                representation_size,
                num_classes)
        roi_heads = RoIHeads(
            # Box
            box_roi_pool, box_head, box_predictor,
            box_fg_iou_thresh, box_bg_iou_thresh,
            box_batch_size_per_image, box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh, box_nms_thresh, box_detections_per_img)
        # Standard ImageNet normalization statistics.
        if image_mean is None:
            image_mean = [0.485, 0.456, 0.406]
        if image_std is None:
            image_std = [0.229, 0.224, 0.225]
        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)
        super(TEE_FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)
def TEE_fasterrcnn_resnet50_fpn(pretrained=False, progress=True,
    num_classes=91, pretrained_backbone=True, Early_Exits=[], trainable_backbone_layers = None, **kwargs):
    # Build a Faster R-CNN (ResNet50 + FPN) whose backbone carries the TEEM
    # early-exit heads given in Early_Exits.  `pretrained` loads the COCO
    # checkpoint for the whole detector; `pretrained_backbone` only the
    # ImageNet backbone weights.
    trainable_backbone_layers = _validate_trainable_layers(
        pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3)
    if pretrained:
        # no need to download the backbone if pretrained is set
        pretrained_backbone = False
    backbone = TEE_resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers = trainable_backbone_layers, Early_Exits = Early_Exits)
    model = TEE_FasterRCNN(backbone, num_classes, **kwargs)
    if pretrained:
        # strict=False: presumably because the TEEM/early-exit parameters are
        # absent from the stock COCO checkpoint — TODO confirm.
        state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'],
                                              progress=progress)
        model.load_state_dict(state_dict, strict = False)
        overwrite_eps(model, 0.0)
    return model
1662161 |
from kivy.lang.builder import Builder
from .ImageButton import ImageButton
Builder.load_string('''
<MultiImageButton>:
images_normal: '', ''
images_down: '', ''
image_set: 0
image_normal: self.images_normal[self.image_set]
image_down: self.images_down[self.image_set]
''')
class MultiImageButton(ImageButton):
    """ImageButton whose normal/down images are selected from the
    `images_normal`/`images_down` collections via the `image_set` index
    (see the kv rule registered above)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
| StarcoderdataPython |
3395585 | from adventofcode.utils import open_input
def main():
    """Read the puzzle input, solve both parts of AoC 2021 day 2, print and
    return both answers as a tuple."""
    data = open_input('adventofcode/_2021/day2/input.txt')
    answer_1 = calculate_position(data)
    answer_2 = calculate_position_with_aim(data)
    print(answer_1, answer_2)
    return answer_1, answer_2
def calculate_position(data: list[str]) -> int:
    """Multiply the submarine's final horizontal position by its final depth.

    Each entry of `data` is an instruction such as ``"forward 5"`` or
    ``"down 3"``: `forward` moves horizontally, `up`/`down` change the depth.
    """
    horizontal = 0
    depth = 0
    for instruction in data:
        parts = instruction.split()
        action = parts[0]
        amount = int(parts[1])
        if action == 'down':
            depth += amount
        elif action == 'up':
            depth -= amount
        elif action == 'forward':
            horizontal += amount
    return horizontal * depth
def calculate_position_with_aim(data: list[str]) -> int:
    """Multiply the submarine's final horizontal position by its final depth,
    where `up`/`down` adjust the aim and `forward` advances horizontally while
    diving by aim * distance.
    """
    horizontal = 0
    depth = 0
    aim = 0
    for instruction in data:
        parts = instruction.split()
        action = parts[0]
        amount = int(parts[1])
        if action == 'down':
            aim += amount
        elif action == 'up':
            aim -= amount
        elif action == 'forward':
            horizontal += amount
            depth += aim * amount
    return horizontal * depth
# Allow running this solution module directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
def TestOr2():
    """Check `or` over two 0/1 operands; `or` returns the deciding operand."""
    # Single-argument print(...) is valid and identical in Python 2 and 3
    # (the original `print 11` statement was Python-2-only syntax).
    print(11)
    assert 0 == (0 or 0)
    assert 1 == (0 or 1)
    assert 1 == (1 or 0)
    assert 1 == (1 or 1)
def TestOrNot2():
    """Check `or` over negated operands; `not` yields True/False, and
    True == 1 / False == 0 in both Python 2 and 3."""
    # print(...) form keeps this runnable under Python 2 and 3 alike.
    print(12)
    assert 1 == (not 0 or not 0)
    assert 1 == (not 0 or not 1)
    assert 1 == (not 1 or not 0)
    assert 0 == (not 1 or not 1)
def TestOr3():
    """Exhaustively check three-operand `or` over 0/1 inputs."""
    # print(...) form keeps this runnable under Python 2 and 3 alike.
    print(22)
    assert 0 == (0 or 0 or 0)
    assert 1 == (0 or 1 or 0)
    assert 1 == (1 or 0 or 0)
    assert 1 == (1 or 1 or 0)
    assert 1 == (0 or 0 or 1)
    assert 1 == (0 or 1 or 1)
    assert 1 == (1 or 0 or 1)
    assert 1 == (1 or 1 or 1)
def TestAnd2():
    """Exhaustively check two-operand `and` over 0/1 inputs."""
    # print(...) form keeps this runnable under Python 2 and 3 alike.
    print(33)
    assert 0 == (0 and 0)
    assert 0 == (0 and 1)
    assert 0 == (1 and 0)
    assert 1 == (1 and 1)
def TestAnd3():
    """Exhaustively check three-operand `and` over 0/1 inputs."""
    # print(...) form keeps this runnable under Python 2 and 3 alike.
    print(44)
    assert 0 == (0 and 0 and 0)
    assert 0 == (0 and 1 and 0)
    assert 0 == (1 and 0 and 0)
    assert 0 == (1 and 1 and 0)
    assert 0 == (0 and 0 and 1)
    assert 0 == (0 and 1 and 1)
    assert 0 == (1 and 0 and 1)
    assert 1 == (1 and 1 and 1)
def NeverCalled():
    # Deliberately failing assertion: this helper must never actually run; the
    # short-circuit checks below only reference it on branches that are
    # guaranteed to be skipped.
    assert 111 == 222
# assert short-circuiting: the right operand must never be evaluated when the
# left operand already decides the result.
print(55)
assert 1 == (1 or NeverCalled())
assert 0 == (0 and NeverCalled())
TestOr2()
TestOrNot2()
TestOr3()
TestAnd2()
TestAnd3()
# Mixed chains: `and` binds tighter than `or`.
assert 1 == (1 or 0 and 0 or 1)
assert 1 == (not 0 or 0 and 0 or not 0)
print("Done.")
| StarcoderdataPython |
1768074 | <filename>raritan/rpc/usermgmt/__init__.py<gh_stars>0
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libisys/src/idl/Role.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.usermgmt
# interface
class Role(Interface):
    """JSON-RPC proxy for the usermgmt.Role IDL interface (generated code).

    Wraps one remote role object: getInfo() fetches its settings, updateFull()
    replaces them.
    """
    idlType = "usermgmt.Role:1.0.0"
    # Status code updateFull may return for rejected settings.
    ERR_INVALID_VALUE = 1
    # structure
    class Privilege(Structure):
        """One privilege: a name plus its string arguments."""
        idlType = "usermgmt.Role.Privilege:1.0.0"
        elements = ["name", "args"]
        def __init__(self, name, args):
            typecheck.is_string(name, AssertionError)
            for x0 in args:
                typecheck.is_string(x0, AssertionError)
            self.name = name
            self.args = args
        @classmethod
        def decode(cls, json, agent):
            # Build a Privilege from its JSON wire representation.
            obj = cls(
                name=json["name"],
                args=[x0 for x0 in json["args"]],
            )
            return obj
        def encode(self):
            # Serialize this Privilege to its JSON wire representation.
            json = {}
            json["name"] = self.name
            json["args"] = [x0 for x0 in self.args]
            return json
    # structure
    class Info(Structure):
        """Full role settings: description, locked flag and privilege list."""
        idlType = "usermgmt.Role.Info:1.0.0"
        elements = ["description", "locked", "privileges"]
        def __init__(self, description, locked, privileges):
            typecheck.is_string(description, AssertionError)
            typecheck.is_bool(locked, AssertionError)
            for x0 in privileges:
                typecheck.is_struct(
                    x0, raritan.rpc.usermgmt.Role.Privilege, AssertionError
                )
            self.description = description
            self.locked = locked
            self.privileges = privileges
        @classmethod
        def decode(cls, json, agent):
            # Build an Info (incl. nested Privilege structures) from JSON.
            obj = cls(
                description=json["description"],
                locked=json["locked"],
                privileges=[
                    raritan.rpc.usermgmt.Role.Privilege.decode(x0, agent)
                    for x0 in json["privileges"]
                ],
            )
            return obj
        def encode(self):
            # Serialize this Info (incl. nested Privilege structures) to JSON.
            json = {}
            json["description"] = self.description
            json["locked"] = self.locked
            json["privileges"] = [
                raritan.rpc.usermgmt.Role.Privilege.encode(x0) for x0 in self.privileges
            ]
            return json
    def getInfo(self):
        # Remote call: fetch this role's Info from the device.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getInfo", args)
        _ret_ = raritan.rpc.usermgmt.Role.Info.decode(rsp["_ret_"], agent)
        typecheck.is_struct(_ret_, raritan.rpc.usermgmt.Role.Info, DecodeException)
        return _ret_
    def updateFull(self, info):
        # Remote call: replace this role's settings.  Returns an int status
        # (presumably 0 on success, ERR_INVALID_VALUE on rejection — confirm
        # against the device's IDL documentation).
        agent = self.agent
        typecheck.is_struct(info, raritan.rpc.usermgmt.Role.Info, AssertionError)
        args = {}
        args["info"] = raritan.rpc.usermgmt.Role.Info.encode(info)
        rsp = agent.json_rpc(self.target, "updateFull", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libisys/src/idl/RoleManager.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.usermgmt
# value object
class RoleEvent(raritan.rpc.event.UserEvent):
    """Base value object for role-related events; adds the affected role's
    name on top of the acting user/IP carried by event.UserEvent
    (generated code)."""
    idlType = "usermgmt.RoleEvent:1.0.0"
    def __init__(self, rolename, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.RoleEvent, self).__init__(
            actUserName, actIpAddr, source
        )
        typecheck.is_string(rolename, AssertionError)
        self.rolename = rolename
    def encode(self):
        # Extend the base event's JSON encoding with the role name.
        json = super(raritan.rpc.usermgmt.RoleEvent, self).encode()
        json["rolename"] = self.rolename
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            rolename=json["rolename"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        # Own elements first, then the inherited ones.
        elements = ["rolename"]
        elements = elements + super(raritan.rpc.usermgmt.RoleEvent, self).listElements()
        return elements
# value object
class RoleAdded(RoleEvent):
    """Event value object: a role was created (generated code; adds no fields
    beyond RoleEvent)."""
    idlType = "usermgmt.RoleAdded:1.0.0"
    def __init__(self, rolename, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.RoleAdded, self).__init__(
            rolename, actUserName, actIpAddr, source
        )
    def encode(self):
        json = super(raritan.rpc.usermgmt.RoleAdded, self).encode()
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            # for usermgmt.RoleEvent
            rolename=json["rolename"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = []
        elements = elements + super(raritan.rpc.usermgmt.RoleAdded, self).listElements()
        return elements
# value object
class RoleRemoved(RoleEvent):
    """Event value object: a role was deleted (generated code; adds no fields
    beyond RoleEvent)."""
    idlType = "usermgmt.RoleRemoved:1.0.0"
    def __init__(self, rolename, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.RoleRemoved, self).__init__(
            rolename, actUserName, actIpAddr, source
        )
    def encode(self):
        json = super(raritan.rpc.usermgmt.RoleRemoved, self).encode()
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            # for usermgmt.RoleEvent
            rolename=json["rolename"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = []
        elements = (
            elements + super(raritan.rpc.usermgmt.RoleRemoved, self).listElements()
        )
        return elements
# value object
class RoleChanged(RoleEvent):
    """Event value object: a role's settings changed; carries both the old and
    the new Role.Info (generated code)."""
    idlType = "usermgmt.RoleChanged:1.0.0"
    def __init__(
        self, oldSettings, newSettings, rolename, actUserName, actIpAddr, source
    ):
        super(raritan.rpc.usermgmt.RoleChanged, self).__init__(
            rolename, actUserName, actIpAddr, source
        )
        typecheck.is_struct(oldSettings, raritan.rpc.usermgmt.Role.Info, AssertionError)
        typecheck.is_struct(newSettings, raritan.rpc.usermgmt.Role.Info, AssertionError)
        self.oldSettings = oldSettings
        self.newSettings = newSettings
    def encode(self):
        # Extend the base event's JSON encoding with both Info snapshots.
        json = super(raritan.rpc.usermgmt.RoleChanged, self).encode()
        json["oldSettings"] = raritan.rpc.usermgmt.Role.Info.encode(self.oldSettings)
        json["newSettings"] = raritan.rpc.usermgmt.Role.Info.encode(self.newSettings)
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            oldSettings=raritan.rpc.usermgmt.Role.Info.decode(
                json["oldSettings"], agent
            ),
            newSettings=raritan.rpc.usermgmt.Role.Info.decode(
                json["newSettings"], agent
            ),
            # for usermgmt.RoleEvent
            rolename=json["rolename"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = ["oldSettings", "newSettings"]
        elements = (
            elements + super(raritan.rpc.usermgmt.RoleChanged, self).listElements()
        )
        return elements
# interface
class RoleManager(Interface):
    """JSON-RPC proxy for the usermgmt.RoleManager IDL interface (generated
    code): create/delete roles and enumerate roles and available privileges.
    """
    idlType = "usermgmt.RoleManager:1.0.0"
    # Status codes for createRoleFull.
    ERR_ROLE_ALREADY_EXISTS = 1
    ERR_MAX_ROLES_REACHED = 2
    ERR_INVALID_VALUE = 3
    # Status codes for deleteRole.  NOTE(review): the numeric values overlap
    # with the create codes above — as generated, each method apparently has
    # its own code space; confirm against the IDL.
    ERR_ROLE_DOESNT_EXIST = 1
    ERR_ROLE_NOT_DELETABLE = 2
    # structure
    class ArgumentDesc(Structure):
        """Description of one privilege argument: name plus display text."""
        idlType = "usermgmt.RoleManager.ArgumentDesc:1.0.0"
        elements = ["name", "desc"]
        def __init__(self, name, desc):
            typecheck.is_string(name, AssertionError)
            typecheck.is_string(desc, AssertionError)
            self.name = name
            self.desc = desc
        @classmethod
        def decode(cls, json, agent):
            obj = cls(
                name=json["name"],
                desc=json["desc"],
            )
            return obj
        def encode(self):
            json = {}
            json["name"] = self.name
            json["desc"] = self.desc
            return json
    # structure
    class PrivilegeDesc(Structure):
        """Description of one available privilege and its arguments."""
        idlType = "usermgmt.RoleManager.PrivilegeDesc:1.0.0"
        elements = ["name", "desc", "args"]
        def __init__(self, name, desc, args):
            typecheck.is_string(name, AssertionError)
            typecheck.is_string(desc, AssertionError)
            for x0 in args:
                typecheck.is_struct(
                    x0, raritan.rpc.usermgmt.RoleManager.ArgumentDesc, AssertionError
                )
            self.name = name
            self.desc = desc
            self.args = args
        @classmethod
        def decode(cls, json, agent):
            obj = cls(
                name=json["name"],
                desc=json["desc"],
                args=[
                    raritan.rpc.usermgmt.RoleManager.ArgumentDesc.decode(x0, agent)
                    for x0 in json["args"]
                ],
            )
            return obj
        def encode(self):
            json = {}
            json["name"] = self.name
            json["desc"] = self.desc
            json["args"] = [
                raritan.rpc.usermgmt.RoleManager.ArgumentDesc.encode(x0)
                for x0 in self.args
            ]
            return json
    # structure
    class RoleAccount(Structure):
        """One role as listed by the manager: numeric id, name and settings."""
        idlType = "usermgmt.RoleManager.RoleAccount:1.0.0"
        elements = ["id", "name", "info"]
        def __init__(self, id, name, info):
            typecheck.is_int(id, AssertionError)
            typecheck.is_string(name, AssertionError)
            typecheck.is_struct(info, raritan.rpc.usermgmt.Role.Info, AssertionError)
            self.id = id
            self.name = name
            self.info = info
        @classmethod
        def decode(cls, json, agent):
            obj = cls(
                id=json["id"],
                name=json["name"],
                info=raritan.rpc.usermgmt.Role.Info.decode(json["info"], agent),
            )
            return obj
        def encode(self):
            json = {}
            json["id"] = self.id
            json["name"] = self.name
            json["info"] = raritan.rpc.usermgmt.Role.Info.encode(self.info)
            return json
    # structure
    class Info(Structure):
        """Combined snapshot: all privilege descriptions plus all roles."""
        idlType = "usermgmt.RoleManager.Info:1.0.0"
        elements = ["privileges", "roles"]
        def __init__(self, privileges, roles):
            for x0 in privileges:
                typecheck.is_struct(
                    x0, raritan.rpc.usermgmt.RoleManager.PrivilegeDesc, AssertionError
                )
            for x0 in roles:
                typecheck.is_struct(
                    x0, raritan.rpc.usermgmt.RoleManager.RoleAccount, AssertionError
                )
            self.privileges = privileges
            self.roles = roles
        @classmethod
        def decode(cls, json, agent):
            obj = cls(
                privileges=[
                    raritan.rpc.usermgmt.RoleManager.PrivilegeDesc.decode(x0, agent)
                    for x0 in json["privileges"]
                ],
                roles=[
                    raritan.rpc.usermgmt.RoleManager.RoleAccount.decode(x0, agent)
                    for x0 in json["roles"]
                ],
            )
            return obj
        def encode(self):
            json = {}
            json["privileges"] = [
                raritan.rpc.usermgmt.RoleManager.PrivilegeDesc.encode(x0)
                for x0 in self.privileges
            ]
            json["roles"] = [
                raritan.rpc.usermgmt.RoleManager.RoleAccount.encode(x0)
                for x0 in self.roles
            ]
            return json
    def createRoleFull(self, name, info):
        # Remote call: create a role with the given settings; returns an int
        # status (see the ERR_* constants above).
        agent = self.agent
        typecheck.is_string(name, AssertionError)
        typecheck.is_struct(info, raritan.rpc.usermgmt.Role.Info, AssertionError)
        args = {}
        args["name"] = name
        args["info"] = raritan.rpc.usermgmt.Role.Info.encode(info)
        rsp = agent.json_rpc(self.target, "createRoleFull", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def deleteRole(self, name):
        # Remote call: delete the named role; returns an int status.
        agent = self.agent
        typecheck.is_string(name, AssertionError)
        args = {}
        args["name"] = name
        rsp = agent.json_rpc(self.target, "deleteRole", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getAllRoleNames(self):
        # Remote call: list the names of all existing roles.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getAllRoleNames", args)
        _ret_ = [x0 for x0 in rsp["_ret_"]]
        for x0 in _ret_:
            typecheck.is_string(x0, DecodeException)
        return _ret_
    def getAllRoles(self):
        # Remote call: list all roles as RoleAccount structures.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getAllRoles", args)
        _ret_ = [
            raritan.rpc.usermgmt.RoleManager.RoleAccount.decode(x0, agent)
            for x0 in rsp["_ret_"]
        ]
        for x0 in _ret_:
            typecheck.is_struct(
                x0, raritan.rpc.usermgmt.RoleManager.RoleAccount, DecodeException
            )
        return _ret_
    def getAllPrivileges(self):
        # Remote call: list all privilege descriptions known to the device.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getAllPrivileges", args)
        _ret_ = [
            raritan.rpc.usermgmt.RoleManager.PrivilegeDesc.decode(x0, agent)
            for x0 in rsp["_ret_"]
        ]
        for x0 in _ret_:
            typecheck.is_struct(
                x0, raritan.rpc.usermgmt.RoleManager.PrivilegeDesc, DecodeException
            )
        return _ret_
    def getInfo(self):
        # Remote call: fetch privileges and roles in one combined snapshot.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getInfo", args)
        _ret_ = raritan.rpc.usermgmt.RoleManager.Info.decode(rsp["_ret_"], agent)
        typecheck.is_struct(
            _ret_, raritan.rpc.usermgmt.RoleManager.Info, DecodeException
        )
        return _ret_
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libisys/src/idl/User.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.um
import raritan.rpc.usermgmt
# structure
class SnmpV3Settings(Structure):
    """Per-user SNMPv3 access settings (generated code): security level,
    auth/privacy protocols and the corresponding passphrases, including the
    "reuse password as passphrase" convenience flags.  The have*Passphrase
    flags presumably indicate whether a passphrase is set without revealing
    it — confirm against the device's IDL documentation."""
    idlType = "usermgmt.SnmpV3Settings:1.0.0"
    elements = [
        "enabled",
        "secLevel",
        "authProtocol",
        "usePasswordAsAuthPassphrase",
        "haveAuthPassphrase",
        "authPassphrase",
        "privProtocol",
        "useAuthPassphraseAsPrivPassphrase",
        "havePrivPassphrase",
        "privPassphrase",
    ]
    def __init__(
        self,
        enabled,
        secLevel,
        authProtocol,
        usePasswordAsAuthPassphrase,
        haveAuthPassphrase,
        authPassphrase,
        privProtocol,
        useAuthPassphraseAsPrivPassphrase,
        havePrivPassphrase,
        privPassphrase,
    ):
        typecheck.is_bool(enabled, AssertionError)
        typecheck.is_enum(secLevel, raritan.rpc.um.SnmpV3.SecurityLevel, AssertionError)
        typecheck.is_enum(
            authProtocol, raritan.rpc.um.SnmpV3.AuthProtocol, AssertionError
        )
        typecheck.is_bool(usePasswordAsAuthPassphrase, AssertionError)
        typecheck.is_bool(haveAuthPassphrase, AssertionError)
        typecheck.is_string(authPassphrase, AssertionError)
        typecheck.is_enum(
            privProtocol, raritan.rpc.um.SnmpV3.PrivProtocol, AssertionError
        )
        typecheck.is_bool(useAuthPassphraseAsPrivPassphrase, AssertionError)
        typecheck.is_bool(havePrivPassphrase, AssertionError)
        typecheck.is_string(privPassphrase, AssertionError)
        self.enabled = enabled
        self.secLevel = secLevel
        self.authProtocol = authProtocol
        self.usePasswordAsAuthPassphrase = usePasswordAsAuthPassphrase
        self.haveAuthPassphrase = haveAuthPassphrase
        self.authPassphrase = authPassphrase
        self.privProtocol = privProtocol
        self.useAuthPassphraseAsPrivPassphrase = useAuthPassphraseAsPrivPassphrase
        self.havePrivPassphrase = havePrivPassphrase
        self.privPassphrase = privPassphrase
    @classmethod
    def decode(cls, json, agent):
        # Build settings from JSON; enum fields go through their decoders.
        obj = cls(
            enabled=json["enabled"],
            secLevel=raritan.rpc.um.SnmpV3.SecurityLevel.decode(json["secLevel"]),
            authProtocol=raritan.rpc.um.SnmpV3.AuthProtocol.decode(
                json["authProtocol"]
            ),
            usePasswordAsAuthPassphrase=json["usePasswordAsAuthPassphrase"],
            haveAuthPassphrase=json["haveAuthPassphrase"],
            authPassphrase=json["authPassphrase"],
            privProtocol=raritan.rpc.um.SnmpV3.PrivProtocol.decode(
                json["privProtocol"]
            ),
            useAuthPassphraseAsPrivPassphrase=json["useAuthPassphraseAsPrivPassphrase"],
            havePrivPassphrase=json["havePrivPassphrase"],
            privPassphrase=json["privPassphrase"],
        )
        return obj
    def encode(self):
        # Serialize to JSON; enum fields go through their encoders.
        json = {}
        json["enabled"] = self.enabled
        json["secLevel"] = raritan.rpc.um.SnmpV3.SecurityLevel.encode(self.secLevel)
        json["authProtocol"] = raritan.rpc.um.SnmpV3.AuthProtocol.encode(
            self.authProtocol
        )
        json["usePasswordAsAuthPassphrase"] = self.usePasswordAsAuthPassphrase
        json["haveAuthPassphrase"] = self.haveAuthPassphrase
        json["authPassphrase"] = self.authPassphrase
        json["privProtocol"] = raritan.rpc.um.SnmpV3.PrivProtocol.encode(
            self.privProtocol
        )
        json[
            "useAuthPassphraseAsPrivPassphrase"
        ] = self.useAuthPassphraseAsPrivPassphrase
        json["havePrivPassphrase"] = self.havePrivPassphrase
        json["privPassphrase"] = self.privPassphrase
        return json
# structure
class AuxInfo(Structure):
    # Auxiliary, human-oriented account details: full name, telephone number
    # and e-mail address. All three elements are plain strings on the wire.
    idlType = "usermgmt.AuxInfo:1.0.0"
    elements = ["fullname", "telephone", "eMail"]
    def __init__(self, fullname, telephone, eMail):
        # Reject non-string values up front; typecheck raises AssertionError.
        for value in (fullname, telephone, eMail):
            typecheck.is_string(value, AssertionError)
        self.fullname = fullname
        self.telephone = telephone
        self.eMail = eMail
    @classmethod
    def decode(cls, json, agent):
        # Build an AuxInfo from its JSON-RPC wire representation.
        return cls(
            fullname=json["fullname"],
            telephone=json["telephone"],
            eMail=json["eMail"],
        )
    def encode(self):
        # Return the JSON-serializable wire representation of this structure.
        return {
            "fullname": self.fullname,
            "telephone": self.telephone,
            "eMail": self.eMail,
        }
# enumeration
class TemperatureEnum(Enumeration):
    # Temperature display-unit preference (Celsius vs. Fahrenheit).
    idlType = "usermgmt.TemperatureEnum:1.0.0"
    values = ["DEG_C", "DEG_F"]
# Members are attached after the class body; the integer passed to the
# constructor is the member's index into `values`.
TemperatureEnum.DEG_C = TemperatureEnum(0)
TemperatureEnum.DEG_F = TemperatureEnum(1)
# enumeration
class LengthEnum(Enumeration):
    # Length display-unit preference (meters vs. feet).
    idlType = "usermgmt.LengthEnum:1.0.0"
    values = ["METER", "FEET"]
# Members are attached after the class body; the integer is the index
# into `values`.
LengthEnum.METER = LengthEnum(0)
LengthEnum.FEET = LengthEnum(1)
# enumeration
class PressureEnum(Enumeration):
    # Pressure display-unit preference (pascal vs. PSI).
    idlType = "usermgmt.PressureEnum:1.0.0"
    values = ["PASCAL", "PSI"]
# Members are attached after the class body; the integer is the index
# into `values`.
PressureEnum.PASCAL = PressureEnum(0)
PressureEnum.PSI = PressureEnum(1)
# structure
class Preferences(Structure):
    # Per-user display-unit preferences (temperature/length/pressure).
    idlType = "usermgmt.Preferences:1.0.0"
    elements = ["temperatureUnit", "lengthUnit", "pressureUnit"]
    def __init__(self, temperatureUnit, lengthUnit, pressureUnit):
        # Each field must be a member of its respective usermgmt enumeration;
        # typecheck raises AssertionError otherwise.
        typecheck.is_enum(
            temperatureUnit, raritan.rpc.usermgmt.TemperatureEnum, AssertionError
        )
        typecheck.is_enum(lengthUnit, raritan.rpc.usermgmt.LengthEnum, AssertionError)
        typecheck.is_enum(
            pressureUnit, raritan.rpc.usermgmt.PressureEnum, AssertionError
        )
        self.temperatureUnit = temperatureUnit
        self.lengthUnit = lengthUnit
        self.pressureUnit = pressureUnit
    @classmethod
    def decode(cls, json, agent):
        # Rebuild enum members from their wire encoding.
        obj = cls(
            temperatureUnit=raritan.rpc.usermgmt.TemperatureEnum.decode(
                json["temperatureUnit"]
            ),
            lengthUnit=raritan.rpc.usermgmt.LengthEnum.decode(json["lengthUnit"]),
            pressureUnit=raritan.rpc.usermgmt.PressureEnum.decode(json["pressureUnit"]),
        )
        return obj
    def encode(self):
        # Inverse of decode(): produce a JSON-serializable dict.
        json = {}
        json["temperatureUnit"] = raritan.rpc.usermgmt.TemperatureEnum.encode(
            self.temperatureUnit
        )
        json["lengthUnit"] = raritan.rpc.usermgmt.LengthEnum.encode(self.lengthUnit)
        json["pressureUnit"] = raritan.rpc.usermgmt.PressureEnum.encode(
            self.pressureUnit
        )
        return json
# structure
class UserInfo(Structure):
    # Full per-user account settings: status flags, contact info, SNMPv3
    # settings, SSH key, display preferences and role assignments.
    idlType = "usermgmt.UserInfo:1.0.0"
    # Wire-format element names, in IDL declaration order.
    elements = [
        "enabled",
        "locked",
        "blocked",
        "needPasswordChange",
        "auxInfo",
        "snmpV3Settings",
        "sshPublicKey",
        "preferences",
        "roleIds",
    ]
    def __init__(
        self,
        enabled,
        locked,
        blocked,
        needPasswordChange,
        auxInfo,
        snmpV3Settings,
        sshPublicKey,
        preferences,
        roleIds,
    ):
        # Validate fields eagerly; typecheck raises AssertionError on mismatch.
        typecheck.is_bool(enabled, AssertionError)
        typecheck.is_bool(locked, AssertionError)
        typecheck.is_bool(blocked, AssertionError)
        typecheck.is_bool(needPasswordChange, AssertionError)
        typecheck.is_struct(auxInfo, raritan.rpc.usermgmt.AuxInfo, AssertionError)
        typecheck.is_struct(
            snmpV3Settings, raritan.rpc.usermgmt.SnmpV3Settings, AssertionError
        )
        typecheck.is_string(sshPublicKey, AssertionError)
        typecheck.is_struct(
            preferences, raritan.rpc.usermgmt.Preferences, AssertionError
        )
        # roleIds is a sequence of integer role identifiers.
        for x0 in roleIds:
            typecheck.is_int(x0, AssertionError)
        self.enabled = enabled
        self.locked = locked
        self.blocked = blocked
        self.needPasswordChange = needPasswordChange
        self.auxInfo = auxInfo
        self.snmpV3Settings = snmpV3Settings
        self.sshPublicKey = sshPublicKey
        self.preferences = preferences
        self.roleIds = roleIds
    @classmethod
    def decode(cls, json, agent):
        # Nested structures are rebuilt via their own decode() helpers.
        obj = cls(
            enabled=json["enabled"],
            locked=json["locked"],
            blocked=json["blocked"],
            needPasswordChange=json["needPasswordChange"],
            auxInfo=raritan.rpc.usermgmt.AuxInfo.decode(json["auxInfo"], agent),
            snmpV3Settings=raritan.rpc.usermgmt.SnmpV3Settings.decode(
                json["snmpV3Settings"], agent
            ),
            sshPublicKey=json["sshPublicKey"],
            preferences=raritan.rpc.usermgmt.Preferences.decode(
                json["preferences"], agent
            ),
            # Copies the list so the instance does not alias the JSON input.
            roleIds=[x0 for x0 in json["roleIds"]],
        )
        return obj
    def encode(self):
        # Inverse of decode(): produce a JSON-serializable dict.
        json = {}
        json["enabled"] = self.enabled
        json["locked"] = self.locked
        json["blocked"] = self.blocked
        json["needPasswordChange"] = self.needPasswordChange
        json["auxInfo"] = raritan.rpc.usermgmt.AuxInfo.encode(self.auxInfo)
        json["snmpV3Settings"] = raritan.rpc.usermgmt.SnmpV3Settings.encode(
            self.snmpV3Settings
        )
        json["sshPublicKey"] = self.sshPublicKey
        json["preferences"] = raritan.rpc.usermgmt.Preferences.encode(self.preferences)
        # Copies the list so the returned dict does not alias instance state.
        json["roleIds"] = [x0 for x0 in self.roleIds]
        return json
# structure
class UserCapabilities(Structure):
    # Capability flags reported for the calling user: whether the password
    # and the display preferences may be changed.
    idlType = "usermgmt.UserCapabilities:1.0.0"
    elements = ["canSetPassword", "canSetPreferences"]
    def __init__(self, canSetPassword, canSetPreferences):
        # Both elements are booleans; typecheck raises AssertionError.
        for flag in (canSetPassword, canSetPreferences):
            typecheck.is_bool(flag, AssertionError)
        self.canSetPassword = canSetPassword
        self.canSetPreferences = canSetPreferences
    @classmethod
    def decode(cls, json, agent):
        # Build a UserCapabilities from its JSON-RPC wire representation.
        return cls(
            canSetPassword=json["canSetPassword"],
            canSetPreferences=json["canSetPreferences"],
        )
    def encode(self):
        # Return the JSON-serializable wire representation of this structure.
        return {
            "canSetPassword": self.canSetPassword,
            "canSetPreferences": self.canSetPreferences,
        }
# interface
class User(Interface):
    # JSON-RPC proxy for a single user account; each method posts a request
    # to the remote target via self.agent.json_rpc().
    idlType = "usermgmt.User:1.0.1"
    # Integer error codes returned by the setter methods below (generated
    # from User.idl).
    ERR_PASSWORD_UNCHANGED = 1
    ERR_PASSWORD_EMPTY = 2
    ERR_PASSWORD_TOO_SHORT = 3
    ERR_PASSWORD_TOO_LONG = 4
    ERR_PASSWORD_CTRL_CHARS = 5
    ERR_PASSWORD_NEED_LOWER = 6
    ERR_PASSWORD_NEED_UPPER = 7
    ERR_PASSWORD_NEED_NUMERIC = 8
    ERR_PASSWORD_NEED_SPECIAL = 9
    ERR_PASSWORD_IN_HISTORY = 10
    ERR_PASSWORD_TOO_SHORT_FOR_SNMP = 11
    ERR_INVALID_ARGUMENT = 12
    def getInfo(self):
        # Fetch this account's UserInfo; malformed responses raise
        # DecodeException.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getInfo", args)
        _ret_ = raritan.rpc.usermgmt.UserInfo.decode(rsp["_ret_"], agent)
        typecheck.is_struct(_ret_, raritan.rpc.usermgmt.UserInfo, DecodeException)
        return _ret_
    def setAccountPassword(self, password):
        # Change this account's password; returns one of the ERR_* codes
        # (0 presumably meaning success -- defined by the IDL, not shown here).
        agent = self.agent
        typecheck.is_string(password, AssertionError)
        args = {}
        args["password"] = password
        rsp = agent.json_rpc(self.target, "setAccountPassword", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def updateAccountFull(self, password, info):
        # Atomically update password and full account settings.
        agent = self.agent
        typecheck.is_string(password, AssertionError)
        typecheck.is_struct(info, raritan.rpc.usermgmt.UserInfo, AssertionError)
        args = {}
        args["password"] = password
        args["info"] = raritan.rpc.usermgmt.UserInfo.encode(info)
        rsp = agent.json_rpc(self.target, "updateAccountFull", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getInfoAndPrivileges(self):
        # Returns a (UserInfo, list-of-Role.Privilege) tuple for this account.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getInfoAndPrivileges", args)
        info = raritan.rpc.usermgmt.UserInfo.decode(rsp["info"], agent)
        privileges = [
            raritan.rpc.usermgmt.Role.Privilege.decode(x0, agent)
            for x0 in rsp["privileges"]
        ]
        typecheck.is_struct(info, raritan.rpc.usermgmt.UserInfo, DecodeException)
        for x0 in privileges:
            typecheck.is_struct(
                x0, raritan.rpc.usermgmt.Role.Privilege, DecodeException
            )
        return (info, privileges)
    def setPreferences(self, prefs):
        # Update this account's display-unit preferences.
        agent = self.agent
        typecheck.is_struct(prefs, raritan.rpc.usermgmt.Preferences, AssertionError)
        args = {}
        args["prefs"] = raritan.rpc.usermgmt.Preferences.encode(prefs)
        rsp = agent.json_rpc(self.target, "setPreferences", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getCapabilities(self):
        # Query which settings the calling user may change on this account.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getCapabilities", args)
        _ret_ = raritan.rpc.usermgmt.UserCapabilities.decode(rsp["_ret_"], agent)
        typecheck.is_struct(
            _ret_, raritan.rpc.usermgmt.UserCapabilities, DecodeException
        )
        return _ret_
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libisys/src/idl/UserManager.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.usermgmt
# structure
class Account(Structure):
    # Pairs an account's login name with its full UserInfo settings.
    idlType = "usermgmt.Account:1.0.0"
    elements = ["name", "info"]
    def __init__(self, name, info):
        # name must be a string, info a UserInfo structure; typecheck raises
        # AssertionError otherwise.
        typecheck.is_string(name, AssertionError)
        typecheck.is_struct(info, raritan.rpc.usermgmt.UserInfo, AssertionError)
        self.name = name
        self.info = info
    @classmethod
    def decode(cls, json, agent):
        # Build an Account from its JSON-RPC wire representation; the nested
        # UserInfo is rebuilt through its own decode() helper.
        return cls(
            name=json["name"],
            info=raritan.rpc.usermgmt.UserInfo.decode(json["info"], agent),
        )
    def encode(self):
        # Return the JSON-serializable wire representation of this structure.
        return {
            "name": self.name,
            "info": raritan.rpc.usermgmt.UserInfo.encode(self.info),
        }
# value object
class AccountEvent(raritan.rpc.event.UserEvent):
    # Base value object for user-account events; adds the affected account's
    # username to the generic UserEvent payload (acting user, IP, source).
    idlType = "usermgmt.AccountEvent:1.0.0"
    def __init__(self, username, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.AccountEvent, self).__init__(
            actUserName, actIpAddr, source
        )
        typecheck.is_string(username, AssertionError)
        self.username = username
    def encode(self):
        # Extend the ancestor encoding with this class's own element.
        json = super(raritan.rpc.usermgmt.AccountEvent, self).encode()
        json["username"] = self.username
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            username=json["username"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        # Own elements first, then the ancestors' elements.
        elements = ["username"]
        elements = (
            elements + super(raritan.rpc.usermgmt.AccountEvent, self).listElements()
        )
        return elements
# value object
class AccountAdded(AccountEvent):
    # Event: a user account was created. Adds no elements of its own beyond
    # AccountEvent's payload.
    idlType = "usermgmt.AccountAdded:1.0.0"
    def __init__(self, username, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.AccountAdded, self).__init__(
            username, actUserName, actIpAddr, source
        )
    def encode(self):
        json = super(raritan.rpc.usermgmt.AccountAdded, self).encode()
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            # for usermgmt.AccountEvent
            username=json["username"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = []
        elements = (
            elements + super(raritan.rpc.usermgmt.AccountAdded, self).listElements()
        )
        return elements
# value object
class AccountRemoved(AccountEvent):
    # Event: a user account was deleted. Adds no elements of its own beyond
    # AccountEvent's payload.
    idlType = "usermgmt.AccountRemoved:1.0.0"
    def __init__(self, username, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.AccountRemoved, self).__init__(
            username, actUserName, actIpAddr, source
        )
    def encode(self):
        json = super(raritan.rpc.usermgmt.AccountRemoved, self).encode()
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            # for usermgmt.AccountEvent
            username=json["username"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = []
        elements = (
            elements + super(raritan.rpc.usermgmt.AccountRemoved, self).listElements()
        )
        return elements
# value object
class PasswordChanged(AccountEvent):
    # Event: an account's password was changed. Adds no elements of its own
    # beyond AccountEvent's payload.
    idlType = "usermgmt.PasswordChanged:1.0.0"
    def __init__(self, username, actUserName, actIpAddr, source):
        super(raritan.rpc.usermgmt.PasswordChanged, self).__init__(
            username, actUserName, actIpAddr, source
        )
    def encode(self):
        json = super(raritan.rpc.usermgmt.PasswordChanged, self).encode()
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            # for usermgmt.AccountEvent
            username=json["username"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        elements = []
        elements = (
            elements + super(raritan.rpc.usermgmt.PasswordChanged, self).listElements()
        )
        return elements
# value object
class AccountChanged(AccountEvent):
    # Event: an account's settings changed; carries both the previous and the
    # new UserInfo snapshots in addition to AccountEvent's payload.
    idlType = "usermgmt.AccountChanged:1.0.0"
    def __init__(
        self, oldSettings, newSettings, username, actUserName, actIpAddr, source
    ):
        super(raritan.rpc.usermgmt.AccountChanged, self).__init__(
            username, actUserName, actIpAddr, source
        )
        typecheck.is_struct(oldSettings, raritan.rpc.usermgmt.UserInfo, AssertionError)
        typecheck.is_struct(newSettings, raritan.rpc.usermgmt.UserInfo, AssertionError)
        self.oldSettings = oldSettings
        self.newSettings = newSettings
    def encode(self):
        # Extend the ancestor encoding with the before/after snapshots.
        json = super(raritan.rpc.usermgmt.AccountChanged, self).encode()
        json["oldSettings"] = raritan.rpc.usermgmt.UserInfo.encode(self.oldSettings)
        json["newSettings"] = raritan.rpc.usermgmt.UserInfo.encode(self.newSettings)
        return json
    @classmethod
    def decode(cls, json, agent):
        obj = cls(
            oldSettings=raritan.rpc.usermgmt.UserInfo.decode(
                json["oldSettings"], agent
            ),
            newSettings=raritan.rpc.usermgmt.UserInfo.decode(
                json["newSettings"], agent
            ),
            # for usermgmt.AccountEvent
            username=json["username"],
            # for event.UserEvent
            actUserName=json["actUserName"],
            actIpAddr=json["actIpAddr"],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj
    def listElements(self):
        # Own elements first, then the ancestors' elements.
        elements = ["oldSettings", "newSettings"]
        elements = (
            elements + super(raritan.rpc.usermgmt.AccountChanged, self).listElements()
        )
        return elements
# interface
class UserManager(Interface):
    # JSON-RPC proxy for account administration: create/delete/list accounts
    # and manage device-wide default preferences.
    idlType = "usermgmt.UserManager:1.0.2"
    # Error codes for deleteAccount().
    ERR_USER_DOESNT_EXIST = 1
    ERR_USER_NOT_DELETABLE = 2
    # NOTE(review): the constants below numerically overlap the two above
    # (both start at 1). Presumably each method interprets its own error-code
    # namespace as generated from UserManager.idl -- confirm against the IDL
    # before comparing codes across methods.
    ERR_USER_ALREADY_EXISTS = 1
    ERR_MAX_USERS_REACHED = 2
    ERR_PASSWORD_TOO_SHORT_FOR_SNMP = 3
    ERR_INVALID_VALUE = 4
    ERR_PASSWORD_EMPTY = 5
    ERR_PASSWORD_TOO_SHORT = 6
    ERR_PASSWORD_TOO_LONG = 7
    ERR_PASSWORD_CTRL_CHARS = 8
    ERR_PASSWORD_NEED_LOWER = 9
    ERR_PASSWORD_NEED_UPPER = 10
    ERR_PASSWORD_NEED_NUMERIC = 11
    ERR_PASSWORD_NEED_SPECIAL = 12
    def getAccountNames(self):
        # List all account login names.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getAccountNames", args)
        _ret_ = [x0 for x0 in rsp["_ret_"]]
        for x0 in _ret_:
            typecheck.is_string(x0, DecodeException)
        return _ret_
    def createAccount(self, username, password):
        # Create an account with default settings; returns an ERR_* code.
        agent = self.agent
        typecheck.is_string(username, AssertionError)
        typecheck.is_string(password, AssertionError)
        args = {}
        args["username"] = username
        args["password"] = password
        rsp = agent.json_rpc(self.target, "createAccount", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def deleteAccount(self, username):
        # Delete an account; returns an ERR_* code.
        agent = self.agent
        typecheck.is_string(username, AssertionError)
        args = {}
        args["username"] = username
        rsp = agent.json_rpc(self.target, "deleteAccount", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getAllAccounts(self):
        # Fetch every account with its full settings.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getAllAccounts", args)
        _ret_ = [raritan.rpc.usermgmt.Account.decode(x0, agent) for x0 in rsp["_ret_"]]
        for x0 in _ret_:
            typecheck.is_struct(x0, raritan.rpc.usermgmt.Account, DecodeException)
        return _ret_
    def createAccountFull(self, username, password, info):
        # Create an account with explicit settings; returns an ERR_* code.
        agent = self.agent
        typecheck.is_string(username, AssertionError)
        typecheck.is_string(password, AssertionError)
        typecheck.is_struct(info, raritan.rpc.usermgmt.UserInfo, AssertionError)
        args = {}
        args["username"] = username
        args["password"] = password
        args["info"] = raritan.rpc.usermgmt.UserInfo.encode(info)
        rsp = agent.json_rpc(self.target, "createAccountFull", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
    def getAccountsByRole(self, roleName):
        # Fetch all accounts assigned the given role.
        agent = self.agent
        typecheck.is_string(roleName, AssertionError)
        args = {}
        args["roleName"] = roleName
        rsp = agent.json_rpc(self.target, "getAccountsByRole", args)
        _ret_ = [raritan.rpc.usermgmt.Account.decode(x0, agent) for x0 in rsp["_ret_"]]
        for x0 in _ret_:
            typecheck.is_struct(x0, raritan.rpc.usermgmt.Account, DecodeException)
        return _ret_
    def getDefaultPreferences(self):
        # Fetch device-wide default display preferences.
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getDefaultPreferences", args)
        _ret_ = raritan.rpc.usermgmt.Preferences.decode(rsp["_ret_"], agent)
        typecheck.is_struct(_ret_, raritan.rpc.usermgmt.Preferences, DecodeException)
        return _ret_
    def setDefaultPreferences(self, prefs):
        # Update device-wide default display preferences; returns an int code.
        agent = self.agent
        typecheck.is_struct(prefs, raritan.rpc.usermgmt.Preferences, AssertionError)
        args = {}
        args["prefs"] = raritan.rpc.usermgmt.Preferences.encode(prefs)
        rsp = agent.json_rpc(self.target, "setDefaultPreferences", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
| StarcoderdataPython |
71448 | #!/usr/bin/env python
"""
emr_simulator.py
Part of Dooplicity framework
Runs JSON-encoded Hadoop Streaming job flow. FUNCTIONALITY IS IDIOSYNCRATIC;
it is currently confined to those features used by Rail. Format of input JSON
mirrors that of StepConfig list from JSON sent to EMR via RunJobsFlow. Any
files input to a mapper can be gzip'd, but inputs to a reducer currently cannot
be.
In --ipy mode, the script uses IPython Parallel to run tasks on different
engines mediated by a controller. IPython Parallel controller and engines must
be started before this script is invoked.
All paths in input JSON should be absolute.
Licensed under the MIT License:
Copyright (c) 2014 <NAME> and <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import argparse
import sys
from collections import defaultdict, OrderedDict, deque
import time
import json
import interface as dp_iface
import gc
import signal
import socket
import subprocess
import glob
import hashlib
import tempfile
import shutil
import os
import contextlib
from tools import make_temp_dir, make_temp_dir_and_register_cleanup
from ansibles import Url
import site
import string
def add_args(parser):
    """ Registers the EMR simulator's command-line arguments on a parser.
    parser: object of type parser.ArgumentParser
    No return value.
    """
    # Bind the bound method once; every option below is registered through it.
    add = parser.add_argument
    add('-m', '--memcap', type=int, required=False, default=(1024*300),
        help=('Maximum amount of memory (in bytes) to use per UNIX sort '
              'instance.'))
    add('-p', '--num-processes', type=int, required=False, default=1,
        help='Number of subprocesses to open at once.')
    add('-t', '--max-attempts', type=int, required=False, default=4,
        help='Maximum number of times to attempt a task.')
    add('-s', '--separator', type=str, required=False, default='\t',
        help='Separator between successive fields in inputs and '
             'intermediates.')
    add('-k', '--keep-intermediates', action='store_const', const=True,
        default=False,
        help='Keeps all intermediate output.')
    add('--keep-last-output', action='store_const', const=True, default=False,
        help='If --keep-intermediates is False, keeps outputs that are '
             'unused as inputs by steps.')
    add('--gzip-outputs', action='store_const', const=True, default=False,
        help='Compress step output files with gzip.')
    add('--gzip-level', type=int, required=False, default=3,
        help='Level of gzip compression to use, if applicable.')
    add('--ipy', action='store_const', const=True, default=False,
        help='Uses IPython Parallel controller and engines to execute '
             'tasks; this permits running a MapReduce job flow on a wide '
             'array of cluster setups. Ignores --num-processes in favor '
             'of the number of available engines.')
    add('--ipcontroller-json', type=str, required=False, default=None,
        help='Path to ipcontroller-client.json file; relevant only if '
             '--ipy is invoked. See IPython Parallel documentation for '
             'more information. If left unspecified, IPython\'s '
             'default path is used.')
    add('--ipy-profile', type=str, required=False, default=None,
        help='Connects to this IPython profile; relevant only '
             'if --ipy is invoked and takes precedence over '
             '--ipcontroller-json.')
    add('--scratch', type=str, required=False, default=None,
        help='Where to write any intermediate output before copying to '
             'consolidated intermediate directory. This is typically '
             'a directory local to a given node. None means write '
             'directly to consolidated intermediate directory. The '
             'string \"-\" means write to a temporary directory securely '
             'created by Python.')
    add('--direct-write', action='store_const', const=True, default=False,
        help='Always write intermediate files directly to consolidated '
             'intermediate directory, even if --scratch is specified.')
    add('--common', type=str, required=False, default=None,
        help='Location of a writable directory accessible across all '
             'nodes; this is where some temporary files may be stored '
             'and is not important unless running in --ipy mode; if '
             'left unspecified, defaults to Python temporary directory')
    add('--sort', type=str, required=False, default='sort',
        help='Path to sort executable. Add arguments as necessary, '
             'e.g. for specifying a directory for storing sort\'s '
             'temporary files.')
def init_worker():
    """ Makes a pool worker ignore SIGINT so only the parent handles it.
    Exiting gracefully after KeyboardInterrupt or SystemExit is a
    challenge; having workers ignore SIGINT (technique from
    http://noswap.com/blog/python-multiprocessing-keyboardinterrupt )
    lets the parent process coordinate shutdown.
    No return value.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def yopen(gzipped, *args):
    """ Passes args on to the appropriate opener, gzip or regular.
    A dooplicity.tools.xopen that uses the gzip module, which is
    unsafe for writing. See xopen's docstring in dooplicity.tools for
    more information.
    gzipped: True iff gzip.open() should be used to open rather than
        open(); False iff open() should be used; None if input should be
        read and guessed
    *args: unnamed arguments to pass (first is the file path)
    Return value: file object
    """
    import gzip
    if gzipped is None:
        # Guess from the file's magic number. The stream is opened in binary
        # mode, so compare against a BYTES literal: b'\x1f\x8b' equals the
        # str the read returns on Python 2 and the bytes it returns on
        # Python 3. (The previous str literal never matched under Python 3,
        # so gzip'd input was silently opened as plain text.)
        with open(args[0], 'rb') as binary_input_stream:
            gzipped = binary_input_stream.read(2) == b'\x1f\x8b'
    if gzipped:
        return gzip.open(*args)
    return open(*args)
def parsed_keys(partition_options, key_fields):
    """ Parses UNIX sort options to figure out what to partition on.
    Returned is a function that takes a line as input and returns a list
    of elements from the line to partition on OR False if the args are
    invalid.
    partition_options: UNIX sort options like -k1,1 -k3 -k3,4r -k 4 -k 5,3
    key_fields: number of fields from line to consider key; historically
        some call sites pass the field separator string here instead of an
        int (see presorted_tasks), in which case the whole split line is
        the key
    Return value: see above
    """
    try:
        # Make list of 1- or 2-tuples of 0-based (start, end) field indexes;
        # 'n'/'r' sort modifiers are stripped before the int conversion.
        parsed_args = [
            tuple([int(el) - 1
                   for el in arg.strip().strip('nr').split(',')])
            for arg in partition_options.split('-k')
            if arg.strip() and len(arg.split(',')) <= 2
        ]
    except Exception:
        # args are invalid
        return False
    # The original implementation exec'd a def into the enclosing function's
    # locals, which only works with Python 2's exec statement (NameError on
    # Python 3); a closure is equivalent and portable. A non-int key_fields
    # used to be rendered by str.format as a whitespace slice bound
    # ("[: ]" == "[:]"), i.e. the whole split line -- mimic with None.
    key_limit = key_fields if isinstance(key_fields, int) else None
    def partitioned_key(line, separator):
        key = line.strip().split(separator)[:key_limit]
        pieces = [key[arg[0]:arg[1] + 1] if len(arg) == 2 else key[arg[0]:]
                  for arg in parsed_args]
        # Zero -k args previously generated a bare "return"; keep that
        # behavior by returning None.
        if not pieces:
            return None
        result = pieces[0]
        for piece in pieces[1:]:
            result = result + piece
        return result
    return partitioned_key
def gzip_into(gzip_level, outfn):
    """ Opens a gzip process that compresses its standard input into a file.
    gzip_level: compression level passed to the gzip executable.
    outfn: path the compressed stream is redirected to. NOTE: interpolated
        into a shell command, so it must not contain shell metacharacters.
    Return value: subprocess.Popen object whose stdin is a pipe; the caller
        writes uncompressed data to it and closes it when done.
    """
    command = 'gzip -%d >%s' % (gzip_level, outfn)
    return subprocess.Popen(command,
                            shell=True, bufsize=-1,
                            executable='/bin/bash',
                            stdin=subprocess.PIPE)
def presorted_tasks(input_files, process_id, sort_options, output_dir,
                    key_fields, separator, partition_options, task_count,
                    memcap, gzip=False, gzip_level=3, scratch=None,
                    direct_write=False, sort='sort', mod_partition=False,
                    max_attempts=4):
    """ Partitions input data into tasks and presorts them.
        Files in output directory are in the format x.y, where x is a task
        number on the interval [0, number of tasks - 1], and y is a process
        ID that identifies which process created the file. y is unimportant;
        the glob x.* should be catted to the reducer.
        Formula for computing task assignment:
            int(hashlib.md5(key).hexdigest(), 16) % (task_count)
        input_files: list of files on which to operate.
        process_id: unique identifier for current process.
        sort_options: options to use when presorting.
        output_dir: directory in which to write output files.
        key_fields: number of fields from a line to consider the key.
        separator: separator between successive fields from line.
        partition_options: sort-like options to use when partitioning.
        task_count: number of tasks in which to partition input.
        memcap: memory cap handed to UNIX sort's -S option (despite older
            wording, this is a size -- see add_args -- not a percent).
        gzip: True iff all files written should be gzipped; else False.
        gzip_level: Level of gzip compression to use, if applicable.
        scratch: where to write output before copying to output_dir. If "-"
            string, writes to temporary directory; if None, writes directly
            to output directory.
        direct_write: write intermediate files directly to final destination,
            no matter what scratch is.
        sort: path to sort executable
        mod_partition: if True, task is assigned according to formula
            (product of fields) % task_count
        max_attempts: maximum number of times to attempt partitioning input.
            MUST BE FINAL ARG to be compatible with
            execute_balanced_job_with_retries().
        Return value: None if no errors encountered; otherwise error string.
    """
    try:
        # NOTE(review): 'mul' is imported but never used in this function.
        from operator import mul
        task_streams = {}
        if scratch is not None:
            scratch = os.path.expanduser(os.path.expandvars(scratch))
        if gzip:
            task_stream_processes = {}
        # Decide where intermediate files are first written; final_output_dir
        # is where they must ultimately land (the finally block copies).
        if direct_write:
            final_output_dir = output_dir
        elif scratch == '-':
            # Write to temporary directory
            final_output_dir = output_dir
            try:
                output_dir = tempfile.mkdtemp()
            except OSError as e:
                return ('Problem encountered creating temporary '
                        'scratch subdirectory: %s' % e)
        elif scratch:
            # Write to temporary directory in special location
            final_output_dir = output_dir
            try:
                os.makedirs(scratch)
            # An OSError that is not "scratch is a file" (e.g. the directory
            # already exists) is deliberately ignored here.
            except OSError as e:
                if os.path.isfile(scratch):
                    return ('Scratch directory %s is a file.' % scratch)
            except IOError as e:
                return ('Scratch directory %s is not writable: %s' % (scratch,
                                                                      e))
            try:
                output_dir = tempfile.mkdtemp(dir=scratch)
            except OSError as e:
                return ('Problem encountered creating temporary '
                        'scratch subdirectory of %s: %s' % (scratch, e))
        else:
            final_output_dir = output_dir
        output_dir = os.path.expandvars(output_dir)
        final_output_dir = os.path.expandvars(final_output_dir)
        # NOTE(review): parsed_keys expects key_fields as its second
        # argument but receives `separator` here; format() then renders the
        # whitespace separator as an empty slice bound, so the whole split
        # line becomes the key. Confirm this is intended.
        partitioned_key = parsed_keys(partition_options, separator)
        if not partitioned_key:
            # Invalid partition options
            return ('Partition options "%s" are invalid.' % partition_options)
        for input_file in input_files:
            with yopen(None, input_file) as input_stream:
                for line in input_stream:
                    key = partitioned_key(line, separator)
                    if mod_partition and len(key) <= 1:
                        try:
                            task = abs(int(key[0])) % task_count
                        except (IndexError, ValueError):
                            # Null key or some field doesn't work with this
                            task = int(
                                hashlib.md5(separator.join(key)).hexdigest(),
                                16
                            ) % task_count
                    else:
                        task = int(
                            hashlib.md5(separator.join(key)).hexdigest(), 16
                        ) % task_count
                    try:
                        task_streams[task].write(line)
                    except KeyError:
                        # Task file doesn't exist yet; create it
                        if gzip:
                            task_file = os.path.join(output_dir, str(task) +
                                                     '.' + str(process_id)
                                                     + '.unsorted.gz')
                            task_stream_processes[task] = gzip_into(gzip_level, task_file)
                            task_streams[task] \
                                = task_stream_processes[task].stdin
                        else:
                            task_file = os.path.join(output_dir, str(task) +
                                                     '.' + str(process_id)
                                                     + '.unsorted')
                            task_streams[task] = open(task_file, 'w')
                        task_streams[task].write(line)
        # Flush/close every per-task stream before sorting; for gzip'd
        # output also wait for the compressor processes to drain.
        for task in task_streams:
            task_streams[task].close()
        if gzip:
            for task in task_stream_processes:
                task_stream_processes[task].wait()
        # Presort task files
        if gzip:
            for unsorted_file in glob.glob(os.path.join(
                                                output_dir,
                                                '*.%s.unsorted.gz'
                                                % process_id
                                            )):
                # Decompress, sort, recompress; pipefail makes any stage's
                # failure surface as a nonzero exit code.
                sort_command = (('set -eo pipefail; gzip -cd %s | '
                                 'LC_ALL=C %s -S %d %s -t$\'%s\' | '
                                 'gzip -c -%d >%s')
                                % (unsorted_file, sort, memcap,
                                   sort_options,
                                   separator.encode('string_escape'),
                                   gzip_level,
                                   unsorted_file[:-12] + '.gz'))
                try:
                    subprocess.check_output(sort_command,
                                            shell=True,
                                            executable='/bin/bash',
                                            bufsize=-1,
                                            stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    return (('Error "%s" encountered sorting file %s; exit '
                             'code was %d; command invoked was "%s".') %
                            (e.output.strip(),
                             unsorted_file, e.returncode,
                             sort_command))
                finally:
                    # The unsorted original is removed even on failure.
                    os.remove(unsorted_file)
        else:
            for unsorted_file in glob.glob(os.path.join(
                                                output_dir,
                                                '*.%s.unsorted'
                                                % process_id
                                            )):
                sort_command = 'LC_ALL=C %s -S %d %s -t$\'%s\' %s >%s' % (
                                                sort, memcap,
                                                sort_options,
                                                separator.encode(
                                                    'string_escape'
                                                ),
                                                unsorted_file,
                                                unsorted_file[:-9]
                                            )
                try:
                    subprocess.check_output(sort_command,
                                            shell=True,
                                            executable='/bin/bash',
                                            bufsize=-1,
                                            stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    return (('Error "%s" encountered sorting file %s; exit '
                             'code was %d; command invoked was "%s".') %
                            (e.output.strip(),
                             unsorted_file, e.returncode,
                             sort_command))
                finally:
                    os.remove(unsorted_file)
        return None
    except Exception:
        # Uncaught miscellaneous exception
        from traceback import format_exc
        return ('Error\n\n%s\nencountered partitioning input files '
                '[%s] into tasks.'
                % (format_exc(),
                   (('%s, '* (len(input_files) - 1)
                     + '%s') % tuple(input_files))))
    finally:
        # If a scratch directory was used, mirror its contents into the
        # consolidated destination and remove the scratch tree -- even when
        # an error string is being returned above.
        if 'final_output_dir' in locals() and final_output_dir != output_dir:
            # Copy all output files to final destination and kill temp dir
            for root, dirnames, filenames in os.walk(output_dir):
                if not filenames: continue
                destination = os.path.join(
                    final_output_dir,
                    os.path.relpath(root, output_dir)
                )
                try:
                    os.makedirs(destination)
                except OSError:
                    # Directory already exists
                    pass
                for filename in filenames:
                    shutil.copy(
                        os.path.join(root, filename),
                        os.path.join(destination, filename)
                    )
            shutil.rmtree(output_dir)
def counter_cmd(outfn):
    """ Builds a shell pipeline string that aggregates counter lines.
    Filters "reporter:counter:<group>,<name>,<delta>" lines, strips the
    prefix, sums deltas per (group, name) with awk, and writes the totals
    to outfn.
    outfn: file to which aggregated counter totals are written.
    Return value: shell command string.
    """
    stages = [
        "grep '^reporter:counter:'",
        "sed 's/.*://'",
        ("awk -v FS=',' "
         "'{tot[$1,\" \",$2] += $3} END "
         "{for(d in tot) {print d,tot[d]}}' > %s") % outfn,
    ]
    return ' | '.join(stages)
def step_runner_with_error_return(streaming_command, input_glob, output_dir,
                        err_dir, counter_dir, task_id, multiple_outputs,
                        separator, sort_options, memcap,
                        gzip=False, gzip_level=3, scratch=None,
                        direct_write=False, sort='sort',
                        dir_to_path=None, attempt_number=None):
    """ Runs a streaming command on a task, segregating multiple outputs.

        streaming_command: streaming command to run.
        input_glob: input files on which to run streaming command
            specified with wildcard; files in a directory are
            considered, while subdirectories are neglected. Either every
            file in input_glob should be gzip'd, or none should be.
            Gzip'd input is accommodated only for a map step!
        output_dir: directory in which to write output.
        err_dir: directory in which to write errors
        counter_dir: directory in which to write aggregated counter totals
            (see counter_cmd()).
        task_id: unique numerical identifer for task. Used to set
            environment variable mapred_task_partition and determine
            output filename.
        multiple_outputs: True if output should be divided by key before
            first instance of separator, described below.
        separator: character separating successive fields in a line from
            input_file.
        sort_options: None if no sort should be performed on input_glob;
            otherwise, performs merge sort with unix sort -m and the
            specified string of command-line parameters. EACH INPUT FILE
            SHOULD BE PRESORTED.
        memcap: maximum percent of memory to use per UNIX sort instance.
        gzip: True iff all files written should be gzipped; else False.
        gzip_level: Level of gzip compression to use, if applicable.
        scratch: where to write output before copying to output_dir. If "-"
            string, writes to temporary directory; if None, writes directly
            to output directory.
        direct_write: write intermediate files directly to final destination,
            no matter what scratch is.
        sort: path to sort executable.
        dir_to_path: path to add to PATH.
        attempt_number: attempt number of current task or None if no retries.
            MUST BE FINAL ARG to be compatible with
            execute_balanced_job_with_retries().

        Return value: None iff step runs successfully; otherwise error message.
    """
    # command_to_run is referenced in error messages; initialize so it exists
    # even if an exception fires before the command is constructed
    command_to_run = None
    try:
        # Decide between writing directly to output_dir or to a scratch
        # location; when the two differ, the finally block below copies
        # results from output_dir (scratch) to final_output_dir
        if direct_write:
            final_output_dir = output_dir
        elif scratch == '-':
            # Write to temporary directory
            final_output_dir = output_dir
            try:
                output_dir = tempfile.mkdtemp()
            except OSError as e:
                return ('Problem encountered creating temporary '
                        'scratch subdirectory: %s' % e)
        elif scratch:
            scratch = os.path.expanduser(os.path.expandvars(scratch))
            # Write to temporary directory in special location
            final_output_dir = output_dir
            try:
                os.makedirs(scratch)
            except OSError as e:
                # makedirs fails if scratch exists; only a real problem if
                # what exists is a regular file rather than a directory
                if os.path.isfile(scratch):
                    return ('Scratch directory %s is a file.' % scratch)
            except IOError as e:
                return ('Scratch directory %s is not writable: %s' % (scratch,
                                                                        e))
            try:
                output_dir = tempfile.mkdtemp(dir=scratch)
            except OSError as e:
                return ('Problem encountered creating temporary '
                        'scratch subdirectory of %s: %s' % (scratch, e))
        else:
            final_output_dir = output_dir
        output_dir = os.path.expandvars(output_dir)
        final_output_dir = os.path.expandvars(final_output_dir)
        input_files = [input_file for input_file in glob.glob(input_glob)
                        if os.path.isfile(input_file)]
        if not input_files:
            # No input!
            return None
        if sort_options is None:
            # Mapper. Check if first input file is gzip'd
            with open(input_files[0], 'rb') as binary_input_stream:
                # '\x1f\x8b' is the two-byte gzip magic number; only the
                # first input file is probed -- all files are assumed to
                # share the same compression state (see docstring)
                if binary_input_stream.read(2) == '\x1f\x8b':
                    # Magic number of gzip'd file found
                    prefix = 'gzip -cd %s' % input_glob
                else:
                    prefix = 'cat %s' % input_glob
        else:
            # Reducer. Merge sort the input glob.
            if gzip:
                # Use process substitution; each gzip'd input is decompressed
                # on the fly via <(gzip -cd ...) so sort -m sees plain text.
                # NOTE: 'string_escape' is a Python 2-only codec used to
                # render the separator safely inside the bash $'...' quoting
                prefix = '(LC_ALL=C %s -S %d %s -t$\'%s\' -m %s' % (
                        sort, memcap, sort_options,
                        separator.encode('string_escape'),
                        ' '.join(['<(gzip -cd %s)' % input_file
                                    for input_file in input_files]) + ')'
                    )
            else:
                # Reducer. Merge sort the input glob.
                prefix = 'LC_ALL=C %s -S %d %s -t$\'%s\' -m %s' % (sort,
                                                            memcap,
                                                    sort_options,
                                        separator.encode('string_escape'),
                                                                input_glob)
        # Per-attempt log/counter filenames include the attempt number so
        # retries do not clobber earlier attempts' diagnostics
        err_file = os.path.abspath(os.path.join(err_dir, (
                        ('%d.log' % task_id)
                        if attempt_number is None
                        else ('%d.%d.log'
                                % (task_id, attempt_number)
                            )
                    )
                ))
        counter_file = os.path.abspath(os.path.join(counter_dir, (
                        ('%d.counts' % task_id)
                        if attempt_number is None
                        else ('%d.%d.counts'
                                % (task_id, attempt_number)
                            )
                    )
                ))
        # Expose the task partition under both the old and new Hadoop
        # environment-variable names
        new_env = os.environ.copy()
        new_env['mapreduce_task_partition'] \
            = new_env['mapred_task_partition'] = str(task_id)
        if multiple_outputs:
            # Must grab each line of output and separate by directory.
            # stderr is tee'd to the log file and piped into the counter
            # aggregator via bash process substitution
            command_to_run \
                = prefix + ' | ' + streaming_command + (
                        ' 2> >(tee %s | %s)'
                    ) % (err_file, counter_cmd(counter_file))
            # Need bash or zsh for process substitution
            multiple_output_process = subprocess.Popen(
                    ' '.join([('set -eo pipefail; cd %s;' % dir_to_path)
                               if dir_to_path is not None
                               else 'set -eo pipefail;',
                               command_to_run]),
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=open(os.devnull, 'w'),
                    env=new_env,
                    bufsize=-1,
                    executable='/bin/bash'
                )
            # Map: output key -> open file stream for that key's directory
            task_file_streams = {}
            if gzip:
                # Map: output key -> gzip subprocess whose stdin is the stream
                task_file_stream_processes = {}
            for line in multiple_output_process.stdout:
                # Key is everything before the first separator; the rest of
                # the line (separator excluded) is what gets written
                key, _, line_to_write = line.partition(separator)
                try:
                    task_file_streams[key].write(line_to_write)
                except KeyError:
                    '''Must create new file, but another process could have
                    created the output directory.'''
                    key_dir = os.path.join(output_dir, key)
                    try:
                        os.makedirs(key_dir)
                    except OSError:
                        if not os.path.exists(key_dir):
                            return (('Streaming command "%s" failed: problem '
                                     'encountered creating output '
                                     'directory %s.') % (command_to_run,
                                                         key_dir))
                    if gzip:
                        task_file_stream_processes[key] = gzip_into(gzip_level,
                            os.path.join(key_dir, str(task_id) + '.gz'))
                        task_file_streams[key] \
                            = task_file_stream_processes[key].stdin
                    else:
                        task_file_streams[key] = open(
                                os.path.join(key_dir, str(task_id)), 'w'
                            )
                    task_file_streams[key].write(line_to_write)
            multiple_output_process_return = multiple_output_process.wait()
            if multiple_output_process_return != 0:
                # NOTE(review): on this early return, non-gzip
                # task_file_streams are never closed -- the finally block
                # below closes them only when gzip was in use (it keys off
                # task_file_stream_processes); confirm whether that leak
                # is acceptable here
                return (('Streaming command "%s" failed; exit level was %d.')
                            % (command_to_run, multiple_output_process_return))
            for key in task_file_streams:
                task_file_streams[key].close()
        else:
            # Single-output step: stream straight to one file per task
            if gzip:
                out_file = os.path.abspath(
                                os.path.join(output_dir, str(task_id) + '.gz')
                            )
                command_to_run \
                    = prefix + ' | ' + streaming_command + (
                            ' 2> >(tee %s | %s) | gzip -%d >%s'
                            % (err_file,
                                counter_cmd(counter_file),
                                gzip_level,
                                out_file)
                        )
            else:
                out_file = os.path.abspath(
                                os.path.join(output_dir, str(task_id))
                            )
                command_to_run \
                    = prefix + ' | ' + streaming_command + (
                            ' >%s 2> >(tee %s | %s)'
                            % (out_file,
                                err_file,
                                counter_cmd(counter_file)))
            try:
                # Need bash or zsh for process substitution
                subprocess.check_output(' '.join([('set -eo pipefail; cd %s;'
                                                    % dir_to_path)
                                                  if dir_to_path is not None
                                                  else 'set -eo pipefail;',
                                                  command_to_run]),
                                        shell=True,
                                        env=new_env,
                                        bufsize=-1,
                                        stderr=subprocess.STDOUT,
                                        executable='/bin/bash')
            except subprocess.CalledProcessError as e:
                return (('Streaming command "%s" failed; exit level was %d.')
                            % (command_to_run, e.returncode))
        return None
    except Exception as e:
        # Uncaught miscellaneous exception
        from traceback import format_exc
        return ('Error\n\n%s\nencountered executing task on input %s.'
                    % (format_exc(), input_glob))
    finally:
        # Probe locals() rather than flags: task_file_stream_processes only
        # exists when multiple_outputs and gzip were both in effect
        if 'task_file_stream_processes' in locals():
            for key in task_file_streams:
                task_file_streams[key].close()
            for key in task_file_stream_processes:
                task_file_stream_processes[key].wait()
        # final_output_dir differs from output_dir exactly when a scratch
        # directory was used; promote results and remove the scratch copy
        if 'final_output_dir' in locals() and final_output_dir != output_dir:
            # Copy all output files to final destination and kill temp dir
            for root, dirnames, filenames in os.walk(output_dir):
                if not filenames: continue
                destination = os.path.join(
                        final_output_dir,
                        os.path.relpath(root, output_dir)
                    )
                try:
                    os.makedirs(destination)
                except OSError:
                    # Directory already exists
                    pass
                for filename in filenames:
                    shutil.copy(
                            os.path.join(root, filename),
                            os.path.join(destination, filename)
                        )
            shutil.rmtree(output_dir)
def run_simulation(branding, json_config, force, memcap, num_processes,
separator, keep_intermediates, keep_last_output,
log, gzip=False, gzip_level=3, ipy=False,
ipcontroller_json=None, ipy_profile=None, scratch=None,
common=None, sort='sort', max_attempts=4,
direct_write=False):
""" Runs Hadoop Streaming simulation.
FUNCTIONALITY IS IDIOSYNCRATIC; it is currently confined to those
features used by Rail. Format of input JSON mirrors that used by
elastic-mapreduce-ruby. Any files input to a mapper can be gzip'd,
but inputs to a reducer currently cannot be.
branding: text file with branding to print to screen before running
job. This is where the name of a software package or ASCII art
can go.
json_config: JSON configuration file. Google Getting Started with
Amazon Elastic MapReduce for formatting information.
force: True iff all existing directories should be erased when
writing intermediates.
memcap: maximum fraction of memory to use across UNIX sort instances.
num_processes: number of subprocesses to open at once; applicable only
when not in ipy mode
separator: separator between successive fields in inputs and
intermediates.
keep_intermediates: keeps all intermediate output.
keep_last_output: keeps outputs that are unused as inputs by steps.
log: name of file in which to store messages written to stderr
gzip: True iff all files written should be gzipped; else False.
gzip_level: level of gzip compression to use, if applicable.
ipy: use iPython Parallel engines to run tasks.
ipcontroller_json: path to ipcontroller-client.json; relevant only if
ipy is True. If None, uses IPython's default location.
ipy_profile: name of IPython Parallel cluster configuration profile to
use; None if profile is not specified. In this case,
ipcontroller_json takes precedence.
scratch: scratch directory, typically local. Files are written here by
tasks before copying to final destination. If None, files are
written directly to final destination. If '-', files are written
to securely created temporary directory.
common: path to directory accessible across nodes in --ipy mode
sort: sort executable including command-line arguments
max_attempts: maximum number of times to attempt a task in ipy mode.
direct_write: always writes intermediate files directly to final
destination, even when scratch is specified
No return value.
"""
global failed
import shutil
import os
import tempfile
import glob
if log is not None:
try:
os.makedirs(os.path.dirname(log))
except OSError:
pass
try:
log_stream = open(log, 'a')
except Exception as e:
log_stream = None
else:
log_stream = None
iface = dp_iface.DooplicityInterface(branding=branding,
log_stream=log_stream)
failed = False
try:
# Using IPython Parallel?
if ipy:
try:
from ipyparallel import Client
except ImportError:
iface.fail('IPython Parallel is required to run Dooplicity\'s '
'EMR simulator in --ipy mode. Visit ipython.org to '
'download it, or simply download the Anaconda '
'distribution of Python at '
'https://store.continuum.io/cshop/anaconda/; it\'s '
'easy to install and comes with IPython and '
'several other useful packages.')
failed = True
raise
if ipy_profile:
try:
pool = Client(profile=ipy_profile)
except ValueError:
iface.fail('Cluster configuration profile "%s" was not '
'found.' % ipy_profile)
failed = True
raise
elif ipcontroller_json:
try:
pool = Client(ipcontroller_json)
except IOError:
iface.fail(
'Cannot find connection information JSON file %s.'
% ipcontroller_json
)
failed = True
raise
else:
try:
pool = Client()
except IOError:
iface.fail(
'Cannot find ipcontroller-client.json. Ensure '
'that IPython Parallel controller and engines are '
'running. If controller is running on a remote '
'machine, copy the ipcontroller-client.json file '
'from there to a local directory; then rerun this '
'script specifying the local path to '
'ipcontroller-client.json with the '
'--ipcontroller-json command-line parameter.'
)
failed = True
raise
if not pool.ids:
iface.fail(
'An IPython Parallel controller is running, but no '
'engines are connected to it.'
)
failed = True
raise RuntimeError
# Use all engines
num_processes = len(pool)
all_engines = set(pool.ids)
from tools import apply_async_with_errors
direct_view = pool[:]
# Use Dill to permit general serializing
try:
import dill
except ImportError:
raise RuntimeError(
'Dooplicity requires Dill. Install it by running '
'"pip install dill", or see the StackOverflow '
'question http://stackoverflow.com/questions/23576969/'
'how-to-install-dill-in-ipython for other leads.'
)
else:
direct_view.use_dill()
iface.status('Loading dependencies on IPython Parallel engines...')
with direct_view.sync_imports(quiet=True):
import subprocess
import glob
import hashlib
import tempfile
import shutil
import os
direct_view.push(dict(
yopen=yopen,
step_runner_with_error_return=\
step_runner_with_error_return,
presorted_tasks=presorted_tasks,
parsed_keys=parsed_keys,
counter_cmd=counter_cmd
))
iface.step('Loaded dependencies on IPython Parallel engines.')
# Get host-to-engine and engine pids relations
current_hostname = socket.gethostname()
host_map = apply_async_with_errors(
pool, all_engines, socket.gethostname,
dict_format=True
)
engine_map = defaultdict(list)
for engine in host_map:
engine_map[host_map[engine]].append(engine)
pid_map = apply_async_with_errors(
pool, all_engines, os.getpid,
dict_format=True
)
            def interrupt_engines(pool, iface):
                """ Interrupts IPython Parallel engines spanned by view

                    Taken from:
                    http://mail.scipy.org/pipermail/ipython-dev/
                    2014-March/013426.html

                    NOTE(review): as written, the subprocess.Popen calls
                    that would actually deliver the kill commands are
                    commented out below, so this function currently only
                    prints a status message and builds (unused) command
                    strings -- confirm whether that is intentional.

                    pool: IPython Parallel Client object
                    iface: instance of DooplicityInterface

                    No return value.
                """
                iface.status('Interrupting IPython Parallel engines...')
                for engine_id in pool.ids:
                    # host_map/pid_map are captured from the enclosing scope:
                    # engine id -> hostname and engine id -> engine PID
                    host = host_map[engine_id]
                    # Kill all children of the engine process; retry with a
                    # delayed second kill -9 in case any survive
                    kill_command = (
                            'CPIDS=$(pgrep -P {}); echo $CPIDS;'
                            '(sleep 33 && kill -9 $CPIDS &); '
                            'kill -9 $CPIDS'
                        ).format(pid_map[engine_id])
                    if host == socket.gethostname():
                        pass
                        # local
                        #subprocess.Popen(kill_command,
                        #        bufsize=-1, shell=True
                        #    )
                    else:
                        # remote: would be delivered over ssh
                        #subprocess.Popen(
                        #    ('ssh -oStrictHostKeyChecking=no '
                        #     '-oBatchMode=yes {} \'{}\'').format(
                        #        host, kill_command
                        #    ), bufsize=-1, shell=True
                        #)
                        pass
import random
def execute_balanced_job_with_retries(pool, iface,
task_function, task_function_args,
status_message='Tasks completed',
finish_message='Completed tasks.', max_attempts=4):
""" Executes parallel job over IPython Parallel engines.
Tasks are assigned to free engines as they become
available. If a task fails on one engine, it is retried on
another engine. If a task has been tried on all engines but
fails before max_attempts is exceeded, the step is failed.
pool: IPython Parallel Client object; all engines it spans
are used
iface: DooplicityInterface object for spewing log messages
to console
task_function: name if function to execute
task_function_args: iterable of lists, each of whose
items are task_function's arguments, WITH THE EXCEPTION
OF A SINGLE KEYWORD ARGUMENT "attempt_count". This
argument must be the final keyword argument of the
function but _excluded_ from the arguments in any item
of task_function_args.
status_message: status message about tasks completed
finish_message: message to output when all tasks are
completed
max_attempts: max number of times to attempt any given
task
No return value.
"""
global failed
random.seed(pool.ids[-1])
used_engines, free_engines = set(), set(pool.ids)
completed_tasks = 0
tasks_to_assign = deque([
[task_function_arg, i, []] for i, task_function_arg
in enumerate(task_function_args)
])
task_count = len(tasks_to_assign)
assigned_tasks, asyncresults = {}, {}
max_task_fails = 0
iface.status((' %s: '
'%d/%d | \\max_i (task_i fails): %d/%d')
% (status_message, completed_tasks,
task_count, max_task_fails,
max_attempts - 1))
while completed_tasks < task_count:
if tasks_to_assign:
task_to_assign = tasks_to_assign.popleft()
forbidden_engines = set(task_to_assign[2])
if len(forbidden_engines) >= 2:
# After two fails, do not allow reused nodes
for forbidden_engine in task_to_assign[2]:
forbidden_engines.update(
engine_map[host_map[forbidden_engine]]
)
if all_engines <= forbidden_engines:
iface.fail(('No more running IPython Parallel '
'engines and/or nodes on which '
'function-arg combo (%s, %s) has not '
'failed attempt to execute. Check the '
'IPython Parallel cluster\'s '
'integrity and resource availability.')
% (task_function, task_to_assign[0]),
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise RuntimeError
try:
assigned_engine = random.choice(
list(free_engines - forbidden_engines)
)
except IndexError:
# No engine to assign yet; add back to queue
tasks_to_assign.append(task_to_assign)
else:
asyncresults[task_to_assign[1]] = (
pool[assigned_engine].apply_async(
task_function,
*(task_to_assign[0] +
[len(task_to_assign[2])])
)
)
assigned_tasks[task_to_assign[1]] = [
task_to_assign[0], task_to_assign[1],
task_to_assign[2] + [assigned_engine]
]
used_engines.add(assigned_engine)
free_engines.remove(assigned_engine)
asyncresults_to_remove = []
for task in asyncresults:
if asyncresults[task].ready():
return_value = asyncresults[task].get()
if return_value is not None:
if max_attempts > len(assigned_tasks[task][2]):
# Add to queue for reattempt
tasks_to_assign.append(
assigned_tasks[task]
)
max_task_fails = max(
len(assigned_tasks[task][2]),
max_task_fails
)
asyncresults_to_remove.append(task)
else:
# Bail if max_attempts is saturated
iface.fail(return_value,
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise RuntimeError
else:
# Success
completed_tasks += 1
asyncresults_to_remove.append(task)
iface.status((' %s: '
'%d/%d | '
'\\max_i (task_i fails): '
'%d/%d')
% (status_message, completed_tasks,
task_count, max_task_fails,
max_attempts - 1))
assert assigned_tasks[task][-1][-1] == \
asyncresults[task].engine_id
# Free engine
used_engines.remove(
assigned_tasks[task][-1][-1]
)
free_engines.add(assigned_tasks[task][-1][-1])
for task in asyncresults_to_remove:
del asyncresults[task]
del assigned_tasks[task]
time.sleep(0.1)
assert not used_engines
iface.step(finish_message)
@contextlib.contextmanager
def cache(pool=None, file_or_archive=None, archive=True):
""" Places X.[tar.gz/tgz]#Y in dir Y, unpacked if archive
pool: IPython Parallel Client object; all engines it spans
are used
archive: file in format X.tar.gz#Y; None if nothing should
be done
Yields before deleting all files.
"""
global failed
if file_or_archive is None:
'''So with statements can be used all the time, even
when there's nothing to be archived'''
assert pool is None
yield None
return
try:
(file_or_archive, destination_filename) = \
file_or_archive.split('#')
except TypeError:
iface.fail(('%s is an invalid cache argument.'
% file_or_archive),
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise RuntimeError
file_or_archive = os.path.expanduser(
os.path.expandvars(file_or_archive)
)
file_or_archive_url = Url(file_or_archive)
if not (file_or_archive_url.is_nfs
or file_or_archive_url.is_local):
iface.fail(('The file %s is not local or on NFS.'
% file_or_archive),
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise
file_or_archive = file_or_archive_url.to_url()
file_or_archive_basename = os.path.basename(file_or_archive)
if archive:
archive_dir = destination_filename
destination_filename = os.path.basename(file_or_archive)
if not os.path.isfile(file_or_archive):
iface.fail(('The file %s does not exist and thus cannot '
'be cached.') % file_or_archive,
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise RuntimeError
iface.status('Preparing temporary directories for storing '
'%s on slave nodes.' % file_or_archive_basename)
'''Select engines to do "heavy lifting"; that is, they remove
files copied to hosts on SIGINT/SIGTERM. Do it randomly
(NO SEED) so if IWF occurs, second try will be different.
IWF = intermittent weird failure. Set random seed so temp
directory is reused if restarting job.'''
random.seed(str(sorted(pid_map.keys())))
engines_for_copying = [random.choice(list(engines))
for engines in engine_map.values()
if len(engines) > 0]
'''Herd won't work with local engines; work around this by
separating engines into two groups: local and remote.'''
remote_hostnames_for_copying = list(
set(engine_map.keys()).difference(
set([current_hostname]))
)
local_engines_for_copying = [
engine for engine in engines_for_copying
if engine in engine_map[current_hostname]
]
'''Create temporary directories on selected nodes; NOT
WINDOWS-COMPATIBLE; must be changed if porting to Windows.'''
if scratch == '-':
scratch_dir = tempfile.gettempdir()
else:
scratch_dir = scratch
temp_dir = os.path.join(
scratch_dir,
'dooplicity-%s' % ''.join(
random.choice(string.ascii_uppercase
+ string.digits)
for _ in xrange(12)
)
)
'''To accommodate any slot-local BASH variables that may be in
--scratch, echo them on all engines before adding to engine
PYTHONPATHs.'''
temp_dirs = apply_async_with_errors(pool, all_engines,
subprocess.check_output,
'echo "%s"' % temp_dir,
shell=True,
executable='/bin/bash',
message=('Error obtaining full paths of temporary '
'directories on cluster nodes. Restart IPython '
'Parallel engines and try again.'),
dict_format=True)
for engine in temp_dirs:
temp_dirs[engine].strip()
engines_with_unique_scratch, engines_to_symlink = [], []
engine_to_copy_engine = {}
for engine_for_copying in engines_for_copying:
for engine in engine_map[
host_map[engine_for_copying]
]:
engine_to_copy_engine[engine] = engine_for_copying
if (engine != engine_for_copying
and temp_dirs[engine]
!= temp_dirs[engine_for_copying]):
engines_with_unique_scratch.append(engine)
engines_to_symlink.append(engine)
elif engine == engine_for_copying:
engines_with_unique_scratch.append(engine)
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
'mkdir -p %s' % temp_dir, shell=True,
executable='/bin/bash',
message=(('Error(s) encountered creating temporary '
'directories for storing {} on slave nodes. '
'Restart IPython Parallel engines and try '
'again.').format(file_or_archive))
)
if engines_to_symlink:
'''Create symlinks to resources in case of slot-local
scratch dirs'''
source_paths, destination_paths = {}, {}
for engine_to_symlink in engines_to_symlink:
source_paths[engine_to_symlink] = temp_dirs[
engine_to_copy_engine[engine_to_symlink]
]
destination_paths[engine_to_symlink] = temp_dirs[
engine_to_symlink
]
apply_async_with_errors(pool, engines_to_symlink,
os.remove, destination_paths,
message=('Error(s) encountered removing symlinks '
'in slot-local scratch directories.'),
errors_to_ignore=['OSError'])
apply_async_with_errors(pool, engines_to_symlink,
os.symlink, source_paths, destination_paths,
message=('Error(s) encountered symlinking '
'among slot-local scratch directories.'))
# Add temp dirs to path
apply_async_with_errors(
pool, all_engines, site.addsitedir, temp_dirs,
message=(('Error(s) encountered adding temporary '
'directories for storing {} to path on '
'slave nodes.').format(
file_or_archive
))
)
'''Only foolproof way to die is by process polling. See
http://stackoverflow.com/questions/284325/
how-to-make-child-process-die-after-parent-exits for more
information.'''
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
('echo "trap \\"{{ rm -rf {temp_dir}; exit 0; }}\\" '
'SIGHUP SIGINT SIGTERM EXIT; '
'while [[ \$(ps -p \$\$ -o ppid=) -gt 1 ]]; '
'do sleep 1; done & wait" '
'>{temp_dir}/delscript.sh').format(temp_dir=temp_dir),
shell=True,
executable='/bin/bash',
message=(
'Error creating script for scheduling temporary '
'directories on cluster nodes for deletion. '
'Restart IPython Parallel engines and try again.'
)
)
apply_async_with_errors(
pool, engines_for_copying, subprocess.Popen,
'/usr/bin/env bash %s/delscript.sh' % temp_dir, shell=True,
executable='/bin/bash',
message=(
'Error scheduling temporary directories on slave '
'nodes for deletion. Restart IPython Parallel engines '
'and try again.'
)
)
iface.status('Caching %s.' % file_or_archive_basename)
destination_path = os.path.join(temp_dir, destination_filename)
try:
import herd.herd as herd
except ImportError:
'''Torrent distribution channel for compressed archive not
available.'''
apply_async_with_errors(
pool,
engines_for_copying,
shutil.copyfile,
file_or_archive, destination_path,
message=(('Error(s) encountered copying %s to '
'slave nodes. Refer to the errors above '
'-- and especially make sure $TMPDIR is not '
'out of space on any node supporting an '
'IPython Parallel engine -- before trying '
'again.')
% file_or_archive),
)
else:
if local_engines_for_copying:
apply_async_with_errors(pool,
local_engines_for_copying,
subprocess.check_output, 'cp %s %s' % (
file_or_archive, destination_path
), shell=True, executable='/bin/bash',
message=(('Error(s) encountered copying %s to '
'local filesystem. Refer to the errors '
'above -- and especially make sure '
'$TMPDIR is not out of space on any '
'node supporting an IPython Parallel '
'engine -- before trying again.')
% file_or_archive),
)
if remote_hostnames_for_copying:
herd.run_with_opts(
file_or_archive,
destination_path,
hostlist=','.join(remote_hostnames_for_copying)
)
# Extract if necessary
if archive:
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
'rm -rf {}'.format(
os.path.join(temp_dir, archive_dir)
), shell=True, executable='/bin/bash'
)
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
'mkdir -p {}'.format(
os.path.join(temp_dir, archive_dir)
), shell=True, executable='/bin/bash'
)
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
'tar xzf {} -C {}'.format(
destination_path,
os.path.join(temp_dir, archive_dir)),
shell=True,
executable='/bin/bash'
)
apply_async_with_errors(
pool, engines_for_copying, subprocess.check_output,
'rm -f {}'.format(destination_path),
shell=True, executable='/bin/bash'
)
iface.step('Cached %s.' % file_or_archive_basename)
try:
yield temp_dir
finally:
# Cleanup
apply_async_with_errors(
pool,
engines_for_copying, subprocess.check_output,
'rm -rf {}'.format(temp_dir), shell=True,
executable='/bin/bash',
message=('Error(s) encountered removing temporary '
'directories.')
)
else:
import multiprocessing
        def execute_balanced_job_with_retries(pool, iface,
            task_function, task_function_args,
            status_message='Tasks completed',
            finish_message='Completed tasks.', max_attempts=4):
            """ Executes parallel job locally with multiprocessing module.

                Tasks are added to queue if they fail, and max_attempts-1
                failures are permitted per task.

                pool: multiprocessing.Pool object
                iface: DooplicityInterface object for spewing log messages
                    to console
                task_function: name if function to execute
                task_function_args: iterable of lists, each of whose
                    items are task_function's arguments, WITH THE EXCEPTION
                    OF A SINGLE KEYWORD ARGUMENT "attempt_count". This
                    argument must be the final keyword argument of the
                    function but _excluded_ from the arguments in any item
                    of task_function_args.
                status_message: status message about tasks completed
                finish_message: message to output when all tasks are
                    completed
                max_attempts: max number of times to attempt any given
                    task

                No return value.
            """
            # Reads job_flow/step_number from the enclosing run_simulation
            # scope for failure reporting; sets the module-global "failed"
            global failed
            completed_tasks = 0
            # Each queue entry is [task args, original task index,
            # attempts made so far]; a task's return value of None means
            # success, any other value is an error message (see
            # step_runner_with_error_return)
            tasks_to_assign = deque([
                    [task_function_arg, i, 0] for i, task_function_arg
                    in enumerate(task_function_args)
                ])
            task_count = len(tasks_to_assign)
            assigned_tasks, asyncresults = {}, {}
            max_task_fails = 0
            # Fail tally is shown only when retries are enabled
            iface.status((' %s: %d/%d%s')
                            % (status_message, completed_tasks, task_count,
                                (' | \\max_i (task_i fails): %d/%d'
                                    % (max_task_fails,
                                        max_attempts - 1)
                                 if max_attempts > 1 else '')))
            while completed_tasks < task_count:
                if tasks_to_assign:
                    task_to_assign = tasks_to_assign.popleft()
                    # attempt count (entry [2]) is appended as the final
                    # positional arg, matching task_function's trailing
                    # attempt_number parameter
                    asyncresults[task_to_assign[1]] = (
                            pool.apply_async(
                                task_function,
                                args=(task_to_assign[0] +
                                      [task_to_assign[2]])
                            )
                        )
                    # Record the task with its attempt count pre-incremented
                    # so a failure can be re-queued directly
                    assigned_tasks[task_to_assign[1]] = [
                            task_to_assign[0], task_to_assign[1],
                            task_to_assign[2] + 1
                        ]
                # Deletions are deferred to avoid mutating asyncresults
                # while iterating over it
                asyncresults_to_remove = []
                for task in asyncresults:
                    if asyncresults[task].ready():
                        return_value = asyncresults[task].get()
                        if return_value is not None:
                            if max_attempts > assigned_tasks[task][2]:
                                # Add to queue for reattempt
                                tasks_to_assign.append(
                                        assigned_tasks[task]
                                    )
                                max_task_fails = max(
                                        assigned_tasks[task][2],
                                        max_task_fails
                                    )
                                asyncresults_to_remove.append(task)
                            else:
                                # Bail if max_attempts is saturated
                                iface.fail(return_value,
                                            steps=(job_flow[step_number:]
                                                   if step_number != 0 else None))
                                failed = True
                                raise RuntimeError
                        else:
                            # Success
                            completed_tasks += 1
                            asyncresults_to_remove.append(task)
                            iface.status((' %s: %d/%d%s')
                                % (status_message, completed_tasks,
                                    task_count,
                                    (' | \\max_i (task_i fails): %d/%d'
                                        % (max_task_fails,
                                            max_attempts - 1)
                                     if max_attempts > 1 else '')))
                for task in asyncresults_to_remove:
                    del asyncresults[task]
                    del assigned_tasks[task]
                # Poll at 10 Hz rather than blocking on any single result
                time.sleep(0.1)
            iface.step(finish_message)
@contextlib.contextmanager
def cache(pool=None, file_or_archive=None, archive=True):
    """ Places X.[tar.gz/tgz]#Y in dir Y, unpacked if archive

        pool: IPython Parallel Client object; all engines it spans
            are used; None if there is nothing to cache
        file_or_archive: string in the format X#Y, where X is a local
            file or archive and Y is the destination name (for
            archives, the directory to unpack into); None if nothing
            should be cached
        archive: True if X is a tar.gz archive that should be unpacked;
            False if it should be copied verbatim

        Yields the temporary directory holding the cached data (None
        when file_or_archive is None) before deleting all files.
    """
    global failed
    if file_or_archive is None:
        '''So with statements can be used all the time, even
        when there's nothing to be archived'''
        assert pool is None
        yield None
        return
    try:
        (file_or_archive, destination_filename) = \
            file_or_archive.split('#')
    except (TypeError, ValueError):
        # Fix: TypeError covers a non-string argument, but a string
        # without exactly one '#' raises ValueError on unpacking; the
        # old code caught only TypeError, so that case escaped without
        # a user-facing error message.
        iface.fail(('%s is an invalid cache argument.'
                    % file_or_archive),
                   steps=(job_flow[step_number:]
                          if step_number != 0 else None))
        failed = True
        raise
    destination_filename = os.path.expanduser(
            os.path.expandvars(destination_filename)
        )
    file_or_archive_url = Url(file_or_archive)
    if not file_or_archive_url.is_local:
        iface.fail(('The file %s is not local.'
                    % file_or_archive),
                   steps=(job_flow[step_number:]
                          if step_number != 0 else None))
        failed = True
        # Fix: was a bare "raise" with no active exception (which would
        # itself error out); raise RuntimeError explicitly, consistent
        # with the other failure branches below.
        raise RuntimeError
    file_or_archive = file_or_archive_url.to_url()
    file_or_archive_basename = os.path.basename(file_or_archive)
    if archive:
        # For archives, Y is the unpack directory; the copied archive
        # itself keeps its original basename inside the temp dir.
        archive_dir = destination_filename
        destination_filename = os.path.basename(file_or_archive)
    if not os.path.isfile(file_or_archive):
        iface.fail(('The file %s does not exist and thus cannot '
                    'be cached.') % file_or_archive,
                   steps=(job_flow[step_number:]
                          if step_number != 0 else None))
        failed = True
        raise RuntimeError
    temp_dir = make_temp_dir_and_register_cleanup(
                    None if scratch == '-'
                    else scratch
                )
    iface.status('Caching %s.' % file_or_archive_basename)
    destination_path = os.path.join(temp_dir, destination_filename)
    shutil.copyfile(file_or_archive, destination_path)
    # Extract if necessary
    if archive:
        try:
            os.makedirs(os.path.join(temp_dir, archive_dir))
        except OSError:
            # Hopefully, directory is already created
            pass
        import subprocess
        try:
            subprocess.check_output(
                'tar xzf {} -C {}'.format(
                    destination_path,
                    os.path.join(temp_dir, archive_dir)
                ),
                shell=True,
                bufsize=-1,
                stderr=subprocess.STDOUT,
                executable='/bin/bash'
            )
        except subprocess.CalledProcessError as e:
            iface.fail(
                ('Decompression of archive failed; exit code '
                 'was %s, and reason was "%s".') % (
                    e.returncode,
                    e.output.strip()
                ),
                steps=(job_flow[step_number:]
                       if step_number != 0 else None)
            )
            failed = True
            raise RuntimeError
        # The copied archive is no longer needed once unpacked
        try:
            os.remove(destination_path)
        except OSError:
            pass
    iface.step('Cached %s.' % file_or_archive_basename)
    try:
        yield temp_dir
    finally:
        # Cleanup
        shutil.rmtree(temp_dir, ignore_errors=True)
# Serialize JSON configuration
if json_config is not None:
with open(json_config) as json_stream:
full_payload = json.load(json_stream)
else:
full_payload = json.load(sys.stdin)
try:
job_flow = full_payload['Steps']
except KeyError:
iface.fail(
'Input JSON not in proper format. Ensure that the JSON '
'object has a Steps key.'
)
failed = True
raise
step_count = len(job_flow)
steps = OrderedDict()
try:
for step in job_flow:
step_args = {}
j = 0
j_max = len(step['HadoopJarStep']['Args'])
while j < j_max:
arg_name = step['HadoopJarStep']['Args'][j][1:].strip()
if arg_name == 'D':
D_arg = step['HadoopJarStep']['Args'][j+1].split('=')
if D_arg[0] in ['mapred.reduce.tasks',
'mapreduce.job.reduces']:
step_args['task_count'] = int(D_arg[1])
elif D_arg[0] \
in ['mapred.text.key.partitioner.options',
'mapreduce.partition.keypartitioner.options']:
step_args['partition_options'] = D_arg[1]
elif D_arg[0] \
== 'stream.num.map.output.key.fields':
step_args['key_fields'] = int(D_arg[1])
# Default sort is now across key fields
if 'sort_options' not in step_args:
step_args['sort_options'] = (
'-k1,%d' % step_args['key_fields']
)
elif D_arg[0] \
in ['mapred.text.key.comparator.options',
'mapreduce.partition.keycomparator.options']:
step_args['sort_options'] = D_arg[1]
j += 2
elif arg_name == 'input':
try:
step_args['input'] = ','.join(
[step['HadoopJarStep']['Args'][j+1],
step_args['input']]
)
except KeyError:
step_args['input'] \
= step['HadoopJarStep']['Args'][j+1]
j += 2
elif arg_name == 'multiOutput':
step_args['multiple_outputs'] = True
j += 1
elif arg_name == 'lazyOutput':
# Do nothing
j += 1
else:
step_args[step['HadoopJarStep']['Args'][j][1:]] \
= step['HadoopJarStep']['Args'][j+1].strip()
j += 2
# Set default options
if 'key_fields' not in step_args:
step_args['key_fields'] = 1
if 'partition_options' not in step_args:
step_args['partition_options'] = '-k1'
if 'sort_options' not in step_args:
step_args['sort_options'] = '-k1'
steps[step['Name']] = step_args
except (KeyError, IndexError):
iface.fail(
'JSON file not in proper format. Ensure '
'that each step object has a HadoopJarStep '
'object with an Args array and a Name string.'
)
failed = True
raise
'''Check steps for required Hadoop streaming command-line parameters
and for whether outputs are writable.'''
missing_data = defaultdict(list)
bad_output_data = []
required_data = set(['input', 'output', 'mapper', 'reducer'])
identity_mappers \
= set(['cat', 'org.apache.hadoop.mapred.lib.IdentityMapper'])
identity_reducers \
= set(['cat', 'org.apache.hadoop.mapred.lib.IdentityReducer'])
errors = []
for step in steps:
step_data = steps[step]
for required_parameter in required_data:
if required_parameter not in step_data:
missing_data[step].append('-' + required_parameter)
elif not force and required_parameter == 'output' \
and os.path.exists(step_data['output']):
bad_output_data.append(step)
try:
if step_data['inputformat'] \
== 'org.apache.hadoop.mapred.lib.NLineInputFormat' \
and os.path.isdir(step_data['input']):
errors.append(('In step "%s", input should be a single '
'file if using NLineFormat, but '
'"%s" was specified.') % (
step,
step_data['input']
))
except KeyError:
pass
if missing_data:
errors.extend(['Step "%s" is missing required parameter(s) "%s".' %
(step, ', '.join(missing_data[step]))
for step in missing_data])
if bad_output_data:
errors.extend(['Output directory name "%s" of step "%s" already '
'exists as a file or directory, and --force was '
'not invoked to permit overwriting it.'
% (steps[step]['output'], step)
for step in bad_output_data])
if errors:
iface.fail('\n'.join([(('%d) ' % (i+1)) + error)
if len(errors) > 1 else errors[0]
for i, error in enumerate(errors)]))
failed = True
raise RuntimeError
if not keep_intermediates:
# Create schedule for deleting intermediates
marked_intermediates = set()
all_outputs = set()
post_step_cleanups = defaultdict(list)
'''Traverse steps in reverse order to obtain when an intermediate
directory is last used.'''
for i, step in enumerate(
OrderedDict(reversed(steps.items()[1:]))
):
step_inputs = [os.path.abspath(step_input) for step_input in
steps[step]['input'].split(',')]
all_outputs.add(os.path.abspath(steps[step]['output']))
for step_input in step_inputs:
if step_input not in marked_intermediates:
post_step_cleanups[step_count - i - 1].append(
step_input
)
marked_intermediates.add(step_input)
# Create intermediate directories
for step in steps:
try:
shutil.rmtree(steps[step]['output'])
except OSError:
# May be a file then
try:
os.remove(steps[step]['output'])
except OSError:
# Just didn't exist
pass
try:
os.makedirs(steps[step]['output'])
except OSError:
iface.fail(('Problem encountered trying to create '
'directory %s.') % steps[step]['output'])
failed = True
raise
for dr in ['dp.map.log', 'dp.map.counters', 'dp.reduce.log', 'dp.reduce.counters']:
full_dir = os.path.join(steps[step]['output'], dr)
try:
os.makedirs(full_dir)
except OSError:
iface.fail(('Problem encountered trying to create '
'directory %s.') % dr)
failed = True
raise
# Run steps
step_number = 0
total_steps = len(steps)
if not ipy:
# Pool's only for if we're in local mode
try:
pool = multiprocessing.Pool(num_processes, init_worker,
maxtasksperchild=5)
except Exception:
# maxtasksperchild doesn't work, somehow? Supported only in 2.7
pool = multiprocessing.Pool(num_processes, init_worker)
for step in steps:
step_data = steps[step]
step_inputs = []
# Handle multiple input files/directories
for input_file_or_dir in step_data['input'].split(','):
if os.path.isfile(input_file_or_dir):
step_inputs.append(input_file_or_dir)
elif os.path.isdir(input_file_or_dir):
step_inputs.extend(
glob.glob(os.path.join(input_file_or_dir, '*'))
)
# TODO: support cacheArchives and cacheFile simultaneously
if 'archives' in step_data or 'cacheArchive' in step_data:
# Prefer archives to cacheArchives
try:
to_cache = step_data['archives']
except KeyError:
to_cache = step_data['cacheArchive']
elif 'files' in step_data or 'cacheFile' in step_data:
try:
to_cache = step_data['files']
except KeyError:
to_cache = step_data['cacheFile']
else:
to_cache = None
with cache(pool if to_cache else None, to_cache,
True if 'archives'
in step_data else False) as dir_to_path:
if step_data['mapper'] not in identity_mappers:
# Perform map step only if mapper isn't identity
try:
if ('multiple_outputs' in step_data) or \
step_data['outputformat'] \
in ['edu.jhu.cs.MultipleOutputFormat',
'edu.jhu.cs.'
'MultipleIndexedLzoTextOutputFormat']:
multiple_outputs = True
else:
multiple_outputs = False
except KeyError:
# No multiple outputs
multiple_outputs = False
try:
if step_data['inputformat'] \
== 'org.apache.hadoop.mapred.lib.NLineInputFormat':
nline_input = True
else:
nline_input = False
except KeyError:
# Don't assign one line per mapper
nline_input = False
output_dir = step_data['output']
if step_data['reducer'] not in identity_reducers:
'''There's a reducer parameter, so input to reducer is
output of mapper. Change output directory.'''
output_dir = os.path.join(output_dir, 'dp.map')
try:
os.makedirs(output_dir)
except OSError:
if os.path.exists(output_dir):
pass
else:
iface.fail(('Problem encountered trying to '
'create directory %s.')
% output_dir,
steps=(job_flow[step_number:]
if step_number != 0
else None))
failed = True
raise
try:
if ('multiple_outputs' in step_data) or \
step_data['outputformat'] \
in ['edu.jhu.cs.MultipleOutputFormat',
'edu.jhu.cs.'
'MultipleIndexedLzoTextOutputFormat']:
# Multiple outputs apply AFTER reduce step
multiple_outputs = False
except KeyError:
# No outputformat
pass
if nline_input:
# Create temporary input files
split_input_dir = make_temp_dir(common)
input_files = []
try:
with open(step_inputs[0]) as nline_stream:
for i, line in enumerate(nline_stream):
offset = str(i)
input_files.append(os.path.join(
split_input_dir,
offset
)
)
with open(input_files[-1], 'w') \
as output_stream:
print >>output_stream, separator.join([
offset, line
])
except IndexError:
raise RuntimeError('No NLineInputFormat input to '
'step "%s".' % step)
else:
input_files = [input_file for input_file in step_inputs
if os.path.isfile(input_file)]
input_file_count = len(input_files)
if not input_file_count:
iface.step('No input found; skipping step.')
err_dir = os.path.join(steps[step]['output'], 'dp.map.log')
counter_dir = os.path.join(steps[step]['output'], 'dp.map.counters')
iface.step('Step %d/%d: %s' %
(step_number + 1, total_steps, step))
iface.status(' Starting step runner...')
execute_balanced_job_with_retries(
pool, iface, step_runner_with_error_return,
[[step_data['mapper'], input_file,
output_dir, err_dir,
counter_dir,
i, multiple_outputs,
separator, None, None, gzip,
gzip_level, scratch, direct_write,
sort, dir_to_path]
for i, input_file
in enumerate(input_files)
if os.path.isfile(input_file)],
status_message='Tasks completed',
finish_message=(
' Completed %s.'
% dp_iface.inflected(input_file_count, 'task')
),
max_attempts=max_attempts
)
# Adjust step inputs in case a reducer follows
step_inputs = [input_file for input_file
in glob.glob(output_dir)
if os.path.isfile(input_file)]
if step_data['reducer'] not in identity_reducers:
'''Determine whether to use "mod" partitioner that uses
product of key fields % reducer count to assign tasks.'''
try:
if (step_data['partitioner']
== 'edu.jhu.cs.ModPartitioner'):
mod_partition = True
else:
mod_partition = False
except KeyError:
# Default to no mod partition
mod_partition = False
# Partition inputs into tasks, presorting
output_dir = os.path.join(step_data['output'], 'dp.tasks')
try:
os.makedirs(output_dir)
except OSError:
if os.path.exists(output_dir):
pass
else:
iface.fail(('Problem encountered trying to '
'create directory %s.') % output_dir,
steps=(job_flow[step_number:]
if step_number != 0 else None))
failed = True
raise
input_files = [input_file for input_file in step_inputs
if os.path.isfile(input_file)]
input_file_count = len(input_files)
if input_file_count > num_processes:
file_count_per_group = input_file_count / num_processes
input_file_groups = [
input_files[k:k+file_count_per_group]
for k in
xrange(0, input_file_count,
file_count_per_group)
]
else:
input_file_groups = [[input_file]
for input_file in input_files]
input_file_group_count = len(input_file_groups)
iface.step('Step %d/%d: %s'
% (step_number + 1, total_steps, step))
execute_balanced_job_with_retries(
pool, iface, presorted_tasks,
[[input_file_group, i,
step_data['sort_options'], output_dir,
step_data['key_fields'], separator,
step_data['partition_options'],
step_data['task_count'], memcap, gzip,
gzip_level, scratch, direct_write,
sort, mod_partition]
for i, input_file_group
in enumerate(input_file_groups)],
status_message='Inputs partitioned',
finish_message=(
' Partitioned %s into tasks.'
% dp_iface.inflected(input_file_group_count,
'input')
),
max_attempts=max_attempts
)
iface.status(' Starting step runner...')
input_files = [os.path.join(output_dir, '%d.*' % i)
for i in xrange(step_data['task_count'])]
# Filter out bad globs
input_files = [input_file for input_file in input_files
if glob.glob(input_file)]
input_file_count = len(input_files)
try:
multiple_outputs = (
('multiple_outputs' in step_data) or
step_data['outputformat']
in ['edu.jhu.cs.MultipleOutputFormat',
'edu.jhu.cs.'
'MultipleIndexedLzoTextOutputFormat']
)
except KeyError:
multiple_outputs = False
err_dir = os.path.join(
steps[step]['output'],
'dp.reduce.log'
)
counter_dir = os.path.join(
steps[step]['output'],
'dp.reduce.counters'
)
output_dir = step_data['output']
return_values = []
execute_balanced_job_with_retries(
pool, iface, step_runner_with_error_return,
[[step_data['reducer'], input_file, output_dir,
err_dir, counter_dir, i, multiple_outputs, separator,
step_data['sort_options'], memcap, gzip,
gzip_level, scratch, direct_write,
sort, dir_to_path]
for i, input_file
in enumerate(input_files)],
status_message='Tasks completed',
finish_message=(
' Completed %s.'
% dp_iface.inflected(input_file_count, 'task')
),
max_attempts=max_attempts
)
# Really close open file handles in PyPy
gc.collect()
if not keep_intermediates:
iface.status(' Deleting temporary files...')
# Kill NLineInput files if they're there
try:
shutil.rmtree(split_input_dir)
except (NameError, OSError):
pass
try:
# Intermediate map output should be deleted if it exists
shutil.rmtree(
os.path.join(
step_data['output'], 'dp.map'
)
)
except OSError:
pass
try:
# Remove dp.tasks directory
shutil.rmtree(
os.path.join(step_data['output'], 'dp.tasks')
)
except OSError:
pass
for to_remove in post_step_cleanups[step_number]:
if to_remove not in all_outputs:
'''Remove directory only if it's an -output of some
step and an -input of another step.'''
continue
if os.path.isfile(to_remove):
try:
os.remove(to_remove)
except OSError:
pass
elif os.path.isdir(to_remove):
for detritus in glob.iglob(
os.path.join(to_remove, '*')
):
if detritus[-4:] != '.log':
try:
os.remove(detritus)
except OSError:
try:
shutil.rmtree(detritus)
except OSError:
pass
if not os.listdir(to_remove):
try:
os.rmdir(to_remove)
except OSError:
pass
iface.step(' Deleted temporary files.')
step_number += 1
if not ipy:
pool.close()
if not keep_last_output and not keep_intermediates:
try:
os.remove(step_data['output'])
except OSError:
# Not a file; treat as dir
for detritus in glob.iglob(
os.path.join(step_data['output'], '*')
):
if detritus[-4:] != '.log':
try:
os.remove(detritus)
except OSError:
# Not a file
try:
shutil.rmtree(detritus)
except OSError:
# Phantom; maybe user deleted it
pass
if not keep_intermediates:
for step in steps:
step_data = steps[step]
try:
os.remove(step_data['output'])
except OSError:
# Not a file; treat as dir
for detritus in glob.iglob(
os.path.join(step_data['output'], '*')
):
if detritus[-4:] != '.log':
try:
os.remove(detritus)
except OSError:
# Not a file
try:
shutil.rmtree(detritus)
except OSError:
# Phantom; maybe user deleted it
pass
iface.done()
except (Exception, GeneratorExit):
# GeneratorExit added just in case this happens on modifying code
if 'interrupt_engines' in locals():
interrupt_engines(pool, iface)
if not failed:
time.sleep(0.2)
if 'step_number' in locals():
iface.fail(steps=(job_flow[step_number:]
if step_number != 0 else None))
else:
iface.fail()
if 'split_input_dir' in locals():
'''raise below refers to last exception, so can't try-except
OSError here'''
if os.path.isdir(split_input_dir):
shutil.rmtree(split_input_dir)
raise
except (KeyboardInterrupt, SystemExit):
if 'interrupt_engines' in locals():
interrupt_engines(pool, iface)
if 'step_number' in locals():
iface.fail(steps=(job_flow[step_number:]
if step_number != 0 else None),
opener='*****Terminated*****')
else:
iface.fail()
if 'pool' in locals() and 'interrupt_engines' not in locals():
pool.terminate()
pool.join()
if 'split_input_dir' in locals():
try:
shutil.rmtree(split_input_dir)
except OSError:
pass
if __name__ == '__main__':
    # Assemble the command-line parser from this module's argument
    # definitions plus those of the interface module.
    cli_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    add_args(cli_parser)
    dp_iface.add_args(cli_parser)
    cli_args = cli_parser.parse_args(sys.argv[1:])
    # Hand every parsed option to the simulator; the positional order
    # here must match run_simulation's signature.
    run_simulation(cli_args.branding, cli_args.json_config, cli_args.force,
                   cli_args.memcap, cli_args.num_processes,
                   cli_args.separator, cli_args.keep_intermediates,
                   cli_args.keep_last_output, cli_args.log,
                   cli_args.gzip_outputs, cli_args.gzip_level,
                   cli_args.ipy, cli_args.ipcontroller_json,
                   cli_args.ipy_profile, cli_args.scratch, cli_args.common,
                   cli_args.sort, cli_args.max_attempts,
                   cli_args.direct_write)
| StarcoderdataPython |
1781413 | <gh_stars>0
import cv2
import numpy as np
def filterCond(cond):
    """Return an N x 2 array of (row, col) indices where cond > 0.

    Kept as a module-level callable named filterCond for compatibility
    with the original lambda definition.
    """
    return np.transpose(np.nonzero(cond > 0))

# Load the original groundtruth
groundtruth = cv2.imread('data/2018_IEEE_GRSS_DFC_GT_TR.tif', cv2.IMREAD_GRAYSCALE)
# NOTE(review): OpenCV ignores fx/fy when an explicit dsize is given, so the
# output is exactly (2384, 601); fx/fy are retained only as documentation.
y = cv2.resize(groundtruth, (2384, 601), fx=0.5, fy=0.5,
               interpolation=cv2.INTER_NEAREST)

# Save groundtruth for further processing
cv2.imwrite('data/2018_IEEE_GRSS_DFC_GT_TR_Downscaled.tif', y)

# Throw out training samples (magic numbers are 11 and 76)
# 1 => not used in training
trainingSamples = cv2.imread('data/trainSamples.png', cv2.IMREAD_GRAYSCALE) == 11
# 1 => not used in training and not invalid
validationImage = (y * trainingSamples[601:, 596:(596+2384)] > 0)
cv2.imwrite('data/validation-image.tif', validationImage * 255)
cv2.imwrite('data/validation-image.png', validationImage * 255)

# Save labels synchronized with cleaned data
np.savetxt('data/2018_IEEE_GRSS_DFC_GT_TR_Downscaled.txt',
           y[np.nonzero(validationImage)], '%d')

# Save to disk
validationImage = filterCond(validationImage)
np.savetxt('data/validation-image.txt', validationImage, '%d')

# Fix 1: np.int was removed in NumPy 1.24; use the builtin int as the
# loadtxt dtype.
# Fix 2: parenthesize the conditional expression. The old code,
#   print("Successfull? " + 'Yes!' if ok else 'No...')
# bound '+' tighter than the ternary, so a failed round-trip printed only
# 'No...' without the prefix (the message typo is fixed along the way).
roundtrip_ok = np.array_equal(np.loadtxt('data/validation-image.txt', int),
                              validationImage)
print("Successful? " + ('Yes!' if roundtrip_ok else 'No...'))
1722639 | import pandas as pd
import pytest
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
)
from transformers.pipelines import pipeline
from ray.ml.preprocessor import Preprocessor
from ray.ml.predictors.integrations.huggingface import HuggingFacePredictor
# Prompts fed to the predictor in the test below; presumably the column name
# is what the pipeline reads as its text input — confirm against the
# HuggingFacePredictor implementation.
prompts = pd.DataFrame(
    ["Complete me", "And me", "Please complete"], columns=["sentences"]
)
# We are only testing Casual Language Modeling here.
# Tiny checkpoints keep model construction cheap for the test.
model_checkpoint = "sshleifer/tiny-gpt2"
tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer"
class DummyPreprocessor(Preprocessor):
    """Minimal Preprocessor stub that records whether it was invoked."""

    def transform_batch(self, df):
        # Marker attribute the test checks to verify the preprocessor ran.
        self._batch_transformed = True
        return df
@pytest.mark.parametrize("preprocessor", [True, False])
def test_predict(preprocessor, tmpdir):
    """Smoke-test HuggingFacePredictor.predict with and without a preprocessor."""
    # Translate the boolean parametrization into an actual preprocessor object.
    preprocessor = DummyPreprocessor() if preprocessor else None

    config = AutoConfig.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
    text_pipeline = pipeline(
        task="text-generation",
        model=AutoModelForCausalLM.from_config(config),
        tokenizer=tokenizer,
    )
    predictor = HuggingFacePredictor(
        pipeline=text_pipeline,
        preprocessor=preprocessor,
    )

    predictions = predictor.predict(prompts)

    # One generated row per input prompt.
    assert len(predictions) == 3
    if preprocessor:
        # The dummy preprocessor flags itself when it is actually invoked.
        assert hasattr(predictor.preprocessor, "_batch_transformed")
| StarcoderdataPython |
1747776 | #!/usr/bin/env python3
import unittest
import os
from rkd.api.testing import BasicTestingCase
from rkd.test import TestTask
from rkd.contract import ArgumentEnv
CURRENT_SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
class TestTaskInterface(BasicTestingCase):
    """Tests for helper methods of rkd's TaskInterface, exercised through
    the TestTask fixture class."""

    def test_table(self):
        """Simply test table() - the table is expected to use an external library, it is expected that external library
        will be tested already, but we need to check there if the interface matches
        """
        task = TestTask()
        out = task.table(
            header=['Activist', 'Born date'],
            body=[
                ['<NAME>', '1814'],
                ['<NAME>', '1853'],
                ['<NAME>', '1896'],
                ['<NAME>', '1873']
            ]
        )

        # Spot-check the rendered output: a border line, both header labels
        # and the row data must all appear somewhere in the string.
        self.assertIn('---------------------------------', out)
        self.assertIn('<NAME>', out)
        self.assertIn('<NAME>', out)
        self.assertIn('Activist', out)
        self.assertIn('Born date', out)

    def test_should_fork(self):
        """should_fork() should be True exactly when a become-user is set."""
        task = TestTask()

        with self.subTest('Will fork'):
            # Monkeypatch the accessor to simulate a configured user.
            task.get_become_as = lambda: 'root'
            self.assertTrue(task.should_fork())

        with self.subTest('Will not fork - no user specified'):
            task.get_become_as = lambda: ''
            self.assertFalse(task.should_fork())

    def test_internal_normalized_get_declared_envs_maps_primitive_types_into_class_instances(self):
        """Plain string defaults are wrapped into ArgumentEnv instances;
        existing ArgumentEnv declarations pass through unchanged."""
        task = TestTask()
        task.get_declared_envs = lambda: {
            'SOME_ENV': 'primitive',
            'SOME_OTHER_ENV': ArgumentEnv(name='SOME_OTHER_ENV', switch='--cmd', default='not primitive')
        }

        normalized = task.internal_normalized_get_declared_envs()

        with self.subTest('Verify converted string -> ArgumentEnv'):
            self.assertTrue(isinstance(normalized['SOME_ENV'], ArgumentEnv))
            self.assertEqual('SOME_ENV', normalized['SOME_ENV'].name)
            # The switch is expected to be derived from the env name
            # (lowercased, dashes instead of underscores).
            self.assertEqual('--some-env', normalized['SOME_ENV'].switch)
            self.assertEqual('primitive', normalized['SOME_ENV'].default)

        with self.subTest('Verify not converted'):
            self.assertTrue(isinstance(normalized['SOME_OTHER_ENV'], ArgumentEnv))

    def test_internal_getenv_finds_mapped_environment_variable_by_switch_name(self):
        """Lookup by command-line switch returns the declared default when
        the variable is absent from the supplied environment."""
        task = TestTask()
        task.get_declared_envs = lambda: {
            'SOME_OTHER_ENV': ArgumentEnv(name='SOME_OTHER_ENV', switch='--cmd', default='not primitive')
        }

        self.assertEqual('not primitive', task.internal_getenv('', envs={}, switch='--cmd'))

    def test_internal_getenv_finds_envronment_variable_by_its_not_mapped_name(self):
        """Lookup by the plain variable name works regardless of which
        switch (valid, matching or bogus) is passed alongside it."""
        task = TestTask()
        task.get_declared_envs = lambda: {
            'SOME_ENV': 'primitive'
        }

        with self.subTest('Should find by first argument SOME_ENV'):
            self.assertEqual('primitive', task.internal_getenv('SOME_ENV', envs={}, switch=''))

        with self.subTest('Should find by first argument, even if will not find for valid switch'):
            self.assertEqual('primitive', task.internal_getenv('SOME_ENV', envs={}, switch='--some-env'))

        with self.subTest('Should find by first argument, even if will not find for invalid switch'):
            self.assertEqual('primitive', task.internal_getenv('SOME_ENV', envs={}, switch='--some-non-existing'))
| StarcoderdataPython |
126140 | <filename>fr/c.py
# coding: utf-8
import sys
sys.path.append(".")
from workshop.fr.c import *
DIVULGUER_MOT_SECRET = VRAI
"""
- 'motAuHasard': un mot aléatoire ;
- 'suggestion'; le contenu du champ texte du mot secret ;
utilisé seulement en mode 'dev'.
Retourne 'suggestion' si non vide, 'motAuHasard' sinon.
"""
def choisirMot(suggestion,motAuHasard):
if suggestion:
return suggestion
else:
return motAuHasard
go(globals())
| StarcoderdataPython |
4832857 | <gh_stars>10-100
from enum import Enum
class CoreMode(Enum):
    """Operating modes controlling whom the bot responds to.

    The enum values are the literal mode strings used elsewhere.
    """
    up = 'up'  # respond to everyone (within checks)
    maintenance = 'maintenance'  # respond to owners
    down = 'down'  # respond to no one
    boot = 'boot'  # respond to no one until the next boot
1755948 | #!/usr/env/python
# -*- coding: utf-8 -*-
'''
Script that processes a dataset of rated articles and checks each article's
talk page in order to verify how many templates with importance ratings are
on their talk pages.
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import re
import logging
import MySQLdb
import pywikibot
from pywikibot.pagegenerators import PreloadingGenerator, PagesFromTitlesGenerator
import mwparserfromhell as mwp
class TalkPage:
    """Record for one article: its page ID, title, and the count of
    importance ratings found on its talk page."""

    def __init__(self, page_id):
        self.page_id = page_id    # page ID string as read from the dataset
        self.page_title = ''      # resolved later from the replica database
        self.num_ratings = 0      # incremented while parsing talk page templates
class TalkpageProcessor:
    """Counts WikiProject importance ratings on article talk pages.

    Page IDs from an input TSV are resolved to titles through the
    Wikipedia replica database, the corresponding talk pages are fetched
    with pywikibot, and every WikiProject template found on them is
    parsed for valid importance ratings.
    """

    def __init__(self):
        ## Language code of the Wikipedia edition we're processing for
        self.lang = 'en'
        ## Do 10 at a time in case the talk page is huge
        self.slice_size = 10

        ## Replica database connection settings (Wikimedia Toolforge)
        self.db_conf = "~/replica.my.cnf"
        self.db_server = "enwiki.labsdb"
        self.db_name = "enwiki_p"
        self.db_conn = None
        self.db_cursor = None

        ## Names of templates with a "priority" parameter.
        self.priority_templates = []

    def db_connect(self):
        '''
        Connect to the database. Returns True if successful.
        '''
        self.db_conn = None
        self.db_cursor = None
        try:
            self.db_conn = MySQLdb.connect(
                db=self.db_name,
                host=self.db_server,
                read_default_file=os.path.expanduser(self.db_conf))
            self.db_cursor = self.db_conn.cursor(MySQLdb.cursors.SSDictCursor)
        except MySQLdb.Error as e:
            logging.error('Unable to connect to database')
            # Fix: exception objects are not subscriptable on Python 3
            # (the old code did e[0]/e[1], which raises TypeError inside
            # the handler); log the exception object itself.
            logging.error('%s', e)

        # Fix: require both the connection and the cursor, so a failure
        # while creating the cursor is not reported as success.
        if self.db_conn and self.db_cursor:
            return(True)
        return(False)

    def db_disconnect(self):
        '''Close our database connections.'''
        try:
            self.db_cursor.close()
            self.db_conn.close()
        except Exception:
            # Fix: narrowed from a bare "except:" so KeyboardInterrupt
            # and SystemExit are no longer swallowed here; close errors
            # remain deliberately ignored.
            pass
        return()

    def process_template(self, template):
        '''
        Process the template and return a list of any valid ratings
        found in it.

        :param template: template node (from mwparserfromhell)
        '''

        ## Valid importance ratings
        VALID_RATINGS = set(['top', 'high', 'mid', 'low'])

        ## There are several cases where an importance rating might be found:
        ##
        ## 1: parameter named importance
        ## 2: sub-project importance parameters (e.g. WikiProject Africa
        ##    uses a "Djibouti-importance" parameter)
        ## 3: sub-project priority parameters (e.g. WikiProject Biography
        ##    uses a "filmbio-priority" parameter)
        ##
        ## Note that some WikiProjects use a "priority" parameter. We will
        ## ignore that parameter as we have yet to see an example where it
        ## results in a subsequent categorization of the article. As we're
        ## interested in knowing about them, we'll store the template names
        ## and write them out at the end.

        ratings = []
        if template.has('priority'):
            self.priority_templates.append(str(template.name.strip_code()))
        elif template.has('importance'):
            rating = str(template.get('importance').value.strip_code()).strip().lower()
            if rating in VALID_RATINGS:
                ratings.append(rating)

        for param in template.params:
            p_name = str(param.name.strip_code()).strip().lower()
            ## This regex is deliberately liberal because some projects
            ## use things like "&" in the parameter name.
            if re.search(r'.+-(priority|importance)$', p_name):
                rating = str(param.value.strip_code()).strip().lower()
                if rating in VALID_RATINGS:
                    ratings.append(rating)

        return(ratings)

    def check_talkpages(self, input_filename, output_filename,
                        id_col_idx):
        '''
        Go through all the pages in the given dataset of unanimously rated
        articles and check their talk pages in order to establish the number
        of actual importance ratings they have.

        :param input_filename: path to the TSV dataset
        :type input_filename: str
        :param output_filename: path to output TSV dataset
        :type output_filename: str
        :param id_col_idx: zero-based index of the page ID column
        :type id_col_idx: int
        '''

        ## SQL query to get page titles based on page IDs
        title_query = '''SELECT page_id, page_title
                         FROM page
                         WHERE page_id IN ({idlist})'''

        # Instantiating the site primes pywikibot's default connection;
        # NOTE(review): it is not referenced again below — confirm the
        # generators pick it up implicitly before removing it.
        site = pywikibot.Site(self.lang)

        ## Mapping page IDs and titles to talk page data
        id_page_map = {}
        title_page_map = {}

        ## read in the dataset
        with open(input_filename, 'r', encoding='utf-8') as infile:
            infile.readline()  # skip header
            for line in infile:
                cols = line.rstrip('\n').split('\t')
                page_id = cols[id_col_idx]
                id_page_map[page_id] = TalkPage(page_id)

        ## find the current page title of all the pages
        ## (ideally none of them should have incorrect page IDs)
        if not self.db_connect():
            logging.error('unable to connect to database')
            return()

        pageids = list(id_page_map.keys())
        i = 0
        while i < len(pageids):
            # Resolve titles in slices to keep the IN (...) list bounded
            subset = pageids[i:i+self.slice_size]
            self.db_cursor.execute(title_query.format(
                idlist=','.join(subset)))
            for row in self.db_cursor.fetchall():
                page_id = str(row['page_id'])
                page_title = row['page_title'].decode('utf-8').replace('_', ' ')

                id_page_map[page_id].page_title = page_title
                title_page_map[page_title] = id_page_map[page_id]

            # ok, iterate
            i += self.slice_size

        self.db_disconnect()

        talkpage_titles = ["Talk:{}".format(title)
                           for title in title_page_map.keys()]

        for talkpage in PreloadingGenerator(
                PagesFromTitlesGenerator(talkpage_titles),
                step=self.slice_size):
            logging.info('processing {}'.format(talkpage.title()))

            try:
                content = talkpage.get()
            except pywikibot.exceptions.IsRedirectPage:
                logging.warning('{} is a redirect'.format(talkpage.title()))
                continue

            ## The templates are at the top of the page, so if it's a long
            ## page, truncate to speed up parsing.
            if len(content) > 8*1024:
                content = content[:8*1024]

            parsed_page = mwp.parse(content)
            for template in parsed_page.filter_templates(recursive=True):
                ratings = self.process_template(template)
                ## Sanity check: a single template should not carry two
                ## different ratings (set() replaces the old {k:1 ...} idiom)
                if len(set(ratings)) > 1:
                    logging.warning('{} has non-unanimous importance ratings'.format(talkpage.title()))
                else:
                    title_page_map[talkpage.title(withNamespace=False)].num_ratings += len(ratings)

        ## Write out all pages with priority templates, if any
        if self.priority_templates:
            print('We found the following templates with a "priority" parameter')
            for template in self.priority_templates:
                print('* {}'.format(template))
            print('')

        ## Write out a dataset of page ID and num ratings
        with open(output_filename, 'w', encoding='utf-8') as outfile:
            outfile.write('talk_page_id\ttalk_page_title\tnum_wpratings\n')
            for (page_id, page_data) in id_page_map.items():
                outfile.write('{0.page_id}\t{0.page_title}\t{0.num_ratings}\n'.format(page_data))

        ## ok, done
        return()
def main():
    """Parse command-line arguments and run the talk page check."""
    import argparse

    arg_parser = argparse.ArgumentParser(
        description="script to check talk pages for importance ratings"
    )
    arg_parser.add_argument("input_filename", type=str,
                            help="path to the input TSV dataset")
    arg_parser.add_argument("output_filename", type=str,
                            help="path to the output TSV extended dataset")
    arg_parser.add_argument("id_col_idx", type=int,
                            help="zero-based index of the page ID column")
    # Verbosity option
    arg_parser.add_argument('-v', '--verbose', action='store_true',
                            help='write informational output')
    opts = arg_parser.parse_args()

    if opts.verbose:
        logging.basicConfig(level=logging.INFO)

    TalkpageProcessor().check_talkpages(opts.input_filename,
                                        opts.output_filename,
                                        opts.id_col_idx)
    return()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
16485 | import numpy as np
import unittest
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models import MLModel
from coremltools.models.neural_network.printer import print_network_spec
from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \
remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes
import copy
import pytest
# Debug switch (not consulted in the tests visible here).
DEBUG = False
# Fixed seed so the randomly initialized test networks are reproducible.
np.random.seed(100)
class MLModelPassesTest(unittest.TestCase):
    def test_load_constant_remove(self):
        """remove_disconnected_layers should drop load-constant layers
        whose outputs ('c1'..'c3') feed no other layer or network output."""
        input_features = [('data', datatypes.Array(*(3, 4)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        builder.add_activation('relu1', 'RELU', 'data', 'relu1')
        # 'const1' produces 'c1', which nothing consumes -> disconnected
        builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
        builder.add_activation('relu2', 'RELU', 'relu1', 'out')
        # 'const2' and 'const3' are likewise disconnected from 'out'
        builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
        builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
        spec = builder.spec
        np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
        remove_disconnected_layers(spec)
        # Only the two RELU layers on the data -> out path should remain
        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
def test_dead_layer_remove(self):
input_features = [('data', datatypes.Array(*(3, 4)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_activation('relu1', 'RELU', 'data', 'relu1')
builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
builder.add_split_nd('splitnd1', 'const2', ['s1', 's2', 's3'], axis=0, num_splits=3)
builder.add_squeeze('squeeze', 's1', 'squeeze_out')
builder.add_activation('relu4', 'RELU', 's2', 'relu4')
builder.add_activation('relu5', 'RELU', 'relu4', 'relu5')
builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
builder.add_activation('relu2', 'RELU', 'relu1', 'out')
spec = builder.spec
np.testing.assert_equal(9, len(spec.neuralNetwork.layers))
remove_disconnected_layers(spec)
np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
@pytest.mark.xfail
def test_dead_layer_remove_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'input', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
@pytest.mark.xfail
def test_dead_layer_partial_branch(self):
convergence_tolerance = 1e-8
input_features = [('input', datatypes.Array(*(2,)))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
# add condition to break from the loop, if convergence criterion is met
builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
branch_layer = builder.add_branch('branch_layer', 'cond')
builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
builder_elsebranch.add_activation('linear_red_1', 'LINEAR', 'input', 'linear_red1_out')
builder_elsebranch.add_activation('linear_red_2', 'LINEAR', 'linear_red1_out', 'linear_red2_out')
builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
builder.add_squeeze('out', 'relu2_out', 'out', squeeze_all=True)
mlmodel = MLModel(builder.spec)
data = np.random.rand(2,)
data_dict = {'input': data}
before_pass_out = mlmodel.predict(data_dict)['out']
if DEBUG:
print('\n mlmodel description before remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
old_spec = copy.copy(builder.spec)
remove_disconnected_layers(builder.spec)
if DEBUG:
print('\n mlmodel description after remove disconnected layers pass: \n')
print_network_spec(builder.spec, style='coding')
mlmodel = MLModel(builder.spec)
after_pass_out = mlmodel.predict(data_dict)['out']
np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
np.testing.assert_equal(len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers))
np.testing.assert_equal(len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2)
def test_conv_crop_bn_to_conv_bn_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='out')
# Conv -> Crop -> BN
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[2].WhichOneof('layer'))
def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self):
input_features = [('data', datatypes.Array(1, 10, 10))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
W = np.ones((2,10,1,10), dtype=np.float32)
builder.add_convolution(name='conv',
kernel_channels=1,
output_channels=2,
height=2, width=2,
stride_height=1, stride_width=1,
border_mode='valid', groups=1,
W=W,
b=None, has_bias=False,
input_name='data', output_name='conv_out')
builder.add_crop(name='crop',
left=1, right=1, top=1, bottom=1, offset=0,
input_names=['conv_out'],
output_name='crop_out')
builder.add_batchnorm(name='bn',
channels=2,
gamma=np.ones(2,).astype(np.float32),
beta=np.ones(2,).astype(np.float32),
mean=np.ones(2,).astype(np.float32),
variance=np.ones(2,).astype(np.float32),
input_name='crop_out',
output_name='bn_out')
builder.add_activation(name='relu',
non_linearity='RELU',
input_name='bn_out',
output_name='out')
# Conv -> Crop -> BN -> ReLU
spec = builder.spec.neuralNetwork
np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[3].WhichOneof('layer'))
# transform the pattern
transform_conv_crop(builder.spec)
# Conv -> BN -> ReLU -> Crop
np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer'))
np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer'))
def test_redundant_transposes(self):
def _build_and_test_network(input_size, transpose_layers, expected_layers):
"""
Helper function for testing transpose removal.
Args:
input_size: Size of the input network tensor.
transpose_layers: Array of transpose axes definitions.
expected_layers: Array of indices into transpose_layers indicating
which of the transpose layers should be present after the
graph pass.
"""
input_features = [('data', datatypes.Array(*input_size))]
output_features = [('out', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
last_layer = 'data'
for idx, axes in enumerate(transpose_layers):
name = 't{}'.format(idx)
if idx == len(transpose_layers) - 1:
output_name = 'out'
else:
output_name = name + '_out'
builder.add_transpose(name=name,
axes=axes,
input_name=last_layer,
output_name=output_name)
last_layer = output_name
spec = builder.spec.neuralNetwork
# Check the network before the graph pass.
for idx in range(len(transpose_layers)):
np.testing.assert_equal('transpose', spec.layers[idx].WhichOneof('layer'))
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify only the expected layers remain.
np.testing.assert_equal(len(spec.layers), len(expected_layers))
for output_layer_idx, input_layer_idx in enumerate(expected_layers):
np.testing.assert_equal(
'transpose',
spec.layers[output_layer_idx].WhichOneof('layer')
)
np.testing.assert_array_equal(
transpose_layers[input_layer_idx],
spec.layers[output_layer_idx].transpose.axes
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes together are the identity.
transpose_layers=[[2, 0, 1], [1, 2, 0]],
expected_layers=[],
)
_build_and_test_network(
input_size=[1, 10, 10],
# These transposes are not inverses.
transpose_layers=[[2, 0, 1], [2, 0, 1]],
expected_layers=[0, 1],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First two are the identity, then an extra.
transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]],
expected_layers=[2],
)
_build_and_test_network(
input_size=[1, 1, 10, 10, 3],
# First is okay, next two are the identity.
transpose_layers=[[1, 0, 2, 3, 4], [2, 4, 1, 0, 3], [3, 2, 0, 4, 1]],
expected_layers=[0],
)
# A slightly more complicated test case where there are two transposes
# in topological order, but are actually in parallel in the graph.
builder = neural_network.NeuralNetworkBuilder(
[('data', datatypes.Array(2, 4, 8))],
[('out', None)]
)
last_layer = 'data'
builder.add_transpose(name='t1',
axes=[0, 2, 1],
input_name='data',
output_name='t1')
builder.add_transpose(name='t2',
axes=[0, 2, 1],
input_name='data',
output_name='t2')
builder.add_stack(name='stack',
input_names=['t1', 't2'],
output_name='out')
spec = builder.spec.neuralNetwork
# Run the removal pass.
remove_redundant_transposes(builder.spec)
# Verify nothing was removed.
np.testing.assert_equal(len(spec.layers), 3)
if __name__ == '__main__':
    # Flip RUN_ALL_TESTS to False to run only the single test named below,
    # which is convenient when debugging one graph pass in isolation.
    RUN_ALL_TESTS = True
    if RUN_ALL_TESTS:
        unittest.main()
    else:
        suite = unittest.TestSuite()
        suite.addTest(MLModelPassesTest('test_load_constant_remove'))
        unittest.TextTestRunner().run(suite)
| StarcoderdataPython |
1709601 | <gh_stars>10-100
#!/usr/bin/env python
import gym
import logging
import os
import sys
import gflags as flags
from baselines import bench
from baselines import logger
from baselines.logger import Logger, TensorBoardOutputFormat, HumanOutputFormat
from baselines.common import set_global_seeds
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from deepq import deepq
from deepq.models import cnn_to_mlp
from acktr.policies import CnnPolicy
from acktr import acktr_disc
import ppaquette_gym_super_mario
from wrappers import MarioActionSpaceWrapper
from wrappers import ProcessFrame84
import datetime
# Directory containing this script; model checkpoints are saved beneath it.
PROJ_DIR = os.path.dirname(os.path.abspath(__file__))
import pprint
# Command-line flags controlling environment, algorithm, and hyperparameters.
FLAGS = flags.FLAGS
flags.DEFINE_string("log", "stdout", "logging type(stdout, tensorboard)")
flags.DEFINE_string("env", "ppaquette/SuperMarioBros-1-1-v0", "RL environment to train.")
flags.DEFINE_string("algorithm", "deepq", "RL algorithm to use.")
flags.DEFINE_integer("timesteps", 2000000, "Steps to train")
flags.DEFINE_float("exploration_fraction", 0.5, "Exploration Fraction")
flags.DEFINE_boolean("prioritized", False, "prioritized_replay")
flags.DEFINE_boolean("dueling", False, "dueling")
flags.DEFINE_integer("num_cpu", 4, "number of cpus")
flags.DEFINE_float("lr", 5e-4, "Learning rate")
# Best mean-100-episode reward seen so far; updated by the callbacks below.
max_mean_reward = 0
# Path of the most recently saved best-model file ("" if none saved yet).
last_filename = ""
# Timestamp used to name per-run TensorBoard log directories.
start_time = datetime.datetime.now().strftime("%Y%m%d%H%M")
def train_acktr(env_id, num_timesteps, seed, num_cpu):
    """Train an ACKTR model.
    Parameters
    -------
    env_id: environment to train on
    num_timesteps: int
        number of env steps to optimizer for
    seed: int
        number of random seed
    num_cpu: int
        number of parallel agents
    """
    steps = num_timesteps // 4
    def build_env_fn(worker_idx):
        """Return a thunk that builds one wrapped Mario environment."""
        def _make():
            # 1. Create gym environment
            environment = gym.make(env_id)
            environment.seed(seed + worker_idx)
            log_dir = logger.get_dir()
            if log_dir:
                monitor_path = os.path.join(log_dir, "{}.monitor.json".format(worker_idx))
                environment = bench.Monitor(environment, monitor_path)
            gym.logger.setLevel(logging.WARN)
            # 2. Apply action space wrapper
            environment = MarioActionSpaceWrapper(environment)
            # 3. Apply observation space wrapper to reduce input size
            environment = ProcessFrame84(environment)
            return environment
        return _make
    set_global_seeds(seed)
    vec_env = SubprocVecEnv([build_env_fn(i) for i in range(num_cpu)])
    acktr_disc.learn(CnnPolicy, vec_env, seed, total_timesteps=steps,
                     nprocs=num_cpu, save_interval=True, lr=FLAGS.lr,
                     callback=acktr_callback)
    vec_env.close()
def train_dqn(env_id, num_timesteps):
    """Train a DQN model.
    Parameters
    -------
    env_id: environment to train on
    num_timesteps: int
        number of env steps to optimizer for
    """
    # 1. Create gym environment
    # Bug fix: this function previously ignored its env_id and num_timesteps
    # parameters and read FLAGS.env / FLAGS.timesteps directly, so callers
    # passing other values (as main() does) were silently ignored.
    env = gym.make(env_id)
    # 2. Apply action space wrapper
    env = MarioActionSpaceWrapper(env)
    # 3. Apply observation space wrapper to reduce input size
    env = ProcessFrame84(env)
    # 4. Create a CNN model for Q-Function
    model = cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=FLAGS.dueling
    )
    # 5. Train the model
    act = deepq.learn(
        env,
        q_func=model,
        lr=FLAGS.lr,
        max_timesteps=num_timesteps,
        buffer_size=10000,
        exploration_fraction=FLAGS.exploration_fraction,
        exploration_final_eps=0.01,
        train_freq=4,
        learning_starts=10000,
        target_network_update_freq=1000,
        gamma=0.99,
        prioritized_replay=FLAGS.prioritized,
        callback=deepq_callback
    )
    act.save("mario_model.pkl")
    env.close()
def deepq_callback(locals, globals):
    """Per-step callback passed to deepq.learn.

    When an episode finishes and the mean reward over the last 100 episodes
    is a new best (after at least 10 episodes), saves the current model
    under models/deepq/ and deletes the previously saved best model.
    Returns None. Note: the parameter names shadow the builtins because
    baselines invokes the callback positionally with locals()/globals().
    """
    #pprint.pprint(locals)
    global max_mean_reward, last_filename
    if('done' in locals and locals['done'] == True):
        if('mean_100ep_reward' in locals
           and locals['num_episodes'] >= 10
           and locals['mean_100ep_reward'] > max_mean_reward
           ):
            print("mean_100ep_reward : %s max_mean_reward : %s" %
                  (locals['mean_100ep_reward'], max_mean_reward))
            # Improvement: os.makedirs creates intermediate directories in one
            # call and exist_ok avoids the old try/mkdir/except dance (which
            # printed a spurious error when the directory already existed).
            try:
                os.makedirs(os.path.join(PROJ_DIR, 'models/deepq/'), exist_ok=True)
            except Exception as e:
                print(str(e))
            if(last_filename != ""):
                os.remove(last_filename)
                print("delete last model file : %s" % last_filename)
            max_mean_reward = locals['mean_100ep_reward']
            act = deepq.ActWrapper(locals['act'], locals['act_params'])
            filename = os.path.join(PROJ_DIR,'models/deepq/mario_reward_%s.pkl' % locals['mean_100ep_reward'])
            act.save(filename)
            print("save best mean_100ep_reward model to %s" % filename)
            last_filename = filename
def acktr_callback(locals, globals):
    """Callback passed to acktr_disc.learn.

    When the mean reward over the last 100 episodes is a new best (after at
    least 10 episodes), saves the current model under models/acktr/ and
    deletes the previously saved best model. Returns None. The parameter
    names shadow the builtins because baselines invokes the callback
    positionally with locals()/globals().
    """
    global max_mean_reward, last_filename
    #pprint.pprint(locals)
    if('mean_100ep_reward' in locals
       and locals['num_episodes'] >= 10
       and locals['mean_100ep_reward'] > max_mean_reward
       ):
        print("mean_100ep_reward : %s max_mean_reward : %s" %
              (locals['mean_100ep_reward'], max_mean_reward))
        # Improvement (consistent with deepq_callback): os.makedirs with
        # exist_ok replaces the old nested try/mkdir/except blocks.
        try:
            os.makedirs(os.path.join(PROJ_DIR, 'models/acktr/'), exist_ok=True)
        except Exception as e:
            print(str(e))
        if(last_filename != ""):
            os.remove(last_filename)
            print("delete last model file : %s" % last_filename)
        max_mean_reward = locals['mean_100ep_reward']
        model = locals['model']
        filename = os.path.join(PROJ_DIR,'models/acktr/mario_reward_%s.pkl' % locals['mean_100ep_reward'])
        model.save(filename)
        print("save best mean_100ep_reward model to %s" % filename)
        last_filename = filename
def main():
    """Parse flags, configure logging, then launch the selected trainer."""
    FLAGS(sys.argv)
    algorithm = FLAGS.algorithm
    # Per-run TensorBoard directory encoding the hyperparameters.
    if algorithm == "deepq":
        logdir = "tensorboard/%s/%s_%s_prio%s_duel%s_lr%s/%s" % (
            FLAGS.algorithm,
            FLAGS.timesteps,
            FLAGS.exploration_fraction,
            FLAGS.prioritized,
            FLAGS.dueling,
            FLAGS.lr,
            start_time
        )
    elif algorithm == "acktr":
        logdir = "tensorboard/%s/%s_num%s_lr%s/%s" % (
            FLAGS.algorithm,
            FLAGS.timesteps,
            FLAGS.num_cpu,
            FLAGS.lr,
            start_time
        )
    else:
        logdir = "tensorboard"
    # Route baselines logging either to TensorBoard or to stdout.
    if FLAGS.log == "tensorboard":
        log_format = TensorBoardOutputFormat(logdir)
        Logger.DEFAULT = Logger.CURRENT = Logger(
            dir=None, output_formats=[log_format])
    elif FLAGS.log == "stdout":
        log_format = HumanOutputFormat(sys.stdout)
        Logger.DEFAULT = Logger.CURRENT = Logger(
            dir=None, output_formats=[log_format])
    print("env : %s" % FLAGS.env)
    print("algorithm : %s" % FLAGS.algorithm)
    print("timesteps : %s" % FLAGS.timesteps)
    print("exploration_fraction : %s" % FLAGS.exploration_fraction)
    print("prioritized : %s" % FLAGS.prioritized)
    print("dueling : %s" % FLAGS.dueling)
    print("num_cpu : %s" % FLAGS.num_cpu)
    print("lr : %s" % FLAGS.lr)
    # Choose which RL algorithm to train.
    if algorithm == "deepq":  # Use DQN
        train_dqn(env_id=FLAGS.env, num_timesteps=FLAGS.timesteps)
    elif algorithm == "acktr":  # Use acktr
        train_acktr(FLAGS.env, num_timesteps=int(FLAGS.timesteps), seed=0, num_cpu=FLAGS.num_cpu)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
132275 | <gh_stars>0
class Body(object):
    """A point mass with a position, a velocity, and an optional name."""
    def __init__(self, mass, position, velocity, name = None):
        """Initialize the body.

        mass, position, velocity: stored as given (types are caller-defined).
        name: optional label; stored as None when not provided.
        """
        # Bug fix: previously `self.name` was assigned only when a name was
        # passed (and the test used the non-idiomatic `name != None`), so
        # reading `body.name` on an unnamed body raised AttributeError.
        # The attribute is now always present, defaulting to None.
        self.name = name
        self.mass = mass
        self.position = position
        self.velocity = velocity
| StarcoderdataPython |
1687791 | <gh_stars>0
from django.apps import AppConfig
class MoviepanelConfig(AppConfig):
    """Django application configuration for the movie panel app."""
    # Dotted path of the app package and its human-readable name.
    name = 'towatch.apps.moviepanel'
    verbose_name = 'moviepanel'
| StarcoderdataPython |
4838009 | """
Module supporting BirdVoxDetect NFC detectors.
When this module is imported, it dynamically creates a detector class
(a subclass of the `_Detector` class of this module) for each BirdVoxDetect
detector in the archive database and adds it to the detector extensions of
this Vesper server.
BirdVoxDetect (https://github.com/BirdVox/birdvoxdetect) is an NFC
detector created by the BirdVox project (https://wp.nyu.edu/birdvox/).
"""
from contextlib import AbstractContextManager
import csv
import logging
import os.path
import tempfile
import wave
import numpy as np
from vesper.django.app.models import Processor
from vesper.util.settings import Settings
import vesper.util.conda_utils as conda_utils
import vesper.util.os_utils as os_utils
import vesper.util.signal_utils as signal_utils
# Duration in seconds of every clip reported to the listener.
_CLIP_DURATION = .6
# Valid detection threshold types; "AT" selects adaptive thresholding
# (the --threshold-adaptive flag), see _create_detector_class below.
_THRESHOLD_TYPES = ('FT', 'AT')
class DetectorError(Exception):
    """Raised when the BirdVoxDetect subprocess cannot run or fails."""
    pass
class _Detector:
    """
    Vesper wrapper for BirdVoxDetect NFC detector.
    An instance of this class wraps BirdVoxDetect as a Vesper detector.
    The instance operates on a single audio channel. It accepts a sequence
    of consecutive sample arrays of any sizes via its `detect` method,
    concatenates them in a temporary audio file, and runs BirdVoxDetect
    on the audio file when its `complete_detection` method is called.
    BirdVoxDetect is run in its own Conda environment, which can be
    different from the Conda environment in which the Vesper server is
    running. After BirdVoxDetect finishes processing the file,
    `complete_detection` invokes a listener's `process_clip` method for
    each of the resulting clips. The `process_clip` method must accept
    three arguments: the start index and length of the detected clip,
    and a dictionary of annotations for the clip.
    """
    def __init__(self, input_sample_rate, listener):
        """Create a detector for one audio channel.

        `input_sample_rate` is in hertz. `listener` must provide
        `process_clip` and `complete_processing` methods.
        """
        self._input_sample_rate = input_sample_rate
        self._listener = listener
        # Clip length in sample frames, from the fixed clip duration.
        self._clip_length = signal_utils.seconds_to_frames(
            _CLIP_DURATION, self._input_sample_rate)
        # Create and open temporary audio file. Do not delete
        # automatically on close. We will close the file after we
        # finish writing it, and then BirdVoxDetect will open it
        # again for reading. We delete the file ourselves after
        # BirdVoxDetect finishes processing it.
        self._audio_file = tempfile.NamedTemporaryFile(
            suffix='.wav', delete=False)
        # Create audio file writer.
        self._audio_file_writer = WaveFileWriter(
            self._audio_file, 1, self._input_sample_rate)
    @property
    def settings(self):
        """Detector settings (`_settings` is set on dynamically created subclasses)."""
        return self._settings
    @property
    def input_sample_rate(self):
        """Sample rate of the input audio, in hertz."""
        return self._input_sample_rate
    @property
    def listener(self):
        """The object notified of each detected clip."""
        return self._listener
    def detect(self, samples):
        """Append one array of consecutive input samples to the temp audio file."""
        self._audio_file_writer.write(samples)
    def complete_detection(self):
        """
        Completes detection after the `detect` method has been called
        for all input.
        """
        # Close audio file writer and audio file.
        self._audio_file_writer.close()
        self._audio_file.close()
        audio_file_path = self._audio_file.name
        # Run BirdVoxDetect into a temporary output directory; the temp
        # input audio file is deleted when the `with` block exits.
        with tempfile.TemporaryDirectory() as output_dir_path, \
                FileDeleter(audio_file_path):
            settings = self.settings
            module_name = 'vesper_birdvox.run_birdvoxdetect'
            # Build list of command line arguments.
            threshold = str(settings.threshold)
            audio_file_path = self._audio_file.name
            args = (
                '--threshold', threshold,
                '--output-dir', output_dir_path,
                audio_file_path)
            if settings.threshold_adaptive:
                args = ('--threshold-adaptive',) + args
            environment_name = f'birdvoxdetect-{settings.detector_version}'
            try:
                results = conda_utils.run_python_script(
                    module_name, args, environment_name)
            except Exception as e:
                raise DetectorError(
                    f'Could not run {self.extension_name} in Conda '
                    f'environment "{environment_name}". Error message '
                    f'was: {str(e)}')
            self._log_bvd_results(results)
            if results.returncode != 0:
                # BVD process completed abnormally
                raise DetectorError(
                    f'{self.extension_name} process completed abnormally. '
                    f'See above log messages for details.')
            else:
                # BVD process completed normally
                detection_file_path = self._get_detection_file_path(
                    output_dir_path, audio_file_path)
                self._process_detection_file(detection_file_path)
    def _log_bvd_results(self, results):
        """Log the subprocess outcome and both of its output streams."""
        if results.returncode != 0:
            # BVD process completed abnormally.
            logging.info(
                f'    {self.extension_name} process completed '
                f'abnormally with return code {results.returncode}. '
                f'No clips will be created.')
        else:
            # BVD process completed normally
            logging.info(
                f'    {self.extension_name} process completed normally.')
        self._log_bvd_output_stream(results.stdout, 'standard output')
        self._log_bvd_output_stream(results.stderr, 'standard error')
    def _log_bvd_output_stream(self, stream_text, stream_name):
        """Log one captured subprocess stream, line by line."""
        if len(stream_text) == 0:
            logging.info(
                f'    {self.extension_name} process {stream_name} '
                f'was empty.')
        else:
            logging.info(
                f'    {self.extension_name} process {stream_name} was:')
            lines = stream_text.strip().splitlines()
            for line in lines:
                logging.info(f'        {line}')
    def _get_detection_file_path(self, output_dir_path, audio_file_path):
        """Return the path of the CSV detection file BirdVoxDetect wrote."""
        audio_file_name_base = \
            os.path.splitext(os.path.basename(audio_file_path))[0]
        detection_file_name = \
            f'{audio_file_name_base}_detections_for_vesper.csv'
        return os.path.join(output_dir_path, detection_file_name)
    def _process_detection_file(self, detection_file_path):
        """Parse the detection CSV and notify the listener of each clip.

        The first CSV column holds the clip center time in seconds; the
        remaining columns become clip annotations (empty values omitted).
        """
        with open(detection_file_path, newline='') as detection_file:
            reader = csv.reader(detection_file)
            # Skip header.
            header = next(reader)
            column_count = len(header)
            for row in reader:
                start_index = self._get_clip_start_index(row[0])
                # Create dictionary of annotations for this clip,
                # ignoring missing values.
                annotations = dict(
                    (header[i], row[i])
                    for i in range(1, column_count)
                    if row[i] != '')
                self._listener.process_clip(
                    start_index, self._clip_length, annotations=annotations)
        self._listener.complete_processing()
    def _get_clip_start_index(self, center_time):
        """Convert a clip center time (seconds, as a string) to a start frame index."""
        center_time = float(center_time)
        center_index = signal_utils.seconds_to_frames(
            center_time, self._input_sample_rate)
        return center_index - self._clip_length // 2
class FileDeleter(AbstractContextManager):
    """Context manager that deletes the specified file when the context exits.

    `__enter__` is inherited from `AbstractContextManager` and returns
    `self`. The file is deleted whether or not an exception occurred;
    returning None from `__exit__` lets any exception propagate.
    """
    def __init__(self, file_path):
        self._file_path = file_path
    def __exit__(self, exception_type, exception_value, traceback):
        os_utils.delete_file(self._file_path)
# Lazily built cache of detector classes; populated on first call to
# get_detector_classes.
_detector_classes = None
def get_detector_classes():
    """
    Gets the BirdVoxDetect detector classes for this archive.

    The classes are created lazily on the first call, one per
    BirdVoxDetect detector in the archive database; later calls return
    the cached list.
    """
    global _detector_classes
    if _detector_classes is not None:
        return _detector_classes
    _detector_classes = _create_detector_classes()
    return _detector_classes
def _create_detector_classes():
    """Build one detector class per BirdVoxDetect processor in the database.

    Processors whose names cannot be parsed are skipped with a warning.
    """
    bvd_processors = Processor.objects.filter(
        type='Detector').filter(name__startswith='BirdVoxDetect')
    classes = []
    for processor in bvd_processors:
        try:
            classes.append(_create_detector_class(processor))
        except Exception as e:
            logging.warning(
                f'Could not create detector "{processor.name}". '
                f'Error message was: {str(e)}')
    return classes
def _create_detector_class(processor):
    """Dynamically create a `_Detector` subclass for one archive detector.

    The processor name (e.g. "BirdVoxDetect 0.5.0 FT 30") determines the
    class name, the extension name, and the detector settings.
    """
    version, thresh_type, thresh = _parse_detector_name(processor.name)
    # Version with underscores instead of periods, for use in the Python
    # class name; the dotted original is still needed below.
    version_token = version.replace('.', '_')
    thresh_token = f'{thresh:02d}'
    detector_settings = Settings(
        detector_version=version,
        threshold_adaptive=(thresh_type == 'AT'),
        threshold=thresh)
    attributes = {
        'extension_name': f'BirdVoxDetect {version} {thresh_type} {thresh_token}',
        '_settings': detector_settings,
    }
    class_name = f'Detector_{version_token}_{thresh_type}_{thresh_token}'
    return type(class_name, (_Detector,), attributes)
def _parse_detector_name(name):
    """Parse a detector name of the form "BirdVoxDetect <version> <type> <threshold>".

    Returns a `(detector_version, threshold_type, threshold)` tuple, where
    `threshold` is an int in [0, 100]. Raises `ValueError` for malformed names.
    """
    parts = name.split()
    if len(parts) != 4:
        raise ValueError(
            f'Name must be of the form "BirdVoxDetect <version> <type> '
            f'<threshold>", for example "BirdVoxDetect 0.5.0 FT 30".')
    detector_version = parts[1]
    threshold_type = parts[2]
    if threshold_type not in _THRESHOLD_TYPES:
        raise ValueError(
            f'Unrecognized detection threshold type "{threshold_type}". '
            f'The threshold type must be either "FT" or "AT".')
    try:
        threshold = int(parts[3])
    except Exception:
        raise ValueError(
            f'Bad detection threshold "{parts[3]}". The threshold must '
            f'be a number in the range [0, 100].')
    # Bug fix: the message above promises the range [0, 100], but the range
    # was never enforced, so values such as -5 or 200 were accepted.
    if not 0 <= threshold <= 100:
        raise ValueError(
            f'Bad detection threshold "{parts[3]}". The threshold must '
            f'be a number in the range [0, 100].')
    return detector_version, threshold_type, threshold
class WaveFileWriter:
    """Incrementally writes 16-bit PCM sample arrays to a .wav file."""
    def __init__(self, file_, num_channels, sample_rate):
        """Open `file_` (a path or file object) for 16-bit PCM WAVE output."""
        writer = wave.open(file_, 'wb')
        # Params: (nchannels, sampwidth, framerate, nframes, comptype, compname).
        # nframes starts at 0; the wave module patches it on close.
        writer.setparams((num_channels, 2, sample_rate, 0, 'NONE', None))
        self._writer = writer
    def write(self, samples):
        """Append one array of samples, converting to little-endian int16 if needed."""
        file_dtype = np.dtype('<i2')
        if samples.dtype != file_dtype:
            samples = np.array(np.round(samples), dtype=file_dtype)
        # Transpose so channels interleave per frame, then write raw bytes.
        self._writer.writeframes(samples.transpose().tobytes())
    def close(self):
        """Finalize the WAVE header and close the underlying wave writer."""
        self._writer.close()
| StarcoderdataPython |
1659537 | import numpy as np
def atari_make_initial_state(state, history_length=4):
    """Build an initial frame-stack state by repeating one frame.

    Generalized: `history_length` (default 4, the previously hard-coded
    value) controls how many copies of `state` are stacked along axis 2.
    Returns an array of shape `state.shape[:2] + (history_length,)` for a
    2-D input frame.
    """
    return np.stack([state] * history_length, axis=2)
def atari_make_next_state(state, next_state):
    """Shift the frame stack left by one and append the newest frame.

    Drops the oldest frame (channel 0 of `state`) and places `next_state`
    as the last channel along axis 2.
    """
    newest = np.expand_dims(next_state, 2)
    return np.concatenate((state[:, :, 1:], newest), axis=2)
1741535 | #!/usr/bin/env python 3
# -*- coding: utf-8 -*-
from functools import lru_cache
@lru_cache
def fib(n):
    """Return the n-th Fibonacci number, memoized via functools.lru_cache."""
    if n in (0, 1):
        # Base cases: fib(0) == 0, fib(1) == 1.
        return n
    return fib(n - 2) + fib(n - 1)
if __name__ == '__main__':
    import timeit
    # Benchmark fib(10); timeit's default 1,000,000 calls mostly hit the cache.
    print(timeit.timeit('fib(10)', setup="from __main__ import fib"))
| StarcoderdataPython |
# Prompt for the user's full name and split it into whitespace-separated words.
# Idiom fixes: input() already returns a str (the str() wrapper was redundant),
# and name[-1] replaces name[len(name) - 1] for the last element.
name = input('What is your name? ').strip().split()
# name[0] is the first word; name[-1] the last (same word for one-word names).
print(f'Nice to meet you! \n'
      f'Your first name is {name[0]} \n'
      f'Your last name is {name[-1]}')
| StarcoderdataPython |
59617 | <reponame>ojarva/home-info-display
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the MarineWeather model with MarineDataPoint.

    MarineDataPoint stores observed or forecast marine conditions per
    location and timestamp, unique on (timestamp, location).
    """
    dependencies = [
        ('info_weather', '0006_auto_20150322_2310'),
    ]
    operations = [
        migrations.CreateModel(
            name='MarineDataPoint',
            fields=[
                ('id', models.AutoField(verbose_name='ID',
                    serialize=False, auto_created=True, primary_key=True)),
                ('timestamp', models.DateTimeField()),
                ('location', models.CharField(max_length=50)),
                ('forecast', models.BooleanField(default=False)),
                ('air_temperature', models.DecimalField(
                    null=True, max_digits=4, decimal_places=1, blank=True)),
                ('water_temperature', models.DecimalField(
                    null=True, max_digits=4, decimal_places=1, blank=True)),
                ('wave_dispersion', models.DecimalField(
                    null=True, max_digits=4, decimal_places=1, blank=True)),
                ('wave_height', models.DecimalField(null=True,
                    max_digits=4, decimal_places=1, blank=True)),
                ('wave_period', models.DecimalField(null=True,
                    max_digits=4, decimal_places=1, blank=True)),
                ('wind_direction', models.DecimalField(
                    null=True, max_digits=4, decimal_places=1, blank=True)),
                ('wind_gusts', models.DecimalField(null=True,
                    max_digits=4, decimal_places=1, blank=True)),
                ('wind_max', models.DecimalField(null=True,
                    max_digits=4, decimal_places=1, blank=True)),
                ('wind_speed', models.DecimalField(null=True,
                    max_digits=4, decimal_places=1, blank=True)),
            ],
            options={
                'ordering': ('-timestamp',),
                'get_latest_by': 'timestamp',
            },
            bases=(models.Model,),
        ),
        migrations.DeleteModel(
            name='MarineWeather',
        ),
        migrations.AlterUniqueTogether(
            name='marinedatapoint',
            unique_together=set([('timestamp', 'location')]),
        ),
    ]
| StarcoderdataPython |
1739877 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jsonschema import validate as js_v, exceptions as js_e
from http import HTTPStatus
from copy import deepcopy
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.1"
version_date = "Mar 2018"
"""
Validator of input data using JSON schemas for those items that not contains an OSM yang information model
"""
# Basis schemas
# Reusable leaf schemas composed by the larger request schemas below.
# NOTE(review): `patern_name` (sic) is not referenced in this chunk; kept
# because module-level names may be imported elsewhere.
patern_name = "^[ -~]+$"
# short names: no punctuation that could break URLs or DB queries
shortname_schema = {"type": "string", "minLength": 1, "maxLength": 60, "pattern": "^[^,;()\\.\\$'\"]+$"}
passwd_schema = {"type": "string", "minLength": 1, "maxLength": 60}
name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
string_schema = {"type": "string", "minLength": 1, "maxLength": 255}
xml_text_schema = {"type": "string", "minLength": 1, "maxLength": 1000, "pattern": "^[^']+$"}
description_schema = {"type": ["string", "null"], "maxLength": 255, "pattern": "^[^'\"]+$"}
id_schema_fake = {"type": "string", "minLength": 2, "maxLength": 36}
bool_schema = {"type": "boolean"}
null_schema = {"type": "null"}
# "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
# canonical UUID (8-4-4-4-12 hex groups)
id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
# NOTE(review): the trailing "([0-5]:){2}" looks like it was meant to be
# "(:[0-5][0-9]){2}" for minutes/seconds — confirm before tightening.
time_schema = {"type": "string", "pattern": "^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]([0-5]:){2}"}
pci_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\\.[0-9a-fA-F]$"}
# allows [] for wildcards. For that reason huge length limit is set
# NOTE(review): inside the class, ':-\\[' forms a character RANGE (0x3A-0x5B),
# which also admits uppercase letters etc.; a literal '-' was probably
# intended — confirm.
pci_extended_schema = {"type": "string", "pattern": "^[0-9a-fA-F.:-\\[\\]]{12,40}$"}
http_schema = {"type": "string", "pattern": "^https?://[^'\"=]+$"}
bandwidth_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]bps)?$"}
memory_schema = {"type": "string", "pattern": "^[0-9]+ *([MG]i?[Bb])?$"}
integer0_schema = {"type": "integer", "minimum": 0}
integer1_schema = {"type": "integer", "minimum": 1}
path_schema = {"type": "string", "pattern": "^(\\.){0,2}(/[^/\"':{}\\(\\)]+)+$"}
vlan_schema = {"type": "integer", "minimum": 1, "maximum": 4095}
vlan1000_schema = {"type": "integer", "minimum": 1000, "maximum": 4095}
mac_schema = {"type": "string",
              "pattern": "^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  # must be unicast: LSB bit of MSB byte ==0
# NOTE(review): mixed-case name `dpid_Schema` kept — it is referenced below
dpid_Schema = {"type": "string", "pattern": "^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){7}$"}
# mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
ip_schema = {"type": "string",
             "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"}
ip_prefix_schema = {"type": "string",
                    "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
                               "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(30|[12]?[0-9])$"}
port_schema = {"type": "integer", "minimum": 1, "maximum": 65534}
object_schema = {"type": "object"}
schema_version_2 = {"type": "integer", "minimum": 2, "maximum": 2}
# schema_version_string={"type":"string","enum": ["0.1", "2", "0.2", "3", "0.3"]}
log_level_schema = {"type": "string", "enum": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]}
checksum_schema = {"type": "string", "pattern": "^[0-9a-fA-F]{32}$"}  # MD5-style hex digest
size_schema = {"type": "integer", "minimum": 1, "maximum": 100}
# patch-style edition of an array: only "$<selector>" keys are accepted
array_edition_schema = {
    "type": "object",
    "patternProperties": {
        "^\\$": {}
    },
    "additionalProperties": False,
    "minProperties": 1,
}
nameshort_list_schema = {
    "type": "array",
    "minItems": 1,
    "items": shortname_schema,
}
# Per-VDU overrides supplied at NS instantiation time: extra volumes and
# per-interface addressing.
ns_instantiate_vdu = {
    "title": "ns action instantiate input schema for vdu",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "id": name_schema,
        "volume": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": name_schema,
                    "vim-volume-id": name_schema,
                },
                "required": ["name", "vim-volume-id"],
                "additionalProperties": False
            }
        },
        "interface": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": name_schema,
                    "ip-address": ip_schema,
                    "mac-address": mac_schema,
                    "floating-ip-required": bool_schema,
                },
                "required": ["name"],
                "additionalProperties": False
            }
        }
    },
    "required": ["id"],
    "additionalProperties": False
}
# list of DNS server addresses inside an IP profile
ip_profile_dns_schema = {
    "type": "array",
    "minItems": 1,
    "items": {
        "type": "object",
        "properties": {
            "address": ip_schema,
        },
        "required": ["address"],
        "additionalProperties": False
    }
}
# DHCP configuration inside an IP profile
ip_profile_dhcp_schema = {
    "type": "object",
    "properties": {
        "enabled": {"type": "boolean"},
        "count": integer1_schema,
        "start-address": ip_schema
    },
    "additionalProperties": False,
}
# NOTE(review): unlike its -update sibling below, this schema does not set
# "additionalProperties": False, so unknown keys pass validation — confirm
# whether that is intentional.
ip_profile_schema = {
    "title": "ip profile validation schame",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "ip-version": {"enum": ["ipv4", "ipv6"]},
        "subnet-address": ip_prefix_schema,
        "gateway-address": ip_schema,
        "dns-server": ip_profile_dns_schema,
        "dhcp-params": ip_profile_dhcp_schema,
    }
}
# Same as ip_profile_schema but every field may also be null (to reset it)
ip_profile_update_schema = {
    "title": "ip profile validation schame",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "ip-version": {"enum": ["ipv4", "ipv6"]},
        "subnet-address": {"oneOf": [null_schema, ip_prefix_schema]},
        "gateway-address": {"oneOf": [null_schema, ip_schema]},
        "dns-server": {"oneOf": [null_schema, ip_profile_dns_schema]},
        "dhcp-params": {"oneOf": [null_schema, ip_profile_dhcp_schema]},
    },
    "additionalProperties": False
}
# Overrides for a VNF-internal virtual link at instantiation time.
ns_instantiate_internal_vld = {
    "title": "ns action instantiate input schema for vdu",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "vim-network-name": name_schema,
        "vim-network-id": name_schema,
        "ip-profile": ip_profile_update_schema,
        "internal-connection-point": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "id-ref": name_schema,
                    "ip-address": ip_schema,
                    # "mac-address": mac_schema,
                },
                "required": ["id-ref"],
                # "minProperties": 2 forces at least one override besides id-ref
                "minProperties": 2,
                "additionalProperties": False
            },
        }
    },
    "required": ["name"],
    "minProperties": 2,
    "additionalProperties": False
}
# per-member-VNF additional parameters passed through to the VNF
additional_params_for_vnf = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "member-vnf-index": name_schema,
            "additionalParams": object_schema,
        },
        "required": ["member-vnf-index", "additionalParams"],
        "additionalProperties": False
    }
}
# Top-level input schema for the NS "instantiate" operation.
ns_instantiate = {
    "title": "ns action instantiate input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "lcmOperationType": string_schema,
        "nsInstanceId": id_schema,
        "netsliceInstanceId": id_schema,
        "nsName": name_schema,
        "nsDescription": {"oneOf": [description_schema, {"type": "null"}]},
        "nsdId": id_schema,
        "vimAccountId": id_schema,
        "additionalParamsForNs": object_schema,
        "additionalParamsForVnf": additional_params_for_vnf,
        "ssh_keys": {"type": "array", "items": {"type": "string"}},
        "nsr_id": id_schema,
        "vduImage": name_schema,
        "vnf": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "member-vnf-index": name_schema,
                    "vimAccountId": id_schema,
                    "vdu": {
                        "type": "array",
                        "minItems": 1,
                        "items": ns_instantiate_vdu,
                    },
                    "internal-vld": {
                        "type": "array",
                        "minItems": 1,
                        "items": ns_instantiate_internal_vld
                    }
                },
                "required": ["member-vnf-index"],
                "minProperties": 2,
                "additionalProperties": False
            }
        },
        "vld": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": string_schema,
                    # BUGFIX: "OneOf" is not a JSON-Schema keyword (draft-04
                    # keywords are case-sensitive) and was silently ignored,
                    # so these fields accepted any value; use "oneOf" as the
                    # rest of this module does.
                    "vim-network-name": {"oneOf": [string_schema, object_schema]},
                    "vim-network-id": {"oneOf": [string_schema, object_schema]},
                    "ip-profile": object_schema,
                    "vnfd-connection-point-ref": {
                        "type": "array",
                        "minItems": 1,
                        "items": {
                            "type": "object",
                            "properties": {
                                "member-vnf-index-ref": name_schema,
                                "vnfd-connection-point-ref": name_schema,
                                "ip-address": ip_schema,
                                # "mac-address": mac_schema,
                            },
                            "required": ["member-vnf-index-ref", "vnfd-connection-point-ref"],
                            "minProperties": 3,
                            "additionalProperties": False
                        },
                    }
                },
                "required": ["name"],
                "additionalProperties": False
            }
        },
    },
    "required": ["nsName", "nsdId", "vimAccountId"],
    "additionalProperties": False
}
ns_action = {   # TODO for the moment it is only contemplated the vnfd primitive execution
    "title": "ns action input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "lcmOperationType": string_schema,
        "nsInstanceId": id_schema,
        "member_vnf_index": name_schema,
        "vnf_member_index": name_schema,  # TODO for backward compatibility. To remove in future
        "vdu_id": name_schema,
        "primitive": name_schema,
        "primitive_params": {"type": "object"},
    },
    "required": ["primitive", "primitive_params"],   # TODO add member_vnf_index
    "additionalProperties": False
}
ns_scale = {   # TODO for the moment it is only VDU-scaling
    "title": "ns scale input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "lcmOperationType": string_schema,
        "nsInstanceId": id_schema,
        "scaleType": {"enum": ["SCALE_VNF"]},
        "scaleVnfData": {
            "type": "object",
            "properties": {
                "vnfInstanceId": name_schema,
                "scaleVnfType": {"enum": ["SCALE_OUT", 'SCALE_IN']},
                "scaleByStepData": {
                    "type": "object",
                    "properties": {
                        "scaling-group-descriptor": name_schema,
                        "member-vnf-index": name_schema,
                        "scaling-policy": name_schema,
                    },
                    "required": ["scaling-group-descriptor", "member-vnf-index"],
                    "additionalProperties": False
                },
            },
            "required": ["scaleVnfType", "scaleByStepData"],  # vnfInstanceId
            "additionalProperties": False
        },
        "scaleTime": time_schema,
    },
    "required": ["scaleType", "scaleVnfData"],
    "additionalProperties": False
}
schema_version = {"type": "string", "enum": ["1.0"]}
schema_type = {"type": "string"}
# Input schema for editing an existing VIM account (all fields optional).
vim_account_edit_schema = {
    "title": "vim_account edit input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "type": shortname_schema,
        "vim": name_schema,
        "datacenter": name_schema,
        "vim_url": description_schema,
        "vim_url_admin": description_schema,
        "vim_tenant": name_schema,
        "vim_tenant_name": name_schema,
        "vim_username": shortname_schema,
        # BUGFIX: restored `passwd_schema` — the line had been corrupted to a
        # `<PASSWORD>` placeholder, which is not valid Python (cf. the other
        # password fields in this module).
        "vim_password": passwd_schema,
        "config": {"type": "object"}
    },
    "additionalProperties": False
}
# Input schema for creating a VIM account.
vim_account_new_schema = {
    "title": "vim_account creation input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "schema_version": schema_version,
        "schema_type": schema_type,
        "name": name_schema,
        "description": description_schema,
        "vim": name_schema,
        "datacenter": name_schema,
        "vim_type": {"enum": ["openstack", "openvim", "vmware", "opennebula", "aws"]},
        "vim_url": description_schema,
        # "vim_url_admin": description_schema,
        # "vim_tenant": name_schema,
        "vim_tenant_name": name_schema,
        "vim_user": shortname_schema,
        # BUGFIX: restored `passwd_schema` — the line had been corrupted to
        # `<PASSWORD>_schema`, which is not valid Python.
        "vim_password": passwd_schema,
        "config": {"type": "object"}
    },
    "required": ["name", "vim_url", "vim_type", "vim_user", "vim_password", "vim_tenant_name"],
    "additionalProperties": False
}
# Input schema for editing an existing WIM account (all fields optional).
wim_account_edit_schema = {
    "title": "wim_account edit input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": name_schema,
        "description": description_schema,
        "type": shortname_schema,
        "wim": name_schema,
        "wim_url": description_schema,
        "user": shortname_schema,
        # BUGFIX: restored `passwd_schema` (line was corrupted to a
        # `<PASSWORD>` placeholder, not valid Python)
        "password": passwd_schema,
        "config": {"type": "object"}
    },
    "additionalProperties": False
}
# Input schema for creating a WIM account.
wim_account_new_schema = {
    "title": "wim_account creation input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "schema_version": schema_version,
        "schema_type": schema_type,
        "name": name_schema,
        "description": description_schema,
        "wim": name_schema,
        "wim_type": {"enum": ["tapi", "onos", "odl", "dynpac"]},
        "wim_url": description_schema,
        "user": shortname_schema,
        # BUGFIX: restored `passwd_schema` (same corruption as above)
        "password": passwd_schema,
        "config": {"type": "object"}
    },
    "required": ["name", "wim_url", "wim_type"],
    "additionalProperties": False
}
# Property set shared by the SDN-controller create/edit schemas below.
sdn_properties = {
    "name": name_schema,
    "description": description_schema,
    "dpid": dpid_Schema,
    "ip": ip_schema,
    "port": port_schema,
    "type": {"type": "string", "enum": ["opendaylight", "floodlight", "onos"]},
    "version": {"type": "string", "minLength": 1, "maxLength": 12},
    "user": shortname_schema,
    # BUGFIX: restored `passwd_schema` — the line had been corrupted to a
    # `<PASSWORD>` placeholder, which is not valid Python.
    "password": passwd_schema
}
# Create: dpid/ip/port/type are mandatory for a new SDN controller.
sdn_new_schema = {
    "title": "sdn controller information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": sdn_properties,
    "required": ["name", "port", 'ip', 'dpid', 'type'],
    "additionalProperties": False
}
# Edit: same properties, nothing mandatory.
sdn_edit_schema = {
    "title": "sdn controller update information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": sdn_properties,
    # "required": ["name", "port", 'ip', 'dpid', 'type'],
    "additionalProperties": False
}
# Mapping of compute-node PCI ports to physical switch ports.
sdn_port_mapping_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "sdn port mapping information schema",
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "compute_node": shortname_schema,
            "ports": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "pci": pci_extended_schema,
                        "switch_port": shortname_schema,
                        "switch_mac": mac_schema
                    },
                    "required": ["pci"]
                }
            }
        },
        "required": ["compute_node", "ports"]
    }
}
# A switch port facing the outside world, optionally with VLAN/MAC.
sdn_external_port_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "External port information",
    "type": "object",
    "properties": {
        "port": {"type": "string", "minLength": 1, "maxLength": 60},
        "vlan": vlan_schema,
        "mac": mac_schema
    },
    "required": ["port"]
}
# PDUs
# One network interface of a physical deployment unit.
pdu_interface = {
    "type": "object",
    "properties": {
        "name": shortname_schema,
        "mgmt": bool_schema,
        "type": {"enum": ["overlay", 'underlay']},
        "ip-address": ip_schema,
        # TODO, add user, password, ssh-key
        "mac-address": mac_schema,
        "vim-network-name": shortname_schema,  # interface is connected to one vim network, or switch port
        "vim-network-id": shortname_schema,
        # # provide this in case SDN assist must deal with this interface
        # "switch-dpid": dpid_Schema,
        # "switch-port": shortname_schema,
        # "switch-mac": shortname_schema,
        # "switch-vlan": vlan_schema,
    },
    "required": ["name", "mgmt", "ip-address"],
    "additionalProperties": False
}
pdu_new_schema = {
    "title": "pdu creation input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": shortname_schema,
        "type": shortname_schema,
        "description": description_schema,
        "shared": bool_schema,
        "vims": nameshort_list_schema,
        "vim_accounts": nameshort_list_schema,
        "interfaces": {
            "type": "array",
            "items": pdu_interface,
            "minItems": 1
        }
    },
    "required": ["name", "type", "interfaces"],
    "additionalProperties": False
}
# Edit: each list field accepts either a full replacement list or a
# "$"-keyed array edition (see array_edition_schema).
pdu_edit_schema = {
    "title": "pdu edit input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": shortname_schema,
        "type": shortname_schema,
        "description": description_schema,
        "shared": bool_schema,
        "vims": {"oneOf": [array_edition_schema, nameshort_list_schema]},
        "vim_accounts": {"oneOf": [array_edition_schema, nameshort_list_schema]},
        "interfaces": {"oneOf": [
            array_edition_schema,
            {
                "type": "array",
                "items": pdu_interface,
                "minItems": 1
            }
        ]}
    },
    "additionalProperties": False,
    "minProperties": 1
}
# USERS
user_new_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "New user schema",
    "type": "object",
    "properties": {
        "username": shortname_schema,
        # BUGFIX: restored `passwd_schema` — the line had been corrupted to a
        # `<PASSWORD>` placeholder, which is not valid Python.
        "password": passwd_schema,
        "projects": nameshort_list_schema,
    },
    "required": ["username", "password", "projects"],
    "additionalProperties": False
}
user_edit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "User edit schema for administrators",
    "type": "object",
    "properties": {
        # BUGFIX: restored `passwd_schema` (same corruption as above)
        "password": passwd_schema,
        "projects": {
            "oneOf": [
                nameshort_list_schema,
                array_edition_schema
            ]
        },
    },
    "minProperties": 1,
    "additionalProperties": False
}
# PROJECTS
project_new_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "New project schema for administrators",
    "type": "object",
    "properties": {
        "name": shortname_schema,
        "admin": bool_schema,
    },
    "required": ["name"],
    "additionalProperties": False
}
project_edit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Project edit schema for administrators",
    "type": "object",
    "properties": {
        "admin": bool_schema,
    },
    "additionalProperties": False,
    "minProperties": 1
}
# GLOBAL SCHEMAS
# Dispatch tables: topic name -> schema used to validate the request body.
nbi_new_input_schemas = {
    "users": user_new_schema,
    "projects": project_new_schema,
    "vim_accounts": vim_account_new_schema,
    "sdns": sdn_new_schema,
    "ns_instantiate": ns_instantiate,
    "ns_action": ns_action,
    "ns_scale": ns_scale,
    "pdus": pdu_new_schema,
}
nbi_edit_input_schemas = {
    "users": user_edit_schema,
    "projects": project_edit_schema,
    "vim_accounts": vim_account_edit_schema,
    "sdns": sdn_edit_schema,
    "pdus": pdu_edit_schema,
}
# NETSLICE SCHEMAS
# The per-subnet schema is the NS instantiate schema with netslice-specific
# additions and without the NS-level additionalParams fields.
nsi_slice_instantiate = deepcopy(ns_instantiate)
nsi_slice_instantiate["title"] = "netslice subnet instantiation params input schema"
nsi_slice_instantiate["properties"]["id"] = name_schema
nsi_slice_instantiate["properties"]["additionalParamsForNsi"] = object_schema
nsi_slice_instantiate["properties"]["additionalParamsForSubnet"] = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "id": name_schema,
            "additionalParamsForNs": object_schema,
            "additionalParamsForVnf": additional_params_for_vnf
        },
        "required": ["id"],
        "additionalProperties": False
    }
}
del nsi_slice_instantiate["required"]
del nsi_slice_instantiate["properties"]["additionalParamsForNs"]
del nsi_slice_instantiate["properties"]["additionalParamsForVnf"]

nsi_vld_instantiate = {
    "title": "netslice vld instantiation params input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "name": string_schema,
        # BUGFIX: "OneOf" is not a JSON-Schema keyword (draft-04 keywords are
        # case-sensitive) and was silently ignored; use "oneOf" as elsewhere
        # in this module.
        "vim-network-name": {"oneOf": [string_schema, object_schema]},
        "vim-network-id": {"oneOf": [string_schema, object_schema]},
        "ip-profile": object_schema,
    },
    "required": ["name"],
    "additionalProperties": False
}
nsi_instantiate = {
    "title": "netslice action instantiate input schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
    "properties": {
        "lcmOperationType": string_schema,
        "nsiInstanceId": id_schema,
        "nsiName": name_schema,
        "nsiDescription": {"oneOf": [description_schema, {"type": "null"}]},
        "nstId": string_schema,
        "vimAccountId": id_schema,
        "ssh_keys": {"type": "string"},
        "nsi_id": id_schema,
        "netslice-subnet": {
            "type": "array",
            "minItems": 1,
            "items": nsi_slice_instantiate
        },
        "netslice-vld": {
            "type": "array",
            "minItems": 1,
            "items": nsi_vld_instantiate
        },
    },
    "required": ["nsiName", "nstId", "vimAccountId"],
    "additionalProperties": False
}
# placeholders: no input validation defined yet for these operations
nsi_action = {
}
nsi_terminate = {
}
class ValidationError(Exception):
    """Raised when user input does not match the expected JSON schema."""

    def __init__(self, message, http_code=HTTPStatus.UNPROCESSABLE_ENTITY):
        # remember which HTTP status the API layer should answer with
        self.http_code = http_code
        super().__init__(message)
def validate_input(indata, schema_to_use):
    """Validate ``indata`` against a JSON schema.

    :param indata: user input data, normally a dictionary
    :param schema_to_use: jsonschema to test against; a falsy schema skips
        validation entirely
    :return: None when valid; raises ValidationError otherwise
    """
    # nothing to check against: accept the input as-is
    if not schema_to_use:
        return None
    try:
        js_v(indata, schema_to_use)
    except js_e.ValidationError as e:
        error_pos = "at '" + ":".join(map(str, e.path)) + "'" if e.path else ""
        raise ValidationError("Format error {} '{}' ".format(error_pos, e.message))
    except js_e.SchemaError:
        # the schema itself is broken: that is a server-side bug, not a 422
        raise ValidationError("Bad json schema {}".format(schema_to_use), http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
    return None
| StarcoderdataPython |
1681107 | #!/bin/env python3
import argparse
import esprima
import json
import logging
import os
import re
import sys
import traceback
# module-level logger (PEP 282 convention); all diagnostics go to stderr so
# they never mix with the generated YAML on stdout
logger = logging.getLogger(__name__)
# number of extra source lines shown around a statement when reporting errors
err_context = 3
def get_req_body_elems(obj, elems):
    """Recursively walk an esprima AST node collecting `req.body.<name>` usages.

    Every distinct member name accessed on ``req.body`` (including names
    destructured via ``const {a, b} = req.body``) is appended to ``elems``.

    :param obj: an esprima AST node
    :param elems: list accumulating the discovered parameter names (mutated)
    :return: dotted path for member/identifier nodes (used internally to
        detect ``req.body``), '' otherwise
    """
    if obj.type in ['FunctionExpression', 'ArrowFunctionExpression']:
        get_req_body_elems(obj.body, elems)
    elif obj.type == 'BlockStatement':
        for s in obj.body:
            get_req_body_elems(s, elems)
    elif obj.type == 'TryStatement':
        get_req_body_elems(obj.block, elems)
    elif obj.type == 'ExpressionStatement':
        get_req_body_elems(obj.expression, elems)
    elif obj.type == 'MemberExpression':
        left = get_req_body_elems(obj.object, elems)
        right = obj.property.name
        if left == 'req.body' and right not in elems:
            elems.append(right)
        return '{}.{}'.format(left, right)
    elif obj.type == 'VariableDeclaration':
        for s in obj.declarations:
            get_req_body_elems(s, elems)
    elif obj.type == 'VariableDeclarator':
        if obj.id.type == 'ObjectPattern':
            # get_req_body_elems() can't be called directly here:
            # const {isAdmin, isNoComments, isCommentOnly} = req.body;
            right = get_req_body_elems(obj.init, elems)
            if right == 'req.body':
                for p in obj.id.properties:
                    name = p.key.name
                    if name not in elems:
                        elems.append(name)
        else:
            get_req_body_elems(obj.init, elems)
    elif obj.type == 'Property':
        get_req_body_elems(obj.value, elems)
    elif obj.type == 'ObjectExpression':
        for s in obj.properties:
            get_req_body_elems(s, elems)
    elif obj.type == 'CallExpression':
        for s in obj.arguments:
            get_req_body_elems(s, elems)
    elif obj.type == 'ArrayExpression':
        for s in obj.elements:
            get_req_body_elems(s, elems)
    elif obj.type == 'IfStatement':
        get_req_body_elems(obj.test, elems)
        if obj.consequent is not None:
            get_req_body_elems(obj.consequent, elems)
        if obj.alternate is not None:
            get_req_body_elems(obj.alternate, elems)
    elif obj.type in ('LogicalExpression', 'BinaryExpression', 'AssignmentExpression'):
        get_req_body_elems(obj.left, elems)
        get_req_body_elems(obj.right, elems)
    elif obj.type in ('ReturnStatement', 'UnaryExpression'):
        get_req_body_elems(obj.argument, elems)
    elif obj.type == 'Literal':
        pass
    elif obj.type == 'Identifier':
        return obj.name
    elif obj.type == 'FunctionDeclaration':
        pass
    else:
        # BUGFIX: was a bare `print(obj)` debug leftover; this script emits
        # the generated OpenAPI YAML on stdout, so stray prints corrupted the
        # output. Log to stderr instead.
        logger.warning('get_req_body_elems: unhandled node type %s', obj.type)
    return ''
def cleanup_jsdocs(jsdoc):
    """Split a JSDoc comment node into text lines.

    Strips the indentation and the leading '*' decoration from every line,
    then drops blank lines at both ends. Returns the list of lines.
    """
    lines = []
    for raw in jsdoc.value.split('\n'):
        # indentation first, then the '*' gutter decoration
        lines.append(raw.lstrip().lstrip('*'))
    while lines and not lines[0].strip():
        del lines[0]
    while lines and not lines[-1].strip():
        del lines[-1]
    return lines
class JS2jsonDecoder(json.JSONDecoder):
    """JSON decoder that additionally converts numeric-looking strings to int."""

    def decode(self, s):
        result = super().decode(s)  # result = super(Decoder, self).decode(s) for Python 2.x
        return self._decode(result)

    def _decode(self, o):
        # BUGFIX: the original tested `isinstance(o, str) or isinstance(o,
        # unicode)`; `unicode` does not exist on Python 3 (this is a
        # python3 script, see the shebang), so any non-str scalar (int,
        # float, None, bool) raised NameError here.
        if isinstance(o, str):
            try:
                return int(o)
            except ValueError:
                return o
        elif isinstance(o, dict):
            return {k: self._decode(v) for k, v in o.items()}
        elif isinstance(o, list):
            return [self._decode(v) for v in o]
        else:
            return o
def load_return_type_jsdoc_json(data):
    """Coerce the loose JS-ish literal of a @return_type tag into JSON and parse it.

    Bare keys, bare values and bare array items are wrapped in double quotes
    before handing the text to ``json.loads``.
    """
    transforms = (
        (r'\n', r' '),                                   # collapse newlines to spaces
        (r'([\{\s,])(\w+)(:)', r'\1"\2"\3'),             # quote bare keys
        (r'(:)\s*([^:\},\]]+)\s*([\},\]])', r'\1"\2"\3'),  # quote bare values
        (r'(\[)\s*([^{].+)\s*(\])', r'\1"\2"\3'),        # quote bare array items
        (r'^\s*([^\[{].+)\s*', r'"\1"'),                 # quote a single bare item
    )
    normalized = data
    for pattern, replacement in transforms:
        normalized = re.sub(pattern, replacement, normalized)
    return json.loads(normalized)
class EntryPoint(object):
    """One REST entry point (HTTP method + URL path) extracted from JS source.

    Wraps the esprima statements describing the method, the path and the
    handler body, parses the JSDoc block attached to it, and emits the
    corresponding OpenAPI (swagger 2.0) YAML fragment on stdout.
    """

    def __init__(self, schema, statements):
        self.schema = schema
        self.method, self._path, self.body = statements
        self._jsdoc = None
        self._doc = {}
        self._raw_doc = None
        self.path = self.compute_path()
        self.method_name = self.method.value.lower()
        self.body_params = []
        if self.method_name in ('post', 'put'):
            get_req_body_elems(self.body, self.body_params)

        # replace the :parameter in path by {parameter}
        self.url = re.sub(r':([^/]*)Id', r'{\1}', self.path)
        self.url = re.sub(r':([^/]*)', r'{\1}', self.url)

        # reduce the api name
        # get_boards_board_cards() should be get_board_cards()
        tokens = self.url.split('/')
        reduced_function_name = []
        for i, token in enumerate(tokens):
            # BUGFIX: was `token in ('api')` — a string-membership test that
            # also skipped '', 'a', 'p', 'i', 'ap' and 'pi'. Use a proper
            # tuple, keeping '' so the leading empty token is still dropped.
            if token in ('', 'api'):
                continue
            if (i < len(tokens) - 1 and          # not the last item
                    tokens[i + 1].startswith('{')):  # and the next token is a parameter
                continue
            reduced_function_name.append(token.strip('{}'))
        self.reduced_function_name = '_'.join(reduced_function_name)

        # mark the schema as used
        schema.used = True

    def compute_path(self):
        # normalise: no trailing slash so later concatenations are predictable
        return self._path.value.rstrip('/')

    def log(self, message, level):
        """Log `message` at `level`, prefixed with the JSDoc location when known."""
        if self._raw_doc is None:
            logger.log(level, 'in {},'.format(self.schema.name))
            logger.log(level, message)
            return
        logger.log(level, 'in {}, lines {}-{}'.format(self.schema.name,
                                                      self._raw_doc.loc.start.line,
                                                      self._raw_doc.loc.end.line))
        logger.log(level, self._raw_doc.value)
        logger.log(level, message)

    def error(self, message):
        return self.log(message, logging.ERROR)

    def warn(self, message):
        return self.log(message, logging.WARNING)

    def info(self, message):
        return self.log(message, logging.INFO)

    @property
    def doc(self):
        return self._doc

    @doc.setter
    def doc(self, doc):
        '''Parse the JSDoc attached to an entry point.

        `jsdoc` will not get these right as they are not attached to a method.
        So instead, we do our custom parsing here (yes, subject to errors).
        The expected format is the following (empty lines between entries
        are ignored):
          /**
           * @operation name_of_entry_point
           * @tag: a_tag_to_add
           * @tag: an_other_tag_to_add
           * @summary A nice summary, better in one line.
           *
           * @description This is a quite long description.
           * We can use *mardown* as the final rendering is done
           * by slate.
           *
           *   indentation doesn't matter.
           *
           * @param param_0 description of param 0
           * @param {string} param_1 we can also put the type of the parameter
           *                 before its name, like in JSDoc
           * @param {boolean} [param_2] we can also tell if the parameter is
           *                  optional by adding square brackets around its name
           *
           * @return Documents a return value
           */

        Notes:
        - name_of_entry_point will be referenced in the ToC of the generated
          document. This is also the operationId used in the resulting openapi
          file. It needs to be uniq in the namesapce (the current schema.js
          file)
        - tags are appended to the current Schema attached to the file
        '''
        self._raw_doc = doc
        self._jsdoc = cleanup_jsdocs(doc)

        def store_tag(tag, data):
            # check that there is something to store first
            if not data.strip():
                return

            # remove terminating whitespaces and empty lines
            data = data.rstrip()

            # parameters are handled specially
            if tag == 'param':
                if 'params' not in self._doc:
                    self._doc['params'] = {}
                params = self._doc['params']
                param_type = None
                # BUGFIX: the original `name, desc = data.split(maxsplit=1)`
                # left `name` unbound when the tag had a single token, which
                # raised NameError a few lines below; split defensively.
                parts = data.split(maxsplit=1)
                name = parts[0]
                desc = parts[1] if len(parts) > 1 else ''
                if name.startswith('{'):
                    param_type = name.strip('{}')
                    if param_type == 'Object':
                        # hope for the best
                        param_type = 'object'
                    elif param_type not in ['string', 'number', 'boolean', 'integer', 'array', 'file']:
                        self.warn('unknown type {}\n allowed values: string, number, boolean, integer, array, file'.format(param_type))
                    # BUGFIX: same unbound-`name` hazard on the second split;
                    # keep the previous name when no parameter name follows
                    # the type.
                    parts = desc.split(maxsplit=1)
                    name = parts[0] if parts else name
                    desc = parts[1] if len(parts) > 1 else ''
                optional = name.startswith('[') and name.endswith(']')
                if optional:
                    name = name[1:-1]

                # we should not have 2 identical parameter names
                # BUGFIX: was `if tag in params`, which compared the literal
                # string 'param' instead of the parameter name.
                if name in params:
                    self.warn('overwriting parameter {}'.format(name))
                params[name] = (param_type, optional, desc)

                if name.endswith('Id'):
                    # we strip out the 'Id' from the form parameters, we need
                    # to keep the actual description around
                    name = name[:-2]
                    if name not in params:
                        params[name] = (param_type, optional, desc)
                return

            # 'tag' can be set several times
            if tag == 'tag':
                if tag not in self._doc:
                    self._doc[tag] = []
                self._doc[tag].append(data)
                return

            # 'return' tag is json
            if tag == 'return_type':
                try:
                    data = load_return_type_jsdoc_json(data)
                except json.decoder.JSONDecodeError:
                    pass

            # we should not have 2 identical tags but @param or @tag
            if tag in self._doc:
                self.warn('overwriting tag {}'.format(tag))
            self._doc[tag] = data

        # reset the current doc fields
        self._doc = {}

        # first item is supposed to be the description
        current_tag = 'description'
        current_data = ''
        for line in self._jsdoc:
            if line.lstrip().startswith('@'):
                # BUGFIX: unpacking `tag, data = ...split(maxsplit=1)` raised
                # ValueError for a tag with no payload (e.g. a lone '@tag');
                # split defensively instead.
                parts = line.lstrip().split(maxsplit=1)
                tag = parts[0]
                data = parts[1] if len(parts) > 1 else ''
                if tag in ['@operation', '@summary', '@description', '@param', '@return_type', '@tag']:
                    # store the current data
                    store_tag(current_tag, current_data)
                    current_tag = tag.lstrip('@')
                    current_data = ''
                    line = data
                else:
                    self.info('Unknown tag {}, ignoring'.format(tag))
            current_data += line + '\n'
        store_tag(current_tag, current_data)

    @property
    def summary(self):
        if 'summary' in self._doc:
            # new lines are not allowed
            return self._doc['summary'].replace('\n', ' ')
        return None

    def doc_param(self, name):
        """Return the (type, optional, description) triple for `name`, or Nones."""
        if 'params' in self._doc and name in self._doc['params']:
            return self._doc['params'][name]
        return None, None, None

    def print_openapi_param(self, name, indent):
        """Emit the description/type/required YAML keys for one parameter."""
        ptype, poptional, pdesc = self.doc_param(name)
        if pdesc is not None:
            print('{}description: |'.format(' ' * indent))
            print('{}{}'.format(' ' * (indent + 2), pdesc))
        else:
            print('{}description: the {} value'.format(' ' * indent, name))
        if ptype is not None:
            print('{}type: {}'.format(' ' * indent, ptype))
        else:
            print('{}type: string'.format(' ' * indent))
        if poptional:
            print('{}required: false'.format(' ' * indent))
        else:
            print('{}required: true'.format(' ' * indent))

    @property
    def operationId(self):
        if 'operation' in self._doc:
            return self._doc['operation']
        return '{}_{}'.format(self.method_name, self.reduced_function_name)

    @property
    def description(self):
        if 'description' in self._doc:
            return self._doc['description']
        return None

    @property
    def returns(self):
        if 'return_type' in self._doc:
            return self._doc['return_type']
        return None

    @property
    def tags(self):
        tags = []
        if self.schema.fields is not None:
            tags.append(self.schema.name)
        if 'tag' in self._doc:
            tags.extend(self._doc['tag'])
        return tags

    def print_openapi_return(self, obj, indent):
        """Recursively emit the YAML for a @return_type structure."""
        if isinstance(obj, dict):
            print('{}type: object'.format(' ' * indent))
            print('{}properties:'.format(' ' * indent))
            for k, v in obj.items():
                print('{}{}:'.format(' ' * (indent + 2), k))
                self.print_openapi_return(v, indent + 4)
        elif isinstance(obj, list):
            if len(obj) > 1:
                self.error('Error while parsing @return tag, an array should have only one type')
            print('{}type: array'.format(' ' * indent))
            print('{}items:'.format(' ' * indent))
            self.print_openapi_return(obj[0], indent + 2)
        elif isinstance(obj, str):
            # BUGFIX: dropped `or isinstance(obj, unicode)` — `unicode` does
            # not exist on Python 3 and raised NameError here.
            rtype = 'type: ' + obj
            if obj == self.schema.name:
                rtype = '$ref: "#/definitions/{}"'.format(obj)
            print('{}{}'.format(' ' * indent, rtype))

    def print_openapi(self):
        """Emit the full OpenAPI YAML fragment for this entry point on stdout."""
        parameters = [token[1:-2] if token.endswith('Id') else token[1:]
                      for token in self.path.split('/')
                      if token.startswith(':')]

        print('    {}:'.format(self.method_name))
        print('      operationId: {}'.format(self.operationId))

        if self.summary is not None:
            print('      summary: {}'.format(self.summary))

        if self.description is not None:
            print('      description: |')
            for line in self.description.split('\n'):
                if line.strip():
                    print('        {}'.format(line))
                else:
                    print('')

        if len(self.tags) > 0:
            print('      tags:')
            for tag in self.tags:
                print('        - {}'.format(tag))

        # export the parameters
        if self.method_name in ('post', 'put'):
            print('''      consumes:
        - multipart/form-data
        - application/json''')
        if len(parameters) > 0 or self.method_name in ('post', 'put'):
            print('      parameters:')
            if self.method_name in ('post', 'put'):
                for f in self.body_params:
                    print('''        - name: {}
          in: formData'''.format(f))
                    self.print_openapi_param(f, 10)
            for p in parameters:
                if p in self.body_params:
                    self.error(' '.join((p, self.path, self.method_name)))
                print('''        - name: {}
          in: path'''.format(p))
                self.print_openapi_param(p, 10)
        print('''      produces:
        - application/json
      security:
        - UserSecurity: []
      responses:
        '200':
          description: |-
            200 response''')
        if self.returns is not None:
            print('          schema:')
            self.print_openapi_return(self.returns, 12)
class SchemaProperty(object):
    """One field of a Meteor SimpleSchema, extracted from the esprima AST.

    Reads the field's ``type``, ``allowedValues`` (enum), ``blackbox`` and
    ``optional`` options from the JS object-literal property node so the
    field can later be emitted as an OpenAPI property by ``print_openapi``.
    """
    def __init__(self, statement, schema, context):
        # statement: esprima Property node of the schema field
        # schema: owning Schemas instance
        # context: Context of the file being parsed (for error reporting
        # and for resolving identifiers/imports)
        self.schema = schema
        self.statement = statement
        self.name = statement.key.name or statement.key.value
        self.type = 'object'
        self.blackbox = False
        self.required = True
        # cache of Context objects per imported file path, so each import
        # source is parsed at most once while resolving enum identifiers
        imports = {}
        for p in statement.value.properties:
            try:
                if p.key.name == 'type':
                    if p.value.type == 'Identifier':
                        self.type = p.value.name.lower()
                    elif p.value.type == 'ArrayExpression':
                        # NOTE: self.elements is only set for array fields
                        self.type = 'array'
                        self.elements = [e.name.lower() for e in p.value.elements]
                elif p.key.name == 'allowedValues':
                    self.type = 'enum'
                    self.enum = []
                    def parse_enum(value, enum):
                        # Recursively flatten the allowedValues expression
                        # into a list of lowercase literal strings.
                        if value.type == 'ArrayExpression':
                            for e in value.elements:
                                parse_enum(e, enum)
                        elif value.type == 'Literal':
                            enum.append(value.value.lower())
                            return
                        elif value.type == 'Identifier':
                            # tree wide lookout for the identifier
                            def find_variable(elem, match):
                                # Depth-first search of the AST for the
                                # declaration (or import) named *match*.
                                if isinstance(elem, list):
                                    for value in elem:
                                        ret = find_variable(value, match)
                                        if ret is not None:
                                            return ret
                                try:
                                    items = elem.items()
                                except AttributeError:
                                    return None
                                except TypeError:
                                    return None
                                if (elem.type == 'VariableDeclarator' and
                                    elem.id.name == match):
                                    return elem
                                elif (elem.type == 'ImportSpecifier' and
                                      elem.local.name == match):
                                    # we have to treat that case in the caller, because we lack
                                    # information of the source of the import at that point
                                    return elem
                                elif (elem.type == 'ExportNamedDeclaration' and
                                      elem.declaration.type == 'VariableDeclaration'):
                                    ret = find_variable(elem.declaration.declarations, match)
                                    if ret is not None:
                                        return ret
                                for type, value in items:
                                    ret = find_variable(value, match)
                                    if ret is not None:
                                        if ret.type == 'ImportSpecifier':
                                            # first open and read the import source, if
                                            # we haven't already done so
                                            path = elem.source.value
                                            if elem.source.value.startswith('/'):
                                                script_dir = os.path.dirname(os.path.realpath(__file__))
                                                path = os.path.abspath(os.path.join('{}/..'.format(script_dir), elem.source.value.lstrip('/')))
                                            else:
                                                path = os.path.abspath(os.path.join(os.path.dirname(context.path), elem.source.value))
                                            path += '.js'
                                            if path not in imports:
                                                imported_context = parse_file(path)
                                                imports[path] = imported_context
                                            imported_context = imports[path]
                                            # and then re-run the find in the imported file
                                            return find_variable(imported_context.program.body, match)
                                        return ret
                                return None
                            elem = find_variable(context.program.body, value.name)
                            if elem is None:
                                raise TypeError('can not find "{}"'.format(value.name))
                            return parse_enum(elem.init, enum)
                    parse_enum(p.value, self.enum)
                elif p.key.name == 'blackbox':
                    self.blackbox = True
                elif p.key.name == 'optional' and p.value.value:
                    self.required = False
            except Exception:
                # On any parse error, show the offending source lines with
                # err_context lines of surrounding context and abort.
                # NOTE(review): err_context and logger are module globals
                # defined outside this chunk — assumed configured elsewhere.
                input = ''
                for line in range(p.loc.start.line - err_context, p.loc.end.line + 1 + err_context):
                    if line < p.loc.start.line or line > p.loc.end.line:
                        input += '.  '
                    else:
                        input += '>>'
                    input += context.text_at(line, line)
                input = ''.join(input)
                logger.error('{}:{}-{} can not parse {}:\n{}'.format(context.path,
                                                                     p.loc.start.line,
                                                                     p.loc.end.line,
                                                                     p.type,
                                                                     input))
                logger.error('esprima tree:\n{}'.format(p))
                logger.error(traceback.format_exc())
                sys.exit(1)
        self._doc = None
        self._raw_doc = None
    @property
    def doc(self):
        # cleaned-up JSDoc lines for this field, or None
        return self._doc
    @doc.setter
    def doc(self, jsdoc):
        self._raw_doc = jsdoc
        self._doc = cleanup_jsdocs(jsdoc)
    def process_jsdocs(self, jsdocs):
        """Claim the JSDoc block that starts right below this field's key."""
        start = self.statement.key.loc.start.line
        for index, doc in enumerate(jsdocs):
            if start + 1 == doc.loc.start.line:
                self.doc = doc
                jsdocs.pop(index)
                return
    def __repr__(self):
        return 'SchemaProperty({}{}, {})'.format(self.name,
                                                 '*' if self.required else '',
                                                 self.doc)
    def print_openapi(self, indent, current_schema, required_properties):
        """Print this field as an OpenAPI property at *indent* spaces.

        Returns the (possibly new) current schema name so the caller can
        track when a sub-schema header has to be emitted.
        """
        schema_name = self.schema.name
        name = self.name
        # deal with subschemas
        if '.' in name:
            if name.endswith('$'):
                # reference in reference
                subschema = ''.join([n.capitalize() for n in self.name.split('.')[:-1]])
                subschema = self.schema.name + subschema
                if current_schema != subschema:
                    if required_properties is not None and required_properties:
                        print('      required:')
                        for f in required_properties:
                            print('        - {}'.format(f))
                        required_properties.clear()
                    print('''    {}:
      type: object'''.format(subschema))
                return current_schema
            subschema = name.split('.')[0]
            schema_name = self.schema.name + subschema.capitalize()
            name = name.split('.')[-1]
            if current_schema != schema_name:
                if required_properties is not None and required_properties:
                    print('      required:')
                    for f in required_properties:
                        print('        - {}'.format(f))
                    required_properties.clear()
                print('''    {}:
      type: object
      properties:'''.format(schema_name))
        if required_properties is not None and self.required:
            required_properties.append(name)
        print('{}{}:'.format(' ' * indent, name))
        if self.doc is not None:
            print('{}  description: |'.format(' ' * indent))
            for line in self.doc:
                if line.strip():
                    print('{}    {}'.format(' ' * indent, line))
                else:
                    print('')
        # enums and dates are emitted as plain strings in OpenAPI 2.0
        ptype = self.type
        if ptype in ('enum', 'date'):
            ptype = 'string'
        if ptype != 'object':
            print('{}  type: {}'.format(' ' * indent, ptype))
        if self.type == 'array':
            print('{}  items:'.format(' ' * indent))
            for elem in self.elements:
                if elem == 'object':
                    print('{}    $ref: "#/definitions/{}"'.format(' ' * indent, schema_name + name.capitalize()))
                else:
                    print('{}    type: {}'.format(' ' * indent, elem))
            if not self.required:
                print('{}  x-nullable: true'.format(' ' * indent))
        elif self.type == 'object':
            if self.blackbox:
                print('{}  type: object'.format(' ' * indent))
            else:
                print('{}  $ref: "#/definitions/{}"'.format(' ' * indent, schema_name + name.capitalize()))
        elif self.type == 'enum':
            print('{}  enum:'.format(' ' * indent))
            for enum in self.enum:
                print('{}    - {}'.format(' ' * indent, enum))
        if '.' not in self.name and not self.required:
            print('{}  x-nullable: true'.format(' ' * indent))
        return schema_name
class Schemas(object):
    """A SimpleSchema attached to a collection, plus its JSDoc comments.

    Built either from an ``X.attachSchema(new SimpleSchema({...}))``
    statement (``data``), or as an empty placeholder carrying only a name
    when a file exposes API entry points without declaring a schema.
    """
    def __init__(self, context, data=None, jsdocs=None, name=None):
        self.name = name
        self._data = data
        self.fields = None
        # set to True elsewhere when an API entry point references this
        # schema; only used schemas are exported (see print_openapi caller)
        self.used = False
        if data is not None:
            if self.name is None:
                # collection name from 'X.attachSchema(...)'
                self.name = data.expression.callee.object.name
            # the object literal passed to 'new SimpleSchema({...})'
            content = data.expression.arguments[0].arguments[0]
            self.fields = [SchemaProperty(p, self, context) for p in content.properties]
        self._doc = None
        self._raw_doc = None
        if jsdocs is not None:
            self.process_jsdocs(jsdocs)
    @property
    def doc(self):
        # the schema-level JSDoc joined to a single line, or None
        if self._doc is None:
            return None
        return ' '.join(self._doc)
    @doc.setter
    def doc(self, jsdoc):
        self._raw_doc = jsdoc
        self._doc = cleanup_jsdocs(jsdoc)
    def process_jsdocs(self, jsdocs):
        """Attach JSDoc blocks: the one right above the schema documents the
        schema itself; blocks inside the schema span document fields."""
        start = self._data.loc.start.line
        end = self._data.loc.end.line
        for doc in jsdocs:
            if doc.loc.end.line + 1 == start:
                self.doc = doc
        docs = [doc
                for doc in jsdocs
                if doc.loc.start.line >= start and doc.loc.end.line <= end]
        for field in self.fields:
            field.process_jsdocs(docs)
    def print_openapi(self):
        """Print this schema (and its sub-schemas) as OpenAPI definitions."""
        # empty schemas are skipped
        if self.fields is None:
            return
        print('  {}:'.format(self.name))
        print('    type: object')
        if self.doc is not None:
            print('    description: {}'.format(self.doc))
        print('    properties:')
        # first print out the object itself
        properties = [field for field in self.fields if '.' not in field.name]
        for prop in properties:
            prop.print_openapi(6, None, None)
        required_properties = [f.name for f in properties if f.required]
        if required_properties:
            print('    required:')
            for f in required_properties:
                print('      - {}'.format(f))
        # then print the references
        current = None
        required_properties = []
        properties = [f for f in self.fields if '.' in f.name and not f.name.endswith('$')]
        for prop in properties:
            current = prop.print_openapi(6, current, required_properties)
        if required_properties:
            print('      required:')
            for f in required_properties:
                print('        - {}'.format(f))
        required_properties = []
        # then print the references in the references
        for prop in [f for f in self.fields if '.' in f.name and f.name.endswith('$')]:
            current = prop.print_openapi(6, current, required_properties)
        if required_properties:
            print('      required:')
            for f in required_properties:
                print('        - {}'.format(f))
class Context(object):
    """Raw text plus the esprima parse tree of a single JS source file."""
    def __init__(self, path):
        self.path = path
        with open(path) as source:
            self._txt = source.readlines()
        self.program = esprima.parseModule(
            ''.join(self._txt),
            options={
                'comment': True,
                'loc': True
            })
    def txt_for(self, statement):
        """Return the source text spanned by *statement*'s location."""
        loc = statement.loc
        return self.text_at(loc.start.line, loc.end.line)
    def text_at(self, begin, end):
        """Return the text between 1-based lines *begin* and *end*, inclusive."""
        selected = self._txt[begin - 1:end]
        return ''.join(selected)
def parse_file(path):
    """Parse *path* with esprima and return a Context, or None on failure.

    Files that cannot be read or parsed (non-JS assets, broken sources)
    are skipped by returning None; callers must check for that.
    """
    try:
        # if the file failed, it's likely it doesn't contain a schema
        return Context(path)
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        # while letting process-control exceptions propagate
        return None
def parse_schemas(schemas_dir):
    """Walk *schemas_dir*, parse every JS file, and collect schemas + APIs.

    Args:
        schemas_dir: directory tree to scan for Meteor model files.

    Returns:
        (schemas, entry_points): dict mapping schema name -> Schemas, and a
        list of EntryPoint objects for every JsonRoutes registration found.
    """
    schemas = {}
    entry_points = []
    for root, dirs, files in os.walk(schemas_dir):
        files.sort()
        for filename in files:
            path = os.path.join(root, filename)
            context = parse_file(path)
            if context is None:
                # BUGFIX: parse_file() returns None for files esprima cannot
                # parse; previously this fell through and crashed below with
                # AttributeError on 'context.program'.
                continue
            program = context.program
            current_schema = None
            # JSDoc blocks start with '*\n'; keep them for later matching
            jsdocs = [c for c in program.comments
                      if c.type == 'Block' and c.value.startswith('*\n')]
            try:
                for statement in program.body:
                    # find the '<ITEM>.attachSchema(new SimpleSchema(<data>)'
                    # those are the schemas
                    if (statement.type == 'ExpressionStatement' and
                            statement.expression.callee is not None and
                            statement.expression.callee.property is not None and
                            statement.expression.callee.property.name == 'attachSchema' and
                            statement.expression.arguments[0].type == 'NewExpression' and
                            statement.expression.arguments[0].callee.name == 'SimpleSchema'):
                        schema = Schemas(context, statement, jsdocs)
                        current_schema = schema.name
                        schemas[current_schema] = schema
                    # find all the 'if (Meteor.isServer) { JsonRoutes.add('
                    # those are the entry points of the API
                    elif (statement.type == 'IfStatement' and
                            statement.test.type == 'MemberExpression' and
                            statement.test.object.name == 'Meteor' and
                            statement.test.property.name == 'isServer'):
                        data = [s.expression.arguments
                                for s in statement.consequent.body
                                if (s.type == 'ExpressionStatement' and
                                    s.expression.type == 'CallExpression' and
                                    s.expression.callee.object.name == 'JsonRoutes')]
                        # we found at least one entry point, keep them
                        if len(data) > 0:
                            if current_schema is None:
                                # schema-less API file: use the filename as
                                # a placeholder schema name
                                current_schema = filename
                                schemas[current_schema] = Schemas(context, name=current_schema)
                            schema_entry_points = [EntryPoint(schemas[current_schema], d)
                                                   for d in data]
                            entry_points.extend(schema_entry_points)
                            end_of_previous_operation = -1
                            # try to match JSDoc to the operations
                            for entry_point in schema_entry_points:
                                operation = entry_point.method  # POST/GET/PUT/DELETE
                                # find all jsdocs that end before the current
                                # operation; the last item in the list is the
                                # one we need
                                jsdoc = [j for j in jsdocs
                                         if j.loc.end.line + 1 <= operation.loc.start.line and
                                         j.loc.start.line > end_of_previous_operation]
                                if jsdoc:
                                    entry_point.doc = jsdoc[-1]
                                end_of_previous_operation = operation.loc.end.line
            except TypeError:
                logger.warning(context.txt_for(statement))
                logger.error('{}:{}-{} can not parse {}'.format(path,
                                                                statement.loc.start.line,
                                                                statement.loc.end.line,
                                                                statement.type))
                raise
    return schemas, entry_points
def generate_openapi(schemas, entry_points, version):
    """Print a complete OpenAPI 2.0 (Swagger) document to stdout.

    Emits a fixed header (info, security, and the hand-written
    /users/login and /users/register operations), then every discovered
    API path, then the definitions for every schema that is actually
    referenced by an entry point.

    Args:
        schemas: dict of schema name -> Schemas, from parse_schemas().
        entry_points: list of EntryPoint objects, from parse_schemas().
        version: version string substituted into the document's info block.
    """
    # The header is one literal YAML template; only {0} (the version) is
    # substituted by .format() below.
    print('''swagger: '2.0'
info:
  title: Wekan REST API
  version: {0}
  description: |
    The REST API allows you to control and extend Wekan with ease.
    If you are an end-user and not a dev or a tester, [create an issue](https://github.com/wekan/wekan/issues/new) to request new APIs.
    > All API calls in the documentation are made using `curl`. However, you are free to use Java / Python / PHP / Golang / Ruby / Swift / Objective-C / Rust / Scala / C# or any other programming languages.
    # Production Security Concerns
    When calling a production Wekan server, ensure it is running via HTTPS and has a valid SSL Certificate. The login method requires you to post your username and password in plaintext, which is why we highly suggest only calling the REST login api over HTTPS. Also, few things to note:
    * Only call via HTTPS
    * Implement a timed authorization token expiration strategy
    * Ensure the calling user only has permissions for what they are calling and no more
schemes:
  - http
securityDefinitions:
  UserSecurity:
    type: apiKey
    in: header
    name: Authorization
paths:
  /users/login:
    post:
      operationId: login
      summary: Login with REST API
      consumes:
        - application/x-www-form-urlencoded
        - application/json
      tags:
        - Login
      parameters:
        - name: username
          in: formData
          required: true
          description: |
            Your username
          type: string
        - name: password
          in: formData
          required: true
          description: |
            Your password
          type: string
          format: password
      responses:
        200:
          description: |-
            Successful authentication
          schema:
            items:
              properties:
                id:
                  type: string
                token:
                  type: string
                tokenExpires:
                  type: string
        400:
          description: |
            Error in authentication
          schema:
            items:
              properties:
                error:
                  type: number
                reason:
                  type: string
        default:
          description: |
            Error in authentication
  /users/register:
    post:
      operationId: register
      summary: Register with REST API
      description: |
        Notes:
        - You will need to provide the token for any of the authenticated methods.
      consumes:
        - application/x-www-form-urlencoded
        - application/json
      tags:
        - Login
      parameters:
        - name: username
          in: formData
          required: true
          description: |
            Your username
          type: string
        - name: password
          in: formData
          required: true
          description: |
            Your password
          type: string
          format: password
        - name: email
          in: formData
          required: true
          description: |
            Your email
          type: string
      responses:
        200:
          description: |-
            Successful registration
          schema:
            items:
              properties:
                id:
                  type: string
                token:
                  type: string
                tokenExpires:
                  type: string
        400:
          description: |
            Error in registration
          schema:
            items:
              properties:
                error:
                  type: number
                reason:
                  type: string
        default:
          description: |
            Error in registration
'''.format(version))
    # GET and POST on the same path are valid, we need to reshuffle the paths
    # with the path as the sorting key
    methods = {}
    for ep in entry_points:
        if ep.path not in methods:
            methods[ep.path] = []
        methods[ep.path].append(ep)
    sorted_paths = list(methods.keys())
    sorted_paths.sort()
    for path in sorted_paths:
        # all entry points on one path share the same URL
        print('  {}:'.format(methods[path][0].url))
        for ep in methods[path]:
            ep.print_openapi()
    print('definitions:')
    for schema in schemas.values():
        # do not export the objects if there is no API attached
        if not schema.used:
            continue
        schema.print_openapi()
def main():
    """CLI entry point: parse the schemas directory and emit OpenAPI YAML."""
    parser = argparse.ArgumentParser(description='Generate an OpenAPI 2.0 from the given JS schemas.')
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # BUGFIX: 'nargs=1' was dropped. Combined with the plain-string default,
    # 'args.release[0]' evaluated to the letter 'g' of 'git-master' whenever
    # --release was not supplied; now args.release is always a plain string.
    parser.add_argument('--release', default='git-master',
                        help='the current version of the API, can be retrieved by running `git describe --tags --abbrev=0`')
    parser.add_argument('dir', default=os.path.abspath('{}/../models'.format(script_dir)), nargs='?',
                        help='the directory where to look for schemas')
    args = parser.parse_args()
    schemas, entry_points = parse_schemas(args.dir)
    generate_openapi(schemas, entry_points, args.release)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1721306 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements test cases for the WebDAV-specific file system factory.
"""
import unittest
from datafinder.persistence.adapters.sftp import factory
from datafinder.persistence.error import PersistenceError
class ParseDiskFreeOutpoutParserTestCase(unittest.TestCase):
    """Tests for factory._parseDiskFreeCommandOutForAvailableSpace.

    The parser reads `df`-style output and returns the available space of
    the LAST device line, converted from 1K blocks to bytes.
    (The 'Outpout' typo in the class name is kept: renaming would break
    test selection by name.)
    """
    def testExpectedDefaultCase(self):
        """A single device line yields its 'Available' column in bytes."""
        dfOut = (
            "Filesystem 1K-blocks Used Available Use% Mounted on\n"
            "/dev/sdb1 103079200 245600 97590824 1% /home\n")
        availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual behaves identically
        self.assertEqual(availableSpace, 99933003776)
    def testMultipleDevices(self):
        """With several devices, the last line's 'Available' value wins."""
        dfOut = (
            "Filesystem 1K-blocks Used Available Use% Mounted on\n"
            "/dev/sdb1 103079200 245600 200 1% /home\n"
            "/dev/sdc1 103079200 245600 1000 1% /home\n")
        availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
        self.assertEqual(availableSpace, 204800)
    def testInvalidFormat(self):
        """Garbage input raises PersistenceError."""
        dfOut = "INVALID"
        self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
    def testInsufficientColumns(self):
        """A device line with too few columns raises PersistenceError."""
        dfOut = (
            "Filesystem 1K-blocks Used Available Use% Mounted on\n"
            "/dev/sdb1 103079200\n")
        self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
    def testNotANumber(self):
        """A non-numeric 'Available' column raises PersistenceError."""
        dfOut = (
            "Filesystem 1K-blocks Used Available Use% Mounted on\n"
            "/dev/sdb1 103079200 245600 NOTANUMBER 1% /home\n")
        self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
    def testLargeValue(self):
        """Values beyond 64-bit range are handled (Python ints are unbounded)."""
        dfOut = (
            "Filesystem 1K-blocks Used Available Use% Mounted on\n"
            "/dev/sdb1 103079200 245600 975908240000000000000000 1% /home\n")
        availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
        self.assertEqual(availableSpace, 999330037760000000000000000)
| StarcoderdataPython |
3296476 | <reponame>eslickj/idaes-pse
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
try:
from pyomo.core.expr.sympy_tools import (
_prod, _sum, _functionMap, _operatorMap, _pyomo_operator_map)
except ImportError:
from pyomo.core.base.symbolic import (
_prod, _sum, _functionMap, _operatorMap, _pyomo_operator_map)
from pyomo.environ import ExternalFunction, Var, Expression, value
from pyomo.core.base.constraint import _ConstraintData, Constraint
from pyomo.core.base.expression import _ExpressionData
from pyomo.core.base.block import _BlockData
from pyomo.core.expr.visitor import StreamBasedExpressionVisitor
from pyomo.core.expr.numeric_expr import (ExternalFunctionExpression,
ExpressionBase)
from pyomo.core.expr import current as EXPR, native_types
from pyomo.common.collections import ComponentMap
import sympy
from IPython.display import display, Markdown
import re
#TODO<jce> Look into things like sum operator and template expressions
def _add_latex_subscripts(x, s):
if not "_" in x:
return "{}_{{ {} }}".format(x, s)
else:
return re.sub(r"^(.+_)({.+}|.)", "\\1{{ \\2 ,{} }}".format(s), x)
def deduplicate_symbol(x, v, used):
    """Make LaTeX symbol *x* unique among the symbols already in *used*.

    Args:
        x: candidate LaTeX symbol string
        v: the (Pyomo) object the symbol stands for; identity-compared
        used: dict mapping symbol string -> object already using it

    Returns:
        *x* itself when free (or already owned by *v*); otherwise *x* with
        an incrementing 'D<i>' subscript appended until a free symbol is
        found. The chosen symbol is recorded in *used*.
    """
    candidate = x
    suffix = 1
    while candidate in used and id(used[candidate]) != id(v):
        candidate = _add_latex_subscripts(x, "D{}".format(suffix))
        suffix += 1
        if suffix > 1000:
            # safety valve: symbol not updating or absurdly many duplicates
            break
    used[candidate] = v
    return candidate
class PyomoSympyBimap(object):
    """
    This is based on the class of the same name in pyomo.core.base.symbolic, but
    it adds mapping latex symbols to the sympy symbols. This will get you pretty
    equations when using sympy's LaTeX writer.
    """
    def __init__(self):
        # pyomo object -> sympy symbol (ComponentMap compares by identity)
        self.pyomo2sympy = ComponentMap()
        # indexed parent component -> (base sympy name, base latex symbol),
        # so all members of one indexed component share a base symbol
        self.parent_symbol = ComponentMap()
        # sympy symbol -> pyomo object
        self.sympy2pyomo = {}
        # sympy symbol -> latex string (for sympy.latex symbol_names=)
        self.sympy2latex = {}
        # latex symbol -> pyomo object, used to deduplicate latex symbols
        self.used = {}
        # per-kind counters for generated names x_i / u_i / func_i
        self.i_var = 0
        self.i_expr = 0
        self.i_func = 0
        self.i = 0
    def getPyomoSymbol(self, sympy_object, default=None):
        """Return the pyomo object mapped to *sympy_object*, or *default*."""
        return self.sympy2pyomo.get(sympy_object, default)
    def getSympySymbol(self, pyomo_object):
        """Return (creating on first use) the sympy symbol for *pyomo_object*."""
        if pyomo_object in self.pyomo2sympy:
            return self.pyomo2sympy[pyomo_object]
        else:
            return self._add_sympy(pyomo_object)
    def sympyVars(self):
        """Iterate over all sympy symbols created so far."""
        return self.sympy2pyomo.keys()
    def _add_sympy(self, pyomo_object):
        """Create and register a sympy symbol (and latex name) for *pyomo_object*.

        Vars become x_i symbols, named Expressions u_i symbols, and
        ExternalFunctions become sympy Functions func_i. A 'latex_symbol'
        attribute on the parent component, if present, overrides the
        generated latex name (deduplicated via deduplicate_symbol).
        """
        parent_object = pyomo_object.parent_component()
        if isinstance(parent_object, Var):
            sympy_class = sympy.Symbol
            base_name = "x"
            i = self.i_var
            self.i_var += 1
        elif isinstance(parent_object, Expression):
            sympy_class = sympy.Symbol
            base_name = "u"
            i = self.i_expr
            self.i_expr += 1
        elif isinstance(parent_object, ExternalFunction):
            sympy_class = sympy.Function
            base_name = "func"
            i = self.i_func
            self.i_func += 1
        else:
            raise Exception("Should be Var, Exression, or ExternalFunction")
        if parent_object.is_indexed() and parent_object in self.parent_symbol:
            # reuse the base symbol already assigned to this indexed parent
            x = self.parent_symbol[parent_object][0]
            latex_symbol = self.parent_symbol[parent_object][1]
        else:
            x = "{}_{}".format(base_name, i)
            base_latex = getattr(parent_object, "latex_symbol", None)
            if base_latex is not None:
                latex_symbol = deduplicate_symbol(base_latex, parent_object,
                                                  self.used)
            else:
                latex_symbol = x
        if parent_object.is_indexed():
            if parent_object not in self.parent_symbol:
                self.parent_symbol[parent_object] = (x, latex_symbol)
            # append the member's index to both the sympy name and the
            # latex subscript; tuple indexes are joined element-wise
            idx = pyomo_object.index()
            idxl = idx
            if isinstance(idx, tuple):
                idxl = ",".join(idx)
                idx = "|".join(idx)
            x = "{}[{}]".format(x, idx)
            latex_symbol = _add_latex_subscripts(latex_symbol, idxl)
        sympy_obj = sympy_class(x)
        self.pyomo2sympy[pyomo_object] = sympy_obj
        self.sympy2pyomo[sympy_obj] = pyomo_object
        self.sympy2latex[sympy_obj] = latex_symbol
        return sympy_obj
class Pyomo2SympyVisitor(StreamBasedExpressionVisitor):
    """
    This is based on the class of the same name in pyomo.core.base.symbolic, but
    it catches ExternalFunctions and does not decend into named expressions.
    """
    def __init__(self, object_map):
        # object_map: PyomoSympyBimap shared with the caller, collects the
        # pyomo<->sympy symbol mapping as the walk proceeds
        super(Pyomo2SympyVisitor, self).__init__()
        self.object_map = object_map
    def exitNode(self, node, values):
        """Convert *node* to sympy, given already-converted child *values*."""
        if isinstance(node, ExternalFunctionExpression):
            # catch ExternalFunction
            _op = self.object_map.getSympySymbol(node._fcn)
        else:
            if node.__class__ is EXPR.UnaryFunctionExpression:
                # sin/cos/exp/...: look up the matching sympy function
                return _functionMap[node._name](values[0])
            _op = _pyomo_operator_map.get(node.__class__, None)
        if _op is None:
            # no sympy operator known: let pyomo evaluate the node
            return node._apply_operation(values)
        else:
            return _op(*tuple(values))
    def beforeChild(self, node, child):
        """Decide whether to descend into *child* or replace it in place.

        Returns (descend_flag, replacement): named expressions and
        variables are replaced by sympy symbols, constants by their value.
        """
        # Don't replace native or sympy types
        if type(child) in native_types:
            return False, child
        # We will not descend into named expressions...
        if child.is_expression_type():
            if child.is_named_expression_type():
                # To keep expressions from becoming too crazy complicated
                # treat names expressions like variables.
                return False, self.object_map.getSympySymbol(child)
            else:
                return True, None
        # Replace pyomo variables with sympy variables
        if child.is_potentially_variable():
            return False, self.object_map.getSympySymbol(child)
        # Everything else is a constant...
        return False, value(child)
def sympify_expression(expr):
    """
    Converts Pyomo expressions to sympy expressions.

    This is based on the function of the same name in pyomo.core.base.symbolic.
    The difference between this and the Pyomo one is that this one checks if the
    expr argument is a named expression and expands it anyway. This allows named
    expressions to only be expanded if they are the top level object.

    Returns:
        (object_map, sympy_expr): the PyomoSympyBimap built during the walk
        and the resulting sympy expression (or replacement symbol/constant).
    """
    #
    # Create the visitor and call it.
    #
    object_map = PyomoSympyBimap()
    visitor = Pyomo2SympyVisitor(object_map)
    is_expr, ans = visitor.beforeChild(None, expr)
    try:  # If I explicitly ask for a named expression then descend into it.
        if expr.is_named_expression_type():
            is_expr = True
    except AttributeError:
        # non-Pyomo operands (e.g. plain numbers) lack the method; treat
        # them as non-named expressions (was a bare 'except', which also
        # hid unrelated errors)
        pass
    if not is_expr:
        return object_map, ans
    return object_map, visitor.walk_expression(expr)
def _add_docs(object_map, docs, typ, head):
"""
Adds documentation for a set of pyomo components to a markdown table
Args:
object_map (PyomoSympyBimap): Pyomo, sympy, LaTeX mapping
docs: string containing a mardown table
typ: the class of objects to document (Var, Expression, ExternalFunction)
head: a string to used in the sybol table heading for this class of objects
Returns:
A new string markdown table with added doc rows.
"""
docked = set() # components already documented, mainly for indexed compoents
whead = True # write heading before adding first item
for i, sc in enumerate(object_map.sympyVars()):
c = object_map.getPyomoSymbol(sc)
c = c.parent_component() # Document the parent for indexed comps
if not isinstance(c, typ): continue
if whead: # add heading if is first entry in this section
docs += "**{}** | **Doc** | **Path**\n".format(head)
whead = False
if id(c) not in docked:
docked.add(id(c)) # make sure we don't get a line for every index
try: #just document the parent of indexed vars
s = object_map.parent_symbol[c][1]
except KeyError: # non-indexed vars
s = object_map.sympy2latex[sc]
docs += "${}$|{}|{}\n".format(s, c.doc, c)
return docs
def to_latex(expr):
    """Return a sympy expression for the given Pyomo expression

    Args:
        expr (Expression): Pyomo expression

    Returns:
        (dict): keys: sympy_expr, a sympy expression; where, markdown string
            with documentation table; latex_expr, a LaTeX string representation
            of the expression.
    """
    object_map, sympy_expr = sympify_expression(expr)
    # This next bit documents the expression, could use a lot of work, but
    # for now it generates markdown tables that are resonably readable in a
    # jupyter notebook.
    docs = "\nSymbol | Doc | Path\n ---: | --- | ---\n"
    docs = _add_docs(object_map, docs, Var, "Variable")
    docs = _add_docs(object_map, docs, Expression, "Expression")
    docs = _add_docs(object_map, docs, ExternalFunction, "Function")
    # probably should break this up, but this will do for now.
    # symbol_names maps each sympy symbol to its pretty LaTeX name
    return {"sympy_expr": sympy_expr,
            "where":docs,
            "latex_expr":sympy.latex(sympy_expr, symbol_names=object_map.sympy2latex)}
def document_constraints(comp, doc=True, descend_into=True):
    """
    Provides nicely formatted constraint documentation in markdown format,
    assuming the $$latex math$$ and $latex math$ syntax is supported.

    Args:
        comp: A Pyomo component to document in {_ConstraintData,
            _ExpressionData, _BlockData}.
        doc: True adds a documentation table for each constraint or
            expression. Due to the way symbols are semi-automatically
            generated, the exact symbol definitions may be unique to each
            constraint or expression, if unique LaTeX symbols were not
            provided everywhere in a block.
        descend_into: If True, look in subblocks for constraints.

    Returns:
        A string in markdown format with equations in LaTeX form, or None
        when comp is not one of the supported component types.
    """
    # NOTE: "\\le" is escaped explicitly below; the original "\le" relied
    # on Python passing unknown escapes through, which raises invalid
    # escape-sequence warnings (and eventually errors) on modern CPython.
    s = None
    if isinstance(comp, _ExpressionData):
        d = to_latex(comp)
        if doc:
            s = "$${}$$\n{}".format(d["latex_expr"], d["where"])
        else:
            s = "$${}$$".format(d["latex_expr"])
    elif isinstance(comp, _ConstraintData):
        d = to_latex(comp.body)
        if doc:
            s = "$${} \\le {}\\le {}$$\n{}".format(
                comp.lower, d["latex_expr"], comp.upper, d["where"])
        else:
            s = "$${} \\le {}\\le {}$$".format(
                comp.lower, d["latex_expr"], comp.upper)
    elif isinstance(comp, _BlockData):
        cs = []
        for c in comp.component_data_objects(
                Constraint, descend_into=descend_into):
            if not c.active:
                continue
            cs.append("## {}".format(c))
            cs.append(document_constraints(c, doc))
        s = "\n".join(cs)
    return s
def ipython_document_constraints(comp, doc=True, descend_into=True):
    """
    See document_constraints; this renders the markdown directly in the
    current IPython/Jupyter session instead of returning a string.
    """
    display(Markdown(document_constraints(comp, doc, descend_into)))
| StarcoderdataPython |
71658 | # -*- coding: utf-8 -*-
"""
Created on Sat May 1 18:26:28 2021
@author: abhay
version: 0.0.1
"""
import os,argparse
# Mutable module-level state shared by stackUpdate/buildPath/keepRec/datas.
stack = []
'''
Stack is Used as a directory(folder/path) pointer due to its property of LIFO
It's last element is current directory Current Directory
'''
# Current absolute path, rebuilt from `path` + `stack` by buildPath().
ipath = ''
'''
This is the path made every time when we move from
directory to directory
Maked by using Stack
'''
# Optional record: directory name -> list of entry names (see keepRec()).
unordereddat = {}
'''
Data added when a directory is found
eg.{'MYDIR':[file1,dir2,file2],'dir2':[file5,file6]}
Note:Uncomment KeepRec if you want to use.
'''
def stackUpdate(dirr,operation):
    '''
    Push ('a') or pop ('p') one directory name on the global stack.
    The stack tracks the directory we are currently walking; popping on
    an empty stack is a no-op.
    Note:Whenever do stackUpdate always do buildUpdate
    '''
    global stack
    if operation == 'a':
        stack.append(dirr)
    elif operation == 'p' and stack:
        stack.pop()
def buildPath():
    '''
    Rebuild the global ipath from the base path plus every directory
    currently on the stack, in order (empty stack -> ipath == path).
    Note:Whenever do stackUpdate always do buildUpdate
    '''
    global stack,ipath,path
    ipath = path
    for folder in stack:
        ipath = ipath + folder + '/'
def keepRec():#Not required (Optional)
    '''
    Keep a record of every entry in the current directory.
    It must be called (see the commented calls in datas) to do so.
    Fills unordereddat: key = folder name, value = [file1, file2, ...].
    '''
    global unordereddat,ipath,stack
    with os.scandir(ipath) as ent:
        files = [entry.name for entry in ent]
    try:
        dirc = stack[-1]
    except IndexError:
        # for the first call the stack is still empty: record under the
        # root path (was a bare 'except', which hid unrelated errors)
        dirc = path
    unordereddat[dirc] = files
def _emit(name):
    '''
    Print or persist one directory entry, honoring the CLI flags:
    wp  -> full path; nwp -> name only, indented by depth.
    ps 'p' prints to stdout, 's' writes to the open output file f.
    '''
    if wp == "wp":
        text = ipath + name + '\n'
    else:
        text = len(stack) * '\t' + '-' + name
    if ps == "p":
        print(text)
    else:
        # BUGFIX: the original store/nwp branch wrote no newline, so all
        # names ran together on a single line in Output.txt
        if not text.endswith('\n'):
            text += '\n'
        f.write(text)
def datas():
    '''
    Recursively list files and directories depth-first, emitting each
    entry via _emit() and descending into subdirectories using the
    global stack/ipath bookkeeping.
    '''
    global ps,wp,f
    buildPath()
    #keepRec()
    with os.scandir(ipath) as ent:
        for entry in ent:
            full = ipath + entry.name
            if os.path.isfile(full):
                _emit(entry.name)
            elif os.path.isdir(full):
                _emit(entry.name)
                # descend: push the directory, rebuild the path, recurse,
                # then pop and rebuild on the way back up
                stackUpdate(entry.name,'a')
                buildPath()
                #keepRec()
                datas()
                stackUpdate(ipath, 'p')
                buildPath()
# Command-line interface: positional mode flags drive the globals that
# stackUpdate/buildPath/datas read.
parser = argparse.ArgumentParser()
parser.add_argument("Path", help = "Enter Main Path (path of directory inside which you want list)")
parser.add_argument("Print_Store", help = "Print or Store Output", choices = ["p","s"])
parser.add_argument("With_Without_Path", help = "What you want output with full path or without path(Only Name) ?", choices = ["wp","nwp"])
args = parser.parse_args()
path = args.Path
ps = args.Print_Store
wp = args.With_Without_Path
# In store mode the global file handle f is written to by datas()
if ps == "s":
    f = open('Output.txt','w')
datas()
if ps == "s":
    f.close()
#print(unordereddat)#Gives a dictionary with key = Folder name, values =[file1,file2,file3]
#Note: Uncomment keepRec above in code frist
| StarcoderdataPython |
1673972 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob
from quantum.api import extensions
from quantum.api.v2.attributes import convert_to_int
from quantum.api.v2 import base
from quantum.api.v2 import resource
from quantum.common import exceptions as q_exc
from quantum.manager import QuantumManager
from quantum.openstack.common import importutils
from quantum import quota
from quantum import wsgi
# API resource names for the quota extension ('quota' / 'quotas')
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
# import path of the DB-backed quota driver
DB_QUOTA_DRIVER = 'quantum.db.quota_db.DbQuotaDriver'
# filled lazily per quota resource by QuotaSetsController._update_attributes
EXTENDED_ATTRIBUTES_2_0 = {
    RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
    """WSGI controller exposing per-tenant quota show/update/delete operations."""

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin
        # The quota driver is configurable; the DB driver enables per-tenant quotas.
        self._driver = importutils.import_class(cfg.CONF.QUOTAS.quota_driver)
        # Extended attributes are populated lazily on the first update() call.
        self._update_extended_attributes = True

    def _update_attributes(self):
        """Register one writable, visible extended attribute per quota resource."""
        # Hoisted out of the loop: the target dict is loop-invariant.
        attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
        # .keys() instead of the Python-2-only .iterkeys()
        for quota_resource in QUOTAS.resources.keys():
            attr_dict[quota_resource] = {'allow_post': False,
                                         'allow_put': True,
                                         'convert_to': convert_to_int,
                                         'is_visible': True}
        self._update_extended_attributes = False

    def _get_quotas(self, request, tenant_id):
        """Return the quota limits of a single tenant."""
        return self._driver.get_tenant_quotas(
            request.context, QUOTAS.resources, tenant_id)

    def create(self, request, body=None):
        """Quotas cannot be created, only updated; always answer 501."""
        raise webob.exc.HTTPNotImplemented()

    def index(self, request):
        """List the quotas of every tenant (admin only)."""
        context = request.context
        self._check_admin(context)
        return {self._resource_name + "s":
                self._driver.get_all_quotas(context, QUOTAS.resources)}

    def tenant(self, request):
        """Retrieve the tenant info in context."""
        context = request.context
        if not context.tenant_id:
            raise q_exc.QuotaMissingTenant()
        return {'tenant': {'tenant_id': context.tenant_id}}

    def show(self, request, id):
        """Show a tenant's quotas; non-admins may only view their own."""
        if id != request.context.tenant_id:
            self._check_admin(request.context,
                              reason=_("Non-admin is not authorised "
                                       "to access quotas for another tenant"))
        return {self._resource_name: self._get_quotas(request, id)}

    def _check_admin(self, context,
                     reason=_("Only admin can view or configure quota")):
        """Raise AdminRequired unless the request context has admin rights."""
        if not context.is_admin:
            raise q_exc.AdminRequired(reason=reason)

    def delete(self, request, id):
        """Delete a tenant's quota overrides (admin only)."""
        self._check_admin(request.context)
        self._driver.delete_tenant_quota(request.context, id)

    def update(self, request, id, body=None):
        """Update per-resource quota limits of a tenant (admin only)."""
        self._check_admin(request.context)
        if self._update_extended_attributes:
            self._update_attributes()
        body = base.Controller.prepare_request_body(
            request.context, body, False, self._resource_name,
            EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
        # .items() instead of the Python-2-only .iteritems()
        for key, value in body[self._resource_name].items():
            self._driver.update_quota_limit(request.context, id, key, value)
        return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
    """Quotas management support."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Quota management support"

    @classmethod
    def get_alias(cls):
        """Short alias used in extension URLs."""
        return RESOURCE_COLLECTION

    @classmethod
    def get_description(cls):
        """Describe the extension; the DB driver adds per-tenant quotas."""
        suffix = ' per tenant' if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER else ''
        return 'Expose functions for quotas management' + suffix

    @classmethod
    def get_namespace(cls):
        """XML namespace of the extension."""
        return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the last extension update."""
        return "2012-07-29T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plugin = QuantumManager.get_plugin()
        controller = resource.Resource(QuotaSetsController(plugin),
                                       faults=base.FAULT_MAP)
        extension = extensions.ResourceExtension(
            Quotasv2.get_alias(), controller,
            collection_actions={'tenant': 'GET'})
        return [extension]

    def get_extended_resources(self, version):
        """Expose the quota attributes only for the v2.0 API."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| StarcoderdataPython |
165487 | from sys import path
import os
import subprocess
from shutil import copyfile
from shutil import copytree
from shutil import rmtree
from shutil import move
from os import scandir
from os import remove
from os import path
ROOT_PYTHON_PATH=os.path.dirname(os.path.abspath(__file__))
def splitLicencesYmlInAtomicElements():
    """Split licenses.yml into one <group>_<artifact>.txt file per dependency.

    Each '- artifact:' entry, together with the comment block collected just
    above it, is copied into its own file next to this script. Lines seen
    before the first artifact go into dependencies_hash.py.
    """
    print('Splitting the licences '+ROOT_PYTHON_PATH)
    licencesYmlReferent = open(os.path.join(ROOT_PYTHON_PATH, "licenses.yml"), "r")
    licences_bloc = open(os.path.join(ROOT_PYTHON_PATH, 'dependencies_hash.py'), "w+")
    commentList = ['toto', 'tata']
    parsingComments = False
    for rawLine in licencesYmlReferent:
        if '- artifact:' in rawLine:
            # Close the previous dependency's file before starting a new one.
            # BUG FIX: was `licences_bloc.close` (attribute access, never called),
            # which leaked every file handle.
            if licences_bloc is not None:
                licences_bloc.close()
            # e.g. "- artifact: com.f2prateek.dart:dart-annotations:+"
            # becomes "com.f2prateek.dart_dart-annotations.txt"
            fileName = rawLine[12:-2].replace(':', '_') + '.txt'
            licences_bloc = open(os.path.join(ROOT_PYTHON_PATH, fileName), "w+")
            # Write the comment block associated with this dependency.
            print("In write CmList="+str(commentList))
            for commentLine in commentList:
                licences_bloc.write(commentLine)
            # Write the artifact line itself.
            licences_bloc.write(rawLine)
            parsingComments = False
        elif "#" in rawLine:
            # Collect the comment; it belongs to the next dependency found.
            if not parsingComments:
                commentList = []
                parsingComments = True
            commentList.append(rawLine)
        else:
            # Copy the content into the current output file.
            if licences_bloc is not None:
                licences_bloc.write(rawLine)
    # BUG FIX: the original closed the source file twice and never closed the
    # last output file.
    if licences_bloc is not None:
        licences_bloc.close()
    licencesYmlReferent.close()
# Entry point: run the split when this script is executed.
splitLicencesYmlInAtomicElements()
38196 | import requests
"""
Delete a project version.
If there are no more versions available for a given project, that project will be deleted too.
"""
def delete_version(server, project, version):
    """Delete a project version on the given server.

    If there are no more versions available for a given project, that project
    will be deleted too.

    :param server: host (and optional port) of the service, e.g. "localhost:8000"
    :param project: project name
    :param version: version identifier to delete
    :return: parsed JSON response, or None when the HTTP request fails
    """
    url = "http://{}/delversion.json".format(server)
    data = {
        "project": project,
        "version": version
    }
    with requests.Session() as session:
        try:
            r = session.post(url, data=data)
        except requests.RequestException:
            # Only swallow network/HTTP errors; a bare `except:` also hid
            # KeyboardInterrupt and programming errors.
            return None
    return r.json()
| StarcoderdataPython |
3383091 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import inspect
import pathlib
from typing import Union, IO, AnyStr
import pandas
from . import DataFrame
from modin.config import IsExperimental, Engine
from modin.data_management.factories.dispatcher import EngineDispatcher
from ...pandas import _update_engine
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize=None,
    partition_column=None,
    lower_bound=None,
    upper_bound=None,
    max_sessions=None,
):
    """Read SQL query or database table into a DataFrame (experimental).

    Args:
        sql: string or SQLAlchemy Selectable; SQL query to be executed or a table name.
        con: SQLAlchemy connectable (engine/connection), database string URI or
            DBAPI2 connection (fallback mode).
        con: see above.
        index_col: Column(s) to set as index (MultiIndex).
        coerce_float: Attempt to convert non-string, non-numeric objects
            (like decimal.Decimal) to floating point.
        params: List of parameters to pass to the driver's execute method
            (syntax is driver dependent, see PEP 249 paramstyle).
        parse_dates: columns to parse as dates — a list of names, a dict of
            ``{column_name: format string}``, or a dict of
            ``{column_name: arg dict}`` forwarded to :func:`pandas.to_datetime`.
        columns: List of column names to select (only used when reading a table).
        chunksize: If specified, return an iterator where `chunksize` is the
            number of rows per chunk.
        partition_column: INTEGER column used to share the data between workers.
        lower_bound: minimum value requested from the partition_column.
        upper_bound: maximum value requested from the partition_column.
        max_sessions: maximum number of simultaneous connections allowed.

    Returns:
        Pandas Dataframe
    """
    Engine.subscribe(_update_engine)
    assert IsExperimental.get(), "This only works in experimental mode"
    # Forward every argument by name to the dispatcher — same effect as the
    # previous frame-introspection trick, but explicit.
    kwargs = {
        "sql": sql,
        "con": con,
        "index_col": index_col,
        "coerce_float": coerce_float,
        "params": params,
        "parse_dates": parse_dates,
        "columns": columns,
        "chunksize": chunksize,
        "partition_column": partition_column,
        "lower_bound": lower_bound,
        "upper_bound": upper_bound,
        "max_sessions": max_sessions,
    }
    return DataFrame(query_compiler=EngineDispatcher.read_sql(**kwargs))
# CSV and table
def _make_parser_func(sep):
    """
    Create a parser function from the given sep.

    Parameters
    ----------
    sep: str
        The separator default to use for the parser.

    Returns
    -------
    A function object.
    """

    # The signature below mirrors pandas.read_csv so that callers can pass any
    # pandas keyword positionally or by name; unknown extras are filtered out
    # against the installed pandas version before delegation.
    def parser_func(
        filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
        sep=sep,
        delimiter=None,
        header="infer",
        names=None,
        index_col=None,
        usecols=None,
        squeeze=False,
        prefix=None,
        mangle_dupe_cols=True,
        dtype=None,
        engine=None,
        converters=None,
        true_values=None,
        false_values=None,
        skipinitialspace=False,
        skiprows=None,
        nrows=None,
        na_values=None,
        keep_default_na=True,
        na_filter=True,
        verbose=False,
        skip_blank_lines=True,
        parse_dates=False,
        infer_datetime_format=False,
        keep_date_col=False,
        date_parser=None,
        dayfirst=False,
        cache_dates=True,
        iterator=False,
        chunksize=None,
        compression="infer",
        thousands=None,
        decimal: str = ".",
        lineterminator=None,
        quotechar='"',
        quoting=0,
        escapechar=None,
        comment=None,
        encoding=None,
        dialect=None,
        error_bad_lines=True,
        warn_bad_lines=True,
        skipfooter=0,
        doublequote=True,
        delim_whitespace=False,
        low_memory=True,
        memory_map=False,
        float_precision=None,
    ):
        # ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
        _pd_read_csv_signature = {
            val.name for val in inspect.signature(pandas.read_csv).parameters.values()
        }
        _, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
        # sep=False is the sentinel read_table-style callers use to request tabs.
        if f_locals.get("sep", sep) is False:
            f_locals["sep"] = "\t"
        # Keep only the arguments that this pandas version actually understands.
        kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
        return _read(**kwargs)

    return parser_func
def _read(**kwargs):
    """
    Read csv file(s) from local disk using the experimental glob reader.

    Parameters
    ----------
    kwargs: Keyword arguments in pandas.read_csv; must include
        ``filepath_or_buffer`` (a local path, possibly a glob pattern).

    Returns
    -------
    A Modin DataFrame, or a TextFileReader whose ``read`` yields Modin
    DataFrames when ``chunksize``/``iterator`` is requested.
    """
    from modin.data_management.factories.dispatcher import EngineDispatcher

    Engine.subscribe(_update_engine)
    try:
        pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
    except AttributeError as err:
        # Chain the original error so the real cause stays in the traceback.
        raise AttributeError(
            "read_csv_glob() is only implemented for pandas on Ray."
        ) from err
    # `read_csv` may return a TextFileReader for chunked iteration; wrap the
    # chunks it produces so callers receive Modin DataFrames.
    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
        reader = pd_obj.read
        pd_obj.read = lambda *args, **kwargs: DataFrame(
            query_compiler=reader(*args, **kwargs)
        )
        return pd_obj
    return DataFrame(query_compiler=pd_obj)
# Public experimental API: like ``pandas.read_csv`` but the path may be a glob.
read_csv_glob = _make_parser_func(sep=",")
| StarcoderdataPython |
3217941 | <reponame>matthewpipie/vectra_api_tools
import json
import requests
import warnings
import html
import re
warnings.filterwarnings('always', '.*', PendingDeprecationWarning)
class HTTPException(Exception):
    """Raised when the Vectra API answers with an unexpected HTTP status."""

    def __init__(self, response):
        """Build the error message from a requests.Response object.

        The message embeds the status code plus the most specific error detail
        the API body offers; when the body is not JSON (or carries no known
        error key) the raw payload is used instead.
        """
        try:
            payload = response.json()
            if 'detail' in payload:
                detail = payload['detail']
            elif 'errors' in payload:
                detail = payload['errors'][0]['title']
            elif '_meta' in payload:
                detail = payload['_meta']['message']
            else:
                detail = response.content
        except Exception:
            detail = response.content
        super().__init__('Status code: {code} - {detail}'.format(
            code=str(response.status_code), detail=detail))
def request_error_handler(func):
    """Decorator for API methods: pass 200/201/204 responses through, raise
    HTTPException for every other status code."""
    def request_handler(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        if response.status_code not in (200, 201, 204):
            raise HTTPException(response)
        return response
    return request_handler
def validate_api_v2(func):
    """Decorator: allow the call only when the client speaks API v2."""
    def api_validator(self, *args, **kwargs):
        if self.version != 2:
            raise NotImplementedError('Method only accessible via v2 of API')
        return func(self, *args, **kwargs)
    return api_validator
def deprecation(message):
    """Emit *message* as a PendingDeprecationWarning."""
    warnings.warn(message, category=PendingDeprecationWarning)
def param_deprecation(key):
    """Warn that query parameter *key* goes away together with Vectra API v1.

    :param key: name of the deprecated query parameter
    """
    # Typo fix in the user-facing message: "annouced" -> "announced"
    # (the sibling deprecation() call site already spells it correctly).
    message = '{0} will be deprecated with Vectra API v1 which will be announced in an upcoming release'.format(key)
    warnings.warn(message, PendingDeprecationWarning)
class VectraClient(object):
def __init__(self, url=None, token=None, user=None, password=None, verify=False):
"""
Initialize Vectra client
:param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
:param token: API token for authentication when using API v2*
:param user: Username to authenticate to Vectra brain when using API v1*
:param password: Password when using username to authenticate using API v1*
:param verify: Verify SSL (default: False) - optional
*Either token or user are required
"""
self.url = url
self.version = 2 if token else 1
self.verify = verify
url = VectraClient._remove_trailing_slashes(url)
if token:
self.url = '{url}/api/v2'.format(url=url)
self.headers = {
'Authorization': "Token " + token.strip(),
'Content-Type': "application/json",
'Cache-Control': "no-cache"
}
elif user and password:
self.url = '{url}/api'.format(url=url)
self.auth = (user, password)
deprecation('Deprecation of the Vectra API v1 will be announced in an upcoming release. Migrate to API v2'
' when possible')
else:
raise RuntimeError("At least one form of authentication is required. Please provide a token or username"
" and password")
@staticmethod
def _remove_trailing_slashes(url):
url = url[:-1] if url.endswith('/') else url
return url
@staticmethod
def _generate_campaign_params(args):
"""
Generate query parameters for campaigns based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'dst_ip', 'target_domain', 'state', 'name', 'last_updated_gte',
'note_modified_timestamp_gte','page', 'page_size']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid campaign query parameter'.format(str(k)))
return params
@staticmethod
def _generate_host_params(args):
"""
Generate query parameters for hosts based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['active_traffic', 'all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte',
'fields', 'has_active_traffic', 'include_detection_summaries', 'is_key_asset', 'is_targeting_key_asset',
'key_asset', 'last_detection_timestamp', 'last_source', 'mac_address', 'max_id', 'min_id',
'name', 'note_modified_timestamp_gte', 'ordering','page', 'page_size', 'privilege_category',
'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
'targets_key_asset', 'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'key_asset', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_host_by_id_params(args):
"""
Generate query parameters for host based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields', 'include_external', 'include_ldap']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid host query parameter'.format(str(k)))
return params
@staticmethod
def _generate_detection_params(args):
"""
Generate query parameters for detections based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['c_score', 'c_score_gte', 'category', 'certainty', 'certainty_gte', 'description',
'detection', 'detection_category', 'detection_type', 'fields', 'host_id', 'is_targeting_key_asset',
'is_triaged', 'last_timestamp', 'max_id', 'min_id', 'note_modified_timestamp_gte', 'ordering',
'page', 'page_size', 'src_ip', 'state', 't_score', 't_score_gte', 'tags', 'targets_key_asset',
'threat', 'threat_gte']
deprecated_keys = ['c_score', 'c_score_gte', 'category', 'detection', 't_score', 't_score_gte', 'targets_key_asset']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid detection query parameter'.format(str(k)))
if k in deprecated_keys: param_deprecation(k)
return params
@staticmethod
def _generate_group_params(args):
"""
Generate query parameters for groups based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['description', 'domains', 'host_ids', 'host_names', 'last_modified_by',
'last_modified_timestamp', 'name', 'page', 'page_size', 'type']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid group query parameter'.format(str(k)))
return params
@staticmethod
def _generate_rule_params(args):
"""
Generate query parameters for rules based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['contains', 'fields', 'include_templates', 'page', 'page_size', 'ordering']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid rule query parameter'.format(str(k)))
return params
@staticmethod
def _generate_rule_by_id_params(args):
"""
Generate query parameters for rule based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['fields']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid rule query parameter'.format(str(k)))
return params
@staticmethod
def _generate_user_params(args):
"""
Generate query parameters for users based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['username', 'role', 'account_type', 'authentication_profile', 'last_login_gte']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid user query parameter'.format(str(k)))
return params
@staticmethod
def _generate_ip_address_params(args):
"""
Generate query parameters for ip address queries based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['include_ipv4', 'include_ipv6']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid ip address query parameter'.format(str(k)))
return params
@staticmethod
def _generate_subnet_params(args):
"""
Generate query parameters for subnet queries based on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['ordering', 'search']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid subnet query parameter'.format(str(k)))
return params
@staticmethod
def _generate_internal_network_params(args):
"""
Generate query parameters for internal network queries based on provided argsbased on provided args
:param args: dict of keys to generate query params
:rtype: dict
"""
params = {}
valid_keys = ['include_ipv4', 'include_ipv6']
for k, v in args.items():
if k in valid_keys:
if v is not None: params[k] = v
else:
raise ValueError('argument {} is an invalid internal network query parameter'.format(str(k)))
return params
@validate_api_v2
@request_error_handler
def _get_request(self, url, **kwargs):
"""
Do a get request on the provided URL
This is used by paginated endpoints
:rtype: requests.Response
"""
params = {}
for k, v in kwargs.items():
params[k] = v
if self.version == 2:
return requests.get(url, headers=self.headers, params=params, verify=self.verify)
else:
return requests.get(url, auth=self.auth, params=params, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_campaigns(self, **kwargs):
"""
Query all campaigns - all parameters are optional
:param dst_ip: filter on campaign destination IP
:param target_domain: filter on campaign destination domain
:param state: campaign state, possible values are: init, active, closed, closed_never_active
:param name: filter on campaign name
:param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
:param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
:param fields: comma separated string of fields to be filtered and returned
possible values are: id, dst_ip, target_domain, state, name, last_updated,
note, note_modified_by, note_modified_timestamp
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
return requests.get('{url}/campaigns'.format(url=self.url), headers=self.headers,
params=self._generate_campaign_params(kwargs), verify=self.verify)
def get_all_campaigns(self, **kwargs):
"""
Generator to retrieve all campaigns - all parameters are optional
:param dst_ip: filter on campaign destination IP
:param target_domain: filter on campaign destination domain
:param state: campaign state, possible values are: init, active, closed, closed_never_active
:param name: filter on campaign name
:param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
:param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
:param fields: comma separated string of fields to be filtered and returned
possible values are: id, dst_ip, target_domain, state, name, last_updated,
note, note_modified_by, note_modified_timestamp
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
resp = requests.get('{url}/campaigns'.format(url=self.url), headers=self.headers,
params=self._generate_campaign_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_campaign_by_id(self, campaign_id=None, **kwargs):
"""
Get campaign by id
"""
if not campaign_id:
raise ValueError('Campaign id required')
return requests.get('{url}/campaigns/{id}'.format(url=self.url, id=campaign_id),
headers=self.headers, verify=self.verify)
@request_error_handler
def get_hosts(self, **kwargs):
"""
Query all hosts - all parameters are optional
:param all: if set to False, endpoint will only return hosts that have active detections, active traffic or are marked as key assets - default False
:param active_traffic: only return hosts that have seen traffic in the last 2 hours (bool)
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param fields: comma separated string of fields to be filtered and returned
possible values are: id,name,active_traffic,has_active_traffic,t_score,threat,c_score,
certainty,severity,last_source,ip,previous_ips,last_detection_timestamp,key_asset,
is_key_asset,state,targets_key_asset,is_targeting_key_asset,detection_set,
host_artifact_set,sensor,sensor_name,tags,note,note_modified_by,note_modified_timestamp,
url,host_url,last_modified,assigned_to,assigned_date,groups,has_custom_model,privilege_level,
privilege_category,probable_owner,detection_profile
:param has_active_traffic: host has active traffic (bool)
:param include_detection_summaries: include detection summary in response (bool)
:param is_key_asset: host is key asset (bool)
:param is_targeting_key_asset: host is targeting key asset (bool)
:param key_asset: host is key asset (bool) - will be removed with deprecation of v1 of api
:param last_detection_timestamp: timestamp of last detection on this host (datetime)
:param last_source: registered ip addst modified timestamp greater than or equal to (datetime)ress of host
:param mac_address: registered mac address of host
:param max_id: maximum ID of host returned
:param min_id: minimum ID of host returned
:param name: registered name of host
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
:param ordering: field to use to order response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param privilege_category: privilege category of host (low/medium/high)
:param privilege_level: privilege level of host (0-10)
:param privilege_level_gte: privilege level of host greater than or equal to (int)
:param state: state of host (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score greater than or equal to (int) - will be removed with deprection of v1 of api
:param tags: tags assigned to host
:param targets_key_asset: host is targeting key asset (bool)
:param threat: threat score (int)
:param threat_gte: threat score greater than or equal to (int)
"""
if self.version == 2:
return requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
params=self._generate_host_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/hosts'.format(url=self.url), auth=self.auth,
params=self._generate_host_params(kwargs), verify=self.verify)
def get_all_hosts(self, **kwargs):
"""
Generator to retrieve all hosts - all parameters are optional
:param all: if set to False, endpoint will only return hosts that have active detections, active traffic or are marked as key assets - default False
:param active_traffic: only return hosts that have seen traffic in the last 2 hours (bool)
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param fields: comma separated string of fields to be filtered and returned
possible values are: id,name,active_traffic,has_active_traffic,t_score,threat,c_score,
certainty,severity,last_source,ip,previous_ips,last_detection_timestamp,key_asset,
is_key_asset,state,targets_key_asset,is_targeting_key_asset,detection_set,
host_artifact_set,sensor,sensor_name,tags,note,note_modified_by,note_modified_timestamp,
url,host_url,last_modified,assigned_to,assigned_date,groups,has_custom_model,privilege_level,
privilege_category,probable_owner,detection_profile
:param has_active_traffic: host has active traffic (bool)
:param include_detection_summaries: include detection summary in response (bool)
:param is_key_asset: host is key asset (bool)
:param is_targeting_key_asset: host is targeting key asset (bool)
:param key_asset: host is key asset (bool) - will be removed with deprecation of v1 of api
:param last_detection_timestamp: timestamp of last detection on this host (datetime)
:param last_source: registered ip addst modified timestamp greater than or equal to (datetime)ress of host
:param mac_address: registered mac address of host
:param max_id: maximum ID of host returned
:param min_id: minimum ID of host returned
:param name: registered name of host
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
:param ordering: field to use to order response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param privilege_category: privilege category of host (low/medium/high)
:param privilege_level: privilege level of host (0-10)
:param privilege_level_gte: privilege level of host greater than or equal to (int)
:param state: state of host (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score greater than or equal to (int) - will be removed with deprection of v1 of api
:param tags: tags assigned to host
:param targets_key_asset: host is targeting key asset (bool)
:param threat: threat score (int)
:param threat_gte: threat score greater than or equal to (int)
"""
resp = requests.get('{url}/hosts'.format(url=self.url), headers=self.headers,
params=self._generate_host_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url=resp.json()['next'])
yield resp
@request_error_handler
def get_host_by_id(self, host_id=None, **kwargs):
"""
Get host by id
:param host_id: host id - required
:param include_external: include fields regarding external connectors (e.g. CrowdStrike) - optional
:param include_ldap: include LDAP context pulled over AD connector - optional
:param fields: comma separated string of fields to be filtered and returned - optional
possible values are: active_traffic, assigned_date, assigned_to, c_score, campaign_summaries,
carbon_black, certainty, crowdstrike, detection_profile, detection_set, detection_summaries,
groups, has_active_traffic, has_custom_model, has_shell_knocker_learnings, host_artifact_set,
host_luid, host_session_luid, host_url, id, ip, is_key_asset, is_targeting_key_asset, key_asset,
last_detection_timestamp, last_modified, last_seen, last_source, ldap, name, note, note_modified_by,
note_modified_timestamp, previous_ips, privilege_category, privilege_level, probable_owner, sensor,
sensor_name, severity, shell_knocker, state, suspicious_admin_learnings, t_score, tags, targets_key_asset,
threat, url, vcenter
"""
if not host_id:
raise ValueError('Host id required')
if self.version == 2:
return requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers,
params=self._generate_host_by_id_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), auth=self.auth,
params=self._generate_host_by_id_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def set_key_asset(self, host_id=None, set=True):
"""
(Un)set host as key asset
:param host_id: id of host needing to be set - required
:param set: set flag to true if setting host as key asset
"""
if not host_id:
raise ValueError('Host id required')
if set:
payload = {'key_asset':'true'}
else:
payload = {'key_asset':'false'}
return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_host_tags(self, host_id=None):
"""
Get host tags
:param host_id: ID of the host for which to retrieve the tags
"""
if not host_id:
raise ValueError('Host id required')
return requests.get('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
verify=False)
@validate_api_v2
@request_error_handler
def set_host_tags(self, host_id=None, tags=[], append=False):
"""
Set host tags
:param host_id:
:param tags: list of tags to add to host
:param append: overwrites existing list if set to False, appends to existing tags if set to True
Set to empty list to clear tags (default: False)
"""
if not host_id:
raise ValueError('Host id required')
if append and type(tags) == list:
current_list = self.get_host_tags(host_id=host_id).json()['tags']
payload = {
"tags": current_list + tags
}
elif type(tags) == list:
payload = {
"tags": tags
}
else:
raise TypeError('tags must be of type list')
return requests.patch('{url}/tagging/host/{id}'.format(url=self.url, id=host_id), headers=self.headers,
json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_hosts_tag(self, tag, host_ids):
"""
Set a tag in bulk on multiple hosts. Only one tag can be set at a time
:param host_ids: IDs of the hosts for which to set the tag
"""
if not isinstance(host_ids, list):
raise TypeError('Host IDs must be of type list')
payload = {
'objectIds': host_ids,
'tag': tag
}
return requests.post('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def bulk_delete_hosts_tag(self, tag, host_ids):
"""
Delete a tag in bulk on multiple hosts. Only one tag can be deleted at a time
:param host_ids: IDs of the hosts on which to delete the tag
"""
if not isinstance(host_ids, list):
raise TypeError('Host IDs must be of type list')
payload = {
'objectIds': host_ids,
'tag': tag
}
return requests.delete('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def get_host_note(self, host_id=None):
"""
Get host notes
:param host_id:
For consistency we return a requests.models.Response object
As we do not want to return the complete host body, we alter the response content
"""
if not host_id:
raise ValueError('Host id required')
host = requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, verify=self.verify)
if host.status_code == 200:
host_note = host.json()['note']
# API endpoint return HTML escaped characters
host_note = html.unescape(host_note) if host_note else ''
json_dict = {'status': 'success', 'host_id': str(host_id), 'note': host_note}
host._content = json.dumps(json_dict).encode('utf-8')
return host
@validate_api_v2
@request_error_handler
def set_host_note(self, host_id=None, note='', append=False):
"""
Set host note
:param host_id:
:param note: content of the note to set
:param append: overwrites existing note if set to False, appends if set to True
Set to empty note string to clear host note
"""
if not host_id:
raise ValueError('Host id required')
if append and isinstance(note, str):
current_note = self.get_host_note(host_id=host_id).json()['note']
if current_note:
if len(note) > 0:
payload = {
"note": '{}{}{}'.format(current_note, '\n', note)
}
else:
payload = {
"note": current_note
}
else:
payload = {
"note": note
}
elif isinstance(note, str):
payload = {
"note": note
}
else:
raise TypeError('Note must be of type str')
return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, data=json.dumps(payload),
verify=self.verify)
@request_error_handler
def get_detections(self, **kwargs):
"""
Query all detections - all parameters are optional
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param category: detection category - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param detection: detection type
:param detection_type: detection type
:param detection_category: detection category
:param description:
:param fields: comma separated string of fields to be filtered and returned
possible values are: id, url, detection_url, category, detection, detection_category,
detection_type, custom_detection, description, src_ip, state, t_score, c_score,
certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
is_targeting_key_asset, src_account, src_host, note, note_modified_by,
note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
assigned_date, groups, is_marked_custom, is_custom_model
:param host_id: detection id (int)
:param is_targeting_key_asset: detection is targeting key asset (bool)
:param is_triaged: detection is triaged
:param last_timestamp: timestamp of last activity on detection (datetime)
:param max_id: maximum ID of detection returned
:param min_id: minimum ID of detection returned
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param src_ip: source ip address of host attributed to detection
:param state: state of detection (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
:param tags: tags assigned to detection
:param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
:param threat: threat score (int)
:param threat_gte threat score is greater than or equal to (int)
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
"""
if self.version == 2:
return requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
params=self._generate_detection_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/detections'.format(url=self.url), auth=self.auth,
params=self._generate_detection_params(kwargs), verify=self.verify)
def get_all_detections(self, **kwargs):
"""
Generator to retrieve all detections - all parameters are optional
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param category: detection category - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param detection: detection type
:param detection_type: detection type
:param detection_category: detection category
:param description:
:param fields: comma separated string of fields to be filtered and returned
possible values are: id, url, detection_url, category, detection, detection_category,
detection_type, custom_detection, description, src_ip, state, t_score, c_score,
certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
is_targeting_key_asset, src_account, src_host, note, note_modified_by,
note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
assigned_date, groups, is_marked_custom, is_custom_model
:param host_id: detection id (int)
:param is_targeting_key_asset: detection is targeting key asset (bool)
:param is_triaged: detection is triaged
:param last_timestamp: timestamp of last activity on detection (datetime)
:param max_id: maximum ID of detection returned
:param min_id: minimum ID of detection returned
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param src_ip: source ip address of host attributed to detection
:param state: state of detection (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
:param tags: tags assigned to detection
:param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
:param threat: threat score (int)
:param threat_gte threat score is greater than or equal to (int)
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
"""
resp = requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
params=self._generate_detection_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@request_error_handler
def get_detection_by_id(self, detection_id=None, **kwargs):
"""
Get detection by id
:param detection_id: detection id - required
:param fields: comma separated string of fields to be filtered and returned - optional
possible values are: id, url, detection_url, category, detection, detection_category,
detection_type, custom_detection, description, src_ip, state, t_score, c_score,
certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
is_targeting_key_asset, src_account, src_host, note, note_modified_by,
note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
assigned_date, groups, is_marked_custom, is_custom_model
"""
if not detection_id:
raise ValueError('Detection id required')
if self.version == 2:
return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
params=self._generate_detection_params(kwargs), verify=self.verify)
else:
return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), auth=self.auth,
params=self._generate_detection_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_fixed(self, detection_ids=None):
"""
Mark detections as fixed
:param detection_ids: list of detections to mark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=True)
@validate_api_v2
@request_error_handler
def unmark_detections_fixed(self, detection_ids=None):
"""
Unmark detections as fixed
:param detection_ids: list of detections to unmark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=False)
def _toggle_detections_fixed(self, detection_ids, fixed):
"""
Internal function to mark/unmark detections as fixed
"""
payload = {
'detectionIdList': detection_ids,
'mark_as_fixed': str(fixed)
}
return requests.patch('{url}/detections'.format(url=self.url), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_custom(self, detection_ids=[], triage_category=None):
"""
Mark detections as custom
:param detection_ids: list of detection IDs to mark as custom
:param triage_category: custom name to give detection
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as custom')
payload = {
"triage_category": triage_category,
"detectionIdList": detection_ids
}
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
@request_error_handler
def unmark_detections_custom(self, detection_ids=[]):
"""
Unmark detection as custom
:param detection_ids: list of detection IDs to unmark as custom
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as custom')
payload = {
"detectionIdList": detection_ids
}
response = requests.delete('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
# DELETE returns an empty response, but we populate the response for consistency with the mark_as_fixed() function
json_dict = {'_meta': {'message': 'Successfully unmarked detections', 'level': 'Success'}}
response._content = json.dumps(json_dict).encode('utf-8')
return response
@validate_api_v2
@request_error_handler
def get_detection_tags(self, detection_id=None):
"""
Get detection tags
:param detection_id:
"""
return requests.get('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
verify=False)
@validate_api_v2
@request_error_handler
def set_detection_tags(self, detection_id=None, tags=[], append=False):
"""
Set detection tags
:param detection_id:
:param tags: list of tags to add to detection
:param append: overwrites existing list if set to False, appends to existing tags if set to True
Set to empty list to clear all tags (default: False)
"""
if append and type(tags) == list:
current_list = self.get_detection_tags(detection_id=detection_id).json()['tags']
payload = {
"tags": current_list + tags
}
elif type(tags) == list:
payload = {
"tags": tags
}
else:
raise TypeError('tags must be of type list')
return requests.patch('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
json=payload, verify=self.verify)
@validate_api_v2
@request_error_handler
def bulk_set_detections_tag(self, tag, detection_ids):
"""
Set a tag in bulk on multiple detections. Only one tag can be set at a time
:param detection_ids: IDs of the detections for which to set the tag
"""
if not isinstance(detection_ids, list):
raise TypeError('Detection IDs must be of type list')
payload = {
'objectIds': detection_ids,
'tag': tag
}
return requests.post('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def bulk_delete_detections_tag(self, tag, detection_ids):
"""
Delete a tag in bulk on multiple detections. Only one tag can be deleted at a time
:param detection_ids: IDs of the detections for which to delete the tag
"""
if not isinstance(detection_ids, list):
raise TypeError('Detection IDs must be of type list')
payload = {
'objectIds': detection_ids,
'tag': tag
}
return requests.delete('{url}/tagging/detection'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def get_detection_note(self, detection_id=None):
"""
Get detection notes
:param detection_id:
For consistency we return a requests.models.Response object
As we do not want to return the complete detection body, we alter the response content
"""
detection = requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, verify=self.verify)
if detection.status_code == 200:
detection_note = detection.json()['note']
# API endpoint return HTML escaped characters
detection_note = html.unescape(detection_note) if detection_note else ''
json_dict = {'status': 'success', 'detection_id': str(detection_id), 'note': detection_note}
detection._content = json.dumps(json_dict).encode('utf-8')
return detection
@validate_api_v2
@request_error_handler
def set_detection_note(self, detection_id=None, note='', append=False):
"""
Set detection note
:param detection_id:
:param note: content of the note to set
:param append: overwrites existing note if set to False, appends if set to True
Set to empty note string to clear detection note
"""
if append and isinstance(note, str):
current_note = self.get_detection_note(detection_id=detection_id).json()['note']
if current_note:
if len(note) > 0:
payload = {
"note": '{}{}{}'.format(current_note, '\n', note)
}
else:
payload = {
"note": current_note
}
else:
payload = {
"note": note
}
elif isinstance(note, str):
payload = {
"note": note
}
else:
raise TypeError('Note must be of type str')
return requests.patch('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
def get_detection_pcap(self, detection_id=None, filename=None):
"""
Get detection pcap
:param detection_id: ID of the detection for which to get a pcap
:param filename: filename to write the pcap to. Will be overwriten if already exists.
"""
response = requests.get('{url}/detections/{id}/pcap'.format(url=self.url, id=detection_id), headers=self.headers,
verify=False)
if response.status_code not in [200, 201, 204]:
raise HTTPException(response)
with open(filename, 'wb') as f:
f.write(response.content)
# Return a <Response> object for consistency
json_dict = {'status': 'success', 'detection_id': str(detection_id), 'file_created': filename}
response._content = json.dumps(json_dict).encode('utf-8')
return response
# TODO add request_error_handler decorator as soon as get_rules_by_name() returns requests.Response object
@validate_api_v2
def get_rules(self, name=None, rule_id=None, **kwargs):
"""
Query all rules
:param name: name of rule to search (substring matching)
:param rule_id: ID of rule to return
:param contains:
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
:param include_templates: include rule templates, default is False
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
if name:
deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
return self.get_rules_by_name(triage_category=name)
elif rule_id:
deprecation('The "rule_id" argument will be removed from this function, please use the corresponding get_rule_by_id function')
return self.get_rule_by_id(rule_id)
else:
return requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
params=self._generate_rule_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
"""
Get triage rules by id
:param rule_id: id of triage rule to retrieve
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
"""
if not rule_id:
raise ValueError('Rule id required')
deprecation('Some rules are no longer compatible with the APIv2, please switch to the APIv2.1')
return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
params=self._generate_rule_by_id_params(kwargs), verify=False)
# TODO make return type requests.Reponse
@validate_api_v2
def get_rules_by_name(self, triage_category=None, description=None):
"""
Get triage rules by name or description
Condition are to be read as OR
:param triage_category: 'Triage as' field of filter
:param description: Description of the triage filter
:rtype list: to be backwards compatible
"""
search_query = triage_category if triage_category else description
response = self.get_rules(contains=search_query)
return response.json()['results']
@validate_api_v2
def get_all_rules(self, **kwargs):
"""
Generator to retrieve all rules page by page - all parameters are optional
:param contains:
:param fields: comma separated string of fields to be filtered and returned
possible values are: active_detections, all_hosts, category, created_timestamp, description,
enabled, flex1, flex2, flex3, flex4, flex5, flex6, host, host_group, id, identity, ip,
ip_group, is_whitelist, last_timestamp, priority, remote1_dns, remote1_dns_groups,
remote1_ip, remote1_ip_groups, remote1_kerb_account, remote1_kerb_service, remote1_port,
remote1_proto, remote2_dns, remote2_dns_groups, remote2_ip, remote2_ip_groups, remote2_port,
remote2_proto, sensor_luid, smart_category, template, total_detections, type_vname, url
:param include_templates: include rule templates, default is False
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
"""
resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
params=self._generate_rule_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def create_rule(self, detection_category=None, detection_type=None, triage_category=None, is_whitelist=False, **kwargs):
"""
Create triage rule
:param detection_category: detection category to triage [botnet activity, command & control, reconnaissance,
lateral movement, exfiltration]
:param detection_type: detection type to triage
:param triage_category: name that will be used for triaged detection
:param description: name of the triage rule
:param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
:param ip: list of ip addresses to apply to triage rule
:param ip_group: list of IP groups IDs to add to rule
:param host: list of host ids to apply to triage rule
:param host_group: list of Host groups IDs to add to rule
:param sensor_luid: list of sensor luids to triage
:param priority: used to determine order of triage filters (int)
:param all_hosts: apply triage rule to all hosts (boolean)
:param remote1_ip: destination IP where this Triage filter will be applied to
:param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
:param remote1_proto: destination protocol where this Triage filter will be applied to
:param remote1_port: destination port where this Triage filter will be applied to
:param remote1_dns: destination FQDN where this Triage filter will apply to
:param remote1_dns_groups: domain groups where this Triage filter will apply to
:param remote2_ip: destination IP where this Triage filter will be applied to
:param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
:param remote2_proto: destination protocol where this Triage filter will be applied to
:param remote2_port: destination port where this Triage filter will be applied to
:param remote2_dns: destination FQDN where this Triage filter will apply to
:param remote2_dns_groups: domain groups where this Triage filter will apply to
:param account: accounts where this triage filter will apply to (list)
:param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
:param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
:param identity: (Kerberos detection) identity where this triage filter will apply to (list)
:param service: (PAA detections) services where this triage filter will apply to (list)
:param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
:param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
:param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
:param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
:param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
:returns request object
"""
if not all([detection_category, detection_type, triage_category]):
raise KeyError("missing required parameter: "
"detection_category, detection_type, triage_category")
if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
raise ValueError("detection_category not recognized")
payload = {
'detection_category': detection_category,
'detection': detection_type,
'triage_category': triage_category,
'is_whitelist': is_whitelist
}
valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
for k, v in kwargs.items():
if k in valid_keys:
payload[k] = v
else:
raise ValueError('argument {} is an invalid field for rule creation'.format(str(k)))
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
    @validate_api_v2
    @request_error_handler
    def update_rule(self, rule_id=None, name=None, append=False, **kwargs):
        """
        Update triage rule
        :param rule_id: id of rule to update
        :param name: name of rule to update
        :param append: set to True if appending to existing list (boolean)
        :param description: name of the triage rule
        :param is_whitelist: set to True if rule is to whitelist; opposed to tracking detections without scores (boolean)
        :param ip: list of ip addresses to apply to triage rule
        :param ip_group: list of IP groups IDs to add to rule
        :param host: list of host ids to apply to triage rule
        :param host_group: list of Host groups IDs to add to rule
        :param sensor_luid: list of sensor luids to triage
        :param priority: used to determine order of triage filters (int)
        :param all_hosts: apply triage rule to all hosts (boolean)
        :param remote1_ip: destination IP where this Triage filter will be applied to
        :param remote1_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote1_proto: destination protocol where this Triage filter will be applied to
        :param remote1_port: destination port where this Triage filter will be applied to
        :param remote1_dns: destination FQDN where this Triage filter will apply to
        :param remote1_dns_groups: domain groups where this Triage filter will apply to
        :param remote2_ip: destination IP where this Triage filter will be applied to
        :param remote2_ip_groups: destination IP Groups where this Triage filter will be applied to
        :param remote2_proto: destination protocol where this Triage filter will be applied to
        :param remote2_port: destination port where this Triage filter will be applied to
        :param remote2_dns: destination FQDN where this Triage filter will apply to
        :param remote2_dns_groups: domain groups where this Triage filter will apply to
        :param account: accounts where this triage filter will apply to (list)
        :param named_pipe: (Suspicious Remote Execution) named pipes where this triage filter will apply to (list)
        :param uuid: (Suspicious Remote Execution) UUID where this triage filter will apply to (list)
        :param identity: (Kerberos detection) identity where this triage filter will apply to (list)
        :param service: (PAA detections) services where this triage filter will apply to (list)
        :param file_share: (Ransomware File Activity) file share where this triage filter will apply to - escape backslashes with "\" (list)
        :param file_extensions: (Ransomware File Activity) file extensions where this triage filter will apply to (list)
        :param rdp_client_name: (Suspicious Remote Desktop) RDP client name where this triage filter will apply to (list)
        :param rdp_client_token: (Suspicious Remote Desktop) RDP client token where this triage filter will apply to (list)
        :param keyboard_name: (Suspicious Remote Desktop) RDP keyboard name where this triage filter will apply to (list)
        :returns request object
        """
        # Resolve the rule to update: by (deprecated) name lookup, or by id.
        if name:
            deprecation('The "name" argument will be removed from this function, please use get_all_rules with the "contains" query parameter')
            matching_rules = self.get_rules_by_name(triage_category=name)
            # Name lookup uses substring matching, so require exactly one hit
            if len(matching_rules) > 1:
                raise Exception('More than one rule matching the name')
            elif len(matching_rules) < 1:
                raise Exception('No rule matching the search')
            else:
                rule = matching_rules[0]
        elif rule_id:
            rule = self.get_rule_by_id(rule_id=rule_id).json()
        else:
            raise ValueError("rule name or id must be provided")
        # Optional rule fields accepted by the endpoint
        valid_keys = ['description', 'is_whitelist', 'ip', 'ip_group', 'host', 'host_group',
                      'sensor_luid', 'priority', 'all_hosts', 'remote1_ip', 'remote1_ip_groups',
                      'remote1_proto', 'remote1_port', 'remote1_dns', 'remote1_dns_groups', 'remote2_ip',
                      'remote2_ip_groups', 'remote2_proto', 'remote2_port', 'remote2_dns',
                      'remote2_dns_groups', 'account', 'named_pipe', 'uuid', 'identity', 'service',
                      'file_share', 'file_extensions', 'rdp_client_name', 'rdp_client_token', 'keyboard_name']
        # Merge caller-supplied fields into the retrieved rule body, then PUT
        # the whole body back. With append=True, list-valued fields are
        # extended; non-list fields are simply replaced.
        for k, v in kwargs.items():
            if k in valid_keys:
                if append:
                    if isinstance(rule[k], list):
                        rule[k] += v
                    else:
                        rule[k] = v
                else:
                    rule[k] = v
            else:
                raise ValueError('invalid parameter provided: {}'.format(str(k)))
        return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
                            verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_rule(self, rule_id=None, restore_detections=True):
"""
Delete triage rule
:param rule_id:
:param restore_detections: restore previously triaged detections (bool) default behavior is to restore
detections
"""
if not rule_id:
raise ValueError('Rule id required')
params = {
'restore_detections': restore_detections
}
return requests.delete('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers, params=params,
verify=self.verify)
@validate_api_v2
@request_error_handler
def get_groups(self, **kwargs):
"""
Query all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
return requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
@validate_api_v2
def get_all_groups(self, **kwargs):
"""
Generator to retrieve all groups - all parameters are optional
:param description: description of groups to search
:param domains: search for groups containing those domains (list)
:param host_ids: search for groups containing those host IDs (list)
:param host_names: search for groups containing those hosts (list)
:param last_modified_by: username of last person to modify this group
:param last_modified_timestamp: timestamp of last modification of group (datetime)
:param name: name of groups to search
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param type: type of group to search (domain/host/ip)
"""
resp = requests.get('{url}/groups'.format(url=self.url), headers=self.headers,
params=self._generate_group_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
@validate_api_v2
@request_error_handler
def get_group_by_id(self, group_id):
"""
Get groups by id
:param rule_id: id of group to retrieve
"""
return requests.get('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=False)
@validate_api_v2
def get_groups_by_name(self, name=None, description=None):
"""
Get groups by name or description
:param name: Name of group*
:param description: Description of the group*
*params are to be read as OR
"""
if name and description:
raise Exception('Can only provide a name OR a description')
if name:
response = self.get_groups(name=name)
return response.json()['results']
elif description:
response = self.get_groups(description=description)
return response.json()['results']
@validate_api_v2
@request_error_handler
def create_group(self, name=None, description='', type=None, members=None, rules=None, **kwargs):
    """
    Create group
    :param name: name of the group to create - required
    :param description: description of the group
    :param type: type of the group to create (domain/host/ip) - required
    :param members: list of host ids to add to group
    :param rules: list of triage rule ids to add to group
    :param kwargs: any extra keyword argument is forwarded in the payload; values must be lists
    :rtype requests.Response:
    """
    # Fresh lists per call: mutable default arguments are shared between calls.
    members = [] if members is None else members
    rules = [] if rules is None else rules
    if not name:
        raise ValueError("missing required parameter: name")
    if not type:
        raise ValueError("missing required parameter: type")
    if type not in ['host', 'domain', 'ip']:
        raise ValueError('parameter type must have value "domain", "ip" or "host"')
    if not isinstance(members, list):
        raise TypeError("members must be type: list")
    if not isinstance(rules, list):
        raise TypeError("rules must be type: list")
    payload = {
        "name": name,
        "description": description,
        "type": type,
        "members": members,
        "rules": rules,
    }
    for k, v in kwargs.items():
        # The original used type(v) here, but the builtin "type" is shadowed by
        # the string parameter above, so the check raised TypeError for every
        # extra keyword argument; isinstance avoids the shadowed builtin.
        if not isinstance(v, list):
            raise TypeError("{} must be of type: list".format(k))
        payload[k] = v
    return requests.post('{url}/groups'.format(url=self.url), headers=self.headers, json=payload,
                         verify=self.verify)
@validate_api_v2
@request_error_handler
def update_group(self, group_id, append=False, **kwargs):
    """
    Update group
    :param group_id: id of group to update
    :param name: name of group
    :param description: description of the group
    :param type: type of the group (domain/host/ip)
    :param members: list of host ids to add to group
    :param rules: list of rule ids to add to group
    :param append: set to True if appending to existing list (boolean)
    """
    valid_keys = ['name', 'description', 'type', 'members', 'rules']
    group = self.get_group_by_id(group_id=group_id).json()
    try:
        existing_id = group['id']
    except KeyError:
        raise KeyError('Group with id {} was not found'.format(str(group_id)))
    # Transform members into flat list as API returns dicts for host groups
    if group['type'] == 'host':
        members = set()
        for member in group['members']:
            members.add(member['id'])
        group['members'] = list(members)
    for k, v in kwargs.items():
        if k not in valid_keys:
            raise KeyError('Key {} is not valid'.format(k))
        # A valid key with a None value is skipped; the original mislabeled
        # this case as an invalid key and raised.
        if v is None:
            continue
        if k in ['members', 'rules'] and not isinstance(v, list):
            raise TypeError('{} must be of type: list'.format(k))
        if append:
            group[k] += v
        else:
            group[k] = v
    # De-duplicate members before writing the group back.
    group['members'] = list(set(group['members']))
    return requests.patch('{url}/groups/{id}'.format(url=self.url, id=existing_id), headers=self.headers, json=group,
                          verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_group(self, group_id=None):
    """
    Delete group
    :param group_id: id of the group to delete
    """
    return requests.delete('{url}/groups/{id}'.format(url=self.url, id=group_id), headers=self.headers, verify=self.verify)
@validate_api_v2
def get_all_users(self, **kwargs):
    """
    Generator to query all users, yielding one requests.Response per page
    :param username: filter by username
    :param role: filter by role
    :param account_type: filter by account type (local, ldap, radius or tacacs)
    :param authentication_profile: filter by authentication profile
    :param last_login_gte: filter for users that have logged in since the given timestamp
    """
    resp = requests.get('{url}/users'.format(url=self.url), headers=self.headers,
                        params=self._generate_user_params(kwargs), verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url = resp.json()['next'])
        yield resp
@validate_api_v2
@request_error_handler
def get_user_by_id(self, user_id=None):
    """
    Get a user by id
    :param user_id: id of user to retrieve - required
    """
    if not user_id:
        raise ValueError('User id required')
    return requests.get('{url}/users/{id}'.format(url=self.url, id=user_id), headers=self.headers,
                        verify=self.verify)
@validate_api_v2
@request_error_handler
def update_user(self, user_id=None, account_type=None, authentication_profile=None):
    """
    Update the authentication type for a user; all three parameters are required
    :param user_id: user ID
    :param account_type: new user account type (local, ldap, radius, tacacs)
    :param authentication_profile: authentication profile name
    """
    if not user_id:
        raise ValueError('User id required')
    if not account_type in ['local', 'ldap', 'radius', 'tacacs']:
        raise ValueError('Invalid account_type provided')
    if not authentication_profile:
        raise ValueError('Authentication profile required')
    payload = {
        'account_type': account_type,
        'authentication_profile': authentication_profile
    }
    return requests.patch('{url}/users/{id}'.format(url=self.url, id=user_id), json=payload, headers=self.headers,
                          verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxies(self, proxy_id=None):
    """
    Get all defined proxies
    :param proxy_id: DEPRECATED - use get_proxy_by_id() instead
    """
    if proxy_id:
        # Backwards-compatible shim: warn and delegate to the dedicated method.
        deprecation('The "proxy_id" argument will be removed from this function, please use the get_proxy_by_id() function')
        return self.get_proxy_by_id(proxy_id=proxy_id)
    else:
        return requests.get('{url}/proxies'.format(url=self.url), headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_proxy_by_id(self, proxy_id=None):
    """
    Get proxy by id
    :param proxy_id: id of proxy to retrieve - caution those are UUIDs not int
    """
    if not proxy_id:
        raise ValueError('Proxy id required')
    endpoint = '{url}/proxies/{id}'.format(url=self.url, id=proxy_id)
    return requests.get(endpoint, headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def add_proxy(self, address=None, enable=True):
    """
    Add a proxy to the proxy list
    :param address: IP address of the proxy to add
    :param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
    """
    # API expects the proxy definition nested under a "proxy" key.
    payload = {
        "proxy": {
            "address": address,
            "considerProxy": enable
        }
    }
    return requests.post('{url}/proxies'.format(url=self.url), json=payload, headers=self.headers, verify=self.verify)
# TODO PATCH request modifies the proxy ID and 404 is actually a 500 - APP-10753
@validate_api_v2
@request_error_handler
def update_proxy(self, proxy_id=None, address=None, enable=True):
    """
    Update an existing proxy in the system
    :param proxy_id: ID of the proxy to update
    :param address: IP address to set for this proxy
    :param enable: set to true to consider the IP as a proxy, false to never consider it as proxy
    CAUTION: the proxy ID (ressource identifier) gets modified by the PATCH request at the moment
    CAUTION: PATCHing an invalid ID returns a HTTP 500 instead of 404 at the moment
    """
    if not proxy_id:
        raise ValueError('Proxy id required')
    # Only fields explicitly provided are sent; note that enable defaults to
    # True, so considerProxy is included unless the caller passes enable=None.
    payload = {"proxy": {}}
    if address is not None:
        payload["proxy"]["address"] = address
    if enable is not None:
        payload["proxy"]["considerProxy"] = enable
    return requests.patch('{url}/proxies/{id}'.format(url=self.url, id=proxy_id), json=payload, headers=self.headers,
                          verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_proxy(self, proxy_id=None):
    """
    Delete a proxy from the proxy list
    :param proxy_id: ID of the proxy to delete
    """
    target = '{url}/proxies/{id}'.format(url=self.url, id=proxy_id)
    return requests.delete(target, headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def create_feed(self, name=None, category=None, certainty=None, itype=None, duration=None):
    """
    Creates new threat feed
    ***Values for category, type, and certainty are case sensitive***
    :param name: name of threat feed
    :param category: category that detection will register. supported values are lateral, exfil, and cnc
    :param certainty: certainty applied to detection. Supported values are Low, Medium, High
    :param itype: indicator type - supported values are Anonymize, Exfiltration, Malware Artifacts, and Watchlist
    :param duration: days that the threat feed will be applied
    :returns: request object
    """
    # NOTE(review): name and duration are not validated here; presumably the
    # API rejects missing values - confirm against the appliance behavior.
    if not category in ['lateral', 'exfil', 'cnc']:
        raise ValueError('Invalid category provided: {}'.format(category))
    if not certainty in ['Low', 'Medium', 'High']:
        raise ValueError('Invalid certainty provided: {}'.format(str(certainty)))
    if not itype in ['Anonymize', 'Exfiltration', 'Malware Artifacts', 'Watchlist']:
        raise ValueError('Invalid itype provided: {}'.format(str(itype)))
    payload = {
        "threatFeed": {
            "name": name,
            "defaults": {
                "category": category,
                "certainty": certainty,
                "indicatorType": itype,
                "duration": duration
            }
        }
    }
    return requests.post('{url}/threatFeeds'.format(url=self.url), json=payload, headers=self.headers,
                         verify=self.verify)
@validate_api_v2
@request_error_handler
def delete_feed(self, feed_id=None):
    """
    Deletes threat feed from Vectra
    :param feed_id: id of threat feed (returned by get_feed_by_name())
    """
    return requests.delete('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id),
                           headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_feeds(self):
    """
    Gets list of currently configured threat feeds
    """
    feeds_endpoint = '{url}/threatFeeds'.format(url=self.url)
    return requests.get(feeds_endpoint, headers=self.headers, verify=self.verify)
@validate_api_v2
def get_feed_by_name(self, name=None):
    """
    Gets configured threat feed by name and returns id (used in conjunction with updating and deleting feeds)
    :param name: name of threat feed - required
    :returns: feed id, or None when no feed matches
    """
    # Guard first: the original crashed on name.lower() when name was omitted.
    if not name:
        raise ValueError('Feed name required')
    try:
        response = requests.get('{url}/threatFeeds'.format(url=self.url), headers=self.headers, verify=self.verify)
    except requests.ConnectionError:
        raise Exception('Unable to connect to remote host')
    if response.status_code == 200:
        for feed in response.json()['threatFeeds']:
            # First case-insensitive match wins.
            if feed['name'].lower() == name.lower():
                return feed['id']
    else:
        raise HTTPException(response)
@validate_api_v2
@request_error_handler
def post_stix_file(self, feed_id=None, stix_file=None):
    """
    Uploads STIX file to new threat feed or overwrites STIX file in existing threat feed
    :param feed_id: id of threat feed (returned by get_feed_by_name)
    :param stix_file: stix filename
    """
    # Open in binary mode and close the handle deterministically; the original
    # leaked the file object returned by open() and relied on GC to close it.
    with open(stix_file, 'rb') as f:
        return requests.post('{url}/threatFeeds/{id}'.format(url=self.url, id=feed_id), headers=self.headers,
                             files={'file': f}, verify=self.verify)
@validate_api_v2
def advanced_search(self, stype=None, page_size=50, query=None):
    """
    Advanced search - generator yielding one requests.Response per page
    :param stype: search type (hosts, detections)
    :param page_size: number of objects returned per page
    :param query: advanced query (download the following guide for more details on query language
        https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
    """
    if stype not in ["hosts", "detections"]:
        raise ValueError("Supported values for stype are hosts or detections")
    if not query:
        raise ValueError('Query parameter is required')
    params = {
        'page_size': page_size,
        'query_string': query
    }
    resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
                        params=params, verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url=resp.json()['next'])
        yield resp
@validate_api_v2
def get_all_traffic_stats(self):
    """
    Generator yielding one requests.Response per page of traffic stats
    """
    response = requests.get('{url}/traffic'.format(url=self.url), headers=self.headers, verify=self.verify)
    yield response
    next_page = response.json()['next']
    while next_page:
        response = self._get_request(url=next_page)
        yield response
        next_page = response.json()['next']
@validate_api_v2
def get_all_sensor_traffic_stats(self, sensor_luid=None):
    """
    Generator to get all traffic stats from a sensor, one requests.Response per page
    :param sensor_luid: LUID of the sensor for which to get the stats. Can be retrived in the UI under Manage > Sensors
    """
    if not sensor_luid:
        raise ValueError('Sensor LUID required')
    resp = requests.get('{url}/traffic/{luid}'.format(url=self.url, luid=sensor_luid), headers=self.headers, verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url = resp.json()['next'])
        yield resp
@validate_api_v2
def get_all_subnets(self, **kwargs):
    """
    Generator to get all subnets seen by the brain, one requests.Response per page
    :param ordering: ordering key of the results.
        possible values are: subnet, hosts, firstSeen, lastSeen
    :param search: only return subnets containing the search string
    """
    resp = requests.get('{url}/subnets'.format(url=self.url), params=self._generate_subnet_params(kwargs),
                        headers=self.headers, verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url = resp.json()['next'])
        yield resp
@validate_api_v2
def get_all_sensor_subnets(self, sensor_luid=None, **kwargs):
    """
    Generator to get all subnets seen by a sensor, one requests.Response per page
    :param sensor_luid: LUID of the sensor for which to get the subnets seen - required
    :param ordering: ordering key of the results.
        possible values are: subnet, hosts, firstSeen, lastSeen
    :param search: only return subnets containing the search string
    """
    if not sensor_luid:
        raise ValueError('Sensor LUID required')
    resp = requests.get('{url}/subnets/{luid}'.format(url=self.url, luid=sensor_luid),
                        params=self._generate_subnet_params(kwargs), headers=self.headers, verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url = resp.json()['next'])
        yield resp
# TODO see if the endpoint should become a generator
@validate_api_v2
@request_error_handler
def get_ip_addresses(self, **kwargs):
    """
    Get all active IPs seen by the brain
    CAUTION: this is not a generator
    :param include_ipv4: Include IPv4 addresses - default True
    :param include_ipv6: Include IPv6 addresses - default True
    """
    return requests.get('{url}/ip_addresses'.format(url=self.url), params=self._generate_ip_address_params(kwargs),
                        headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_internal_networks(self):
    """
    Get all internal networks configured on the brain
    :rtype requests.Response:
    """
    return requests.get('{url}/settings/internal_network'.format(url=self.url),
                        headers=self.headers, verify=self.verify)
@validate_api_v2
@request_error_handler
def set_internal_networks(self, include=None, exclude=None, drop=None, append=True):
    """
    Set the internal network configuration on the brain
    :param include: list of subnets to add the internal subnets list
    :param exclude: list of subnets to exclude from the internal subnets list
    :param drop: list of subnets to add to the drop list
    :param append: overwrites existing lists if set to False, appends to existing lists if set to True
    """
    # Fresh lists per call; the original used mutable default arguments, which
    # are shared between every invocation of the method.
    include = [] if include is None else include
    exclude = [] if exclude is None else exclude
    drop = [] if drop is None else drop
    if not all(isinstance(i, list) for i in [include, exclude, drop]):
        raise TypeError('subnets must be of type list')
    if append:
        current_list = self.get_internal_networks().json()
        # Union with the existing configuration so all entries stay unique.
        payload = {
            'include': list(set(include).union(set(current_list['included_subnets']))),
            'exclude': list(set(exclude).union(set(current_list['excluded_subnets']))),
            'drop': list(set(drop).union(set(current_list['dropped_subnets'])))
        }
    else:
        payload = {
            'include': include,
            'exclude': exclude,
            'drop': drop
        }
    return requests.post('{url}/settings/internal_network'.format(url=self.url),
                         json=payload, headers=self.headers, verify=self.verify)
# TODO see if check parameter has been fixed - APP-10753
@request_error_handler
def get_health_check(self, check=None):
    """
    Get health statistics for the appliance
    :param check: specific check to run - optional
    CAUTION: the check parameter is broken for the time being
    """
    if check:
        if not isinstance(check, str):
            raise ValueError('check need to be a string')
        endpoint = '{url}/health/{check}'.format(url=self.url, check=check)
    else:
        endpoint = '{url}/health'.format(url=self.url)
    return requests.get(endpoint, headers=self.headers, verify=self.verify)
class VectraClientV2_1(VectraClient):
def __init__(self, url=None, token=None, verify=False):
    """
    Initialize Vectra client
    :param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
    :param token: API token for authentication when using API v2*
    :param verify: Verify SSL (default: False) - optional
    """
    super().__init__(url=url, token=token, verify=verify)
    # Remove potential trailing slash
    url = VectraClient._remove_trailing_slashes(url)
    # Override the endpoint set by the parent constructor with APIv2.1.
    self.url = '{url}/api/v2.1'.format(url=url)
@staticmethod
def _generate_account_params(args):
    """
    Generate query parameters for accounts based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    :raises ValueError: on any key that is not a recognized account parameter
    """
    valid_keys = ['all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte', 'fields', 'first_seen',
                  'include_detection_summaries', 'last_seen', 'last_source', 'max_id', 'min_id', 'name',
                  'note_modified_timestamp_gte', 'ordering', 'page', 'page_size', 'privilege_category',
                  'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
                  'threat', 'threat_gte', 'uid']
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid account query parameter'.format(str(key)))
        # None values are silently dropped from the query string.
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_detect_usage_params(args):
    """
    Generate query parameters for detect usage query based on provided args
    :param args: dict of keys to generate query params; only 'start' and 'end'
        are accepted, each a YYYY-mm date string
    :rtype: dict
    """
    date_format = re.compile('[0-9]{4}-[0-9]{2}')
    params = {}
    for key, value in args.items():
        if key not in ['start', 'end']:
            raise ValueError('argument {} is an invalid detect usage query parameter'.format(str(key)))
        if value is None:
            continue
        # Validate locally as the error thrown by the endpoint is not very verbose.
        if not date_format.match(value):
            raise ValueError('{} is not a valid date string for detect usage query'.format(str(value)))
        params[key] = value
    return params
def get_campaigns(self, **kwargs):
    # Raises (rather than warns) so v2.0-style calls fail loudly in the v2.1 client.
    raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_campaigns() which supports pagination')
def get_hosts(self, **kwargs):
    # Raises (rather than warns) so v2.0-style calls fail loudly in the v2.1 client.
    raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_hosts() which supports pagination')
def get_detections(self, **kwargs):
    # Raises (rather than warns) so v2.0-style calls fail loudly in the v2.1 client.
    raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_detections() which supports pagination')
def get_all_accounts(self, **kwargs):
    """
    Generator to retrieve all accounts, one requests.Response per page - all parameters are optional
    :param all: does nothing
    :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
    :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param certainty: certainty score (int)
    :param certainty_gte: certainty score greater than or equal to (int)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are id, url, name, state, threat, certainty, severity, account_type,
        tags, note, note_modified_by, note_modified_timestamp, privilege_level, privilege_category,
        last_detection_timestamp, detection_set, probable_home
    :param first_seen: first seen timestamp of the account (datetime)
    :param include_detection_summaries: include detection summary in response (bool)
    :param last_seen: last seen timestamp of the account (datetime)
    :param last_source: registered ip address of host
    :param max_id: maximum ID of account returned
    :param min_id: minimum ID of account returned
    :param name: registered name of host
    :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
    :param ordering: field to use to order response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    :param privilege_category: privilege category of account (low/medium/high)
    :param privilege_level: privilege level of account (0-10)
    :param privilege_level_gte: privilege of account level greater than or equal to (int)
    :param state: state of host (active/inactive)
    :param t_score: threat score (int) - will be removed with deprecation of v1 of api
    :param t_score_gte: threat score greater than or equal to (int) - will be removed with deprecation of v1 of api
    :param tags: tags assigned to host
    :param threat: threat score (int)
    :param threat_gte: threat score greater than or equal to (int)
    """
    resp = requests.get('{url}/accounts'.format(url=self.url), headers=self.headers,
                        params=self._generate_account_params(kwargs), verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url=resp.json()['next'])
        yield resp
@request_error_handler
def get_account_by_id(self, account_id=None, **kwargs):
    """
    Get account by id
    :param account_id: account id - required
    :param fields: comma separated string of fields to be filtered and returned - optional
        possible values are id, url, name, state, threat, certainty, severity, account_type,
        tags, note, note_modified_by, note_modified_timestamp, privilege_level, privilege_category,
        last_detection_timestamp, detection_set, probable_home
    """
    if not account_id:
        raise ValueError('Account id required')
    return requests.get('{url}/accounts/{id}'.format(url=self.url, id=account_id), headers=self.headers,
                        params=self._generate_account_params(kwargs), verify=self.verify)
@request_error_handler
def get_account_tags(self, account_id=None):
    """
    Get Account tags
    :param account_id: ID of the account for which to retrieve the tags
    """
    # Honor the client-level TLS setting; this call previously hard-coded
    # verify=False regardless of the verify flag passed to the constructor.
    return requests.get('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=self.headers,
                        verify=self.verify)
@request_error_handler
def set_account_tags(self, account_id=None, tags=None, append=False):
    """
    Set account tags
    :param account_id: ID of the account for which to set the tags
    :param tags: list of tags to add to account
    :param append: overwrites existing list if set to False, appends to existing tags if set to True
        Set to empty list to clear tags (default: False)
    """
    # Fresh list per call; the original used a mutable default argument.
    tags = [] if tags is None else tags
    if not isinstance(tags, list):
        raise TypeError('tags must be of type list')
    if append:
        current_list = self.get_account_tags(account_id=account_id).json()['tags']
        payload = {
            "tags": current_list + tags
        }
    else:
        payload = {
            "tags": tags
        }
    headers = self.headers.copy()
    headers.update({
        'Content-Type': "application/json",
        'Cache-Control': "no-cache"
    })
    return requests.patch('{url}/tagging/account/{id}'.format(url=self.url, id=account_id), headers=headers,
                          json=payload, verify=self.verify)
@request_error_handler
def bulk_set_accounts_tag(self, tag, account_ids):
    """
    Set a tag in bulk on multiple accounts. Only one tag can be set at a time
    :param tag: tag to set
    :param account_ids: IDs of the accounts for which to set the tag
    """
    if not isinstance(account_ids, list):
        raise TypeError('account IDs must be of type list')
    payload = {
        'objectIds': account_ids,
        'tag': tag
    }
    # Honor the client-level TLS setting (was hard-coded to verify=False).
    return requests.post('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
                         verify=self.verify)
@request_error_handler
def bulk_delete_accounts_tag(self, tag, account_ids):
    """
    Delete a tag in bulk on multiple accounts. Only one tag can be deleted at a time
    :param tag: tag to delete
    :param account_ids: IDs of the accounts on which to delete the tag
    """
    if not isinstance(account_ids, list):
        raise TypeError('account IDs must be of type list')
    payload = {
        'objectIds': account_ids,
        'tag': tag
    }
    # Honor the client-level TLS setting (was hard-coded to verify=False).
    return requests.delete('{url}/tagging/account'.format(url=self.url), headers=self.headers, json=payload,
                           verify=self.verify)
@request_error_handler
def get_account_note(self, account_id=None):
    """
    Get account notes
    :param account_id: ID of the account for which to retrieve the note
    For consistency we return a requests.models.Response object
    As we do not want to return the complete host body, we alter the response content
    """
    account = requests.get('{url}/accounts/{id}'.format(url=self.url, id=account_id), headers=self.headers, verify=self.verify)
    if account.status_code == 200:
        account_note = account.json()['note']
        # API endpoint return HTML escaped characters
        account_note = html.unescape(account_note) if account_note else ''
        json_dict = {'status': 'success', 'account_id': str(account_id), 'note': account_note}
        # Rewrite the response body in-place so callers still receive a
        # requests.Response, but with only the note payload.
        account._content = json.dumps(json_dict).encode('utf-8')
    return account
# TODO check if PATCH endpoint has been implemented on accounts
def set_account_note(self, account_id=None, note='', append=False):
    """Placeholder: the accounts PATCH endpoint does not exist yet on the appliance."""
    raise NotImplementedError('The PATCH endpoint is not yet implemented on /accounts')
@request_error_handler
def get_locked_accounts(self):
    """
    Get list of account locked by Account Lockdown
    """
    lockdown_endpoint = '{url}/lockdown/account'.format(url=self.url)
    return requests.get(lockdown_endpoint, headers=self.headers, verify=self.verify)
def get_rules(self, **kwargs):
    # Raises (rather than warns) so v2.0-style calls fail loudly in the v2.1 client.
    raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_rules() which supports pagination')
def advanced_search(self, stype=None, page_size=50, query=None):
    """
    Advanced search - generator yielding one requests.Response per page
    :param stype: search type (hosts, detections, accounts)
    :param page_size: number of objects returned per page
    :param query: advanced query (download the following guide for more details on query language
        https://support.vectranetworks.com/hc/en-us/articles/360003225254-Search-Reference-Guide)
    """
    if stype not in ['hosts', 'detections', 'accounts']:
        raise ValueError("Supported values for stype are hosts, detections or accounts")
    if not query:
        raise ValueError('Query parameter is required')
    params = {
        'page_size': page_size,
        'query_string': query
    }
    resp = requests.get('{url}/search/{stype}'.format(url=self.url, stype=stype), headers=self.headers,
                        params=params, verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url=resp.json()['next'])
        yield resp
@request_error_handler
def get_rule_by_id(self, rule_id, **kwargs):
    """
    Get triage rules by id
    :param rule_id: id of triage rule to retrieve
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: active_detections, additional_conditions, created_timestamp,
        description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
        priority, source_conditions, template, total_detections, triage_category, url
    """
    if not rule_id:
        raise ValueError('Rule id required')
    # Honor the client-level TLS setting (was hard-coded to verify=False).
    return requests.get('{url}/rules/{id}'.format(url=self.url, id=rule_id), headers=self.headers,
                        params=self._generate_rule_by_id_params(kwargs), verify=self.verify)
def get_rules_by_name(self, triage_category=None, description=None):
    # Raises (rather than warns) so v2.0-style calls fail loudly in the v2.1 client.
    raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_rules with the "contains" query parameter')
def get_all_rules(self, **kwargs):
    """
    Generator to retrieve all rules page by page - all parameters are optional
    :param contains: search for rules containing this string (substring matching)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: active_detections, additional_conditions, created_timestamp,
        description, detection, detection_category, enabled, id, is_whitelist, last_timestamp,
        priority, source_conditions, template, total_detections, triage_category, url
    :param include_templates: include rule templates, default is False
    :param ordering: field used to sort response
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    resp = requests.get('{url}/rules'.format(url=self.url), headers=self.headers,
                        params=self._generate_rule_params(kwargs), verify=self.verify)
    yield resp
    # Follow the API pagination links until there is no next page.
    while resp.json()['next']:
        resp = self._get_request(url = resp.json()['next'])
        yield resp
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been created succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
@request_error_handler
def create_rule(self, detection_category=None, detection_type=None, triage_category=None,
                source_conditions=None, additional_conditions=None, is_whitelist=False, **kwargs):
    """
    Create triage rule
    :param detection_category: detection category to triage - required
        possible values are: botnet activity, command & control, reconnaissance,
        lateral movement, exfiltration
    :param detection_type: detection type to triage - required
    :param triage_category: name that will be used for triaged detection - required
    :param source_conditions: JSON blob representing a tree-like conditional structure
        operators for leaf nodes: ANY_OF or NONE_OF
        operators for non-leaf nodes: AND or OR
        possible value for conditions: ip, host, account, sensor
        Example:
        {"OR": [{"AND": [{"ANY_OF": {
            "field": "ip",
            "values": [{"value": "10.45.91.184", "label": "10.45.91.184"}],
            "groups": [],
            "label": "IP"}}]}]}
    :param additional_conditions: JSON blob with the same tree structure as source_conditions
        possible value for conditions: remote1_ip, remote1_ip_groups, remote1_proto, remote1_port,
        remote1_dns, remote1_dns_groups, remote2_ip, remote2_ip_groups, remote2_proto, remote2_port,
        remote2_dns, remote2_dns_groups, account, named_pipe, uuid, identity, service, file_share,
        file_extensions, rdp_client_name, rdp_client_token, keyboard_name
        Example:
        {"OR": [{"AND": [{"ANY_OF": {
            "field": "remote1_ip",
            "values": [{"value": "10.1.52.71", "label": "10.1.52.71"}],
            "groups": [],
            "label": "External Target IP"}}]}]}
    :param is_whitelist: set to True if rule is a whitelist, opposed to tracking detections without scores (boolean)
    :param description: name of the triage rule - optional
    :param priority: used to determine order of triage filters (int) - optional
    :returns request object
    """
    # Default the condition trees per call; the original used mutable dict
    # defaults, which are shared between every invocation of the method.
    if source_conditions is None:
        source_conditions = {'OR': []}
    if additional_conditions is None:
        additional_conditions = {'OR': []}
    if not all([detection_category, detection_type, triage_category]):
        raise ValueError('Missing required parameter')
    if detection_category.lower() not in ['botnet activity', 'command & control', 'reconnaissance', 'lateral movement', 'exfiltration']:
        raise ValueError("detection_category not recognized")
    payload = {
        'detection_category': detection_category,
        'detection': detection_type,
        'triage_category': triage_category,
        'is_whitelist': is_whitelist,
        'source_conditions': source_conditions,
        'additional_conditions': additional_conditions
    }
    return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
                         verify=self.verify)
#TODO wait on fix
# CAUTION: this returns an error 500 altough the rule has been updated succesfully\
# when source_conditions and/or additional_conditions are empty - APP-11016
# CAUTION2: API will error out if original rule has empty source or additional_conditions and\
# payload has non-empty conditions - APP-11016
@request_error_handler
def update_rule(self, rule_id=None, **kwargs):
    """
    Update triage rule: the existing rule is fetched, patched locally with the
    provided keyword arguments, and PUT back in full; unspecified fields keep
    their current value.
    :param rule_id: id of rule to update - required
    :param triage_category: name that will be used for triaged detection
    :param source_conditions: JSON blob representing a tree-like conditional structure
        operators for leaf nodes: ANY_OF or NONE_OF
        operators for non-leaf nodes: AND or OR
        possible value for conditions: ip, host, account, sensor
        (see the create_rule docstring for a full example payload)
    :param additional_conditions: JSON blob with the same tree structure as source_conditions
        possible value for conditions: remote1_ip, remote1_ip_groups, remote1_proto, remote1_port,
        remote1_dns, remote1_dns_groups, remote2_ip, remote2_ip_groups, remote2_proto, remote2_port,
        remote2_dns, remote2_dns_groups, account, named_pipe, uuid, identity, service, file_share,
        file_extensions, rdp_client_name, rdp_client_token, keyboard_name
        (see the create_rule docstring for a full example payload)
    :param is_whitelist: set to True if rule is a whitelist, opposed to tracking detections without scores (boolean)
    :param description: name of the triage rule - optional
    :param priority: used to determine order of triage filters (int) - optional
    :param enabled: is the rule currently enabled (boolean) - optional - Not yet implemented!
    :returns request object
    """
    if rule_id:
        rule = self.get_rule_by_id(rule_id=rule_id).json()
    else:
        raise ValueError("rule id must be provided")
    valid_keys = ['description', 'priority', 'enabled', 'triage_category',
                  'is_whitelist', 'source_conditions', 'additional_conditions']
    for k, v in kwargs.items():
        if k in valid_keys:
            rule[k] = v
        else:
            raise ValueError('invalid parameter provided: {}'.format(str(k)))
    return requests.put('{url}/rules/{id}'.format(url=self.url, id=rule['id']), headers=self.headers, json=rule,
                        verify=self.verify)
    def get_groups(self, **kwargs):
        # Deprecated stub. NOTE(review): this *raises* DeprecationWarning rather
        # than emitting it via warnings.warn(), so any call fails outright - confirm
        # that hard failure (not a warning) is the intended migration behaviour.
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_groups() which supports pagination')
    def get_groups_by_name(self, name=None, description=None):
        # Deprecated stub; like get_groups above, it raises (not warns) on use.
        raise DeprecationWarning('This function has been deprecated in the Vectra API client v2.1. Please use get_all_groups with the "description" query parameter')
    def get_detect_usage(self, **kwargs):
        """
        Get average monthly IP count for Detect
        :param start: starting month for the usage statistics - format YYYY-mm
        :param end: end month for the usage statistics - format YYYY-mm
        Default is statistics from last month
        :returns: requests.Response for GET {url}/usage/detect
        """
        # _generate_detect_usage_params validates/normalises the start/end kwargs
        # before they are sent as query parameters.
        return requests.get('{url}/usage/detect'.format(url=self.url), params=self._generate_detect_usage_params(kwargs),
                            headers=self.headers, verify=self.verify)
| StarcoderdataPython |
3362652 | import requests
# Dispatch table mapping lowercase HTTP verb names to the matching
# helper functions from the `requests` library.
REQUESTS_METHODS = {
    verb: getattr(requests, verb)
    for verb in ('get', 'post', 'patch', 'put', 'delete')
}
def make_request(method, *args, **kwargs):
    """Issue an HTTP request with the given verb against https://www.google.com.

    `method` must be a key of REQUESTS_METHODS; any extra positional and
    keyword arguments are forwarded to the underlying requests helper.
    """
    handler = REQUESTS_METHODS[method]
    return handler('https://www.google.com', *args, **kwargs)
| StarcoderdataPython |
104808 | <reponame>MetaBytez/bitbitbot<gh_stars>0
from enum import IntEnum
from typing import Optional
from pydantic import BaseModel
class Role(IntEnum):
    # Twitch chat privilege levels, ordered from most (0) to least (3) privileged.
    BROADCASTER = 0
    MODERATOR = 1
    SUBSCRIBER = 2
    VIEWER = 3
class TwitchTags(BaseModel):
    # Parsed subset of the IRC tags Twitch attaches to a chat message.
    display_name: str
    color: Optional[str]
    user_id: str
    broadcaster: bool = False
    mod: bool
    subscriber: bool
    role: Role = Role.VIEWER

    def __init__(self, **data):
        """Validate the tag fields, then derive the effective Role.

        Precedence: broadcaster (detected from the raw 'badges' tag) wins,
        then moderator, then subscriber, defaulting to VIEWER.
        """
        super().__init__(**data)
        if self.mod:
            self.role = Role.MODERATOR
        elif self.subscriber:
            self.role = Role.SUBSCRIBER
        # badges is read from the raw input, not a declared field; a substring
        # match is enough because badge names are comma-separated in the tag.
        if 'broadcaster' in data.get('badges', ''):
            self.broadcaster = True
            self.role = Role.BROADCASTER
| StarcoderdataPython |
3267452 | my_name = '<NAME>'
my_age = 35 # not a lie
my_height = 185 # cm
my_weight = 70 # kg
my_eyes = 'Green'
my_teeth = 'White'
my_hair = 'Brown'
# NOTE(review): halving the height makes the "centimeters tall" line below
# print 92.5, which contradicts the 185 cm recorded above - confirm intent.
my_height = my_height / 2
print(f"let's talk about {my_name}.")
print(f"he's {my_height} centimeters tall.")
print(f"He's {my_weight} kg heavy")
print("Actually that's not too heavy")
print(f"He's got {my_eyes} eyes and {my_hair} hair.")
# this try line
# total mixes years, cm and kg - it is just an f-string arithmetic exercise
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}" )
3295348 | import os
from cctbx import xray
from cctbx.sgtbx import space_group
from cctbx.sgtbx import space_group_symbols
from cctbx.uctbx import unit_cell
from cctbx.crystal import symmetry
import sys
if sys.version_info < (3, 0):
version = 2
def ersatz_pointgroup(spacegroup_name):
    '''Guess the pointgroup for the spacegroup by mapping from short to
    long name, then taking 1st character from each block.

    :param spacegroup_name: short or long spacegroup symbol, e.g. 'P212121'
    :returns: derived pointgroup symbol, e.g. 'P222'
    :raises RuntimeError: if the spacegroup is not found in $CLIBD/symop.lib
    '''

    pg = None

    # symop.lib: data records start in column 0; continuation lines are
    # indented, so skip anything that begins with a space.
    with open(os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r') as fh:
        for record in fh.readlines():
            if ' ' in record[:1]:
                continue
            if spacegroup_name == record.split()[3]:
                pg = record.split()[4][2:]
            elif spacegroup_name == record.split('\'')[1].replace(' ', ''):
                pg = record.split()[4][2:]

    if not pg:
        # The original wrapped this in `raise X, msg` inside try/except-pass,
        # which was a SyntaxError on Python 3 and silently swallowed the error
        # on Python 2; raise unconditionally instead.
        raise RuntimeError('spacegroup %s unknown' % spacegroup_name)

    # FIXME this is probably not correct for small molecule work...
    # just be aware of this, in no danger right now of handling non-chiral
    # spacegroups

    if '/' in pg:
        pg = pg.split('/')[0]

    result = spacegroup_name[0] + pg

    # hexagonal-setting rhombohedral groups are conventionally written R3
    if 'H3' in result:
        result = result.replace('H3', 'R3')

    return result
def spacegroup_to_lattice(input_spacegroup):
    ''' This generates a lattice from the imported file by chopping off
    the first letter of the cell type, changing to lowercase and then
    prepending it to the first letter of the spacegroup.

    :param input_spacegroup: spacegroup symbol (str) or ITA number (int)
    :returns: two-letter lattice symbol such as 'aP', or None when not found
    :raises RuntimeError: if the input is neither a string nor an integer
    '''

    def fix_hH(lattice):
        # rhombohedral lattices in the hexagonal setting are written 'hR'
        if lattice != 'hH':
            return lattice
        return 'hR'

    mapping = {'TRICLINIC':'a',
               'MONOCLINIC':'m',
               'ORTHORHOMBIC':'o',
               'TETRAGONAL':'t',
               'TRIGONAL':'h',
               'HEXAGONAL':'h',
               'CUBIC':'c'}

    if type(input_spacegroup) == type(u''):
        # normalise unicode input to a plain str (no-op on Python 3)
        input_spacegroup = str(input_spacegroup)

    if type(input_spacegroup) == type(''):
        # look the symbol up in column 4 of symop.lib
        with open(os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r') as fh:
            for record in fh.readlines():
                if ' ' in record[:1]:
                    continue
                if input_spacegroup == record.split()[3]:
                    return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
    elif type(input_spacegroup) == type(0):
        # look the ITA number up in column 1 of symop.lib
        with open(os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r') as fh:
            for record in fh.readlines():
                if ' ' in record[:1]:
                    continue
                if input_spacegroup == int(record.split()[0]):
                    return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
    else:
        # The original raised this via the Python 2 `raise X, msg` form inside
        # try/except-pass (SyntaxError on py3, swallowed on py2); raise cleanly.
        raise RuntimeError('bad type for input: %s' % type(input_spacegroup))

    return None
def check_spacegroup_name(spacegroup_name):
    '''Will return normalised name if spacegroup name is recognised,
    raise exception otherwise. For checking command-line options.

    :param spacegroup_name: spacegroup symbol, or an ITA number (int or str)
    :returns: normalised spacegroup name
    :raises RuntimeError: for out-of-range numbers or unrecognised names
    '''

    try:
        j = int(spacegroup_name)
        if j > 230 or j <= 0:
            # The original swallowed this on Python 2 (raise inside a bare
            # try/except pass) and then returned a name anyway; fail loudly.
            raise RuntimeError('spacegroup number nonsense: %s' % spacegroup_name)
        return spacegroup_number_to_name(j)
    except ValueError:
        # not an integer - fall through to a name lookup
        pass

    with open(os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r') as fh:
        for record in fh.readlines():
            if ' ' in record[:1]:
                continue
            if spacegroup_name == record.split()[3]:
                return spacegroup_name

    raise RuntimeError('spacegroup name "%s" not recognised' % spacegroup_name)
def check_split_cell(cell_string):
    '''Will return tuple of floats a, b, c, alpha, beta, gamma from input
    cell string which contains a,b,c,alpha,beta,gamma raising an exception
    if there is a problem.

    :param cell_string: comma separated cell constants
    :returns: (a, b, c, alpha, beta, gamma) as floats
    :raises RuntimeError: if the string does not contain exactly six values
    :raises ValueError: if any field is not parseable as a float
    '''

    ideal_string = 'a,b,c,alpha,beta,gamma'

    if not cell_string.count(',') == 5:
        # The Python 2 original raised this inside try/except-pass, silently
        # continuing with bad input; raise unconditionally.
        raise RuntimeError('%s should be of the form %s' %
                           (cell_string, ideal_string))

    a, b, c, alpha, beta, gamma = tuple(
        map(float, cell_string.split(',')))

    return a, b, c, alpha, beta, gamma
def constrain_cell(lattice_class, cell):
    '''Constrain cell to fit lattice class x.

    :param lattice_class: one-letter crystal system code:
        'a' triclinic, 'm' monoclinic, 'o' orthorhombic, 't' tetragonal,
        'h' trigonal/hexagonal, 'c' cubic
    :param cell: (a, b, c, alpha, beta, gamma)
    :returns: constrained (a, b, c, alpha, beta, gamma)
    :raises RuntimeError: for an unrecognised lattice class
    '''

    a, b, c, alpha, beta, gamma = cell

    if lattice_class == 'a':
        return (a, b, c, alpha, beta, gamma)
    elif lattice_class == 'm':
        # monoclinic: alpha = gamma = 90
        return (a, b, c, 90.0, beta, 90.0)
    elif lattice_class == 'o':
        return (a, b, c, 90.0, 90.0, 90.0)
    elif lattice_class == 't':
        # tetragonal: a = b (averaged)
        e = (a + b) / 2.0
        return (e, e, c, 90.0, 90.0, 90.0)
    elif lattice_class == 'h':
        # trigonal / hexagonal: a = b, gamma = 120
        e = (a + b) / 2.0
        return (e, e, c, 90.0, 90.0, 120.0)
    elif lattice_class == 'c':
        # cubic: a = b = c (averaged)
        e = (a + b + c) / 3.0
        return (e, e, e, 90.0, 90.0, 90.0)

    # The Python 2 original raised this inside try/except-pass and then fell
    # off the end returning None; raise unconditionally instead.
    raise RuntimeError('lattice class not recognised: %s' % lattice_class)
def spacegroup_number_to_name(spg_num):
    '''Convert a spacegroup number to a more readable name.

    :param spg_num: ITA spacegroup number (int)
    :returns: full spacegroup name from symop.lib, e.g. 'P 1'
    :raises KeyError: if the number is not present in symop.lib
    '''
    database = {}
    # use a context manager so the file handle is closed (original leaked it)
    with open(os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r') as fh:
        for record in fh.readlines():
            if ' ' in record[:1]:
                continue
            number = int(record.split()[0])
            # the quoted field holds the full spacegroup name
            name = record.split('\'')[1].strip()
            database[number] = name
    return database[spg_num]
def lattice_to_spacegroup(lattice):
    ''' Converts a lattice to the spacegroup with the lowest symmetry
    possible for that lattice'''
    lowest_symmetry_spacegroup = {
        'aP': 1,
        'mP': 3, 'mC': 5, 'mI': 5,
        'oP': 16, 'oC': 21, 'oI': 23, 'oF': 22,
        'tP': 75, 'tI': 79,
        'hP': 143, 'hR': 146, 'hH': 146,
        'cP': 195, 'cF': 196, 'cI': 197,
    }
    return lowest_symmetry_spacegroup[lattice]
def lauegroup_to_lattice(lauegroup):
    '''Convert a Laue group representation (from pointless, e.g. I m m m)
    to something useful, like the implied crystal lattice (in this
    case, oI.)'''

    # Derived from the output of Ralf GK's sginfo plus a little fiddling.
    # 'I2/m' added 19/feb/08 because pointless started emitting it, even
    # though it is not a "real" spacegroup - may be able to switch this off.
    laue_to_lattice = {'Ammm': 'oA',
                       'C2/m': 'mC',
                       'I2/m': 'mI',
                       'Cmmm': 'oC',
                       'Fm-3': 'cF',
                       'Fm-3m': 'cF',
                       'Fmmm': 'oF',
                       'H-3': 'hR',
                       'H-3m': 'hR',
                       'R-3:H': 'hR',
                       'R-3m:H': 'hR',
                       'R-3': 'hR',
                       'R-3m': 'hR',
                       'I4/m': 'tI',
                       'I4/mmm': 'tI',
                       'Im-3': 'cI',
                       'Im-3m': 'cI',
                       'Immm': 'oI',
                       'P-1': 'aP',
                       'P-3': 'hP',
                       'P-3m': 'hP',
                       'P2/m': 'mP',
                       'P4/m': 'tP',
                       'P4/mmm': 'tP',
                       'P6/m': 'hP',
                       'P6/mmm': 'hP',
                       'Pm-3': 'cP',
                       'Pm-3m': 'cP',
                       'Pmmm': 'oP'}

    # drop whitespace and '1' placeholder tokens, e.g. 'C 1 2/m 1' -> 'C2/m'
    compacted = ''.join(token for token in lauegroup.split() if token != '1')
    return laue_to_lattice[compacted]
def generate_primitive_cell(unit_cell_constants, space_group_name):
    '''For a given set of unit cell constants and space group, determine the
    corresponding primitive unit cell...

    :param unit_cell_constants: (a, b, c, alpha, beta, gamma)
    :param space_group_name: spacegroup symbol understood by cctbx
    :returns: cctbx unit_cell object in the primitive setting
    '''

    uc = unit_cell(unit_cell_constants)
    # resolve the symbol to a space_group via its Hall symbol
    sg = space_group(space_group_symbols(space_group_name).hall())

    cs = symmetry(unit_cell = uc,
                  space_group = sg)
    # change basis to the primitive setting and hand back just the cell
    csp = cs.change_basis(cs.change_of_basis_op_to_primitive_setting())

    return csp.unit_cell()
if __name__ == '__main__':
    # Print the inferred pointgroup for each spacegroup named on the command
    # line. The original used the Python 2 print *statement*, which is a
    # SyntaxError on Python 3.
    for token in sys.argv[1:]:
        print(ersatz_pointgroup(token))
| StarcoderdataPython |
3224073 | from prml.nn.array.array import Array
from prml.nn.config import config
import numpy as np
def ones(size):
    """Return a prml Array of ones with the given shape.

    :param size: int or tuple of ints, forwarded directly to ``np.ones``
    :returns: Array wrapping the new ndarray, using the globally configured dtype
    """
    return Array(np.ones(size, dtype=config.dtype))
| StarcoderdataPython |
3230887 | <reponame>scholarsmate/durasftp
import argparse
from durasftp.common.log import set_log_file_path
class LogFileAction(argparse.Action):
    """
    This argparse action allows for a command line argument to specify a log file to write to:

    Example:
        python <script> --log-file <log-file-path>
    """

    def __init__(self, option_strings, dest, default=None, required=False, help=None):
        # Always consume exactly one value: the log file path.
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=1,
            default=default,
            required=required,
            help=help,
        )

    def __call__(self, parser, namespace, values, option_string=None):
        path = values[0]
        if path is not None:
            # Route subsequent log output to the requested file.
            set_log_file_path(path)
        setattr(namespace, self.dest, path)
| StarcoderdataPython |
3356569 | <reponame>LimePencil/baekjoonProblems
import sys
# Closest 3-sum: read n and an array of ints from stdin, then print the three
# elements whose sum is nearest to zero.
n = int(sys.stdin.readline().rstrip("\n"))
arr = list(map(int,sys.stdin.readline().rstrip("\n").split(" ")))
# sort so the classic two-pointer scan can close in on the target sum
arr.sort()
minimum = float('inf')
a1 = 0
a2 = 0
a3 = 0
for i in range(n-2):
    # scan the subarray right of i with pointers moving in from both ends
    s = i+1
    e = n-1
    target = -1*arr[i]
    while s<e:
        se = arr[s]+arr[e]
        if abs(se+arr[i])<minimum:
            minimum = abs(se+arr[i])
            a1,a2,a3 = arr[i],arr[s],arr[e]
        if se+arr[i] == 0:
            # exact zero found; the strict '<' above means no later triple can
            # replace it, so stopping just the inner loop is sufficient
            break
        if se > target:
            e-=1
        else:
            s+=1
print(str(a1)+" " + str(a2)+ " " +str(a3))
| StarcoderdataPython |
117900 | <gh_stars>1-10
'''
Created on May 1, 2016
@author: Drew
'''
# (x, 0, z) position tuples for the main-menu buttons; each *HidePos variant
# sits ~1.1 units lower. NOTE(review): presumably Panda3D aspect2d coordinates
# where the lower positions are off-screen - confirm against the UI code.
PlayBtnPos = (0, 0, 0.0)
PlayBtnHidePos = (0, 0, -1.1)
OptionsBtnPos = (-.9, 0, -0.6)
OptionsBtnHidePos = (-.9, 0, -1.7)
DiscordBtnPos = (-.3, 0, -0.6)
DiscordBtnHidePos = (-.3, 0, -1.7)
CreditsBtnPos = (.3, 0, -0.6)
CreditsBtnHidePos = (.3, 0, -1.7)
QuitBtnPos = (.9, 0, -0.6)
QuitBtnHidePos = (.9, 0, -1.7) | StarcoderdataPython |
1722246 | <gh_stars>0
from doab.tests.test_types import IntersectAcceptanceTest, TestManager, ReferenceParsingTest
from doab.parsing.reference_miners import (
BloomsburyAcademicMiner,
CambridgeCoreParser,
CitationTXTReferenceMiner,
SpringerMiner,
)
@TestManager.register
class PalgraveCUPIntersect(IntersectAcceptanceTest):
    # Citation shared by a Palgrave and a CUP title; must match both book ids.
    CITATION = "<NAME>. 1993. The Two Cultures"
    BOOK_IDS = {"16498", "27401"}
@TestManager.register
class PalgraveAcceptanceTestA(IntersectAcceptanceTest):
    # Foucault citation expected across these three book ids.
    CITATION = "Foucault, M. (1991). Discipline and Punish. The Birth of the Prison St. Ives: Penguin"
    BOOK_IDS = {"24596", "20716", "27401"} #24598
@TestManager.register
class PalgraveAcceptanceTestB(IntersectAcceptanceTest):
    # NOTE(review): "21612" appears twice in the set literal below, so only two
    # distinct ids are tested - a third id was probably intended.
    CITATION = "<NAME>., <NAME>., & <NAME>. (2016). The impact of delays on maternal and neonatal outcomes in Ugandan public health facilities: The role of absenteeism. Health Policy and Planning, 1–10. doi:10.1093/heapol/czw046."
    BOOK_IDS = {"21612", "20717", "21612"}
@TestManager.register
class PalgraveAcceptanceTestC(IntersectAcceptanceTest):
    # Nursing Standard citation expected in both book ids.
    CITATION = "<NAME>., & <NAME>. (2014). Developing cultural sensitivity and awareness in nursing overseas. Nursing Standard, 28(44), 39–43.CrossRef"
    BOOK_IDS = {"21610", "21612"}
@TestManager.register
class OpenEditionsTestA(IntersectAcceptanceTest):
    # Widely-cited French monograph expected across many OpenEdition titles.
    CITATION = "<NAME>, Les Structures anthropologiques de l'imaginaire, Paris, Bordas, 1969."
    BOOK_IDS = {"16988", "19329", "20818", "20855", "20862", "20941", "21060", "21251", "22074", "22229", "22264"}
@TestManager.register
class OpenEditionsTestB(IntersectAcceptanceTest):
    # Foucault (French edition) citation across five OpenEdition titles.
    CITATION = "Foucault M. (1975), Surveiller et punir, Paris, Gallimard."
    BOOK_IDS = {"9337", "20851", "21101", "21176", "21251"}
@TestManager.register
class OpenEditionsTestC(IntersectAcceptanceTest):
    # Journal-article citation across five OpenEdition titles.
    CITATION = "Brynen Rex 1995, The Neopatrimonial Dimension of Palestinian Politics , Journal of Palestine Studies 1, p. 23-36."
    BOOK_IDS = {"15809", "15815", "16571", "16583", "16604"}
@TestManager.register
class CEDEJParsingTest(ReferenceParsingTest):
    # Expected reference counts per book id for the citation-TXT miner.
    PUBLISHER_NAME = "CEDEJ"
    BOOK_REFERENCE_COUNTS = {
        "22138": 43,
        "22141": 51,
        "22142": 127,
        "22143": 103,
        "22213": 102,
    }
    MINER = CitationTXTReferenceMiner
@TestManager.register
class PalgraveParsingTest(ReferenceParsingTest):
    # Expected reference counts per book id for the Springer miner.
    PUBLISHER_NAME = "<NAME>"
    BOOK_REFERENCE_COUNTS = {
        "26919": 957,
        "27363": 387,
        "27364": 157,
        "27401": 209,
        "27402": 398,
    }
    MINER = SpringerMiner
@TestManager.register
class BloomsburyParsingTest(ReferenceParsingTest):
    # Expected reference counts per book id for the Bloomsbury miner.
    PUBLISHER_NAME = "<NAME>"
    BOOK_REFERENCE_COUNTS = {
        "14368": 94,
        "15449": 145,
        "14372": 15,
        "14373": 211,
        "14376": 32,
    }
    MINER = BloomsburyAcademicMiner
@TestManager.register
class CasaVelazquezParsingTest(ReferenceParsingTest):
    # Expected reference counts per book id for the citation-TXT miner.
    PUBLISHER_NAME = "<NAME>"
    BOOK_REFERENCE_COUNTS = {
        "22583": 431,
        "22584": 84,
        "22585": 531,
        "22586": 495,
        "22587": 453,
    }
    MINER = CitationTXTReferenceMiner
@TestManager.register
class CambridgeCoreParsingTest(ReferenceParsingTest):
    # Expected reference counts per book id for the Cambridge Core parser.
    PUBLISHER_NAME = "Cambridge University Press"
    BOOK_REFERENCE_COUNTS = {
        "15986": 773,
        "15989": 338,
        "16001": 477,
        "16498": 387,
        "21821": 388,
    }
    MINER = CambridgeCoreParser
| StarcoderdataPython |
1613291 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Adds N number of rows of dummy data to the manifest table specified
"""
from __future__ import unicode_literals, print_function
import random
import boto3
import logging
import sys
import json
import time
from datetime import datetime
# Root logger records everything down to DEBUG...
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# ...but the console handler only surfaces INFO and above.
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
resource_map = {
'udb': {
'title': 'Unlocked Dynamic Bible',
'type': 'book',
'format': 'text/usfm'
},
'ulb': {
'title': 'Unlocked Literal Bible',
'type': 'book',
'format': 'text/usfm'
},
'reg': {
'title': 'Regular',
'type': 'book',
'format': 'text/usfm'
},
'bible': {
'title': 'Unlocked Bible',
'type': 'book',
'format': 'text/usfm'
},
'obs': {
'title': 'Open Bible Stories',
'type': 'book',
'format': 'text/markdown'
},
'obs-tn': {
'title': 'Open Bible Stories translationNotes',
'type': 'help',
'format': 'text/markdown'
},
'tn': {
'title': 'translationNotes',
'type': 'help',
'format': 'text/markdown'
},
'tw': {
'title': 'translationWords',
'type': 'dict',
'format': 'text/markdown'
},
'tq': {
'title': 'translationQuestions',
'type': 'help',
'format': 'text/markdown'
},
'ta': {
'title': 'translationAcademy',
'type': 'man',
'format': 'text/markdown'
}
}
def strTimeProp(start, end, format, prop):
    """Get a time at a proportion of a range of two formatted times.

    start and end should be strings specifying times formated in the
    given format (strftime-style), giving an interval [start, end].
    prop specifies how a proportion of the interval to be taken after
    start. The returned time will be in the specified format.
    """

    start_seconds = time.mktime(time.strptime(start, format))
    end_seconds = time.mktime(time.strptime(end, format))

    # linear interpolation between the two epoch timestamps
    chosen = start_seconds + prop * (end_seconds - start_seconds)

    return time.strftime(format, time.localtime(chosen))
def randomDate(start, end, prop):
    # Thin wrapper: timestamp at proportion `prop` between `start` and `end`,
    # both in the fixed '%Y-%m-%dT%H:%M:%SZ' format.
    return strTimeProp(start, end, '%Y-%m-%dT%H:%M:%SZ', prop)
lang_codes = {
'aa':'Afaraf',
'es': 'español',
'pt': 'português',
'pt-br': 'Português',
'en': 'English',
'de': 'Deutsch',
'ja': '日本語 (にほんご) ',
'fr': 'français',
'es-419': 'Español Latin America',
'zh': '中文 (Zhōngwén)',
'cfa-x-dijim': 'Dǝjim',
'tl': 'Wikang Tagalog',
'tpi': 'Tok Pisin'
}
def add_dummy_data_to_manifest_table(table_name, new_rows, start):
    """Insert `new_rows` rows of synthetic manifest data into a DynamoDB table.

    :param table_name: name of the DynamoDB table to write to
    :param new_rows: number of dummy rows to insert
    :param start: index of the first row (rows are named user<i>/repo<i>)
    """
    manifest_table = boto3.resource('dynamodb').Table(table_name)
    for i in range(start, start+new_rows):
        print("Adding row {0} of {1}".format(i, start+new_rows-1))
        repo_name_lower = 'repo{0}'.format(i)
        user_name_lower = 'user{0}'.format(i)
        # Pick a random resource id. The original indexed dict.keys(), which
        # raises TypeError on Python 3 (the script's shebang is python3);
        # random.choice over a list gives the same uniform selection.
        resource_id = random.choice(list(resource_map))
        resource = resource_map[resource_id]
        last_updated = randomDate("2015-01-01T00:00:00Z", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"), random.random())
        # dict views are not a sequence on Python 3 either - materialise first
        lang_code = random.choice(list(lang_codes))
        data = {
            'repo_name_lower': repo_name_lower,
            'user_name_lower': user_name_lower,
            'repo_name': repo_name_lower,
            'user_name': user_name_lower,
            'lang_code': lang_code,
            'resource_id': resource_id,
            'resource_type': resource['type'],
            'title': resource['title'],
            'views': random.randint(0, 500),
            'last_updated': last_updated
        }
        # serialised Resource Container manifest mirroring the flat fields above
        data['manifest'] = json.dumps({
            'checking': {'checking_entity': ['Wycliffe Associates'], 'checking_level': '1'},
            'dublin_core': {
                'conformsto': 'rc0.2',
                'contributor': ['unfoldingWord', 'Wycliffe Associates'],
                'creator': 'Wy<NAME>',
                'description': '',
                'format': resource['format'],
                'issued': datetime.utcnow().strftime('%Y-%m-%d'),
                'modified': datetime.utcnow().strftime('%Y-%m-%d'),
                'identifier': data['resource_id'],
                'language': {'identifier': data['lang_code'], 'direction': 'ltr', 'title': lang_codes[lang_code]},
                'type': data['resource_type'],
                'title': data['title']
            },
            'projects': [{
                'sort': '1',
                'identifier': data['resource_id'],
                'title': data['title'],
                'path': './',
                'versification': '',
                'categories': []
            }]
        })
        data['manifest_lower'] = data['manifest'].lower()
        manifest_table.put_item(Item=data)
def main():
    """CLI entry point: <table-name> <row-count> [<start-index>]."""
    if len(sys.argv) < 3:
        logger.critical('You must provide a table name and how many dummy rows to add.')
        logger.critical('Example: ./add_dummy_data_to_manifest_table.py tx-manifest 5000')
        exit(1)
    table_name = sys.argv[1]
    new_rows = int(sys.argv[2])
    start = 1
    # optional third argument: starting row index (defaults to 1)
    if len(sys.argv) > 3:
        start = int(sys.argv[3])
    add_dummy_data_to_manifest_table(table_name, new_rows, start)
if __name__ == '__main__':
main()
| StarcoderdataPython |
100791 | <reponame>7wikd/R_Pi-Surveillance<filename>new.py
array = ['Welcome','to','Turing']
for i in array:
array.append(i.upper()) | StarcoderdataPython |
1669026 | <filename>saleor/rest/serializers/product/attribute_value.py
from django.apps import apps
from rest_flex_fields import FlexFieldsModelSerializer
__all__ = [
'AttributeValueSerializer',
]
AttributeValue = apps.get_model(*'product.AttributeValue'.split())
class AttributeValueSerializer(FlexFieldsModelSerializer):
    """Serializer for :model:`product.AttributeValue`:

    `**Fields:**`
    01. `attribute`  : `ForeignKey` [:model:`product.Attribute`]
    02. `id`         : `AutoField`
    03. `name`       : `CharField`
    04. `slug`       : `SlugField`
    05. `sort_order` : `PositiveIntegerField`
    06. `value`      : `CharField`

    `**Reverse Fields:**`
    01. `translations` : `ForeignKey` [:model:`product.AttributeValueTranslation`]
    """

    class Meta:
        model = AttributeValue
        fields = [
            # Fields
            'attribute',
            'id',
            'name',
            'slug',
            'sort_order',
            'value',

            # Reverse Fields
            # 'translations' is deliberately excluded from the API payload
            # 'translations',
        ]
        # no read-only fields: everything listed above is writable
        read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
| StarcoderdataPython |
1667550 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Modelo de datos del programa.
Con esto se pretende abstraer toda la logica del programa para que sea mucho,
mas fácil encontrar en donde se encuentra cada parte del programa.
"""
# para crear clases con padres e hijos
from abc import ABC, abstractmethod
# para la reproducción de sonidos
from pydub import AudioSegment as mix
from pydub.playback import play
# para calculos matematicos
from scipy import signal
from scipy.io import wavfile
# para tratamiento de imagenes
from scipy.misc import imread, imsave, imresize, imrotate, imshow
# para valores random de las señales
import random
#
import struct
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy
import os
import sys
#
class Model(ABC):
    """Abstract model/controller base class."""

    def __init__(self):
        """Default constructor."""
        # enable matplotlib interactive mode for live plot updates
        plt.ion()

    @abstractmethod
    def hacer_algo(self):
        # "do something" - concrete models must implement this hook
        pass
| StarcoderdataPython |
1785361 | import abc
import random
import six
from optinum.algorithm import base
from optinum.common import objects
__all__ = ['HCFirstImprovement', 'HCBestImprovement']
@six.add_metaclass(abc.ABCMeta)
class HillClimbing(base.Algorithm):
    # Abstract local-search skeleton: subclasses supply move_operator() and a
    # depth_search policy (first- vs best-improvement).

    def __init__(self, name="HillClimbing", max_evaluations=50):
        super(HillClimbing, self).__init__(name)
        # objective evaluations performed so far / total budget
        self._evaluations = 1
        self._max_evaluations = max_evaluations

    @property
    def depth_search(self):
        # True -> evaluate every neighbour per step (best improvement)
        return True

    @abc.abstractmethod
    def move_operator(self):
        # Must yield candidate neighbour chromosomes of self._chromosome.
        pass

    def evaluate(self, chromosome):
        # Lower objective value is better (see the `<` comparisons below).
        # NOTE(review): reads self.task, presumably populated by base.Algorithm
        # before process() runs - confirm against the base class.
        return self.task.objective_function.compute(chromosome)

    def update_chromosome(self, chromosome, score):
        self._chromosome = chromosome
        self._score = score

    def process(self, task):
        """Run hill climbing on `task` until the evaluation budget is spent
        or (for depth search) no neighbour improves the current score."""
        self._chromosome = objects.Chromosome.random(task.variables,
                                                     task.space)
        self._score = self.evaluate(self._chromosome)
        self._evaluations = 1
        while self._evaluations < self._max_evaluations:
            move_made = False
            for candidate_chromosome in self.move_operator():
                candidate_score = self.evaluate(candidate_chromosome)
                if candidate_score < self._score:
                    move_made = True
                    self.update_chromosome(candidate_chromosome,
                                           candidate_score)
                    # first-improvement subclasses stop at the first gain
                    if not self.depth_search:
                        break
                self._evaluations = self._evaluations + 1
            # best-improvement terminates once a full sweep yields no move
            if not move_made and self.depth_search:
                break
class HCFirstImprovement(HillClimbing):
    """Hill climbing that accepts the first improving Hamming neighbour."""

    def __init__(self, name="HillClimbing: First Improvement"):
        super(HCFirstImprovement, self).__init__(name=name)

    @property
    def depth_search(self):
        # first-improvement: stop scanning neighbours at the first gain
        return False

    def move_operator(self):
        """Yield the Hamming-distance-1 neighbours in random order."""
        genetic_info = self._chromosome.get_raw_data()
        # Materialise the range: random.shuffle needs a mutable sequence
        # (shuffling a range object raises TypeError on Python 3).
        index_list = list(range(len(genetic_info)))
        random.shuffle(index_list)
        for index in index_list:
            hamming_neighbor = list(genetic_info)
            # flip one bit
            hamming_neighbor[index] = int(not(genetic_info[index]))
            yield objects.Chromosome.from_raw(hamming_neighbor, self.space)
class HCBestImprovement(HillClimbing):
    """Hill climbing that scans all neighbours and keeps the best move."""

    def __init__(self, name="HillClimbing: Best Improvement"):
        # Fix: the original called super(HCFirstImprovement, self), which
        # raises TypeError because this class does not inherit from
        # HCFirstImprovement.
        super(HCBestImprovement, self).__init__(name=name)

    @property
    def depth_search(self):
        # best-improvement: evaluate every neighbour before moving
        return True

    def move_operator(self):
        """Yield every Hamming-distance-1 neighbour of the current chromosome."""
        genetic_info = self._chromosome.get_raw_data()
        for index in range(len(genetic_info)):
            hamming_neighbor = list(genetic_info)
            # flip one bit
            hamming_neighbor[index] = int(not(genetic_info[index]))
            yield objects.Chromosome.from_raw(hamming_neighbor, self.space)
| StarcoderdataPython |
3286596 | def on(self, event, handler):
self._events = self._events
handlers = self._events[event] = self._events[event] if event in self._events else []
handlers.append(handler)
def off(self, event, handler):
    """Remove a previously registered handler for `event`.

    Raises ValueError if the handler was never registered (list.remove).
    """
    registered = self._events.setdefault(event, [])
    registered.remove(handler)
def emit(self, event, arguments):
    """Invoke every handler registered for `event`, unpacking `arguments`."""
    for callback in self._events.get(event, []):
        callback(*arguments)
def once(self, event, arguments, handler):
    """Register `handler` for `event` so it fires at most one time.

    (`arguments` is accepted for signature compatibility but unused.)
    """
    def one_shot(*call_args):
        # unsubscribe first so the handler cannot re-trigger itself
        self.off(event, one_shot)
        handler(*call_args)

    self.on(event, one_shot)
# class Emit:
def Patch(clzz):
    """Mix the emitter API (on/emit/off/once) into an existing class and wrap
    its __init__ so every instance starts with an empty `_events` registry."""
    clzz.on = on
    clzz.emit = emit
    clzz.off = off
    clzz.once = once
    old_init = clzz.__init__
    def new_init(self, *k, **kw):
        # the registry must exist before the original __init__ runs, in case
        # it registers handlers itself
        self._events = {}
        old_init(self, *k, **kw)
    clzz.__init__ = new_init
# Patch = staticmethod(Patch)
| StarcoderdataPython |
1600665 | <reponame>sonucr7/PythonOOP
class DataScientist:
    """Minimal employee record with a single behaviour: writing code."""

    def __init__(self, name, age, level, salary):
        # store the profile fields verbatim
        self.name = name
        self.age = age
        self.level = level
        self.salary = salary

    def code(self):
        """Announce on stdout that this person is coding."""
        print(f"{self.name} is writing code.........")
role1 = DataScientist("Sonu", 24, "Senior", 40000)
role2 = DataScientist("Sneha", 23, "Senior", 45000)
role1.code()
role2.code() | StarcoderdataPython |
158973 | import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image
import glob
import os
from shutil import copyfile
# Ensemble vote over two result files (crack detection, per the path names):
# an image is labelled 1 only when BOTH classifiers agree; unanimous positives
# are also copied out for a later segmentation step.
src_dir="F:/比赛事宜/裂纹识别/复赛数据/challengedataset-semifinal/test/test"
dst_dir="F:/比赛事宜/裂纹识别/复赛数据/challengedataset-semifinal/test/seg"
k=0
# NOTE(review): file_1/file_2 are never closed - consider 'with open(...)'.
file_1=open('result_11_24.txt')
file_2=open('result_11_22_98.44.txt')
result_1=[]
result_2=[]
# each line looks like '<index>.jpg <label>'; keep just the label column
for line in file_1.readlines():
    curLine=line.strip().split(" ")
    result_1.append(curLine[1])
for line in file_2.readlines():
    curLine = line.strip().split(" ")
    result_2.append(curLine[1])
with open("image_name.txt","w") as w:
    with open("vote.txt", "w") as f:
        for i in range(len(result_1)):
            if (result_1[i] == "1") and (result_2[i] == "1"):
                # unanimous positive: record it in both outputs, copy the image
                f.write("{}.jpg {}\n".format(i + 1, 1))
                w.write("{}.jpg {}\n".format(i + 1, 1))
                k = k + 1
                copyfile(src_dir+"/"+str(i+1)+".jpg", dst_dir+"/"+str(i+1)+".jpg")
            else:
                f.write("{}.jpg {}\n".format(i + 1, 0))
# total number of unanimous positives
print(k)
| StarcoderdataPython |
4823595 | <gh_stars>0
# Manager-backed Namespace: a proxy whose attributes can be shared between
# processes (attribute-style access, unlike a Pipe/Queue).
import multiprocessing
manager = multiprocessing.Manager()
namespace = manager.Namespace()
namespace.spam = 123
namespace.eggs = 456
| StarcoderdataPython |
1696308 | from huobi.constant.result import OutputKey
from huobi.impl.utils import *
from huobi.impl.utils.channelparser import ChannelParser
from huobi.impl.utils.timeservice import convert_cst_in_millisecond_to_utc
from huobi.model.constant import *
from huobi.model.candlestick import Candlestick
class CandlestickEvent:
    """
    The candlestick/kline data received by subscription of candlestick/kline.

    :member
        symbol: the symbol you subscribed.
        timestamp: the UNIX formatted timestamp generated by server in UTC.
        interval: candlestick/kline interval you subscribed.
        data: the data of candlestick/kline.
    """

    def __init__(self):
        self.symbol = ""
        self.timestamp = 0
        self.interval = CandlestickInterval.INVALID
        self.data = Candlestick()

    @staticmethod
    def json_parse(json_wrapper):
        """Build a CandlestickEvent from a subscription message wrapper."""
        # channel string e.g. 'market.<symbol>.kline.<interval>'
        ch = json_wrapper.get_string(OutputKey.KeyChannelCh)
        parse = ChannelParser(ch)
        candlestick_event = CandlestickEvent()
        candlestick_event.symbol = parse.symbol
        # NOTE(review): interval is set to "" rather than taken from the parsed
        # channel - confirm whether ChannelParser exposes it.
        candlestick_event.interval = ""
        # server timestamp arrives in CST milliseconds; convert to UTC
        candlestick_event.timestamp = convert_cst_in_millisecond_to_utc(json_wrapper.get_int("ts"))
        tick = json_wrapper.get_object(OutputKey.KeyTick)
        data = Candlestick.json_parse(tick)
        candlestick_event.data = data
        return candlestick_event
| StarcoderdataPython |
1736327 | if __name__ == "__main__":
# What I want to dispatch
from plunk.tw.py2py_front_example.simple_pycode import foo, bar, confuser
funcs = [foo, bar, confuser]
# My dispatching
from py2http import run_app
run_app(funcs, publish_openapi=True)
| StarcoderdataPython |
3268187 | import arcpy
# ArcGIS Desktop (Python 2 - note the print *statements*) geoprocessing tool:
# repoints OSM layers in an .mxd at a new SDE dataset, drops broken layers,
# and zooms the data frame to the dataset extent.
# get passed in arguments
mapDoc = arcpy.GetParameterAsText(0)
wrkspc = arcpy.GetParameterAsText(1)
datasetName = arcpy.GetParameterAsText(2)
#wrkspc = r"C:\Data\OSM\Mxds\NewOSMDEV.sde\sde.SDE.TempTest08"
# set mxd
mxd = arcpy.mapping.MapDocument(mapDoc)
# change data source locations: swap the '<dataset>_osm_<suffix>' feature
# class name prefix for the new dataset name
for lyr in arcpy.mapping.ListLayers(mxd):
    if lyr.supports("DATASOURCE"):
        print lyr.dataSource
        lyrDs = lyr.dataSource
        i = lyrDs.rfind("_osm_")
        if i > 0:
            fCNameExt = lyrDs[i:]
            newFCName = datasetName + fCNameExt
            print newFCName
            lyr.replaceDataSource(wrkspc, "SDE_WORKSPACE", newFCName)
            print lyr.dataSource
# find any broken data sources and delete layer
for df in arcpy.mapping.ListDataFrames(mxd):
    for lyr in arcpy.mapping.ListLayers(mxd, "", df):
        for brklyr in arcpy.mapping.ListBrokenDataSources(lyr):
            print 'Removing layer ' + brklyr.name + ' due to broken data source. '
            arcpy.mapping.RemoveLayer(df, brklyr)
# Set data frame extent to the new dataset's extent
df = arcpy.mapping.ListDataFrames(mxd)[0]
desc = arcpy.Describe(wrkspc + '\\' + datasetName)
df.extent = desc.extent
# Save mxd
mxd.save() | StarcoderdataPython |
3321995 | <filename>encryption_code.py
# Read in the cover image
def read_img():
    """Prompt for an image path and load it with OpenCV.

    :returns: (BGR image ndarray, its shape)
    NOTE(review): cv2.imread returns None for a bad path, in which case
    img.shape below raises AttributeError.
    """
    import cv2
    path = input("原图路径: ")
    img = cv2.imread(path)
    return img, img.shape
# Map each character of the secret message to an encoded value
def word_to_num(shape):
    """Read the message to hide and translate each character through the
    data/word_code.json lookup table.

    :param shape: shape of the cover image (used only for a capacity check)
    :returns: list with one table entry per character
    NOTE(review): word_code.get(word) yields None for characters missing from
    the table, and assert statements vanish under 'python -O' - consider an
    explicit raise for both.
    """
    import json
    words = list(input("需要加密的话(支持中、英及混合): "))
    assert (shape[0] * shape[1]) / 2 > len(words), 'picture is too small'
    with open('data/word_code.json') as f:
        word_code = json.load(f)
    nums_list = []
    for word in words:
        nums = word_code.get(word)
        nums_list.append(nums)
    return nums_list
# Encrypt: embed the encoded message into randomly chosen pixels
def write_to_img(nums_list, img):
    """Write each encoded value into a random pixel of `img`, then save it.

    NOTE(review): random_place_x is drawn from [0, width) but used as the ROW
    index (img[index_x][index_y]), which can raise IndexError on non-square
    images. Also `value_equal = False` runs even after the collision `break`,
    so the retry loop exits anyway - and pixels written before a later
    collision are never rolled back. Confirm intended behaviour.
    """
    import numpy as np
    import cv2
    path = input("改写后图片存储路径: ")
    # generate random embedding positions
    value_equal = True
    while value_equal:
        random_place_x = np.random.randint(0, img.shape[1], len(nums_list))
        random_place_y = np.random.randint(0, img.shape[0], len(nums_list))
        random_place = []
        for i in range(len(nums_list)):
            random_place.append([random_place_x[i], random_place_y[i]])
        # sort positions by their x coordinate
        random_place.sort(key=lambda x: x[0])
        count = 0
        for place in range(len(nums_list)):
            if place == 0:
                print(random_place[place][0], random_place[place][1])
            index_x = random_place[place][0]
            index_y = random_place[place][1]
            # collision: the pixel already equals the value we would write
            if (img[index_x][index_y] == np.array(nums_list[place])).all():
                break
            else:
                print('de', img[index_x][index_y])
                img[index_x][index_y] = np.array(nums_list[place])
                count += 1
                print("en", img[index_x][index_y])
        value_equal = False
    cv2.imwrite(path, img)
if __name__ == '__main__':
    # Banner: "only PNG supported" - presumably because a lossy format such as
    # JPEG would corrupt the embedded pixel values; confirm.
    print("===仅支持png图片格式===")
    img, shape = read_img()
    nums_list = word_to_num(shape)
    write_to_img(nums_list, img)
| StarcoderdataPython |
4834750 | #!/usr/bin/env python3
from setuptools import setup, find_packages
import gtabview_cli
# long description with latest release notes
# NOTE(review): these handles are never closed; a 'with' block (or
# pathlib.Path(...).read_text()) would be tidier.
readme = open('README.rst').read()
news = open('NEWS.rst').read()
# append only the newest NEWS section (first '\n\n\n'-separated chunk, minus
# its two heading lines) to the README
long_description = (readme + "\n\nLatest release notes\n====================\n"
                    + '\n'.join(news.split('\n\n\n', 1)[0].splitlines()[2:]))

# the actual setup
setup(name='gtabview', version=gtabview_cli.__version__,
      description='A simple graphical tabular data viewer',
      author="<NAME>",
      author_email="<EMAIL>",
      license="MIT",
      long_description=long_description,
      long_description_content_type='text/x-rst',
      url="https://github.com/TabViewer/gtabview",
      keywords='data spreadsheet view viewer csv comma separated values',
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Environment :: Win32 (MS Windows)',
                   'Environment :: MacOS X',
                   'Environment :: X11 Applications :: Qt',
                   'Intended Audience :: Science/Research',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Topic :: Office/Business :: Financial :: Spreadsheet',
                   'Topic :: Scientific/Engineering :: Visualization',
                   'Topic :: Software Development :: User Interfaces',
                   'Topic :: Software Development :: Widget Sets',
                   'Topic :: Utilities'],
      packages=find_packages(),
      setup_requires=['setuptools'],
      extras_require={'test': ['nose']},
      test_suite='nose.collector',
      entry_points={
          'console_scripts': [
              'gtabview=gtabview_cli.gtabview:main',
          ],
      },
      data_files=[('share/doc/gtabview', ['README.rst', 'NEWS.rst'])])
| StarcoderdataPython |
184704 | '''
Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
If this code is useful to you, please cite the following paper:
<NAME>, <NAME>, and <NAME>. Learning topology from synthetic data for unsupervised depth completion.
In the Robotics and Automation Letters (RA-L) 2021 and Proceedings of International Conference on Robotics and Automation (ICRA) 2021
@article{wong2021learning,
title={Learning topology from synthetic data for unsupervised depth completion},
author={<NAME> and <NAME> and <NAME>},
journal={IEEE Robotics and Automation Letters},
volume={6},
number={2},
pages={1495--1502},
year={2021},
publisher={IEEE}
}
'''
import sys, os, glob
import numpy as np
import cv2
import multiprocessing as mp
from skimage import morphology as skmorph
sys.path.insert(0, 'src')
import data_utils
'''
Paths for KITTI dataset
'''
KITTI_ROOT_DIRPATH = os.path.join('data', 'kitti_depth_completion')
KITTI_TRAIN_SPARSE_DEPTH_DIRPATH = os.path.join(
    KITTI_ROOT_DIRPATH, 'train_val_split', 'sparse_depth', 'train')
# To be concatenated to sequence path
KITTI_SPARSE_DEPTH_REFPATH = os.path.join('proj_depth', 'velodyne_raw')

'''
Paths for Virtual KITTI dataset
'''
VKITTI_ROOT_DIRPATH = os.path.join('data', 'virtual_kitti')
VKITTI_TRAIN_DEPTH_REFPATH = 'vkitti_1.3.1_depthgt'
# Note: we only need to use the clone directory since lighting change only affects RGB
VKITTI_TRAIN_DENSE_DEPTH_DIRPATH = \
    os.path.join(VKITTI_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH)

'''
Output directory
'''
OUTPUT_ROOT_DIRPATH = os.path.join('data', 'virtual_kitti_learning_topology')
OUTPUT_REF_DIRPATH = os.path.join('training', 'vkitti')
# Text files listing the generated sample paths, one path per line
OUTPUT_SPARSE_DEPTH_FILEPATH = os.path.join(
    OUTPUT_REF_DIRPATH, 'vkitti_train_sparse_depth.txt')
OUTPUT_VALIDITY_MAP_FILEPATH = os.path.join(
    OUTPUT_REF_DIRPATH, 'vkitti_train_validity_map.txt')
OUTPUT_SEMI_DENSE_DEPTH_FILEPATH = os.path.join(
    OUTPUT_REF_DIRPATH, 'vkitti_train_semi_dense_depth.txt')
OUTPUT_DENSE_DEPTH_FILEPATH = os.path.join(
    OUTPUT_REF_DIRPATH, 'vkitti_train_dense_depth.txt')
OUTPUT_GROUND_TRUTH_FILEPATH = os.path.join(
    OUTPUT_REF_DIRPATH, 'vkitti_train_ground_truth.txt')
def process_frame(inputs):
    '''
    Processes a single depth frame

    Args:
        inputs : tuple
            KITTI sparse depth path,
            Virtual KITTI ground truth path,
            output directory paths in order of:
                sparse depth, validity map, semi-dense depth, dense depth, groundtruth

    Returns:
        str : Virtual KITTI output sparse depth path
        str : Virtual KITTI output validity map path
        str : Virtual KITTI output semi-dense depth (convex hull of sparse points) path
        str : Virtual KITTI output dense depth path (ground truth without sky)
        str : Virtual KITTI output ground truth path
    '''
    sparse_depth_path, ground_truth_path, output_dirpaths = inputs

    # The validity map marks which pixels carry a real LiDAR return
    _, validity_map = data_utils.load_depth_with_validity_map(sparse_depth_path)

    # Virtual KITTI ground truth is stored in centimeters; convert to meters
    ground_truth = cv2.imread(
        ground_truth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    ground_truth = ground_truth / 100.0

    # Bring the KITTI validity map to the Virtual KITTI resolution if needed
    if validity_map.shape != ground_truth.shape:
        target_height, target_width = ground_truth.shape[0], ground_truth.shape[1]
        validity_map = cv2.resize(
            validity_map,
            dsize=(target_width, target_height),
            interpolation=cv2.INTER_NEAREST)
        assert(np.all(np.unique(validity_map) == [0, 1]))

    # Dense depth: ground truth with the sky (depth > 600m) masked out
    no_sky_mask = np.ones(ground_truth.shape)
    no_sky_mask[ground_truth > 600.0] = 0.0
    dense_depth = no_sky_mask * ground_truth

    # Sparse depth: keep only pixels where KITTI had a LiDAR return
    sparse_depth = validity_map * dense_depth

    # Semi-dense depth: everything inside the convex hull of the sparse points
    hull_mask = np.where(skmorph.convex_hull_image(validity_map), 1, 0)
    semi_dense_depth = hull_mask * dense_depth

    # One output path per modality, reusing the ground-truth filename
    filename = os.path.basename(ground_truth_path)
    output_paths = tuple(
        os.path.join(dirpath, filename) for dirpath in output_dirpaths)
    sparse_depth_output_path, \
        validity_map_output_path, \
        semi_dense_depth_output_path, \
        dense_depth_output_path, \
        ground_truth_output_path = output_paths

    # Persist all five modalities
    data_utils.save_depth(sparse_depth, sparse_depth_output_path)
    data_utils.save_validity_map(validity_map, validity_map_output_path)
    data_utils.save_depth(semi_dense_depth, semi_dense_depth_output_path)
    data_utils.save_depth(dense_depth, dense_depth_output_path)
    data_utils.save_depth(ground_truth, ground_truth_output_path)

    return output_paths
'''
Select KITTI and Virtual KITTI paths
'''
# Obtain the set of sequence dirpaths
kitti_sequence_dirpaths = glob.glob(os.path.join(KITTI_TRAIN_SPARSE_DEPTH_DIRPATH, '*/'))
vkitti_sequence_dirpaths = glob.glob(os.path.join(VKITTI_TRAIN_DENSE_DEPTH_DIRPATH, '*/'))

# Get the longest sequence from VKITTI
# (only the 'clone' variant is scanned; lighting variants only change RGB)
max_vkitti_filepaths = 0
for vkitti_sequence_dirpath in vkitti_sequence_dirpaths:
    # Select filepaths in Virtual KITTI sequence
    vkitti_sequence_dirpath = os.path.join(vkitti_sequence_dirpath, 'clone')
    vkitti_sequence_filepaths = glob.glob(os.path.join(vkitti_sequence_dirpath, '*.png'))
    n_vkitti_filepaths = len(vkitti_sequence_filepaths)
    if n_vkitti_filepaths > max_vkitti_filepaths:
        max_vkitti_filepaths = n_vkitti_filepaths

# Select from the KITTI sequences that have at least the number of files as VKITTI
# so every VKITTI frame can be paired with a distinct KITTI sparse-depth frame
kitti_sequence_dirpath_pool = []
for kitti_sequence_dirpath in kitti_sequence_dirpaths:
    # Select filepaths in KITTI sequence (left color camera, image_02)
    kitti_sequence_filepaths = glob.glob(
        os.path.join(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH, 'image_02', '*.png'))
    n_kitti_filepaths = len(kitti_sequence_filepaths)
    if n_kitti_filepaths >= max_vkitti_filepaths:
        kitti_sequence_dirpath_pool.append(kitti_sequence_dirpath)
'''
Process data to generate sparse depth for Virtual KITTI
'''
if not os.path.exists(OUTPUT_REF_DIRPATH):
    os.makedirs(OUTPUT_REF_DIRPATH)

# Accumulate the paths of every generated sample; these lists are written
# to the OUTPUT_*_FILEPATH text files at the end of the script
output_sparse_depth_paths = []
output_validity_map_paths = []
output_semi_dense_depth_paths = []
output_dense_depth_paths = []
output_ground_truth_paths = []

for vkitti_sequence_dirpath in vkitti_sequence_dirpaths:
    print('Processing Virtual KITTI sequence: {}'.format(vkitti_sequence_dirpath))

    # Select filepath in Virtual KITTI sequence
    vkitti_sequence_dirpath = os.path.join(vkitti_sequence_dirpath, 'clone')
    vkitti_sequence = vkitti_sequence_dirpath.split(os.sep)[-2]
    vkitti_sequence_depth_filepaths = sorted(glob.glob(os.path.join(vkitti_sequence_dirpath, '*.png')))
    n_vkitti_filepaths = len(vkitti_sequence_depth_filepaths)

    output_sequence_dirpath = os.path.join(
        OUTPUT_ROOT_DIRPATH, VKITTI_TRAIN_DEPTH_REFPATH, vkitti_sequence)

    # Pair this VKITTI sequence with every eligible KITTI sequence and camera
    for kitti_sequence_dirpath in kitti_sequence_dirpath_pool:
        # Select KITTI sequence, since it is a directory last element is empty so grab the second til last
        kitti_sequence = kitti_sequence_dirpath.split(os.sep)[-2]
        kitti_sequence_dirpath = os.path.join(kitti_sequence_dirpath, KITTI_SPARSE_DEPTH_REFPATH)

        for camera_dirpath in ['image_02', 'image_03']:
            # Truncate KITTI frames to the length of the VKITTI sequence
            kitti_sequence_filepaths = sorted(glob.glob(
                os.path.join(kitti_sequence_dirpath, camera_dirpath, '*.png')))
            kitti_sequence_filepaths = kitti_sequence_filepaths[0:n_vkitti_filepaths]

            # One output directory per modality
            output_sparse_depth_dirpath = os.path.join(
                output_sequence_dirpath, kitti_sequence, camera_dirpath, 'sparse_depth')
            output_validity_map_dirpath = os.path.join(
                output_sequence_dirpath, kitti_sequence, camera_dirpath, 'validity_map')
            output_semi_dense_depth_dirpath = os.path.join(
                output_sequence_dirpath, kitti_sequence, camera_dirpath, 'semi_dense_depth')
            output_dense_depth_dirpath = os.path.join(
                output_sequence_dirpath, kitti_sequence, camera_dirpath, 'dense_depth')
            output_ground_truth_dirpath = os.path.join(
                output_sequence_dirpath, kitti_sequence, camera_dirpath, 'ground_truth')

            output_dirpaths = [
                output_sparse_depth_dirpath,
                output_validity_map_dirpath,
                output_semi_dense_depth_dirpath,
                output_dense_depth_dirpath,
                output_ground_truth_dirpath
            ]
            for output_dirpath in output_dirpaths:
                if not os.path.exists(output_dirpath):
                    os.makedirs(output_dirpath)

            # Process every (KITTI sparse, VKITTI dense) frame pair in parallel
            pool_input = [
                (kitti_sequence_filepaths[idx], vkitti_sequence_depth_filepaths[idx], output_dirpaths)
                for idx in range(n_vkitti_filepaths)
            ]
            with mp.Pool() as pool:
                pool_results = pool.map(process_frame, pool_input)

            for result in pool_results:
                output_sparse_depth_path, \
                    output_validity_map_path, \
                    output_semi_dense_depth_path, \
                    output_dense_depth_path, \
                    output_ground_truth_path = result

                # Collect filepaths
                output_sparse_depth_paths.append(output_sparse_depth_path)
                output_validity_map_paths.append(output_validity_map_path)
                output_semi_dense_depth_paths.append(output_semi_dense_depth_path)
                output_dense_depth_paths.append(output_dense_depth_path)
                output_ground_truth_paths.append(output_ground_truth_path)

            print('Completed generating {} depth samples for using KITTI sequence={} camera={}'.format(
                n_vkitti_filepaths, kitti_sequence, camera_dirpath))
# Persist the collected sample paths, one text file per modality
print('Storing sparse depth file paths into: %s' % OUTPUT_SPARSE_DEPTH_FILEPATH)
data_utils.write_paths(
    OUTPUT_SPARSE_DEPTH_FILEPATH, output_sparse_depth_paths)

print('Storing validity map file paths into: %s' % OUTPUT_VALIDITY_MAP_FILEPATH)
data_utils.write_paths(
    OUTPUT_VALIDITY_MAP_FILEPATH, output_validity_map_paths)

print('Storing semi dense depth file paths into: %s' % OUTPUT_SEMI_DENSE_DEPTH_FILEPATH)
data_utils.write_paths(
    OUTPUT_SEMI_DENSE_DEPTH_FILEPATH, output_semi_dense_depth_paths)

print('Storing dense depth file paths into: %s' % OUTPUT_DENSE_DEPTH_FILEPATH)
data_utils.write_paths(
    OUTPUT_DENSE_DEPTH_FILEPATH, output_dense_depth_paths)

print('Storing ground-truth depth file paths into: %s' % OUTPUT_GROUND_TRUTH_FILEPATH)
data_utils.write_paths(
    OUTPUT_GROUND_TRUTH_FILEPATH, output_ground_truth_paths)
| StarcoderdataPython |
3243609 | <gh_stars>0
from django.conf.urls import url
from . import views
# URL namespace used for reversing, e.g. reverse('analyze:works_analyze')
app_name = 'analyze'

urlpatterns = [
    url(r'^works/$', views.WorkAnalyze.as_view(), name='works_analyze'),
    url(r'^data/$', views.get_datatables_data, name='get_data'),
    url(r'^data-orig/$', views.get_datatables_data_orig, name='get_data_orig'),
]
| StarcoderdataPython |
1672930 | # pylint: disable=no-member
from typing import Iterable, Union
import asyncpg
from fastapi import BackgroundTasks, Depends, HTTPException
from . import models, pagination, schemes, tasks, utils
from .db import db
async def user_count():
    """Return the total number of user rows in the database."""
    total = await db.func.count(models.User.id).gino.scalar()
    return total
async def create_user(user: schemes.CreateUser):
    """Create a new user account.

    The very first user ever created is promoted to superuser; all
    subsequent users are regular accounts.

    Args:
        user: validated payload with username, password and email.

    Returns:
        The newly created ``models.User`` row.
    """
    count = await user_count()
    return await models.User.create(
        username=user.username,
        hashed_password=utils.get_password_hash(user.password),
        email=user.email,
        # Bootstrap rule: the first registered user administers the instance.
        is_superuser=count == 0,
    )
async def create_invoice(
    invoice: schemes.CreateInvoice, background_tasks: BackgroundTasks
):
    """Create an invoice, link its products, and schedule payment polling."""
    payload = invoice.dict()
    product_ids = payload.get("products")
    obj, xpub = await models.Invoice.create(**payload)
    linked_ids = []
    for product_id in product_ids:  # type: ignore
        link = await models.ProductxInvoice.create(
            invoice_id=obj.id, product_id=product_id
        )
        linked_ids.append(link.product_id)
    obj.products = linked_ids
    # Poll the wallet in the background for payment status updates
    background_tasks.add_task(tasks.poll_updates, obj, xpub, True)
    return obj
async def invoice_add_related(item: models.Invoice):
    """Populate ``item.products`` with the ids of its linked products."""
    rows = (
        await models.ProductxInvoice.select("product_id")
        .where(models.ProductxInvoice.invoice_id == item.id)
        .gino.all()
    )
    # Each row is a 1-tuple; drop empty/None ids
    item.products = [pid for pid, in rows if pid]
async def invoices_add_related(items: Iterable[models.Invoice]):
    """Attach related product ids to every invoice in *items*; returns *items*."""
    for invoice in items:
        await invoice_add_related(invoice)
    return items
async def get_invoice(model_id: Union[int, str]):
    """Fetch one invoice by id, with its related product ids attached.

    Raises:
        HTTPException: 422 on a malformed id, 404 when the invoice is missing.
    """
    try:
        invoice = await models.Invoice.get(model_id)
    except asyncpg.exceptions.DataError as e:
        raise HTTPException(422, e.message)
    if not invoice:
        raise HTTPException(
            status_code=404, detail=f"Object with id {model_id} does not exist!"
        )
    await invoice_add_related(invoice)
    return invoice
async def get_invoices(pagination: pagination.Pagination = Depends()):
    # Paginated invoice listing; related product ids are attached in postprocessing.
    return await pagination.paginate(models.Invoice, postprocess=invoices_add_related)
async def delete_invoice(model_id: int):
    """Delete an invoice and its product links; return the deleted row."""
    invoice = await get_invoice(model_id)
    await invoice_add_related(invoice)
    # Remove the many-to-many links first, then the invoice itself
    await models.ProductxInvoice.delete.where(
        models.ProductxInvoice.invoice_id == invoice.id
    ).gino.status()
    await invoice.delete()
    return invoice
| StarcoderdataPython |
3388425 | import sys
from lark import Lark, Transformer, v_args
# Tile colors: flipping multiplies the stored color by TOGGLE,
# so repeated flips alternate between WHITE and BLACK.
WHITE = 1
BLACK = -1
TOGGLE = -1
# FLIP_DIRECTIONS = dict(nw="se", ne="sw", e="w",
#                        se="nw", sw="ne", w="e")


def debug(m):
    # Lightweight debug printer (kept separate so it can be silenced easily).
    print(m)


# Lark grammar: an input line is one or more hex-grid direction tokens.
grammar = """
start: direction+ -> finish
?direction: "nw"
| "ne"
| "sw"
| "se"
| "e"
| "w"
"""
@v_args(inline=True)
class DirectionLexer(Transformer):
    """Collapses a parsed walk into a plain list of direction strings."""

    def finish(self, *tokens):
        return list(map(str, tokens))
class Floor:
    """Hex-tile floor addressed with cube coordinates (x + y + z == 0)."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.z = 0
        # Cube-coordinate offset for each of the six hex directions.
        self.deltas = {
            'e': (1, -1, 0),
            'se': (0, -1, 1),
            'sw': (-1, 0, 1),
            'w': (-1, 1, 0),
            'nw': (0, 1, -1),
            'ne': (1, 0, -1),
        }
        # Maps (x, y, z) -> color; tiles are only stored once touched.
        self.tile_colors = dict()

    def warp_to_origin(self):
        """Return the cursor to the reference tile."""
        self.x, self.y, self.z = 0, 0, 0

    def step_in_direction(self, direction):
        """Move the cursor one tile in *direction* ('e', 'se', ...)."""
        dx, dy, dz = self.deltas[direction]
        self.x += dx
        self.y += dy
        self.z += dz

    def flip_current_tile(self):
        """Toggle the color of the tile under the cursor."""
        position = (self.x, self.y, self.z)
        if position in self.tile_colors:
            self.tile_colors[position] *= TOGGLE
        else:
            # First touch: untracked tiles start white, so flipping makes black.
            self.tile_colors[position] = BLACK

    def count_black_tiles(self):
        """Number of tiles currently showing their black side."""
        return list(self.tile_colors.values()).count(BLACK)
def main():
    # Parse each non-empty stdin line into a list of direction strings.
    parser = Lark(grammar, parser='lalr', transformer=DirectionLexer(), keep_all_tokens=True)
    walks = [parser.parse(line.strip()) for line in sys.stdin if line.strip()]
    floor = Floor()
    # Every walk starts from the reference tile; flip the tile it ends on.
    for walk in walks:
        floor.warp_to_origin()
        for step in walk:
            floor.step_in_direction(step)
        floor.flip_current_tile()
    print(floor.count_black_tiles())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4843249 | <reponame>ChihHsuanLin/bevel
import pandas as pd
def pivot_proportions(df, groups, responses, weights=1):
    """
    Pivot data to show the breakdown of responses for each group.

    Parameters:
        df: a pandas DataFrame with data to be aggregated
        groups: the name of the column containing the groups to partition by
        responses: the name of the column that contains responses to aggregate into proportions
        weights: the statistical weighting associated with each response

    Returns:
        a pandas DataFrame containing the proportion of responses within each group
    """
    # Attach a weight to every observation (scalar weights broadcast to 1 each)
    weighted = df[[groups, responses]].assign(weights=weights)
    counts = pd.pivot_table(
        weighted,
        index=responses,
        columns=groups,
        aggfunc='sum',
    )['weights'].sort_index(axis=1)
    # Normalize each group's column to proportions; absent cells become 0
    proportions = counts / counts.sum()
    return proportions.fillna(0)
164162 | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import xml.etree.ElementTree as ET
from os.path import isfile, join
from os import getcwd
from scipy.spatial import distance
##############################
# MACROS
#################################
# # Geometry data
# A = -65
# B = 25
# YMAX = 20
# THICKNESS = -10 # negative to match equation
# # Mesh data
# VERTICAL_RES = 60
# N_LAYERS = 4 # from endo to epi, not including endo
# CIRCUNFERENTIAL_RES = 30

# Geo
# Ellipse parameters used as (a, b, h, k) in the ellipse equations below;
# A is negative so the curve's apex points downward.
A = -65
B = 25
H = 0
K = 0
YMAX = 20
_TYPE = 1
# Mesh resolution
N_INTERNAL_LAYERS = 3  # Horizontal res --> will add two layers (internal and external)
N_NODES_PER_LAYER = 20  # Vertical res --> will add one or 2 nodes to fix top/bottom constrains
N_REVS = 9  # Circf. res --> need to be multiple of 3
# 2D Functions
#################################
class vector2d:
    """2D vector spanning from point p1 to point p2.

    Attributes:
        vec: the displacement p2 - p1 as a numpy array.
        unit: vec scaled to length 1 (undefined for zero-length vectors).
        to_plot: [[x1, x2], [y1, y2]], ready for matplotlib's plot().
        normal: a vector2d perpendicular to this one; None on normals
            themselves (stops the constructor recursion).
    """

    def __init__(self, p1, p2, has_normal=True):
        self.p1 = p1
        self.p2 = p2
        self.vec = self.vector2d()
        self.unit = self.unit_vector()
        self.to_plot = [[p1[0], p2[0]], [p1[1], p2[1]]]
        if has_normal:
            # Perpendicular companion from (-vy, vx) to (vy, -vx).
            self.normal = vector2d(
                [-self.vec[1], self.vec[0]],
                [self.vec[1], -self.vec[0]],
                has_normal=False)
        else:
            self.normal = None

    def __call__(self):
        return self.vec

    def __str__(self):
        return "Vector2d: p1: {p1:} p2: {p2:}".format(p1=self.p1, p2=self.p2)

    def vector2d(self):
        """Component-wise displacement from p1 to p2."""
        deltas = [end - start for start, end in zip(self.p1, self.p2)]
        return np.array(deltas)

    def unit_vector(self):
        """This vector scaled to unit length."""
        return self.vec / np.linalg.norm(self.vec)

    def rotate(self, theta):
        """New vector2d: this vector rotated *theta* radians (CCW) about p1."""
        c, s = np.cos(theta), np.sin(theta)
        rotation = np.array([[c, -s], [s, c]])
        tip = np.matmul(rotation, self.vec) + np.array(self.p1)
        return vector2d(self.p1, tip)
def vector2dFromP1(center, length, dir):
    """Build a vector2d of the given *length* starting at *center*,
    pointing along the direction *dir* (expected to be a unit vector)."""
    start = np.array(center)
    end = start + np.array([length * dir[0], length * dir[1]])
    return vector2d(start, end)
def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2' """
    # Clip guards against arccos domain errors from floating-point drift.
    cos_theta = np.dot(v1.unit, v2.unit)
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))
def regress(xs, ys, deg):
    """Least-squares polynomial fit of degree *deg*; returns a callable poly1d."""
    return np.poly1d(np.polyfit(xs, ys, deg))
# Ellipse Functions
def ellipse(a, b, h=0, k=0, _type=0, ref=-1):
    """Build an evaluator for an axis-aligned ellipse centred at (h, k).

    a and b are the semi-axis lengths along y and x respectively.
    _type == 0: the evaluator maps x -> y; _type == 1: it maps y -> x.
    ref selects which branch (sign) of the square root is returned.
    """
    def evaluate(val):
        if _type == 0:
            # Solve the ellipse equation for y, given x.
            return -ref * (a / b) * np.sqrt(b**2 - (val - h)**2) + k
        elif _type == 1:
            # Solve the ellipse equation for x, given y.
            return ref * (b / a) * np.sqrt(a**2 - (val - k)**2) + h
    return evaluate
def ellipse_focci(a, b, h=0, k=0):
    """Foci of an ellipse centred at (h, k) with semi-axes a (along y) and b."""
    focal_dist = np.sqrt(a**2 - b**2)
    upper = np.array([h, k + focal_dist])
    lower = np.array([h, k - focal_dist])
    return upper, lower
def sctattered_ellipse(a, b, h, k, yrange, xrange, x_res, y_res):
    """Sample points on an ellipse by sweeping both axes, sorted by y.

    Returns (coords, xy): coords is an (N, 2) array of [x, y] rows sorted
    by y, and xy is the same data transposed into a (2, N) [xs, ys] array.
    """
    solve_x = ellipse(a, b, h, k, 1)  # x as a function of y
    solve_y = ellipse(a, b, h, k, 0)  # y as a function of x

    # Sweep y uniformly and solve for x, then sweep x and solve for y.
    ys_sweep = np.linspace(np.min(yrange), np.max(yrange), x_res)
    xs_from_y = np.array([solve_x(y) for y in ys_sweep])
    xs_sweep = np.linspace(np.min(xrange), np.max(xrange), y_res)
    ys_from_x = np.array([solve_y(x) for x in xs_sweep])

    # Merge both samplings and sort every (x, y) pair by its y value.
    xs = np.append(xs_from_y, xs_sweep)
    ys = np.append(ys_sweep, ys_from_x)
    coords = np.column_stack((xs, ys))
    coords = coords[np.argsort(coords[:, 1])]

    # Transposed (2, N) layout for direct plotting.
    xy = np.zeros((2, len(coords)))
    xy[0] = coords[:, 0]
    xy[1] = coords[:, 1]
    return coords, xy
def regressed_ellipse(a, b, h, k, yrange, xrange, yswitch=0.80, breakpoints=[0], res=100, deg=2, axis=1):
    """Piecewise polynomial fit x(y) of an ellipse, split at `breakpoints`.

    The ellipse is sampled with sctattered_ellipse(); the samples (already
    sorted by y) are partitioned into [breakpoint_i, breakpoint_{i+1}]
    segments and a degree-`deg` polynomial x(y) is fitted to each segment
    via regress(). Returns a callable mapping a y value to the fitted x of
    the first segment containing it (and 0 exactly at ymin).

    NOTE(review): `breakpoints=[0]` is a mutable default argument (not
    mutated in place here since np.insert/np.append return copies, but
    fragile). `yswitch` is rescaled yet only read by commented-out logic;
    `axis`, xmin and xmax are unused in the active path -- confirm before
    removing them.
    """
    # Define min and max values
    ymin = np.min(yrange)
    ymax = np.max(yrange)
    xmin = np.min(xrange)
    xmax = np.max(xrange)
    # Calculate scattered ellipse
    s_original, _ = sctattered_ellipse(a, b, h, k, yrange, xrange, res, res)
    # Set yswitch based on the basal value of a
    yswitch = a * yswitch
    # print("yswith:", yswitch)
    # Remove breakpoints before yswitch
    # breakpoints = np.delete(breakpoints, [i for i, p in enumerate(breakpoints) if p <= yswitch])
    # Insert min and max breakpoints if they are not already included (do not duplicate)
    # breakpoints = np.insert(breakpoints, 0, yswitch) if yswitch > ymin else breakpoints
    breakpoints = np.insert(breakpoints, 0, ymin) if ymin not in breakpoints else breakpoints
    breakpoints = np.append(breakpoints, ymax) if ymax not in breakpoints else breakpoints
    # print("Breakpoints:", breakpoints)
    # Break s_original based on breakpoints
    polys = []
    r_range = range(len(breakpoints) - 1)
    # `count` carries the scan position over s_original across segments, so
    # each sample is consumed at most once (relies on the y-sorted order).
    count = 1
    for i in r_range:
        brkpoint1 = breakpoints[i]
        brkpoint2 = breakpoints[i+1]
        s = [[], []]
        for j in range(count-1, len(s_original)):
            yval = s_original[j][1]
            # print(yval)
            if breakpoints[i] <= yval <= breakpoints[i+1]:
                s[0].append(s_original[j][0])
                s[1].append(s_original[j][1])
                # s.append([s_original[j][0], s_original[j][1]])
                count += 1
            else:
                break
        # print("---")
        # print("brk1:", breakpoints[i])
        # print("brk2:", breakpoints[i+1])
        # print("s[0]:")
        # print(s[0])
        # print("s[1]:")
        # print(s[1])
        # print("---")
        # Fit x as a polynomial in y for this segment
        polys.append(regress(s[1], s[0], deg))
    # # Calculate yss and xss
    # r_range = range(len(breakpoints) - 1)
    # yss = []
    # for i in r_range:
    #     brkpoint1 = breakpoints[i]
    #     brkpoint2 = breakpoints[i+1]
    #     if brkpoint2 <= yswitch:
    #         yss.append(np.linspace(breakpoints[i], ellpisis(i+1), res))
    #     else:
    #         yss.append(np.linspace(breakpoints[i], breakpoints[i+1], res))
    # yss = [np.linspace(breakpoints[i], breakpoints[i+1], res) for i in r_range]
    # xss = [[ellpisis(y) for y in ys] for ys in yss]
    # polys = [regress(xss[i], yss[i], deg) for i in r_range]

    def reg_ell(val):
        # Exactly at the apex (ymin) the x value is pinned to 0.
        if val == ymin:
            return 0
        else:
            # Find the first segment whose y-interval contains val.
            for i in r_range:
                if breakpoints[i] <= val <= breakpoints[i+1]:
                    index = i
                    break
            return polys[index](val)
    return reg_ell
def distributed_ellipse(a, b, h, k, yrange, xrange, x_res=500, y_res=500, dist_res=50, err=0.05):
    """Resample an ellipse so consecutive points are (approximately) evenly spaced.

    The curve is oversampled with sctattered_ellipse(), then points are kept
    greedily whenever the accumulated distance since the last kept point
    falls within a relative tolerance `err` of the target spacing. The
    first and last samples are always kept.

    Returns:
        (coords, xy): the kept points as an (N, 2) array and a (2, N) array.
    """
    # Calculate original ellipse
    ell_original_coords, ell_original = sctattered_ellipse(a, b, h, k, yrange, xrange, x_res, y_res)
    # Calculate total length of the curve
    # NOTE(review): dist_matrix[0][-1] is the straight-line distance between
    # the first and last sample, not the arc length of the curve -- confirm
    # that this is the intended spacing reference.
    dist_matrix = distance.cdist(ell_original_coords, ell_original_coords, 'euclidean')
    # Get dist resolution
    dist = dist_matrix[0][-1] / (dist_res - 1)
    # Set min and max dist according to allowed error
    min_dist = dist*(1-err)
    max_dist = dist*(1+err)
    diff_sum = 0
    # Bound first coord
    ell_distr_coords = [ell_original_coords[0]]
    ell_distr = [[ell_original[0][0]], [ell_original[1][0]]]
    for i in range(len(dist_matrix) - 1):
        # Increment of "distance to the first point" between consecutive samples
        prev_dist = dist_matrix[i][0]
        next_dist = dist_matrix[i+1][0]
        diff_dist = next_dist - prev_dist
        diff_sum += diff_dist
        if min_dist <= diff_sum <= max_dist:
            ell_distr_coords.append(ell_original_coords[i])
            ell_distr[0].append(ell_original[0][i])
            ell_distr[1].append(ell_original[1][i])
            diff_sum = 0
    # Always keep the last sample so the output ends where the input does
    ell_distr_coords.append(ell_original_coords[-1])
    ell_distr[0].append(ell_original[0][-1])
    ell_distr[1].append(ell_original[1][-1])
    return np.array(ell_distr_coords), np.array(ell_distr)
# Geometry build functions
def refractions(ell_coords, focci, n1, n2, bias_factor=0, plot_ax=None, flat_top=True):
    """For every point on the ellipse, cast a ray from the focus and compute
    its "refracted" direction across the boundary using Snell's law.

    Args:
        ell_coords: ordered (x, y) points along the ellipse boundary.
        focci: the focus point the incoming rays originate from.
        n1, n2: refractive indices used in Snell's law.
        bias_factor: damping applied to the refraction angle, attenuated by
            1/log(|y| + 100) of the surface point.
        plot_ax: optional matplotlib axis; when given, rays are drawn on it.
        flat_top: treat the last few points as a flat cap (rays exit
            horizontally) instead of refracting.

    Returns:
        (refracs, plot_data): refracs is a list of
        (refracted_ray, normal_vector) vector2d pairs, one per input point.

    NOTE(review): the returned plot_data reuses xs/ys/xs2/ys1, which are
    only assigned inside the `if plot_ax != None` branch -- calling with
    plot_ax=None raises NameError at the return statement. Confirm callers
    always supply a plot axis.
    """
    # NOTE: Refreaction only inside object (not on edges)
    def snellsLaw(n1, n2, theta1):
        """ Returns theta2 based on snell's refraction law"""
        theta2 = np.arcsin((n1/n2) * np.sin(theta1))
        # if theta2 <= np.pi * 0.5:
        #     print("true")
        #     theta2 = -theta2
        return theta2

    refracs = []
    for i in range(-1, len(ell_coords) - 1):
        # Calculate "refraction" rays for borders along y axis
        if i < 0 and ell_coords[i+1][0] == 0:
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i+1], ell_coords[i+2])  # Not really used (just for plot consistence)
            n_vec1 = vector2dFromP1(ref_vector.p1, 5, incomming_ray.normal.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p1, 5, -incomming_ray.normal.unit_vector())
            refracted_ray = vector2dFromP1(ref_vector.p1, 5, incomming_ray.unit_vector())
        elif flat_top == True and i >= len(ell_coords) - 4:
            # Flat cap: the outgoing ray leaves horizontally
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i+1], [ell_coords[i+1][0] + 5, ell_coords[i+1][1]])  # Not really used (just for plot consistence)
            n_vec1 = vector2dFromP1(ref_vector.p1, 5, -ref_vector.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p1, 5, ref_vector.unit_vector())
            refracted_ray = vector2dFromP1(ref_vector.p1, 5, ref_vector.unit_vector())
        else:
            # Get incomming ray and ref vectors
            incomming_ray = vector2d(focci, ell_coords[i+1])
            ref_vector = vector2d(ell_coords[i], ell_coords[i+1])
            # Get normal vectors (2 of them for plotting)
            n_vec1 = vector2dFromP1(ref_vector.p2, 5, -ref_vector.normal.unit_vector())
            n_vec2 = vector2dFromP1(ref_vector.p2, 5, ref_vector.normal.unit_vector())
            # Refraction angle will be used for yvals below than zero
            if n_vec2.p1[1] < 0:
                # Calculate refraction angle
                theta1 = angle_between(incomming_ray, n_vec1)
                theta2 = snellsLaw(n1, n2, theta1)
                # Apply bias factor
                bias_factor = bias_factor * 1/np.log(abs(n_vec2.p1[1]) + 100)
                theta2 = theta2 * (1 - bias_factor)
                # Rotate vec_2 based on theta 2
                refracted_ray = n_vec2.rotate(-theta2) if n_vec2.p1[1] < 0 else n_vec2.rotate(theta2)
            else:
                # Above y == 0 the ray simply follows the surface normal
                refracted_ray = n_vec2
                # n_vec2 = n_vec1

        refracs.append((refracted_ray, n_vec2))

        # Storing info for plot
        if plot_ax != None:
            xs = []
            ys = []
            xs.extend(incomming_ray.to_plot[0])
            xs.extend(ref_vector.to_plot[0])
            xs.extend(refracted_ray.to_plot[0])
            ys.extend(incomming_ray.to_plot[1])
            ys.extend(ref_vector.to_plot[1])
            ys.extend(refracted_ray.to_plot[1])

            xs1 = []
            ys1 = []
            # xs1.extend(n_vec1.to_plot[0])
            xs1.extend(n_vec2.to_plot[0])
            # ys1.extend(n_vec1.to_plot[1])
            ys1.extend(n_vec2.to_plot[1])

            xs2 = []
            ys2 = []
            xs2.extend(refracted_ray.to_plot[0])
            ys2.extend(refracted_ray.to_plot[1])

            plot_ax.plot(xs, ys)
            plot_ax.plot(xs1, ys1, linestyle="--", c="k")

    # # Calculate "refraction" rays for borders along y axis
    # for i in range(0, len(ell_coords), len(ell_coords) - 1):
    #     if ell_coords[i][0] == 0:
    #         incomming_ray = vector2d(focci, ell_coords[i])
    #         n_vec2 = vector2dFromP1(ref_vector.p2, 5, ref_vector.normal.unit_vector())

    return refracs, [(xs, ys), (xs2, ys1)]  # plot data
def ref_nodes(refracts, thickness, n_layers, focci=np.array([0, 0]), flat_top=True):
    """Generate node coordinates for wall layers by offsetting each
    refracted ray's origin along its boundary normal.

    Args:
        refracts: (refracted_ray, normal_vector) pairs from refractions().
        thickness: total wall thickness; layer offsets span [0, thickness].
        n_layers: number of internal layers (two boundary layers are added
            by the linspace below).
        focci: unused in the current body -- kept for signature parity.
        flat_top: unused in the current body.

    Returns:
        (coords, xy): node positions as an (N, 2) array and a (2, N) array.

    NOTE(review): cdL (the oblique, cos-corrected offset) is computed and
    printed but never used for node placement -- nodes are offset by L along
    the normal only. Looks like work in progress; confirm before relying on
    the oblique correction.
    """
    # One offset per layer, from the boundary (0) out to the full thickness
    layers_space = np.linspace(0, thickness, n_layers + 2)
    print(layers_space)
    points_matrix_coords = []
    points_matrix = [[], []]
    ref_vectors = np.copy(refracts)
    dL = layers_space[1] - layers_space[0]
    print("dL:", dL)
    for L in layers_space:
        for i, vecs in enumerate(ref_vectors):
            refracted_vec = vecs[0]
            normal_vec = vecs[1]
            theta = angle_between(normal_vec, refracted_vec)
            # A 90-degree angle is zeroed out -- presumably to avoid a
            # degenerate oblique correction; confirm intent.
            if theta == np.pi*0.5:
                theta = 0
            if L > 0:
                # vec = vector2dFromP1(refracted_vec.p1, L, refracted_vec.unit)
                # vec = vector2dFromP1(refracted_vec.p1, L, normal_vec.unit)
                cosTheta = np.cos(theta)
                cdL = L * np.reciprocal(cosTheta) if cosTheta > 0 else 0
                print("L:", round(L, 3), "| theta:", round(np.degrees(theta), 3), "| cdL", round(cdL, 5), "| L+cdL:", round(L + cdL, 5))
                vec = vector2dFromP1(refracted_vec.p1, L, normal_vec.unit)
                # print(vec)
                # # print(vec)
                # vec = vec.rotate(theta)
                # print("vec*unit:", vec.vec * refracted_vec.unit)
                # vec = vector2d(normal_vec.p1, vec.vec * refracted_vec.unit + vec.p1)
                points_matrix_coords.append(vec.p2)
                points_matrix[0].append(vec.p2[0])
                points_matrix[1].append(vec.p2[1])
            else:
                # Layer 0: the node is the ray origin itself (on the boundary)
                vec = refracted_vec
                points_matrix_coords.append(vec.p1)
                points_matrix[0].append(vec.p1[0])
                points_matrix[1].append(vec.p1[1])
                # print(vec)
    return np.array(points_matrix_coords), np.array(points_matrix)
def ref_nodes2(refracts, thickness, n_layers, focci=np.array([0, 0]), layer_res=N_NODES_PER_LAYER+2, flat_top=True):
    """Generate wall-layer nodes by intersecting each refracted ray with
    successively offset ellipses (semi-axes A-L, B+L).

    Layer 0 reuses the ray origins directly. For every outer layer, a
    densely sampled offset ellipse is scanned for the first point that is
    (nearly) collinear with the ray; the angular tolerance starts at 0.5
    degrees and is widened in 0.5-degree steps until a match is found.

    Returns:
        (coords, xy): node positions as an (N, 2) array and a (2, N) array.

    NOTE(review): focci, layer_res and flat_top are unused in the active
    code path; the search reads the module-level globals A, B, H, K, YMAX.
    """
    def is_parallel(p1, vec, err=np.radians(1)):
        # True when the segment p1 -> vec.p1 is within `err` radians of
        # vec's direction (or its opposite); coincident coordinates are
        # treated as parallel.
        if p1[0] != vec.p1[0] and p1[1] != vec.p1[1]:
            v1 = vector2d(p1, vec.p1)
            theta = angle_between(vec, v1)
            # print(np.degrees(theta))
            if theta <= err or (np.pi - err <= theta <= np.pi + err) or theta >= np.pi*2 - err:
                return True
            else:
                return False
        else:
            return True

    layers_space = np.linspace(0, thickness, n_layers + 2)
    points_matrix_coords = []
    points_matrix = [[], []]
    ref_vectors = np.copy(refracts)
    for Lindex, L in enumerate(layers_space):
        if Lindex == 0:
            # Innermost layer: reuse the ray origins directly
            for vecs in ref_vectors:
                ref_coord = vecs[0].p1
                points_matrix_coords.append(ref_coord)
                points_matrix[0].append(ref_coord[0])
                points_matrix[1].append(ref_coord[1])
            print("node_per_layer:", len(points_matrix_coords))
        else:
            # Densely sampled offset ellipse for this layer
            layer_coords, layer_xy = sctattered_ellipse(A-L, B+L, H, K, [A-L, YMAX], [0, B+L], 600, 600)
            node_per_layer_counter = 0
            angle_err = np.radians(0.5)
            # while node_per_layer_counter != layer_res:
            #     node_per_layer_counter = 0
            # `tracker` carries the scan position along the layer so rays are
            # matched in order, each candidate point considered once.
            tracker = 0
            for vecs in ref_vectors:
                found_match = False
                angle_err = np.radians(0.5)
                while not found_match:
                    local_tracker = tracker
                    for i in range(tracker, len(layer_coords)):
                        # print("tracker", tracker, "local_tracker", local_tracker)
                        if is_parallel(layer_coords[i], vecs[0], err=angle_err):
                            points_matrix_coords.append(layer_coords[i])
                            points_matrix[0].append(layer_xy[0][i])
                            points_matrix[1].append(layer_xy[1][i])
                            node_per_layer_counter += 1
                            found_match = True
                            break
                        else:
                            local_tracker += 1
                    angle_err += np.radians(0.5)  # increase a tolerable degree
                    tracker = local_tracker
            print("node_per_layer:", node_per_layer_counter)
    return np.array(points_matrix_coords), np.array(points_matrix)
def make_3d(points_matrix_coords, points_matrix, shift_yz=True):
    """Lift 2D points into 3D by inserting a zero coordinate.

    Args:
        points_matrix_coords: iterable of 2D points (each a length-2 array).
        points_matrix: (2, N) array of the same points as [xs, ys] rows;
            may be empty, in which case it is returned unchanged.
        shift_yz: if True, insert the zero as the middle (y) coordinate so
            (x, y) -> (x, 0, y); otherwise append it as z -> (x, y, 0).

    Returns:
        tuple of the (N, 3) coordinate array and the (3, N) matrix.
    """
    coords_3d = []
    for point in points_matrix_coords:
        if shift_yz:
            # (x, y) -> (x, 0, y): the former y becomes the new height (z).
            point = np.insert(point, 1, 0.)
        else:
            # (x, y) -> (x, y, 0)
            point = np.append(point, 0)
        coords_3d.append(point)

    if len(points_matrix) > 0:
        zeros = np.zeros(len(points_matrix[0]))
        if shift_yz:
            xs, ys = points_matrix[0], points_matrix[1]
            points_matrix = np.vstack((xs, zeros))
            points_matrix = np.vstack((points_matrix, ys))
        else:
            # Bug fix: np.vstack takes a single tuple of arrays; the original
            # passed `z` as a second positional argument, which raises a
            # TypeError (the second parameter is keyword-only dtype).
            points_matrix = np.vstack((points_matrix, zeros))
    return np.array(coords_3d), points_matrix
def revolute(points_matrix_coords, rev=360, res=4, exclude_axis=True, axis='z'):
    """Revolve a set of 3D points about the z axis.

    Args:
        points_matrix_coords: iterable of 3D points (the profile to revolve).
        rev: total revolution in degrees.
        res: number of angular sections; the closing duplicate sample of the
            full turn is dropped.
        exclude_axis: currently unused.
        axis: currently only forwarded conceptually; rotation is about z.

    Returns:
        (point_cloud, point_cloud_by_coord, point_matrix):
        point_cloud maps node number -> (coord, node, section);
        point_cloud_by_coord maps coordinate tuple -> the same record;
        point_matrix is the explicit (3, N) xyz array.
    """
    def rotation_matrix(theta, axis='z'):
        c, s = np.cos(theta), np.sin(theta)
        if axis == 'z':
            return np.array([
                [c, s, 0],
                [-s, c, 0],
                [0, 0, 1],
            ])
        elif axis == 'y':
            return np.array([
                [c, 0, -s],
                [0, 1, 0],
                [s, 0, c],
            ])

    point_cloud_by_coord = {}
    node_index = 0
    # One "section" per angular step; linspace's final sample (the full
    # turn) duplicates the first and is dropped.
    for section_index, dtheta in enumerate(np.linspace(0, rev, res + 1)[:-1]):
        rot = rotation_matrix(np.radians(dtheta))
        for base_coord in points_matrix_coords:
            rotated = np.matmul(base_coord, rot)
            # Points on the rotation axis (x == 0) always belong to section 0.
            section = 0 if rotated[0] == 0 else section_index
            point_cloud_by_coord[tuple(rotated)] = (rotated, node_index, section)
            node_index += 1

    # Re-key the same records by node number.
    point_cloud = {
        node: (coord, node, section)
        for coord, node, section in point_cloud_by_coord.values()
    }

    # Explicit (3, N) matrix of x, y, z coordinates.
    point_matrix = np.zeros((3, len(point_cloud)))
    for i, node in enumerate(point_cloud):
        coord = point_cloud[node][0]
        point_matrix[0][i] = coord[0]
        point_matrix[1][i] = coord[1]
        point_matrix[2][i] = coord[2]
    return point_cloud, point_cloud_by_coord, point_matrix
def hex8(point_cloud, nodes, n_layers=N_INTERNAL_LAYERS+2):
# def find_nearest(array, value):
# array = np.asarray(array)
# return (np.abs(array - value)).argmin()
# def mask(arrays, idx):
# for arr in arrays:
# arr.mask[idx] = True
def distance(p1,p2):
return np.linalg.norm(np.array(p1)-np.array(p2))
# def get_point(key,dic):
# p = dic[key][0]
# return p, p[0], p[1], p[2]
# def get_key_by_nearest(key_list, xyz_list, ref_val, axis):
# return key_list[find_nearest(xyz_list[axis], ref_val)]
# def get_elem(i,j,k,shape):
# if i != len(shape) -1:
# i2 = i + 1
# else:
# i2 = 0
# return(np.array([
# shape[i2][j][k+1], # P6
# shape[i][j][k+1], # P2
# shape[i][j+1][k+1], # P4
# shape[i2][j+1][k+1], # P8
# shape[i2][j][k], # P5
# shape[i][j][k], # P1
# shape[i][j+1][k], # P3
# shape[i2][j+1][k], # P7
# ]))
# def sort_by_axis(dic, axis, reverse=False, returnDict=False):
# if returnDict:
# return {k: v for k, v in sorted(dic.items(), key=lambda item: item[1][0][axis], reverse=reverse)}
# else:
# return sorted(dic.items(), key=lambda item: item[1][0][axis], reverse=reverse)
# # shape((s,z,d))
# shape = dict()
# for key in point_cloud:
# data = point_cloud[key]
# s = data[-1] # section number
# z = data[0][2] # z value
# d = distance([0,0],[data[0][0],data[0][1]]) # distance from center (0,0)
# shape_key = (s, z, d)
# shape[shape_key] = data
# # --- Note
# # Not sure if it will ALWAYS have different values. If it happens, will have to use the following:
# # if shape_key in shape:
# # shape[shape_key].append(data[0])
# # else:
# # shape[shape_key] = [data[0]]
# # ---
# ------------------------------------------------
## NEED TO DO: TRIAL ONE:
# # sort shape
# shape = {k: v for k, v in sorted(shape.items(), key=lambda item: (item[0][0], -item[0][1]) )}
# # shape (section, height, distance from center), sorted by section, reverse height, but not distance
# # need to transfor to this:
# # shape(i,j,k) where i is the section, j is the layer and k the ordered node pos along the layer
## --------------------------------------------------------------------------
# a = {} # Temporary dict to organize data into sections and layers
# layer_tracker = -1
# for i, (s, z, d) in enumerate(shape):
# if i % n_layers == 0:
# layer_tracker += 1
# key = (s, layer_tracker)
# else:
# if key in a:
# a[key].append(shape[(s,z,d)])
# else:
# a[key] = [shape[(s,z,d)]]
# if layer_tracker == n_layers -1:
# layer_tracker = -1
# shape = dict() # final dictionay with data in (sections, layers, k)
# for s, l in a:
# # Make sure content is sorted
# content = a[(s,l)]
# # content = sorted(content, key=lambda item: item[0][2], reverse=True)
# for i, b in enumerate(content):
# shape[s,l,i] = b
def display(dic, withVal=False):
    """Print each key of *dic* (with its value when withVal is True), then the size."""
    for entry, value in dic.items():
        if withVal:
            print(entry, value)
        else:
            print(entry)
    print("Length:", len(dic))
# point_cloud = {"0": [coord, nodeNumber, nodeSection]}
# End goal: shape(i, j, k) where i is the section, j the layer number and
# k the node number with respect to height.
# Step 1 - group the cloud by section: sections = {section: [[coord, nodeNumber], ...]}
sections = {}
for entry in point_cloud.values():
    sections.setdefault(entry[-1], []).append(entry[:-1])
temp_dict = {}   # (section, rank) -> nodes, ranked by height (z)
temp_dict2 = {}  # (section, rank) -> nodes, ranked by radius from the axis
print("sorting by height and radius")
for s, members in sections.items():
    by_height = sorted(members, key=lambda item: item[0][2])
    by_radius = sorted(members, key=lambda item: distance([0,0], item[0][:-1]))
    for rank, (h_node, r_node) in enumerate(zip(by_height, by_radius)):
        temp_dict.setdefault((s, rank), []).append(h_node)
        temp_dict2.setdefault((s, rank), []).append(r_node)
for i in range(10):
    print("byHeight:", temp_dict[(0,i)][0], "byRadius", temp_dict2[(0,i)][0])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# cs = ['b','r','k','g','c','b','r','k','g','c','b','r','k','g','c','b','r','k','g','c','b']
# for key in temp_dict:
# print(key)
# if key[0] == 0:
# for arr in temp_dict[key]:
# p = arr[0]
# print(key[1])
# ax.scatter3D(p[0], p[1], p[2], c=cs[key[1]])
# print("sorting by distance")
# def sprint(x):
# print("sort method:", x)
# return x
# shape = dict()
# for s, l in temp_dict:
# verticalLayer = sorted(temp_dict[(s,l)], key=lambda item: distance([0,0,0], item[0])) # sort by height
# # print("--")
# # for k in verticalLayer:
# # print(k)
# # print("---")
# for n, data in enumerate(verticalLayer):
# shape[(s,n,l)] = data
# print("lenshape",len(shape), "len_pointcloud",len(point_cloud))
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# for key in temp_dict:
# if key[0] != 0:
# break
# for data in temp_dict[key]:
# p = temp_dict[key][0]
# ax.scatter3D(p[0], p[1], p[2])
# display(shape,withVal=True)
# print("---")
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# # cs = ['b','r','b','g','c',]
# # for key in shape:
# # p = shape[key][0]
# # ax.scatter3D(p[0], p[1], p[2], c=cs[key[1]])
# else:
# break
# Sections work fine
# print("SECTIONS")
# display(sections)
# print("---")
# print("Length section[0]",len(sections[0]))
# print("n_layers:", n_layers)
# temp_dict = dict()
# print("sorting by height")
# for s in sections:
# nodes_in_section = sorted(sections[s], key=lambda item: item[0][2], reverse=True) # sort by height
# # for c in nodes_in_section:
# # print(c)
# n_nodes_in_section = len(nodes_in_section)
# n_nodes_per_layer = int(round(n_nodes_in_section / n_layers))
# # print("n_nodes:", n_nodes_in_section, "n_nodes_per_layer", n_nodes_per_layer)
# layerTracker = -1
# heightTracker = -1
# for h, data in enumerate(nodes_in_section):
# if h % n_layers == 0:
# heightTracker += 1
# # if layerTracker == n_layers -1:
# # layerTracker = -1
# # layerTracker += 1
# key = (s,heightTracker)
# # print(key, data)
# if key in temp_dict:
# temp_dict[key].append(data)
# else:
# temp_dict[key] = [data]
# print("sorting by distance")
# shape = dict()
# for s, h in temp_dict:
# horizontalLayer = sorted(temp_dict[(s,h)], key=lambda item: distance([0,0], [item[0][0], item[0][1]]), reverse=True) # sort by distance to 0,0
# for n, data in enumerate(horizontalLayer):
# # print("len_HorizontalLayer:",len(horizontalLayer))
# shape[(s,n,h)] = data
# print("lenshape",len(shape), "len_pointcloud",len(point_cloud))
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# cs = ['b','r','b','g','c',]
# for key in shape:
# p = shape[key][0]
# ax.scatter3D(p[0], p[1], p[2], c=cs[key[1]])
# fig = plt.figure()
# ax = fig.add_subplot(111)
# cs = ['b','r','k','g','c',]
# for key in shape:
# if key[0] == 0:
# p = shape[key][0]
# ax.scatter(p[0], p[2], c=cs[key[1]])
# else:
# break
# print("sorting by distance")
# shape = dict()
# for s, h in temp_dict:
# nodes_in_height_h = sorted(temp_dict[(s,h)], key=lambda item: distance([0,0], [item[0][0], item[0][1]]), reverse=True) # sort by distance to 0,0
# n_nodes_in_section = len(nodes_in_height_h)
# n_nodes_per_layer = int(round(n_nodes_in_section / n_layers))
# counter = -1
# for i, data in enumerate(nodes_in_height_h):
# if i % n_nodes_per_layer == 0:
# counter = -1
# counter += 1
# key = (s,h, i)
# print(key, data)
# shape[key] = data
# print("SHAPE")
# display(shape)
# print("----")
# shape = temp_dict
# print("TEMP DICT")
# display(temp_dict, withVal=True)
# print("---")
# shape = dict()
# layer_tracker = -1
# height_tracker = -1
# for i, (s, h) in enumerate(temp_dict):
# data = temp_dict[(s,h)]
# if i % n_layers == 0:
# layer_tracker += 1
# height_tracker += 1
# key = (s, layer_tracker, height_tracker)
# # print(key)
# shape[key] = data
# if layer_tracker == n_layers -1:
# layer_tracker = 0
# height_tracker = 0
# for key in shape:
# print(key, shape[key][0])
# print(shape[(0,0,1)])
# print(shape[(0,0,2)])
# print(shape[(0,0,3)])
# print(shape[(0,0,4)])
# print(shape[(0,0,5)])
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# for k in range(20):
# p = shape[0,k,0][0]
# print(p)
# ax.scatter3D(p[0], p[1], p[2])
# print(len(point_cloud), len(shape))
# print(temp_dict)
# for s in sections:
# content = sorted(sections[s], key=lambda item: (distance([0,0], [item[0][0], item[0][1]]), item[0][2]), reverse=True) # sort by layer
# layer_tracker = - 1
# for i, data in enumerate(content):
# if i % n_layers == 0:
# layer_tracker += 1
# key = (s, layer_tracker)
# else:
# if key in temp_dict:
# temp_dict[key].append(data)
# else:
# temp_dict[key] = [data]
# if layer_tracker == n_layers -1:
# layer_tracker = -1
# print(temp_dict[0,0])
# print("temp_dict:")
# for c in temp_dict[(0,0)]:
# print(c[0])
# print("--")
# print("temp_dict:")
# for c in temp_dict[(1,0)]:
# print(c[0])
# print("--")
# print("len_cloud", len(point_cloud), "temp_dict",len(temp_dict))
# shape = dict()
# for s, l in temp_dict:
# content = sorted(temp_dict[(s,l)], key=lambda item: item[0][2], reverse=True) # sort by height
# for i, data in enumerate(content):
# shape[(s,l,i)] = data
# # print(shape[(0,0,0)])
# # print(shape[(0,0,1)])
# # print(shape[(0,0,2)])
# print("len_cloud", len(point_cloud), "len_shape",len(shape))
# shape2 = dict()
# layerTracker = 0
# zcount = 0
# for s in shape:
# if layerTracker <= 3:
# new_key = (s[0],zcount)
# if new_key in shape2:
# arr = shape2[new_key]
# arr.append((shape[s][1], s[2]))
# arr = sorted(arr, key=lambda item: item[1])
# shape2[new_key] = arr
# else:
# shape2[new_key] = [(shape[s][1], s[2])]
# layerTracker += 1
# zcount += 1
# else:
# zcount = 0
# layerTracker = 0
# for s in shape2:
# print(s, shape2[s])
def hexalise(shape):
    """Assemble hex8 (8-node hexahedron) elements from an ordered node grid.

    Parameters
    ----------
    shape : indexable as shape[i][j][k]
        Nodes organised by section i, layer j and point index k along a layer.

    Returns
    -------
    dict
        Maps a 1-based element id to an np.array of the element's 8 corner
        nodes, in the order P6, P2, P4, P8, P5, P1, P3, P7 (assumes this is
        the hex8 node ordering expected by the downstream solver — TODO
        confirm against its documentation).
    """
    n_sections = len(shape)

    def get_elem(i, j, k, shape):
        # Wrap the section index so the last section connects back to the
        # first, closing the revolved geometry.
        i2 = (i + 1) % n_sections
        return np.array([
            shape[i2][j][k+1],    # P6
            shape[i][j][k+1],     # P2
            shape[i][j+1][k+1],   # P4
            shape[i2][j+1][k+1],  # P8
            shape[i2][j][k],      # P5
            shape[i][j][k],       # P1
            shape[i][j+1][k],     # P3
            shape[i2][j+1][k],    # P7
        ])
    elements = {}
    elem_count = 1
    for i in range(n_sections):                 # sections
        for j in range(len(shape[i]) - 1):      # layers
            for k in range(len(shape[i][j]) - 1):  # points along a layer
                elements[elem_count] = get_elem(i, j, k, shape)
                elem_count += 1
    return elements
def write_geometry(nodes, elems, file_name, path_to_output_folder):
    """Serialize node and element dicts into a <Geometry> XML file.

    nodes maps node id -> coordinate sequence; elems maps element id ->
    connectivity sequence. The tree is pretty-printed via indent() and
    written to path_to_output_folder/file_name with ISO-8859-1 encoding.
    """
    geometry = ET.Element('Geometry')
    tree = ET.ElementTree(geometry)
    nodes_tag = ET.SubElement(geometry, "Nodes")
    nodes_tag.set("name", "Object01")
    elems_tag = ET.SubElement(geometry, "Elements")
    elems_tag.set("type", "hex8")
    elems_tag.set("name", "Part1")
    # One <node> child per entry, coordinates as a comma-separated list.
    for node_id, coords in nodes.items():
        node_el = ET.SubElement(nodes_tag, "node")
        node_el.set("id", str(node_id))
        node_el.text = ",".join(str(c) for c in coords)
    # One <elem> child per entry, connectivity as a comma-separated list.
    for elem_id, connectivity in elems.items():
        elem_el = ET.SubElement(elems_tag, "elem")
        elem_el.set("id", str(elem_id))
        elem_el.text = ",".join(str(x) for x in connectivity)
    indent(tree.getroot())
    tree.write(join(path_to_output_folder, file_name), encoding="ISO-8859-1")
def indent(elem, level=0):
    """Recursively insert newline/indent whitespace so ElementTree output is readable."""
    pad = "\n" + level * " "
    if len(elem):
        # A parent: open its text onto a new, deeper-indented line.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        last = None
        for child in elem:
            indent(child, level + 1)
            if not child.tail or not child.tail.strip():
                child.tail = pad + " "
            last = child
        # The final child's tail closes back to the parent's indent level.
        if not last.tail or not last.tail.strip():
            last.tail = pad
    else:
        # A leaf: only set a tail when nested (level > 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
######################################
if __name__ == "__main__":
    print("==== Test case ===")
    # Two figures: fig holds two side-by-side 2D axes, fig2 one 3D axis
    # for the revolved point cloud.
    fig = plt.figure()
    axs = fig.add_subplot(121)
    axs2 = fig.add_subplot(122)
    fig2 = plt.figure()
    axs3 = fig2.add_subplot(111, projection='3d')
    ## Focci points
    # A, B, H, K are presumably the ellipse semi-axes and center offsets
    # defined at module level — TODO confirm.
    focci_pos, focci_neg = ellipse_focci(A,B,H,K)
    # plt.scatter(focci_pos[0], focci_pos[1],c='y')
    ## Scattered ellipse
    ell_original_coords, ell_original = sctattered_ellipse(A,B,H,K, [A,YMAX], [0,B], 1000, 1000)
    axs.scatter(ell_original[0], ell_original[1],c='b')
    ell_distr_coords, ell_distr = distributed_ellipse(A,B,H,K, [A,YMAX], [0,B], dist_res=N_NODES_PER_LAYER)
    axs.scatter(ell_distr[0], ell_distr[1],c='g')
    # NOTE(review): this rebinds the name `refractions` from the function to
    # its result, so the function cannot be called again afterwards —
    # consider renaming the result variable.
    refractions, _ = refractions(ell_distr_coords, [0,0], n1=1, n2=0.85, bias_factor=-1.5, plot_ax=axs)
    # ell_2_coords, ell_2 = sctattered_ellipse(A-10,B+10,H,K, [A-10,YMAX], [0,B+10], 100, 100)
    # axs2.scatter(ell_2[0], ell_2[1],c='g')
    ref_nodes_coords, ref_nodes = ref_nodes2(refractions, 10, N_INTERNAL_LAYERS)
    print("total n nodes:", len(ref_nodes_coords))
    axs2.scatter(ref_nodes[0], ref_nodes[1])
    # Lift the 2D node layout into 3D, then revolve it about the z axis to
    # build the full point cloud.
    ref_nodes_coords, ref_nodes = make_3d(ref_nodes_coords, ref_nodes)
    node_cloud, _, nodes = revolute(ref_nodes_coords, res=N_REVS, axis='z')
    axs3.scatter3D(nodes[0],nodes[1],nodes[2])
    hex8(node_cloud, nodes)
    # xnodes = np.ma.array([0,1,2,3], mask=False)
    # ynodes = np.ma.array([0,1,2,3], mask=False)
    # def mask(arrays, idx):
    #     for arr in arrays:
    #         arr.mask[idx] = True
    # mask([xnodes, ynodes], 1)
    # print(xnodes)
    axs.grid()
    axs.axis('equal')
    axs2.grid()
    # NOTE(review): `y_res` is not a standard matplotlib Axes attribute; this
    # just attaches an arbitrary attribute — confirm it is actually read anywhere.
    axs2.y_res = 2
    axs2.axis('equal')
    # axs2.x_res = 5
    plt.show()
| StarcoderdataPython |
1747443 |
"""
FNAME
Machine-generated model code
"""
class SFCModel(object):
    """
    Machine-generated stock-flow-consistent model.

    Solves, by fixed-point iteration at every time step:
        x = y + 2,
        y = .5 * x,
    with the lagged variable LAG_x(t) = x(t-1) and the exogenous
    series `dummy`.
    """

    def __init__(self):
        self.MaxIterations = 100   # iteration cap per time step
        self.MaxTime = 3           # number of simulated periods
        self.T = 0                 # current period index
        self.x = [0.0]             # endogenous series, seeded with the t=0 value
        self.y = [0.0]
        self.dummy = [1., 1., 1.]  # exogenous series, one value per period

    def Iterator(self, in_vec):
        """Map the current iterate to the next one via the model equations."""
        cur_x, cur_y, cur_lag_x, cur_dummy = in_vec
        # Lagged and exogenous entries pass through unchanged.
        return (cur_y + 2, .5 * cur_x, cur_lag_x, cur_dummy)

    def main(self):
        """Run the simulation for MaxTime periods."""
        for period in range(self.MaxTime):
            self.T = period
            self.RunOneStep()

    def RunOneStep(self):
        """Solve one period by iterating the equations until the L1 change <= .001."""
        state = (self.x[-1], self.y[-1], self.x[self.T - 1], self.dummy[self.T])
        iterations = 0
        while True:
            candidate = self.Iterator(state)
            delta = self.CalcError(state, candidate)
            state = candidate
            iterations += 1
            # The iteration-cap check deliberately precedes the convergence
            # check, matching the original solver's order of evaluation.
            if iterations > self.MaxIterations:
                raise ValueError('No Convergence!')
            if delta <= .001:
                break
        self.x.append(state[0])
        self.y.append(state[1])

    @staticmethod
    def CalcError(vec1, vec2):
        """Return the L1 distance between two state vectors."""
        return sum(abs(val1 - val2) for val1, val2 in zip(vec1, vec2))
if __name__ == '__main__':
    # Run the machine-generated model when invoked as a script.
    model = SFCModel()
    model.main()
| StarcoderdataPython |
1607434 | <filename>blackjack_rl/script/basic_strategy.py
from blackjack_rl.envs.eleven_ace import BlackjackEnv
import os, pickle
import datetime
# Environment seed, for reproducible deals.
seed = 3
# Number of evaluation epochs (per-epoch counters are reset each time).
N_epoch = 10
# LSPI train count (not used in this script; kept for parity with the LSPI runs).
N_episode = 10000
# Number of games evaluated per epoch.
N_eval = 10000
# Data directories, resolved relative to this file.
_base = os.path.dirname(os.path.abspath(__file__)) # absolute path of the directory containing this file
data_dir = os.path.join(_base, "../../data") # output destination for data, relative to this file
detail_dir = os.path.join(_base, "../../data/detail")
# Basic-strategy lookup tables (True = hit, False = stand).
# hard_policy[player_sum - 12][dealer_card - 2]: decision for hard totals
# 12-16; outside that range the decision is unconditional (see basic_strategy).
hard_policy = [[True, True, False, False, False, True, True, True, True, True],
               [False, False, False, False, False, True, True, True, True, True],
               [False, False, False, False, False, True, True, True, True, True],
               [False, False, False, False, False, True, True, True, True, True],
               [False, False, False, False, False, True, True, True, True, True]]
# soft_policy[dealer_card - 2]: decision for a soft 18 (usable ace).
soft_policy = [False, True, True, True, True, False, False, True, True, True]
def basic_strategy(state):
    """Return the basic-strategy action for *state* = (dealer, player, ace).

    dealer: dealer's visible card value, 2..11 (11 = ace).
    player: player's current hand total.
    ace: True when the player holds a usable ace (soft hand).
    Returns True to hit, False to stand.
    """
    dealer, player, ace = state
    assert 2 <= dealer <= 11
    if ace:
        # Soft hands: always hit below 18, always stand above 18,
        # and consult the table at exactly 18.
        if player == 18:
            return soft_policy[dealer - 2]
        elif player < 18:
            return True
        else:
            return False
    else:
        # Hard hands: always hit below 12, always stand above 16,
        # and consult the table for 12-16.
        if player < 12:
            return True
        elif player > 16:
            return False
        else:
            return hard_policy[player - 12][dealer - 2]
if __name__ == '__main__':
    # Build the environment once and reuse it across epochs.
    env = BlackjackEnv(seed=seed)
    for epoch in range(N_epoch):
        # Reset the per-epoch loss counters kept on the environment.
        env.player_bursts = [0, 0]
        env.player_stand_lose = [0, 0]
        mean = 0.0
        for game in range(N_eval):
            result = env.run_one_game(policy=basic_strategy)
            if game == 0:
                print(result)
            mean += result[-1][2]
        mean = mean / N_eval
        # The mean final reward serves as the performance measure.
        print(f"performance:{mean}")
        loses = [env.player_bursts, env.player_stand_lose]
        print(f"player_burst:{env.player_bursts[0]},{env.player_bursts[1]} player_stand_lose:{env.player_stand_lose[0]},{env.player_stand_lose[1]}")
    # # save result
    # print(rewards)
    # os.makedirs(data_dir, exist_ok=True)
    # os.makedirs(detail_dir, exist_ok=True)
    # now = datetime.datetime.now()
    # with open(os.path.join(data_dir, f"basic_lose.pkl"), "wb") as f:
    #     pickle.dump(loses, f)
    # with open(os.path.join(detail_dir, f"basic_lose_"+now.strftime('%Y%m%d_%H%M%S')+".pkl"), "wb") as f:
    #     pickle.dump(loses, f)
| StarcoderdataPython |
3327670 | from rest_framework.response import Response
from rest_framework import viewsets
class HelloworldView(viewsets.ViewSet):
    """ViewSet whose list endpoint always responds with the string "helloworld"."""

    def list(self, request, *args, **kwargs):
        # Static payload; no queryset or serializer involved.
        payload = "helloworld"
        return Response(payload)
| StarcoderdataPython |
1663006 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2022 <NAME>.
#
# Invenio-App-RDM is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Record migration script from InvenioRDM 7.0 to 8.0.
Disclaimer: This script is intended to be executed *only once*, namely when
upgrading from InvenioRDM 7.0 to 8.0!
If this script is executed at any other time, probably the best case scenario
is that nothing happens!
"""
from pathlib import Path
import invenio_rdm_records.fixtures as fixtures
from click import echo, secho
from invenio_access.permissions import system_identity
from invenio_db import db
from invenio_rdm_records.fixtures import (
PrioritizedVocabulariesFixtures,
VocabulariesFixture,
)
from invenio_vocabularies.proxies import current_service as vocabulary_svc
def execute_upgrade():
    """Execute the upgrade from InvenioRDM 7.0 to 8.0.

    Please read the disclaimer on this module before thinking about executing
    this function!

    Side effects: updates license vocabulary records, commits (on success) or
    rolls back (on any error) the database session, and prints progress to
    the console via click's echo/secho.
    """

    def update_license(license_dict):
        """Update the stored vocabulary with the new icon.

        Returns None on success, or an error-message string on failure.
        """
        try:
            echo(f"Updating license: {license_dict['id']}... ", nl=False)
            license_ = vocabulary_svc.read(
                system_identity, ("licenses", license_dict["id"])
            )._obj
            # NOTE: we don't use the service update method here because we
            # want to evade validation errors, and the migration guide tells
            # the users to completely rebuild the search indices anyway
            # and we pop the '$schema' because it might be outdated and is
            # a constant field anyway
            license_["icon"] = license_dict["icon"]
            license_.pop("$schema", None)
            license_.commit()
            secho("OK", fg="green")
        except Exception as e:
            secho("Error", fg="red")
            return f"Error updating license '{license_dict['id']}': {e}"
        return None

    def update_licenses_from_fixture(fixture):
        """Use the given fixture to update the license vocabularies.

        Returns (did_load_licenses, errors): whether any license entry was
        seen, and the list of per-license error messages.
        """
        errors = []
        did_load_licenses = False
        for id_, entry in fixture.read():
            if id_ == "licenses":
                for license_dict in entry.iterate(ignore=[]):
                    did_load_licenses = True
                    error = update_license(license_dict)
                    if error:
                        errors.append(error)
        return did_load_licenses, errors

    # let the prioritized vocabularies fixture take care of the
    # path building and entrypoint definition, etc.
    dir_ = Path(fixtures.__file__).parent
    pf = PrioritizedVocabulariesFixtures(
        system_identity,
        app_data_folder=Path("./app_data"),
        pkg_data_folder=(dir_ / "data"),
        filename="vocabularies.yaml",
    )
    errors = []
    licenses_loaded = False
    # we're checking the same places as the prioritized vocabularies fixtures,
    # and in the same order
    # 1: check the app_data directory for the invenio instance
    app_data_yaml_file = pf._app_data_folder / pf._filename
    if app_data_yaml_file.exists():
        app_data_fixture = VocabulariesFixture(system_identity, app_data_yaml_file)
        licenses_loaded, errs = update_licenses_from_fixture(app_data_fixture)
        errors.extend(errs)
    # 2: check the entry points
    extensions = [ep.load() for ep in pf._entry_points()]
    for module in extensions:
        directory = Path(module.__file__).parent
        filepath = directory / pf._filename
        vocab_list = pf.peek_vocabularies(filepath)
        # check if the entry points define something for licenses
        if not licenses_loaded and "licenses" in vocab_list:
            extension_fixture = VocabulariesFixture(system_identity, filepath)
            licenses_loaded, errs = update_licenses_from_fixture(extension_fixture)
            errors.extend(errs)
    # 3: check the default vocabularies from rdm-records
    pkg_data_yaml_file = pf._pkg_data_folder / pf._filename
    if not licenses_loaded and pkg_data_yaml_file.exists():
        pkg_data_fixture = VocabulariesFixture(system_identity, pkg_data_yaml_file)
        licenses_loaded, errs = update_licenses_from_fixture(pkg_data_fixture)
        errors.extend(errs)
    success = not errors
    # give feedback on the operation
    if not licenses_loaded:
        secho(
            "Warning: No licenses were upgraded, which is unexpected.",
            fg="yellow",
        )
    if success:
        db.session.commit()
        secho(
            "Data migration completed, please rebuild the search indices now.",
            fg="green",
        )
    else:
        db.session.rollback()
        secho(
            "Upgrade aborted due to the following errors:",
            fg="red",
            err=True,
        )
        for error in errors:
            secho(error, fg="red", err=True)
        # Fixed: the previous trailing comma inside the parentheses made
        # `msg` a 1-tuple, so secho printed the tuple repr
        # ("('The changes ...',)") instead of the intended message.
        msg = (
            "The changes have been rolled back. "
            "Please fix the above listed errors and try the upgrade again"
        )
        secho(msg, fg="yellow", err=True)
# If this module is executed directly (not imported), perform the upgrade.
if __name__ == "__main__":
    execute_upgrade()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.