| Column | Type | Range |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 1–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–239 |
| max_stars_repo_name | string | lengths 5–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 ⌀ | 1–191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_path | string | lengths 3–239 |
| max_issues_repo_name | string | lengths 5–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 ⌀ | 1–67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_path | string | lengths 3–239 |
| max_forks_repo_name | string | lengths 5–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 ⌀ | 1–105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24–24 |
| content | string | lengths 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |

Each record below is one metadata row, followed by the file content and a trailing stats row (avg_line_length | max_line_length | alphanum_fraction). ⌀ marks nullable columns.
4a1c44a2ece75af5f3adb234674129a8dbefcc0d | 96 | py | Python | gyomei_trainer/metrics/__init__.py | KonstantinDob/gyomei_trainer | 028628d09205b0bb4c411d39d51a00add58aecc2 | ["MIT"] | null | null | null | gyomei_trainer/metrics/__init__.py | KonstantinDob/gyomei_trainer | 028628d09205b0bb4c411d39d51a00add58aecc2 | ["MIT"] | null | null | null | gyomei_trainer/metrics/__init__.py | KonstantinDob/gyomei_trainer | 028628d09205b0bb4c411d39d51a00add58aecc2 | ["MIT"] | null | null | null |
from gyomei_trainer.metrics.loss import Loss
from gyomei_trainer.metrics.metrics import Metrics
| 32 | 50 | 0.875 |
4a1c44da60a33da5c8e2fa01222b73c47e603799 | 1,593 | py | Python | store_project/urls.py | cristian-rincon/py-drf-crud | 4f75dd5aeb99efba9be91af3f00372aa9990c083 | ["MIT"] | null | null | null | store_project/urls.py | cristian-rincon/py-drf-crud | 4f75dd5aeb99efba9be91af3f00372aa9990c083 | ["MIT"] | null | null | null | store_project/urls.py | cristian-rincon/py-drf-crud | 4f75dd5aeb99efba9be91af3f00372aa9990c083 | ["MIT"] | null | null | null |
"""store_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.urls import include, path
from django.conf.urls.static import static
# JWT Utilities
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
from users.views import profile_upload
urlpatterns = [
url(r'^accounts/login/', include('django_gitlab_auth.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('rest_registration.api.urls')),
path('accounts/upload-csv/', profile_upload, name='profile_upload'),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),
path('api/', include('store.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
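The urlpatterns above expose SimpleJWT's token endpoints alongside the store API. A rough sketch of how a client would use them; the host, username, and password are placeholders, not from the source:
import requests

BASE = "http://localhost:8000"  # hypothetical dev server
# Obtain an access/refresh pair from the token_obtain_pair route.
tokens = requests.post(BASE + "/api/token/",
                       data={"username": "alice", "password": "secret"}).json()
# Call the store API with the access token, then renew it via the refresh route.
resp = requests.get(BASE + "/api/",
                    headers={"Authorization": "Bearer " + tokens["access"]})
renewed = requests.post(BASE + "/api/token/refresh/",
                        data={"refresh": tokens["refresh"]}).json()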
| 38.853659 | 81 | 0.731325 |
4a1c45925f763e04e93ee28e21427a3dbbabac5c | 2,358 | py | Python | examples/glyphs/choropleth.py | rothnic/bokeh | 8da5e16b260a75caa8e7ef4caf215bb93dd784db | ["BSD-3-Clause"] | 1 | 2015-07-17T13:57:01.000Z | 2015-07-17T13:57:01.000Z | examples/glyphs/choropleth.py | evidation-health/bokeh | 2c580d93419033b962d36e3c46d7606cc2f24606 | ["BSD-3-Clause"] | null | null | null | examples/glyphs/choropleth.py | evidation-health/bokeh | 2c580d93419033b962d36e3c46d7606cc2f24606 | ["BSD-3-Clause"] | 1 | 2016-03-18T03:01:59.000Z | 2016-03-18T03:01:59.000Z |
from __future__ import print_function
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Patches
from bokeh.models import (
Plot, DataRange1d, ColumnDataSource, ResizeTool
)
from bokeh.resources import INLINE
from bokeh.sampledata import us_states, us_counties, unemployment
us_states = us_states.data.copy()
us_counties = us_counties.data
unemployment = unemployment.data
del us_states["HI"]
del us_states["AK"]
state_source = ColumnDataSource(
data=dict(
state_xs=[us_states[code]["lons"] for code in us_states],
state_ys=[us_states[code]["lats"] for code in us_states],
)
)
colors = ["#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043"]
county_colors = []
for county_id in us_counties:
if us_counties[county_id]["state"] in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]:
continue
try:
rate = unemployment[county_id]
idx = min(int(rate/2), 5)
county_colors.append(colors[idx])
except KeyError:
county_colors.append("black")
county_source = ColumnDataSource(
data=dict(
county_xs=[us_counties[code]["lons"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
county_ys=[us_counties[code]["lats"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
county_colors=county_colors
)
)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr, min_border=0, border_fill="white",
title="2009 Unemployment Data", plot_width=1300, plot_height=800, toolbar_location="left")
county_patches = Patches(xs="county_xs", ys="county_ys", fill_color="county_colors", fill_alpha=0.7, line_color="white", line_width=0.5)
plot.add_glyph(county_source, county_patches)
state_patches = Patches(xs="state_xs", ys="state_ys", fill_alpha=0.0, line_color="#884444", line_width=2)
plot.add_glyph(state_source, state_patches)
plot.add_tools(ResizeTool())
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "choropleth.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Choropleth of all US counties, Unemployment 2009"))
print("Wrote %s" % filename)
view(filename)
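The county coloring above bins the unemployment rate into six shades, two percentage points per bin, clamping everything at 10% and over into the darkest color. A minimal sketch of that mapping:
colors = ["#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043"]
for rate in (0.5, 3.9, 7.3, 14.0):
    idx = min(int(rate / 2), 5)     # [0,2) -> 0, [2,4) -> 1, ..., >= 10 -> 5
    print(rate, "->", colors[idx])  # e.g. 7.3 -> int(3.65) = 3 -> "#DF65B0"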
| 33.211268 | 150 | 0.693384 |
4a1c45bc049f562e3ec498f8204fb821e6720677 | 539 | py | Python | learn1.py | 8055aa/Python3Code | ac832241b87c02a3b3e1b216dbcd3cfbd0293b7c | ["Apache-2.0"] | null | null | null | learn1.py | 8055aa/Python3Code | ac832241b87c02a3b3e1b216dbcd3cfbd0293b7c | ["Apache-2.0"] | null | null | null | learn1.py | 8055aa/Python3Code | ac832241b87c02a3b3e1b216dbcd3cfbd0293b7c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import sys
countries = ["Denmark","Finland","Norway","Sweden"]
list1 = ['spam', 'eggs', 'Apple', 100, 1234]
def getInt(msg):
    # Keep prompting until the user enters a valid integer.
    while True:
        try:
            return int(input(msg))
        except ValueError:
            print("Please input an integer")
for x in countries:
    # len() never raises ValueError here, so no try/except is needed.
    lens = len(str(x))
    print("{0}'s length is .....{1}".format(x, lens))
| 19.962963 | 55 | 0.513915 |
4a1c45f5fc2f01cebff02c9d13f577a5f3828c78 | 13,096 | py | Python | wikidataQuery.py | nvbln/LT | 4ba7ffc6222fa6a8159ff4f0222ee0ea15e098ed | ["MIT"] | null | null | null | wikidataQuery.py | nvbln/LT | 4ba7ffc6222fa6a8159ff4f0222ee0ea15e098ed | ["MIT"] | 12 | 2019-06-08T14:25:52.000Z | 2019-06-11T20:56:13.000Z | wikidataQuery.py | nvbln/LT | 4ba7ffc6222fa6a8159ff4f0222ee0ea15e098ed | ["MIT"] | null | null | null |
# Functions needed for making queries for WikiData
import requests
import settings
import traceback
import json
import simplejson
import datefinder
from textblob import TextBlob
# TODO account for band member occupation properties
# A pre-defined dictionary for difficult terms
property_dict = {'band members': 'has part', 'members': 'has part',
'member': 'has part', 'band member': 'has part',
'founding year': 'inception', 'bandmember': 'has part',
'bandmembers': 'has part', 'founding': 'inception',
'play': 'instrument', 'real name':'birth name',
'album':'part of'}
# List of w-words, feel free to add any words I forgot
w_words_dict = {'what':'basic', 'who':'person', 'when':'date', 'where':'place',
'why':'cause', 'how':'cause', 'which':'basic', 'how many':'count'}
def makeQuery(keywords):
entity_id = []
property_ids = []
filters = []
# Querying for IRIs
# Identify query type
if "question_word" in keywords:
query_type = w_words_dict.get(keywords["question_word"][0].lower(), 'yes/no')
if len(keywords["question_word"]) > 1 and keywords["question_word"][1] == 'many':
query_type = 'count'
else:
query_type = 'yes/no'
# Get list of possible properties
if "property" in keywords:
# Singularize property, if it's a noun
blob = TextBlob(keywords["property"][0])
prop = ' '.join([word.singularize() for word in blob.words])
    # Replace the property if it's a difficult one
prop = property_dict.get(prop, prop)
if settings.verbose:
print('property:', prop)
# Try to look for properties
property_ids = searchEntities(prop, "property")
# If no properties found, it could be occupation of a member in the group
if not property_ids:
property_ids = [{'id':str(searchEntity(prop, 'entity')), 'is_entity':True},
{'id':'P527'}]
# Get entity IRI
if "entity" in keywords:
entity_id = searchEntity(keywords["entity"][0], "entity")
# Add filters from question
if "property_attribute" in keywords:
if keywords["question_id"][0] == 9:
# Likely a 'yes/no question'
# ('X is Y of Z', with (Z == property_attribute)? as required answer.)
addFilter(filters, searchEntity(keywords["property_attribute"][0], "entity"))
query_type = 'yes/no'
# Add filters from questions
if "specification" in keywords:
if keywords["question_id"][0] == 7:
# Likely a 'X is Y of Z', with Z as required answer.
addFilter(filters, searchEntity(keywords["specification"][0], "entity"))
query_type = 'specified'
qualifiers = []
if "date_word" in keywords:
date_word = keywords["date_word"][0]
if date_word == "between" or date_word == "during":
date1 = keywords["date1"][0].strftime("%Y-%m-%d")
date2 = keywords["date2"][0].strftime("%Y-%m-%d")
qualifiers.append(date1)
qualifiers.append(date2)
else:
if settings.verbose:
print("Dates have been wrongly analyzed.")
# Firing the query
answer = submitTypeQuery(entity_id, property_ids, filters, query_type, qualifiers)
return answer
def searchEntity(entity, string_type):
url = 'https://www.wikidata.org/w/api.php'
if string_type != 'entity':
url = url + '?type=' + string_type
params = {'action':'wbsearchentities',
'language':'en',
'format':'json'}
params['search'] = entity.rstrip()
json = requests.get(url,params).json()
# Return the most likely entity
if len(json['search']) > 0:
if settings.verbose:
print(entity + '->' + json['search'][0]['label'])
if settings.verbose:
print(json['search'][0]['id'])
return json['search'][0]['id']
def searchEntities(entity, string_type):
url = 'https://www.wikidata.org/w/api.php'
if string_type != 'entity':
url = url + '?type=' + string_type
params = {'action':'wbsearchentities',
'language':'en',
'format':'json'}
params['search'] = entity.rstrip()
json = requests.get(url,params).json()
# Return all the entities
return json['search']
def submitTypeQuery(entity_id, property_ids, filters, query_type, qualifiers):
url = 'https://query.wikidata.org/sparql'
qualifierVariables = ""
qualifierLines = ""
if len(qualifiers) != 0:
qualifierVariables = "?startTime ?endTime"
qualifierLines = '''
?statement pq:P580 ?startTime .
?statement pq:P582 ?endTime .
'''
# If it's a 'who'-question
if query_type == 'person':
# Format the query without adding extra line for standard property
if not property_ids or not property_ids[0].get('is_entity', False) or not property_ids[0]['id']:
query = query_dict[query_type][0].format(qualifierVariables, entity_id, '', qualifierLines)
# If the property is member's occupation, add extra line to account for that
else:
extra_line = '?ps_ wdt:P106 wd:{0}.'.format(property_ids[0]['id'])
query = query_dict[query_type][0].format(qualifierVariables, entity_id, extra_line, qualifierLines)
else:
# Otherwise the query formatting is generalized
query = query_dict[query_type][0].format(qualifierVariables, entity_id, qualifierLines)
data = []
# Try to fire a query
try:
data = requests.get(url, params={'query': query, 'format': 'json'}).json()
except (json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError):
if settings.verbose:
print("Problem with the following query:")
print(query)
print(traceback.format_exc())
return []
answers = []
chosen_property = None
# Filter data with custom and default filters
# Variable by which we filter is in query_dict[query_type][1]
# Values by which we filter are in query_dict[query_type][2] + filters
processed_data = filterBy(data, query_dict[query_type][1], query_dict[query_type][2] + filters)
# Check what property is found in all filtered properties of the entity
for prop_id in property_ids:
for item in processed_data:
if (chosen_property != None and item['wd']['value'] != chosen_property):
continue
if ("http://www.wikidata.org/entity/" + prop_id['id'] == item['wd']['value']):
if len(qualifiers) != 0:
startCheck = False
endCheck = False
qStartTime = list(datefinder.find_dates(qualifiers[0], index=True))[0][0]
                    qEndTime = list(datefinder.find_dates(qualifiers[1], index=True))[0][0]
startTime = list(datefinder.find_dates(item['startTime']['value'], index=True))[0][0].replace(tzinfo=None)
endTime = list(datefinder.find_dates(item['endTime']['value'], index=True))[0][0].replace(tzinfo=None)
if startTime >= qStartTime:
startCheck = True
if endTime <= qEndTime:
endCheck = True
if startCheck or endCheck:
answers.append(item['ps_Label']['value'])
else:
answers.append(item['ps_Label']['value'])
chosen_property = "http://www.wikidata.org/entity/" + prop_id['id']
# Desperate case, when no property was found print out all the filtered values
if chosen_property == None:
for item in processed_data:
if len(qualifiers) != 0:
startCheck = False
endCheck = False
qStartTime = list(datefinder.find_dates(qualifiers[0], index=True))[0][0]
                qEndTime = list(datefinder.find_dates(qualifiers[1], index=True))[0][0]
startTime = list(datefinder.find_dates(item['startTime']['value'], index=True))[0][0].replace(tzinfo=None)
endTime = list(datefinder.find_dates(item['endTime']['value'], index=True))[0][0].replace(tzinfo=None)
if startTime >= qStartTime:
startCheck = True
if endTime <= qEndTime:
endCheck = True
if startCheck or endCheck:
answers.append(item['ps_Label']['value'])
else:
answers.append(item['ps_Label']['value'])
# Optionally, convert answer to binary
if query_type == 'yes/no':
if not answers:
answers = ['No']
else:
answers = ['Yes']
elif query_type == 'count':
if not (len(answers) == 1 and answers[0].isdigit()):
# The property does not give number, we have to count manually
answers = [str(len(answers))]
if settings.verbose:
print('chosen property:', chosen_property)
return answers
query_dict = {
'basic':['''
SELECT ?wd ?ps_Label {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''', 'wd', []],
'count':['''
SELECT ?wd ?ps_Label {{
VALUES (?entity) {{(wd:{0})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
?wd wikibase:statementProperty ?ps.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''', 'wd', []],
'specified':['''
SELECT ?wd ?ps_Label ?spec {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
?ps_ wdt:P31/wdt:P279 ?spec.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''', 'spec', []],
'date':['''
SELECT ?wd ?ps_Label {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
FILTER(DATATYPE(?ps_) = xsd:dateTime).
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''','wd', []],
'place': ['''
SELECT ?wd ?ps_Label ?is_place {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
?wd wdt:P31 ?is_place.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''', 'is_place', ['http://www.wikidata.org/entity/Q18635217']],
'person':['''
SELECT ?wd ?ps_Label ?is_human {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
?wd wikibase:statementProperty ?ps.
?ps_ wdt:P31 ?is_human.
{2}
{3}
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''','is_human', ['http://www.wikidata.org/entity/Q5']],
'cause':['''
SELECT ?wd ?ps_Label ?is_cause {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
?wd wdt:P1629 ?cause_type.
?cause_type wdt:P279 ?is_cause.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''','is_cause', ['http://www.wikidata.org/entity/Q179289']],
'yes/no':['''
SELECT ?wd ?ps_Label ?ps_ {0} {{
VALUES (?entity) {{(wd:{1})}}
?entity ?p ?statement .
?statement ?ps ?ps_ .
{2}
?wd wikibase:statementProperty ?ps.
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "en" }}
}}''', 'ps_', []]
}
def filterBy(data, var_id, entities_id):
new_data = []
# Look through all values of var_id, return only
# item with values available in entities_id
for item in data['results']['bindings']:
if (not entities_id or item[var_id]['value'] in entities_id):
new_data.append(item)
return new_data
def addFilter(filters, f):
# If there is a filter to append, append it
if f != None:
filters.append('http://www.wikidata.org/entity/' + f)
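makeQuery expects a keywords dict whose values are lists (the code indexes keywords["question_word"][0] and so on). A hypothetical invocation; the keyword values are illustrative, not from the source:
keywords = {
    "question_word": ["who"],   # mapped to the 'person' query type via w_words_dict
    "property": ["members"],    # singularized, then rewritten to 'has part' via property_dict
    "entity": ["The Beatles"],
}
answers = makeQuery(keywords)   # resolves IRIs, then fires SPARQL at query.wikidata.org
print(answers)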
| 37.204545 | 126 | 0.551924 |
4a1c46505af14d934f7ad0041c80713f6efc9372 | 21,576 | py | Python | sdk/python/pulumi_azure_native/network/v20181101/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20181101/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20181101/express_route_circuit_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitConnectionArgs', 'ExpressRouteCircuitConnection']
@pulumi.input_type
class ExpressRouteCircuitConnectionArgs:
def __init__(__self__, *,
circuit_name: pulumi.Input[str],
peering_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a ExpressRouteCircuitConnection resource.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input['SubResourceArgs'] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input['SubResourceArgs'] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
pulumi.set(__self__, "circuit_name", circuit_name)
pulumi.set(__self__, "peering_name", peering_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_prefix is not None:
pulumi.set(__self__, "address_prefix", address_prefix)
if authorization_key is not None:
pulumi.set(__self__, "authorization_key", authorization_key)
if connection_name is not None:
pulumi.set(__self__, "connection_name", connection_name)
if express_route_circuit_peering is not None:
pulumi.set(__self__, "express_route_circuit_peering", express_route_circuit_peering)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_express_route_circuit_peering is not None:
pulumi.set(__self__, "peer_express_route_circuit_peering", peer_express_route_circuit_peering)
@property
@pulumi.getter(name="circuitName")
def circuit_name(self) -> pulumi.Input[str]:
"""
The name of the express route circuit.
"""
return pulumi.get(self, "circuit_name")
@circuit_name.setter
def circuit_name(self, value: pulumi.Input[str]):
pulumi.set(self, "circuit_name", value)
@property
@pulumi.getter(name="peeringName")
def peering_name(self) -> pulumi.Input[str]:
"""
The name of the peering.
"""
return pulumi.get(self, "peering_name")
@peering_name.setter
def peering_name(self, value: pulumi.Input[str]):
pulumi.set(self, "peering_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@address_prefix.setter
def address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address_prefix", value)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> Optional[pulumi.Input[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@authorization_key.setter
def authorization_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization_key", value)
@property
@pulumi.getter(name="connectionName")
def connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the express route circuit connection.
"""
return pulumi.get(self, "connection_name")
@connection_name.setter
def connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_name", value)
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@express_route_circuit_peering.setter
def express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "express_route_circuit_peering", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@peer_express_route_circuit_peering.setter
def peer_express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "peer_express_route_circuit_peering", value)
class ExpressRouteCircuitConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = address_prefix
__props__.__dict__["authorization_key"] = authorization_key
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__.__dict__["circuit_name"] = circuit_name
__props__.__dict__["connection_name"] = connection_name
__props__.__dict__["express_route_circuit_peering"] = express_route_circuit_peering
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
__props__.__dict__["peer_express_route_circuit_peering"] = peer_express_route_circuit_peering
if peering_name is None and not opts.urn:
raise TypeError("Missing required property 'peering_name'")
__props__.__dict__["peering_name"] = peering_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200501:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitConnection"), 
pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20201101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ExpressRouteCircuitConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitConnection, __self__).__init__(
'azure-native:network/v20181101:ExpressRouteCircuitConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitConnection':
"""
Get an existing ExpressRouteCircuitConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = None
__props__.__dict__["authorization_key"] = None
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_circuit_peering"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer_express_route_circuit_peering"] = None
__props__.__dict__["provisioning_state"] = None
return ExpressRouteCircuitConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="circuitConnectionStatus")
def circuit_connection_status(self) -> pulumi.Output[str]:
"""
Express Route Circuit Connection State. Possible values are: 'Connected' and 'Disconnected'.
"""
return pulumi.get(self, "circuit_connection_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the circuit connection resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
| 57.536 | 4,005 | 0.706433 |
4a1c46e495a995e378e61e588042dc8749b44a4b | 560 | py | Python | saefportal/analyzer/tests/utils.py | harry-consulting/SAEF | 12ef43bbcc3178b8a988e21c1bef035881cf6e6d | ["BSD-2-Clause"] | 4 | 2020-12-16T13:14:26.000Z | 2022-03-26T08:54:12.000Z | saefportal/analyzer/tests/utils.py | harry-consulting/SAEF | 12ef43bbcc3178b8a988e21c1bef035881cf6e6d | ["BSD-2-Clause"] | 1 | 2022-03-26T09:09:04.000Z | 2022-03-26T09:09:04.000Z | saefportal/analyzer/tests/utils.py | harry-consulting/SAEF | 12ef43bbcc3178b8a988e21c1bef035881cf6e6d | ["BSD-2-Clause"] | 1 | 2020-12-16T13:20:17.000Z | 2020-12-16T13:20:17.000Z |
from datetime import datetime
def validate_configuration(configuration_dict):
missing_configuration = []
for key, value in configuration_dict.items():
if not value:
missing_configuration.append(key)
if len(missing_configuration) > 0:
raise ValueError(
f'Missing configuration of {missing_configuration} in the settings.ini file')
def make_naive(value):
aware_datetime = datetime.strptime(value, "%Y-%m-%d %H:%M:%S.%f%z")
return (aware_datetime - aware_datetime.utcoffset()).replace(tzinfo=None)
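make_naive parses an offset-aware timestamp string in the "%Y-%m-%d %H:%M:%S.%f%z" format and shifts it to a naive UTC datetime. A small sketch of both helpers, with made-up inputs:
from datetime import datetime

assert make_naive("2020-12-16 13:14:26.000000+0100") == datetime(2020, 12, 16, 12, 14, 26)

try:
    validate_configuration({"host": "localhost", "token": ""})  # empty value counts as missing
except ValueError as err:
    print(err)  # Missing configuration of ['token'] in the settings.ini file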
| 31.111111 | 89 | 0.701786 |
4a1c4756e96c07ef67eb135e2e99ac897b495b0c | 1,099 | py | Python | var/spack/repos/builtin/packages/py-lockfile/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-lockfile/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-lockfile/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLockfile(PythonPackage):
"""The lockfile package exports a LockFile class which provides a
simple API for locking files. Unlike the Windows msvcrt.locking
function, the fcntl.lockf and flock functions, and the
deprecated posixfile module, the API is identical across both
Unix (including Linux and Mac) and Windows platforms. The lock
mechanism relies on the atomic nature of the link (on Unix) and
mkdir (on Windows) system calls. An implementation based on
SQLite is also provided, more as a demonstration of the
possibilities it provides than as production-quality code.
"""
pypi = "lockfile/lockfile-0.10.2.tar.gz"
version('0.10.2', sha256='9e42252f17d1dd89ee31745e0c4fbe58862c25147eb0ef5295c9cd9bcb4ea2c1')
depends_on("py-setuptools", type='build')
depends_on("py-pbr", type='build')
| 42.269231 | 96 | 0.734304 |
4a1c48b3c98799b8559a03a75ce50f134a69ea2f | 1,676 | py | Python | B2G/gecko/testing/mozbase/mozdevice/tests/sut_mkdir.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | ["Apache-2.0"] | 3 | 2015-08-31T15:24:31.000Z | 2020-04-24T20:31:29.000Z | B2G/gecko/testing/mozbase/mozdevice/tests/sut_mkdir.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | ["Apache-2.0"] | null | null | null | B2G/gecko/testing/mozbase/mozdevice/tests/sut_mkdir.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | ["Apache-2.0"] | 3 | 2015-07-29T07:17:15.000Z | 2020-11-04T06:55:37.000Z |
from sut import MockAgent
import mozdevice
import unittest
class PushTest(unittest.TestCase):
def test_mkdirs(self):
subTests = [ { 'cmds': [ ("isdir /mnt", "TRUE"),
("isdir /mnt/sdcard", "TRUE"),
("isdir /mnt/sdcard/baz", "FALSE"),
("mkdr /mnt/sdcard/baz",
"/mnt/sdcard/baz successfully created"),
("isdir /mnt/sdcard/baz/boop", "FALSE"),
("mkdr /mnt/sdcard/baz/boop",
"/mnt/sdcard/baz/boop successfully created") ],
'expectException': False },
{ 'cmds': [ ("isdir /mnt", "TRUE"),
("isdir /mnt/sdcard", "TRUE"),
("isdir /mnt/sdcard/baz", "FALSE"),
("mkdr /mnt/sdcard/baz",
"##AGENT-WARNING## Could not create the directory /mnt/sdcard/baz") ],
'expectException': True },
]
for subTest in subTests:
a = MockAgent(self, commands = subTest['cmds'])
exceptionThrown = False
try:
mozdevice.DroidSUT.debug = 4
d = mozdevice.DroidSUT("127.0.0.1", port=a.port)
d.mkDirs("/mnt/sdcard/baz/boop/bip")
            except mozdevice.DMError:
exceptionThrown = True
self.assertEqual(exceptionThrown, subTest['expectException'])
a.wait()
if __name__ == '__main__':
unittest.main()
| 41.9 | 104 | 0.440334 |
4a1c4a596e768f241e34bb3af818a607eda8dda5 | 14,225 | py | Python | kmip/core/factories/attribute_values.py | richscze/PyKMIP | 8b3643b135341b2b3de419b0b19a19849bbe6fe0 | ["Apache-2.0"] | 179 | 2015-03-20T06:08:59.000Z | 2022-03-14T02:24:38.000Z | kmip/core/factories/attribute_values.py | imharshr/PyKMIP | 9403ff3d2aa83de4c786b8eedeb85d169fd4a594 | ["Apache-2.0"] | 600 | 2015-04-08T14:14:48.000Z | 2022-03-28T13:49:47.000Z | kmip/core/factories/attribute_values.py | imharshr/PyKMIP | 9403ff3d2aa83de4c786b8eedeb85d169fd4a594 | ["Apache-2.0"] | 131 | 2015-03-30T12:51:49.000Z | 2022-03-23T04:34:34.000Z |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import attributes
from kmip.core import enums
from kmip.core import primitives
from kmip.core import utils
class AttributeValueFactory(object):
def create_attribute_value(self, name, value):
# Switch on the name of the attribute
if name is enums.AttributeType.UNIQUE_IDENTIFIER:
return attributes.UniqueIdentifier(value)
elif name is enums.AttributeType.NAME:
return self._create_name(value)
elif name is enums.AttributeType.OBJECT_TYPE:
return attributes.ObjectType(value)
elif name is enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM:
return attributes.CryptographicAlgorithm(value)
elif name is enums.AttributeType.CRYPTOGRAPHIC_LENGTH:
return self._create_cryptographic_length(value)
elif name is enums.AttributeType.CRYPTOGRAPHIC_PARAMETERS:
return self._create_cryptographic_parameters(value)
elif name is enums.AttributeType.CRYPTOGRAPHIC_DOMAIN_PARAMETERS:
raise NotImplementedError()
elif name is enums.AttributeType.CERTIFICATE_TYPE:
return primitives.Enumeration(
enums.CertificateType,
value=value,
tag=enums.Tags.CERTIFICATE_TYPE
)
elif name is enums.AttributeType.CERTIFICATE_LENGTH:
return primitives.Integer(value, enums.Tags.CERTIFICATE_LENGTH)
elif name is enums.AttributeType.X_509_CERTIFICATE_IDENTIFIER:
raise NotImplementedError()
elif name is enums.AttributeType.X_509_CERTIFICATE_SUBJECT:
raise NotImplementedError()
elif name is enums.AttributeType.X_509_CERTIFICATE_ISSUER:
raise NotImplementedError()
elif name is enums.AttributeType.CERTIFICATE_IDENTIFIER:
raise NotImplementedError()
elif name is enums.AttributeType.CERTIFICATE_SUBJECT:
raise NotImplementedError()
elif name is enums.AttributeType.CERTIFICATE_ISSUER:
raise NotImplementedError()
elif name is enums.AttributeType.DIGITAL_SIGNATURE_ALGORITHM:
raise NotImplementedError()
elif name is enums.AttributeType.DIGEST:
return attributes.Digest()
elif name is enums.AttributeType.OPERATION_POLICY_NAME:
return attributes.OperationPolicyName(value)
elif name is enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK:
return self._create_cryptographic_usage_mask(value)
elif name is enums.AttributeType.LEASE_TIME:
return primitives.Interval(value, enums.Tags.LEASE_TIME)
elif name is enums.AttributeType.USAGE_LIMITS:
raise NotImplementedError()
elif name is enums.AttributeType.STATE:
return attributes.State(value)
elif name is enums.AttributeType.INITIAL_DATE:
return primitives.DateTime(value, enums.Tags.INITIAL_DATE)
elif name is enums.AttributeType.ACTIVATION_DATE:
return primitives.DateTime(value, enums.Tags.ACTIVATION_DATE)
elif name is enums.AttributeType.PROCESS_START_DATE:
return primitives.DateTime(value, enums.Tags.PROCESS_START_DATE)
elif name is enums.AttributeType.PROTECT_STOP_DATE:
return primitives.DateTime(value, enums.Tags.PROTECT_STOP_DATE)
elif name is enums.AttributeType.DEACTIVATION_DATE:
return primitives.DateTime(value, enums.Tags.DEACTIVATION_DATE)
elif name is enums.AttributeType.DESTROY_DATE:
return primitives.DateTime(value, enums.Tags.DESTROY_DATE)
elif name is enums.AttributeType.COMPROMISE_OCCURRENCE_DATE:
return primitives.DateTime(
value, enums.Tags.COMPROMISE_OCCURRENCE_DATE)
elif name is enums.AttributeType.COMPROMISE_DATE:
return primitives.DateTime(value, enums.Tags.COMPROMISE_DATE)
elif name is enums.AttributeType.REVOCATION_REASON:
raise NotImplementedError()
elif name is enums.AttributeType.ARCHIVE_DATE:
return primitives.DateTime(value, enums.Tags.ARCHIVE_DATE)
elif name is enums.AttributeType.OBJECT_GROUP:
return primitives.TextString(value, enums.Tags.OBJECT_GROUP)
elif name is enums.AttributeType.FRESH:
return primitives.Boolean(value, enums.Tags.FRESH)
elif name is enums.AttributeType.LINK:
raise NotImplementedError()
elif name is enums.AttributeType.APPLICATION_SPECIFIC_INFORMATION:
return self._create_application_specific_information(value)
elif name is enums.AttributeType.CONTACT_INFORMATION:
return self._create_contact_information(value)
elif name is enums.AttributeType.LAST_CHANGE_DATE:
return primitives.DateTime(value, enums.Tags.LAST_CHANGE_DATE)
elif name is enums.AttributeType.SENSITIVE:
return primitives.Boolean(value, enums.Tags.SENSITIVE)
elif name is enums.AttributeType.CUSTOM_ATTRIBUTE:
return attributes.CustomAttribute(value)
else:
if not isinstance(name, str):
raise ValueError('Unrecognized attribute type: '
'{0}'.format(name))
elif name.startswith('x-'):
# Custom attribute indicated
return attributes.CustomAttribute(value)
def create_attribute_value_by_enum(self, enum, value):
# Switch on the name of the attribute
if enum is enums.Tags.UNIQUE_IDENTIFIER:
return attributes.UniqueIdentifier(value)
elif enum is enums.Tags.NAME:
return self._create_name(value)
elif enum is enums.Tags.OBJECT_TYPE:
return attributes.ObjectType(value)
elif enum is enums.Tags.CRYPTOGRAPHIC_ALGORITHM:
return attributes.CryptographicAlgorithm(value)
elif enum is enums.Tags.CRYPTOGRAPHIC_LENGTH:
return self._create_cryptographic_length(value)
elif enum is enums.Tags.CRYPTOGRAPHIC_PARAMETERS:
return self._create_cryptographic_parameters(value)
elif enum is enums.Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS:
raise NotImplementedError()
elif enum is enums.Tags.CERTIFICATE_TYPE:
raise NotImplementedError()
elif enum is enums.Tags.CERTIFICATE_LENGTH:
return primitives.Integer(value, enums.Tags.CERTIFICATE_LENGTH)
elif enum is enums.Tags.X_509_CERTIFICATE_IDENTIFIER:
raise NotImplementedError()
elif enum is enums.Tags.X_509_CERTIFICATE_SUBJECT:
raise NotImplementedError()
elif enum is enums.Tags.X_509_CERTIFICATE_ISSUER:
raise NotImplementedError()
elif enum is enums.Tags.CERTIFICATE_IDENTIFIER:
raise NotImplementedError()
elif enum is enums.Tags.CERTIFICATE_SUBJECT:
raise NotImplementedError()
elif enum is enums.Tags.CERTIFICATE_ISSUER:
raise NotImplementedError()
elif enum is enums.Tags.DIGITAL_SIGNATURE_ALGORITHM:
raise NotImplementedError()
elif enum is enums.Tags.DIGEST:
return attributes.Digest()
elif enum is enums.Tags.OPERATION_POLICY_NAME:
return attributes.OperationPolicyName(value)
elif enum is enums.Tags.CRYPTOGRAPHIC_USAGE_MASK:
return self._create_cryptographic_usage_mask(value)
elif enum is enums.Tags.LEASE_TIME:
return primitives.Interval(value, enums.Tags.LEASE_TIME)
elif enum is enums.Tags.USAGE_LIMITS:
raise NotImplementedError()
elif enum is enums.Tags.STATE:
return attributes.State(value)
elif enum is enums.Tags.INITIAL_DATE:
return primitives.DateTime(value, enums.Tags.INITIAL_DATE)
elif enum is enums.Tags.ACTIVATION_DATE:
return primitives.DateTime(value, enums.Tags.ACTIVATION_DATE)
elif enum is enums.Tags.PROCESS_START_DATE:
return primitives.DateTime(value, enums.Tags.PROCESS_START_DATE)
elif enum is enums.Tags.PROTECT_STOP_DATE:
return primitives.DateTime(value, enums.Tags.PROTECT_STOP_DATE)
elif enum is enums.Tags.DEACTIVATION_DATE:
return primitives.DateTime(value, enums.Tags.DEACTIVATION_DATE)
elif enum is enums.Tags.DESTROY_DATE:
return primitives.DateTime(value, enums.Tags.DESTROY_DATE)
elif enum is enums.Tags.COMPROMISE_OCCURRENCE_DATE:
return primitives.DateTime(
value, enums.Tags.COMPROMISE_OCCURRENCE_DATE)
elif enum is enums.Tags.COMPROMISE_DATE:
return primitives.DateTime(value, enums.Tags.COMPROMISE_DATE)
elif enum is enums.Tags.REVOCATION_REASON:
raise NotImplementedError()
elif enum is enums.Tags.ARCHIVE_DATE:
return primitives.DateTime(value, enums.Tags.ARCHIVE_DATE)
elif enum is enums.Tags.OBJECT_GROUP:
return primitives.TextString(value, enums.Tags.OBJECT_GROUP)
elif enum is enums.Tags.FRESH:
return primitives.Boolean(value, enums.Tags.FRESH)
elif enum is enums.Tags.LINK:
raise NotImplementedError()
elif enum is enums.Tags.APPLICATION_SPECIFIC_INFORMATION:
return self._create_application_specific_information(value)
elif enum is enums.Tags.CONTACT_INFORMATION:
return self._create_contact_information(value)
elif enum is enums.Tags.LAST_CHANGE_DATE:
return primitives.DateTime(value, enums.Tags.LAST_CHANGE_DATE)
elif enum is enums.Tags.SENSITIVE:
return primitives.Boolean(value, enums.Tags.SENSITIVE)
elif enum is enums.Tags.CUSTOM_ATTRIBUTE:
return attributes.CustomAttribute(value)
else:
raise ValueError("Unrecognized attribute type: {}".format(enum))
def _create_name(self, name):
if name is not None:
if isinstance(name, attributes.Name):
return attributes.Name.create(name.name_value, name.name_type)
elif isinstance(name, str):
return attributes.Name.create(
name,
enums.NameType.UNINTERPRETED_TEXT_STRING
)
else:
raise ValueError('Unrecognized attribute type: '
'{0}'.format(name))
else:
return attributes.Name()
def _create_cryptographic_length(self, length):
if length is not None and not isinstance(length, int):
msg = utils.build_er_error(attributes.CryptographicLength,
'constructor argument type', int,
type(length))
raise TypeError(msg)
return attributes.CryptographicLength(length)
def _create_cryptographic_parameters(self, params):
if params is None:
params = {}
if isinstance(params, dict):
return attributes.CryptographicParameters(
block_cipher_mode=params.get('block_cipher_mode', None),
padding_method=params.get('padding_method', None),
hashing_algorithm=params.get('hashing_algorithm', None),
key_role_type=params.get('key_role_type', None),
digital_signature_algorithm=params.get(
'digital_signature_algorithm',
None
),
cryptographic_algorithm=params.get(
'cryptographic_algorithm',
None
),
random_iv=params.get('random_iv', None),
iv_length=params.get('iv_length', None),
tag_length=params.get('tag_length', None),
fixed_field_length=params.get('fixed_field_length', None),
invocation_field_length=params.get(
'invocation_field_length',
None
),
counter_length=params.get('counter_length', None),
initial_counter_value=params.get(
'initial_counter_value',
None
)
)
else:
raise TypeError("cryptographic parameters must be a dict")
def _create_cryptographic_usage_mask(self, flags):
mask = None
if flags is not None:
mask = 0
for flag in flags:
mask |= flag.value
return attributes.CryptographicUsageMask(mask)
def _create_application_specific_information(self, info):
if info:
return attributes.ApplicationSpecificInformation(
application_namespace=info.get("application_namespace"),
application_data=info.get("application_data")
)
else:
return attributes.ApplicationSpecificInformation()
def _create_contact_information(self, info):
if info is None:
return attributes.ContactInformation()
else:
if not isinstance(info, str):
msg = utils.build_er_error(attributes.ContactInformation,
'constructor argument type', str,
type(info))
raise TypeError(msg)
return attributes.ContactInformation(info)
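A short sketch of exercising the factory above; the enum members are taken from branches the code already handles:
from kmip.core import enums
from kmip.core.factories.attribute_values import AttributeValueFactory

factory = AttributeValueFactory()
length = factory.create_attribute_value(enums.AttributeType.CRYPTOGRAPHIC_LENGTH, 256)
fresh = factory.create_attribute_value_by_enum(enums.Tags.FRESH, True)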
| 48.220339 | 78 | 0.65884 |
4a1c4bcf5e6c96cf2d0d45cbf954d7dacb9e09a8 | 3,103 | py | Python | packages/python/plotly/plotly/validators/funnel/hoverlabel/font/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | ["MIT"] | 3 | 2020-02-04T21:39:20.000Z | 2020-11-17T19:07:07.000Z | packages/python/plotly/plotly/validators/funnel/hoverlabel/font/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | ["MIT"] | 12 | 2020-06-06T01:22:26.000Z | 2022-03-12T00:13:42.000Z | packages/python/plotly/plotly/validators/funnel/hoverlabel/font/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | ["MIT"] | 17 | 2019-11-21T14:11:29.000Z | 2019-11-21T15:26:23.000Z |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="funnel.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="funnel.hoverlabel.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="funnel.hoverlabel.font", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="funnel.hoverlabel.font", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="funnel.hoverlabel.font", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="funnel.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 30.722772
| 85
| 0.627457
|
4a1c4bf42f6c4652cea29c42b9c6c1b6d1a141b5
| 823
|
py
|
Python
|
mayan/apps/document_indexing/admin.py
|
mdsteveb/mayan-edms
|
f67856fccdb1041202c179bdb85e24d5ca3d277e
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/document_indexing/admin.py
|
mdsteveb/mayan-edms
|
f67856fccdb1041202c179bdb85e24d5ca3d277e
|
[
"Apache-2.0"
] | 1
|
2022-03-12T01:03:39.000Z
|
2022-03-12T01:03:39.000Z
|
mayan/apps/document_indexing/admin.py
|
mdsteveb/mayan-edms
|
f67856fccdb1041202c179bdb85e24d5ca3d277e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Index, IndexInstanceNode, IndexTemplateNode
class IndexTemplateNodeInline(admin.StackedInline):
extra = 0
list_display = ('expression', 'enabled', 'link_documents')
model = IndexTemplateNode
class IndexAdmin(admin.ModelAdmin):
filter_horizontal = ('document_types',)
inlines = [IndexTemplateNodeInline]
list_display = ('name', 'title', 'enabled', 'get_document_types')
def get_document_types(self, instance):
return ', '.join(['"{0}"'.format(document_type) for document_type in instance.document_types.all()]) or _('None')
get_document_types.short_description = _('Document types')
admin.site.register(Index, IndexAdmin)
| 27.433333
| 121
| 0.747266
|
4a1c4c073321d9b072eeda04fbbd221c8c419e00
| 937
|
py
|
Python
|
GPM/FunctionsPd.py
|
ChampionApe/GPM_v06
|
643c8cf6a2dc63475582ae2fb90e76f392ef450c
|
[
"MIT"
] | null | null | null |
GPM/FunctionsPd.py
|
ChampionApe/GPM_v06
|
643c8cf6a2dc63475582ae2fb90e76f392ef450c
|
[
"MIT"
] | null | null | null |
GPM/FunctionsPd.py
|
ChampionApe/GPM_v06
|
643c8cf6a2dc63475582ae2fb90e76f392ef450c
|
[
"MIT"
] | null | null | null |
from _Equations import *
def fPointer(v,**kwargs):
if isinstance(v,tuple):
return v[0](v[1],**kwargs)
elif isinstance(v, dict):
return rc_pd(v)
elif isinstance(v, (rctree_scalar_types,rctree_admissable_types)):
return v
def adjustsparsedomains(v, bctype='infer',index = None, CheckDomains = False, **kwargs):
if bctype != 'full':
if CheckDomains or ((not isinstance(v, rctree_scalar_types)) and not (index.names == v.index.names)):
return broadcast([v], **(kwargs | {'bctype':'full'}))
else:
return v
def fSum(args,bctype='infer',**kwargs):
if not args:
return broadcast(args, **(kwargs | {'bctype':'full'}))
elif len(args)==1:
return broadcast([fPointer(v,**kwargs) for v in args], **(kwargs | {'bctype':'full'}))[0]
else:
nd,oned,dom = broadcast2np([fPointer(v,**kwargs) for v in args],**kwargs)
return adjustsparsedomains(pd.Series(nd.sum(axis=1)+oned.sum(),index=dom) if dom else oned.sum(),**kwargs)
| 36.038462
| 108
| 0.684098
|
4a1c4c662572696a50f5401648cabb21c9e0ecae
| 3,723
|
py
|
Python
|
test_muffin.py
|
elunna/muffin
|
1d5179148ef00ce3c1c63891c06c47efe9a7429e
|
[
"Apache-2.0"
] | null | null | null |
test_muffin.py
|
elunna/muffin
|
1d5179148ef00ce3c1c63891c06c47efe9a7429e
|
[
"Apache-2.0"
] | null | null | null |
test_muffin.py
|
elunna/muffin
|
1d5179148ef00ce3c1c63891c06c47efe9a7429e
|
[
"Apache-2.0"
] | null | null | null |
""" Tests for the muffin module. """
import os
import pytest
from .muffin import *
from test_configs import *
@pytest.yield_fixture(autouse=True)
def cleanup():
wipe_dir(TEST_PROJ)
yield None
wipe_dir(TEST_PROJ)
def test_wipedir_created_dir_dne():
testdir = 'some_random_directory_xxx1234'
ensure_dir(testdir)
wipe_dir(testdir)
assert os.path.isdir(testdir) is False
def test_ensuredir_exists():
testdir = 'some_random_directory_xxx1234'
ensure_dir(testdir)
assert os.path.isdir(testdir)
wipe_dir(testdir)
def test_writelicense_MIT():
ensure_dir(TEST_PROJ)
lic_path = TEST_PROJ + '/LICENSE'
write_license(MIT_CONFIG)
assert os.path.exists(lic_path) # LICENSE file doesn't exist
def test_writelicense_GNU():
ensure_dir(TEST_PROJ)
lic_path = TEST_PROJ + '/LICENSE'
write_license(GNU_CONFIG)
assert os.path.exists(lic_path) # LICENSE file doesn't exist
def test_writelicense_WTFPL():
ensure_dir(TEST_PROJ)
lic_path = TEST_PROJ + '/LICENSE'
write_license(WTFPL_CONFIG)
assert os.path.exists(lic_path) # LICENSE file doesn't exist
def test_makesetupsh_exists():
make_setup_sh(FULL_CONFIG)
assert os.path.exists(FULL_CONFIG['projectname'] + '/setup.sh') # Error making setup.sh
def test_setupgit_git_dir_exists():
setup_git(FULL_CONFIG)
assert os.path.isdir(FULL_CONFIG['projectname'] + '/.git') # Error making .git dir
def test_setupgit_git_config_exists():
setup_git(FULL_CONFIG)
assert os.path.exists(FULL_CONFIG['projectname'] + '/.git/config') # Error making .git/config
def test_cptemplate_makes_project_dir():
cp_templates(FULL_CONFIG)
assert os.path.isdir(TEST_PROJ) # Error making project dir
def test_cptemplate__subdirs():
# Uses a test generator to go through all the subdirectories we want to test.
for i in SUBDIRS:
subdir = TEST_PROJ + '/' + i
yield check_dir, subdir
def check_dir(d):
    # This has to go here because cleanup gets called before and after this method.
cp_templates(FULL_CONFIG)
assert os.path.isdir(d) # Directory doesn't exist
def test_cptemplates_gitignore_exists():
cp_templates(FULL_CONFIG)
assert os.path.exists(TEST_PROJ + '/.gitignore') # .gitignore not created
def test_cptemplates_env_exists():
    cp_templates(FULL_CONFIG)
    assert os.path.exists(TEST_PROJ + '/.env') # Error making .env
def test_cptemplates_konchrc_exists():
    cp_templates(FULL_CONFIG)
    assert os.path.exists(TEST_PROJ + '/.konchrc') # Error making .konchrc
def test_cptemplates_pytest_ini_exists():
    cp_templates(FULL_CONFIG)
    assert os.path.exists(TEST_PROJ + '/pytest.ini') # Error making pytest.ini
def test_cptemplates_main_py_exists():
cp_templates(FULL_CONFIG)
assert os.path.exists(TEST_PROJ + '/main.py') # Error making main.py
def test_cptemplates_logger_py_exists():
cp_templates(FULL_CONFIG)
assert os.path.exists(TEST_PROJ + '/src/logger.py') # Error making /src/logger.py
def test_cptemplates_init_files():
init_files = ['/', '/src', '/tests']
# Uses a test generator to go through all the init files we want to test.
for i in init_files:
init = TEST_PROJ + i + '/__init__.py'
yield check_init_file, init
def check_init_file(filename):
cp_templates(FULL_CONFIG)
assert os.path.exists(filename) # Filename doesn't exist
def test_saveconfig_exists():
save_config(FULL_CONFIG)
projectdir = FULL_CONFIG['projectname']
assert os.path.isdir(projectdir) # Project directory doesn't exist
assert os.path.exists(projectdir + '/config.json') # config.json doesn't exist
| 28.204545
| 105
| 0.715821
|
4a1c4c8920de42224cca02c1482784f8d2164c83
| 499
|
py
|
Python
|
utility_scripts/charpartition_generator.py
|
roblanf/PartitionedAlignments
|
64f59e313703f0281071d77df95cbd0291eefdc3
|
[
"CC-BY-3.0"
] | 10
|
2017-05-15T02:50:27.000Z
|
2021-09-27T06:27:03.000Z
|
utility_scripts/charpartition_generator.py
|
roblanf/BenchmarkAlignments
|
64f59e313703f0281071d77df95cbd0291eefdc3
|
[
"CC-BY-3.0"
] | 20
|
2018-10-09T03:05:06.000Z
|
2019-05-01T01:58:35.000Z
|
utility_scripts/charpartition_generator.py
|
roblanf/BenchmarkAlignments
|
64f59e313703f0281071d77df95cbd0291eefdc3
|
[
"CC-BY-3.0"
] | 5
|
2017-05-15T02:50:46.000Z
|
2020-06-24T05:43:59.000Z
|
# a little script to generate charpartitions from files of charsets
filename = "/Users/roblanfear/Desktop/sets.txt"
charsets = open(filename, 'r').readlines()
names = [s.split("=")[0].split("CHARSET")[1].strip() for s in charsets]
parts = []
for i, n in enumerate(names):
print i, n
part = ''.join([str(i+1), ":", n, ","])
parts.append(part)
charpartition = ' '.join(parts)
charpartition = charpartition.rstrip(",")
charpartition = ''.join([charpartition, ";"])
print charpartition
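# Worked example (illustrative): a sets.txt containing
#   CHARSET gene1 = 1-500;
#   CHARSET gene2 = 501-900;
# yields names ['gene1', 'gene2'] and prints the charpartition string:
#   1:gene1, 2:gene2;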
| 26.263158
| 71
| 0.663327
|
4a1c4d38d0ef8737c641174dfc41ee874b155168
| 1,437
|
py
|
Python
|
code-challanges/401_code_challenges/left_join/test_left_join.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | null | null | null |
code-challanges/401_code_challenges/left_join/test_left_join.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | 1
|
2019-03-11T02:13:58.000Z
|
2019-03-11T02:13:58.000Z
|
code-challanges/401_code_challenges/left_join/test_left_join.py
|
schoentr/data-structures-and-algorithms
|
535ac617a2ab32293014946b043bdb40a647d43b
|
[
"MIT"
] | null | null | null |
from left_join.left_join import left_join
from hashtable.hashtable import Hashtable
def test_one():
ht1 = Hashtable()
ht1.add('chris', 'ball')
ht1.add('tim', 'schoen')
ht1.add('tony', 'tiger')
ht1.add('uncle', 'joe')
ht2 = Hashtable()
ht2.add('chris','jones')
ht2.add('jack','danials')
ht2.add('tony','see')
ht2.add('uncle','nickols')
actual = left_join(ht1,ht2)
assert actual == [['tim', 'schoen', None],['tony', 'tiger', 'see'],['uncle', 'joe', 'nickols'],['chris','ball','jones']]
def test_two():
ht1 = Hashtable()
ht1.add('chris', 'ball')
ht1.add('tim', 'schoen')
ht1.add('tony', 'tiger')
ht1.add('evy', 'joe')
ht2 = Hashtable()
ht2.add('chris','jones')
ht2.add('jack','danials')
ht2.add('tony','see')
ht2.add('uncle','nickols')
actual = left_join(ht1,ht2)
assert actual == [['tim', 'schoen', None],['evy', 'joe', None],['tony', 'tiger', 'see'],['chris','ball','jones']]
def test_three():
ht1 = Hashtable()
ht1.add('chris', 'ball')
ht1.add('tim', 'schoen')
ht1.add('tony', 'tiger')
ht1.add('uncle', 'joe')
ht2 = Hashtable()
actual = left_join(ht1,ht2)
assert actual == [['tim', 'schoen', None],['tony', 'tiger', None],['uncle', 'joe', None],['chris','ball', None]]
def test_four():
ht1 = None
ht2 = Hashtable()
actual = left_join(ht1,ht2)
assert actual == 'No Hash Table Found'
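# Reference sketch (illustrative, not the project's implementation): the
# behaviour the tests above encode, expressed over plain dicts. Every key of
# the left table appears exactly once, paired with the right table's value or
# None; the Hashtable version may emit the rows in a different order.
def _left_join_reference(left, right):
    return [[key, value, right.get(key)] for key, value in left.items()]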
| 28.74
| 125
| 0.568546
|
4a1c4d8ba101731f0a9a1b0982e7ff956f6c68de
| 925
|
py
|
Python
|
Notify/urls.py
|
MichaelNjoroge254/Notify-Alerts
|
877f820db060f7f631105bca515b8839b3ad4411
|
[
"MIT"
] | null | null | null |
Notify/urls.py
|
MichaelNjoroge254/Notify-Alerts
|
877f820db060f7f631105bca515b8839b3ad4411
|
[
"MIT"
] | null | null | null |
Notify/urls.py
|
MichaelNjoroge254/Notify-Alerts
|
877f820db060f7f631105bca515b8839b3ad4411
|
[
"MIT"
] | null | null | null |
"""Notify URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django_registration.backends.one_step.views import RegistrationView
from django.contrib.auth.views import LoginView
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('Alerts.urls')),
]
| 35.576923
| 77
| 0.718919
|
4a1c4dd140ba59b714432b86f0ef95849127f1b5
| 3,942
|
py
|
Python
|
models.py
|
hjdw2/GAN_model_parallel
|
587d80f7647438721393b13c3c282ebe1840e02d
|
[
"Apache-2.0"
] | 1
|
2020-05-07T05:57:02.000Z
|
2020-05-07T05:57:02.000Z
|
models.py
|
hjdw2/GAN_model_parallel
|
587d80f7647438721393b13c3c282ebe1840e02d
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
hjdw2/GAN_model_parallel
|
587d80f7647438721393b13c3c282ebe1840e02d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import ctypes
import multiprocessing as mp
import torch
import torch.nn as nn
from torch.autograd import Variable
class Generator(nn.Module):
def __init__(self, args):
super(Generator, self).__init__()
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(args.nz, args.ngf, 4, 1, 0, bias=False),
            nn.BatchNorm2d(args.ngf),
            nn.ReLU(True),
            # state size. (args.ngf) x 4 x 4
            nn.ConvTranspose2d(args.ngf, args.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf),
            nn.ReLU(True),
            # state size. (args.ngf) x 8 x 8
            nn.ConvTranspose2d(args.ngf, args.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf),
            nn.ReLU(True),
            # state size. (args.ngf) x 16 x 16
            nn.ConvTranspose2d(args.ngf, args.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ngf),
            nn.ReLU(True),
            # state size. (args.ngf) x 32 x 32
            nn.ConvTranspose2d(args.ngf, args.nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (args.nc) x 64 x 64
        )
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, input):
output = self.main(input)
return output
class Discriminator(nn.Module):
def __init__(self, args):
super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            # input is (args.nc) x 64 x 64
            nn.Conv2d(args.nc, args.ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (args.ndf) x 32 x 32
            nn.Conv2d(args.ndf, args.ndf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (args.ndf) x 16 x 16
            nn.Conv2d(args.ndf, args.ndf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (args.ndf) x 8 x 8
            nn.Conv2d(args.ndf, args.ndf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(args.ndf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (args.ndf) x 4 x 4
            nn.Conv2d(args.ndf, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, input):
output = self.main(input)
return output.view(-1, 1).squeeze(1)
class Discriminator_LC(nn.Module):
def __init__(self, args):
super(Discriminator_LC, self).__init__()
self.main = nn.Sequential(
# state size. (args.ndf*2) x 16 x 16
nn.Conv2d(args.nc, 128, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (args.ndf) x 32 x 32
nn.Conv2d(128, 128, 4, 2, 1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, input):
output = self.main(input)
return output.view(-1, 1).squeeze(1)
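# Shape sanity check (illustrative; assumes only the fields the models read:
# nz, ngf, ndf, nc):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(nz=100, ngf=64, ndf=64, nc=3)
#     g, d = Generator(args), Discriminator(args)
#     z = torch.randn(2, args.nz, 1, 1)
#     fake = g(z)        # shape (2, args.nc, 64, 64)
#     scores = d(fake)   # shape (2,), sigmoid outputs in (0, 1)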
| 38.271845
| 72
| 0.529934
|
4a1c4ec7cdf6a8e40502c4e75291dbd5f7d97d18
| 9,901
|
py
|
Python
|
jina/clients/mixin.py
|
kuraakhilesh8230/jina
|
7cc23944fcdfd9944dc805ce8a116818d45317ee
|
[
"Apache-2.0"
] | null | null | null |
jina/clients/mixin.py
|
kuraakhilesh8230/jina
|
7cc23944fcdfd9944dc805ce8a116818d45317ee
|
[
"Apache-2.0"
] | 1
|
2022-03-08T18:46:28.000Z
|
2022-03-08T18:47:24.000Z
|
jina/clients/mixin.py
|
kuraakhilesh8230/jina
|
7cc23944fcdfd9944dc805ce8a116818d45317ee
|
[
"Apache-2.0"
] | null | null | null |
from functools import partialmethod, wraps
from typing import Optional, Dict, List, AsyncGenerator, TYPE_CHECKING, Union
import warnings
from inspect import signature
from jina.helper import run_async, get_or_reuse_loop
from jina.importer import ImportExtensions
if TYPE_CHECKING:
from jina.clients.base import CallbackFnType, InputType
from jina.types.request import Response
from jina import DocumentArray
def _include_results_field_in_param(parameters: Optional['Dict']) -> 'Dict':
key_result = '__results__'
if parameters:
if key_result in parameters:
if not isinstance(parameters[key_result], dict):
warnings.warn(
f'It looks like you passed a dictionary with the key `{key_result}` to `parameters`.'
'This key is reserved, so the associated value will be deleted.'
)
parameters.update({key_result: dict()})
else:
parameters = {key_result: dict()}
return parameters
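# Example (illustrative): _include_results_field_in_param({'top_k': 5}) returns
# {'top_k': 5, '__results__': {}}; a caller-supplied non-dict '__results__'
# value is replaced with {} after the warning above.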
class MutateMixin:
"""The GraphQL Mutation Mixin for Client and Flow"""
def mutate(
self,
mutation: str,
variables: Optional[dict] = None,
timeout: Optional[float] = None,
headers: Optional[dict] = None,
):
"""Perform a GraphQL mutation
:param mutation: the GraphQL mutation as a single string.
:param variables: variables to be substituted in the mutation. Not needed if no variables are present in the mutation string.
:param timeout: HTTP request timeout
:param headers: HTTP headers
:return: dict containing the optional keys ``data`` and ``errors``, for response data and errors.
"""
with ImportExtensions(required=True):
from sgqlc.endpoint.http import HTTPEndpoint as SgqlcHTTPEndpoint
proto = 'https' if self.args.https else 'http'
graphql_url = f'{proto}://{self.args.host}:{self.args.port}/graphql'
endpoint = SgqlcHTTPEndpoint(graphql_url)
res = endpoint(
mutation, variables=variables, timeout=timeout, extra_headers=headers
)
return res
class AsyncMutateMixin(MutateMixin):
"""The async GraphQL Mutation Mixin for Client and Flow"""
async def mutate(
self,
mutation: str,
variables: Optional[dict] = None,
timeout: Optional[float] = None,
headers: Optional[dict] = None,
):
"""Perform a GraphQL mutation, asynchronously
:param mutation: the GraphQL mutation as a single string.
:param variables: variables to be substituted in the mutation. Not needed if no variables are present in the mutation string.
:param timeout: HTTP request timeout
:param headers: HTTP headers
:return: dict containing the optional keys ``data`` and ``errors``, for response data and errors.
"""
return await get_or_reuse_loop().run_in_executor(
None, super().mutate, mutation, variables, timeout, headers
)
class PostMixin:
"""The Post Mixin class for Client and Flow"""
def post(
self,
on: str,
inputs: Optional['InputType'] = None,
on_done: Optional['CallbackFnType'] = None,
on_error: Optional['CallbackFnType'] = None,
on_always: Optional['CallbackFnType'] = None,
parameters: Optional[Dict] = None,
target_executor: Optional[str] = None,
request_size: int = 100,
show_progress: bool = False,
continue_on_error: bool = False,
**kwargs,
) -> Optional[Union['DocumentArray', List['Response']]]:
"""Post a general data request to the Flow.
:param inputs: input data which can be an Iterable, a function which returns an Iterable, or a single Document id.
:param on: the endpoint is used for identifying the user-defined ``request_type``, labeled by ``@requests(on='/abc')``
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param request_size: the number of Documents per request. <=0 means all inputs in one request.
:param show_progress: if set, client will show a progress bar on receiving every request.
:param continue_on_error: if set, a Request that causes callback error will be logged only without blocking the further requests.
:param kwargs: additional parameters
:return: None or DocumentArray containing all response Documents
.. warning::
``target_executor`` uses ``re.match`` for checking if the pattern is matched.
``target_executor=='foo'`` will match both deployments with the name ``foo`` and ``foo_what_ever_suffix``.
"""
return_results = False
async def _get_results(*args, **kwargs):
result = []
c = self.client
c.show_progress = show_progress
c.continue_on_error = continue_on_error
async for resp in c._get_results(*args, **kwargs):
if return_results:
result.append(resp)
if return_results:
if not c.args.return_responses:
docs = [r.data.docs for r in result]
if len(docs) < 1:
return docs
else:
return docs[0].reduce_all(docs[1:])
else:
return result
if (on_always is None) and (on_done is None):
return_results = True
parameters = _include_results_field_in_param(parameters)
on_error = _wrap_on_error(on_error) if on_error is not None else on_error
return run_async(
_get_results,
inputs=inputs,
on_done=on_done,
on_error=on_error,
on_always=on_always,
exec_endpoint=on,
target_executor=target_executor,
parameters=parameters,
request_size=request_size,
**kwargs,
)
# ONLY CRUD, for other request please use `.post`
index = partialmethod(post, '/index')
search = partialmethod(post, '/search')
update = partialmethod(post, '/update')
delete = partialmethod(post, '/delete')
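    # Usage sketch (illustrative; `f` stands for an assumed Flow/Client):
    #
    #     with f:
    #         docs = f.post('/index', inputs=my_docs, request_size=50)
    #
    # With no on_done/on_always callbacks, `post` returns the reduced
    # DocumentArray assembled in `_get_results` above.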
class AsyncPostMixin:
"""The Async Post Mixin class for AsyncClient and AsyncFlow"""
async def post(
self,
on: str,
inputs: Optional['InputType'] = None,
on_done: Optional['CallbackFnType'] = None,
on_error: Optional['CallbackFnType'] = None,
on_always: Optional['CallbackFnType'] = None,
parameters: Optional[Dict] = None,
target_executor: Optional[str] = None,
request_size: int = 100,
show_progress: bool = False,
continue_on_error: bool = False,
**kwargs,
    ) -> AsyncGenerator['Response', None]:
"""Post a general data request to the Flow.
:param inputs: input data which can be an Iterable, a function which returns an Iterable, or a single Document id.
:param on: the endpoint is used for identifying the user-defined ``request_type``, labeled by ``@requests(on='/abc')``
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param request_size: the number of Documents per request. <=0 means all inputs in one request.
:param show_progress: if set, client will show a progress bar on receiving every request.
:param continue_on_error: if set, a Request that causes callback error will be logged only without blocking the further requests.
:param kwargs: additional parameters
:yield: Response object
"""
c = self.client
c.show_progress = show_progress
c.continue_on_error = continue_on_error
parameters = _include_results_field_in_param(parameters)
on_error = _wrap_on_error(on_error) if on_error is not None else on_error
async for r in c._get_results(
inputs=inputs,
on_done=on_done,
on_error=on_error,
on_always=on_always,
exec_endpoint=on,
target_executor=target_executor,
parameters=parameters,
request_size=request_size,
**kwargs,
):
yield r
# ONLY CRUD, for other request please use `.post`
index = partialmethod(post, '/index')
search = partialmethod(post, '/search')
update = partialmethod(post, '/update')
delete = partialmethod(post, '/delete')
def _wrap_on_error(on_error):
num_args = len(signature(on_error).parameters)
if num_args == 1:
warnings.warn(
"on_error callback taking only the response parameters is deprecated. Please add one parameter "
"to handle additional optional Exception as well",
DeprecationWarning,
)
@wraps(on_error)
def _fn(resp, exception): # just skip the exception
return on_error(resp)
return _fn
else:
return on_error
| 40.08502
| 137
| 0.63741
|
4a1c4ee6b2d9001fb07063832e7eeadf393ae865
| 8,913
|
py
|
Python
|
salt/states/ssh_auth.py
|
sunbenxin/salt
|
b821f6a174e67a3e1def1ba7fa16885cd985bb0c
|
[
"Apache-2.0"
] | null | null | null |
salt/states/ssh_auth.py
|
sunbenxin/salt
|
b821f6a174e67a3e1def1ba7fa16885cd985bb0c
|
[
"Apache-2.0"
] | null | null | null |
salt/states/ssh_auth.py
|
sunbenxin/salt
|
b821f6a174e67a3e1def1ba7fa16885cd985bb0c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Control of entries in SSH authorized_key files.
===============================================
The information stored in a user's SSH authorized key file can be easily
controlled via the ssh_auth state. Defaults can be set by the enc, options,
and comment keys. These defaults can be overridden by including them in the
name.
Since the YAML specification limits the length of simple keys to 1024
characters, and since SSH keys are often longer than that, you may have
to use a YAML 'explicit key', as demonstrated in the second example below.
.. code-block:: yaml
AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==:
ssh_auth:
- present
- user: root
- enc: ssh-dss
? AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==...
:
ssh_auth:
- present
- user: root
- enc: ssh-dss
thatch:
ssh_auth:
- present
- user: root
- source: salt://ssh_keys/thatch.id_rsa.pub
sshkeys:
ssh_auth:
- present
- user: root
- enc: ssh-rsa
- options:
- option1="value1"
- option2="value2 flag2"
- comment: myuser
- names:
- AAAAB3NzaC1kc3MAAACBAL0sQ9fJ5bYTEyY==
- ssh-dss AAAAB3NzaCL0sQ9fJ5bYTEyY== user@domain
- option3="value3" ssh-dss AAAAB3NzaC1kcQ9J5bYTEyY== other@testdomain
- AAAAB3NzaC1kcQ9fJFF435bYTEyY== newcomment
'''
# Import python libs
import re
import sys
def _present_test(user, name, enc, comment, options, source, config, env):
'''
Run checks for "present"
'''
result = None
if source:
keys = __salt__['ssh.check_key_file'](
user,
source,
config,
env)
if keys:
comment = ''
for key, status in keys.items():
if status == 'exists':
continue
comment += 'Set to {0}: {1}\n'.format(status, key)
if comment:
return result, comment
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
return False, err
else:
return (
True,
'All host keys in file {0} are already present'.format(source)
)
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options,
config)
if check == 'update':
comment = (
'Key {0} for user {1} is set to be updated'
).format(name, user)
elif check == 'add':
comment = (
'Key {0} for user {1} is set to be added'
).format(name, user)
elif check == 'exists':
result = True
comment = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
return result, comment
def present(
name,
user,
enc='ssh-rsa',
comment='',
source='',
options=None,
config='.ssh/authorized_keys',
**kwargs):
'''
Verifies that the specified SSH key is present for the specified user
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ecdsa, ssh-rsa or ssh-dss
comment
The comment to be placed with the SSH public key
source
The source file for the key(s). Can contain any number of public keys,
in standard "authorized_keys" format. If this is set, comment, enc,
and options will be ignored.
.. note::
The source file must contain keys in the format ``<enc> <key>
<comment>``. If you have generated a keypair using PuTTYgen, then you
will need to do the following to retrieve an OpenSSH-compatible public
key.
1. In PuTTYgen, click ``Load``, and select the *private* key file (not
the public key), and click ``Open``.
2. Copy the public key from the box labeled ``Public key for pasting
into OpenSSH authorized_keys file``.
3. Paste it into a new file.
options
The options passed to the key, pass a list object
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys"
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if __opts__['test']:
ret['result'], ret['comment'] = _present_test(
user,
name,
enc,
comment,
options or [],
source,
config,
kwargs.get('__env__', 'base')
)
return ret
if source != '':
data = __salt__['ssh.set_auth_key_from_file'](
user,
source,
config,
kwargs.get('__env__', 'base'))
else:
# check if this is of form {options} {enc} {key} {comment}
sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
fullkey = sshre.search(name)
# if it is {key} [comment]
if not fullkey:
key_and_comment = name.split()
name = key_and_comment[0]
if len(key_and_comment) == 2:
comment = key_and_comment[1]
else:
# if there are options, set them
if fullkey.group(1):
options = fullkey.group(1).split(',')
# key is of format: {enc} {key} [comment]
comps = fullkey.group(2).split()
enc = comps[0]
name = comps[1]
if len(comps) == 3:
comment = comps[2]
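        # Example of the parse above (illustrative): for
        #   name = 'no-pty ssh-rsa AAAAB3Nza... backup@host'
        # group(1) yields options ['no-pty'] and group(2) splits into
        # enc='ssh-rsa', name='AAAAB3Nza...', comment='backup@host'.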
data = __salt__['ssh.set_auth_key'](
user,
name,
enc,
comment,
options or [],
config)
if data == 'replace':
ret['changes'][name] = 'Updated'
ret['comment'] = ('The authorized host key {0} for user {1} was '
'updated'.format(name, user))
return ret
elif data == 'no change':
ret['comment'] = ('The authorized host key {0} is already present '
'for user {1}'.format(name, user))
elif data == 'new':
ret['changes'][name] = 'New'
ret['comment'] = ('The authorized host key {0} for user {1} was added'
.format(name, user))
elif data == 'fail':
ret['result'] = False
err = sys.modules[
__salt__['test.ping'].__module__
].__context__.pop('ssh_auth.error', None)
if err:
ret['comment'] = err
else:
ret['comment'] = ('Failed to add the ssh key. Is the home '
'directory available, and/or does the key file '
'exist?')
elif data == 'invalid':
ret['result'] = False
ret['comment'] = 'Invalid public ssh key, most likely has spaces'
return ret
def absent(name,
user,
enc='ssh-rsa',
comment='',
options=None,
config='.ssh/authorized_keys'):
'''
Verifies that the specified SSH key is absent
name
The SSH key to manage
user
The user who owns the SSH authorized keys file to modify
enc
Defines what type of key is being used; can be ecdsa, ssh-rsa or ssh-dss
comment
The comment to be placed with the SSH public key
options
The options passed to the key, pass a list object
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/authorized_keys"
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Get just the key
name = name.split(' ')[0]
if __opts__['test']:
check = __salt__['ssh.check_key'](
user,
name,
enc,
comment,
options or [],
config)
if check == 'update' or check == 'exists':
ret['result'] = None
ret['comment'] = 'Key {0} is set for removal'.format(name)
return ret
else:
ret['comment'] = 'Key is already absent'
return ret
ret['comment'] = __salt__['ssh.rm_auth_key'](user, name, config)
if ret['comment'] == 'User authorized keys file not present':
ret['result'] = False
return ret
elif ret['comment'] == 'Key removed':
ret['changes'][name] = 'Removed'
return ret
| 29.611296
| 80
| 0.518232
|
4a1c4f072f1694db5693711e081d4e96ae05ef98
| 1,150
|
py
|
Python
|
scripts/glamod-config.py
|
glamod/glamod-cdm-lite
|
026d87d499feaf7ee3611cf1c112384f3819e653
|
[
"BSD-2-Clause"
] | 1
|
2020-06-16T14:29:26.000Z
|
2020-06-16T14:29:26.000Z
|
scripts/glamod-config.py
|
glamod/glamod-cdm-lite
|
026d87d499feaf7ee3611cf1c112384f3819e653
|
[
"BSD-2-Clause"
] | 75
|
2020-01-17T12:25:58.000Z
|
2021-04-29T14:48:52.000Z
|
scripts/glamod-config.py
|
glamod/glamod-cdm-lite
|
026d87d499feaf7ee3611cf1c112384f3819e653
|
[
"BSD-2-Clause"
] | 2
|
2020-07-03T11:11:04.000Z
|
2020-08-03T14:19:54.000Z
|
#!/usr/bin/env python
"""
glamod-config.py
================
Script to query/access glamod configuration info.
"""
import os
import sys
# Work out base directory and add lib to path
BASE_DIR = '/'.join(os.path.abspath(__file__).split('/')[:-2])
sys.path.append(f'{BASE_DIR}/lib')
import glamod.settings as gs
def show_help():
print("""glamod-config.py
================
Takes a config setting and returns the appropriate path.
Usage:
glamod-config.py <config-string>
Where:
    config-string: a colon-separated list of identifiers representing:
release : profile : domain : stage : table
Example:
```
$ glamod-config.py r2.0:full:land:incoming:source_configuration
/gws/nopw/j04/c3s311a_lot2/data/level2/land/r202005/source_configuration
```
""")
def main():
args = sys.argv[1:]
if len(args) == 0 or args[0].lower() in ('-h', '--h', '-help', '--help', 'help'):
return show_help()
settings_string = args[0]
try:
print(gs.get(settings_string))
except Exception:
raise KeyError(f'Setting not found: {settings_string}')
if __name__ == '__main__':
main()
| 18.548387
| 85
| 0.634783
|
4a1c51a7e20ff64ec19dab06e7fb28fda892824c
| 47
|
py
|
Python
|
enthought/traits/ui/qt4/menu.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/ui/qt4/menu.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/ui/qt4/menu.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from traitsui.qt4.menu import *
| 15.666667
| 31
| 0.765957
|
4a1c5392f82600eea607f8364f0c7440dbc0ef8e
| 4,356
|
py
|
Python
|
python/sqlcommenter-python/tests/sqlalchemy/tests.py
|
Thiyagu2009/opentelemetry-sqlcommenter
|
9c9b371c50ecd2956f527377b2663a113a2c47fb
|
[
"Apache-2.0"
] | null | null | null |
python/sqlcommenter-python/tests/sqlalchemy/tests.py
|
Thiyagu2009/opentelemetry-sqlcommenter
|
9c9b371c50ecd2956f527377b2663a113a2c47fb
|
[
"Apache-2.0"
] | null | null | null |
python/sqlcommenter-python/tests/sqlalchemy/tests.py
|
Thiyagu2009/opentelemetry-sqlcommenter
|
9c9b371c50ecd2956f527377b2663a113a2c47fb
|
[
"Apache-2.0"
] | 1
|
2021-11-13T13:22:21.000Z
|
2021-11-13T13:22:21.000Z
|
#!/usr/bin/python
#
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import sqlalchemy
from google.cloud.sqlcommenter.sqlalchemy.executor import BeforeExecuteFactory
from ..compat import mock, skipIfPy2
from ..opencensus_mock import mock_opencensus_tracer
from ..opentelemetry_mock import mock_opentelemetry_context
class MockConnection:
@property
def engine(self):
class Engine:
@property
def driver(self):
return 'driver'
return Engine()
class SQLAlchemyTestCase(TestCase):
def assertSQL(self, expected_sql, **kwargs):
before_cursor_execute = BeforeExecuteFactory(**kwargs)
        sql, params = before_cursor_execute(
            MockConnection(), None, 'SELECT 1;', ('param',), None, None,
        )
        self.assertEqual(sql, expected_sql)
        self.assertEqual(params, ('param',))
class Tests(SQLAlchemyTestCase):
def test_no_args(self):
self.assertSQL('SELECT 1;')
def test_db_driver(self):
self.assertSQL(
"SELECT 1; /*db_driver='driver'*/",
with_db_driver=True,
)
def test_db_framework(self):
self.assertSQL(
"SELECT 1; /*db_framework='sqlalchemy%%3A{}'*/".format(sqlalchemy.__version__),
with_db_framework=True,
)
def test_opencensus(self):
with mock_opencensus_tracer():
self.assertSQL(
"SELECT 1; /*traceparent='00-trace%%20id-span%%20id-00',"
"tracestate='congo%%3Dt61rcWkgMzE%%2Crojo%%3D00f067aa0ba902b7'*/",
with_opencensus=True,
)
@skipIfPy2
def test_opentelemetry(self):
with mock_opentelemetry_context():
self.assertSQL(
"SELECT 1; /*traceparent='00-000000000000000000000000deadbeef-000000000000beef-00',"
"tracestate='some_key%%3Dsome_value'*/",
with_opentelemetry=True,
)
@skipIfPy2
def test_both_opentelemetry_and_opencensus_warn(self):
with mock.patch(
"google.cloud.sqlcommenter.sqlalchemy.executor.logger"
) as logger_mock, mock_opencensus_tracer(), mock_opentelemetry_context():
self.assertSQL(
"SELECT 1; /*traceparent='00-000000000000000000000000deadbeef-000000000000beef-00',"
"tracestate='some_key%%3Dsome_value'*/",
with_opentelemetry=True,
with_opencensus=True,
)
self.assertEqual(len(logger_mock.warning.mock_calls), 1)
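# Note on the expected strings above: comment values are URL-encoded
# ('%3D' is '=', '%2C' is ','), and each '%' is then doubled to '%%' so that
# SQLAlchemy does not read it as a paramstyle placeholder in the statement.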
class FlaskTests(SQLAlchemyTestCase):
flask_info = {
'framework': 'flask',
'controller': 'c',
'route': '/',
}
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_all_data(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',framework='flask',route='/'*/",
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_framework_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',route='/'*/",
with_framework=False,
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_controller_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*framework='flask',route='/'*/",
with_controller=False,
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_route_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',framework='flask'*/",
with_route=False,
)
| 33.767442
| 104
| 0.648072
|
4a1c54c3853a2261d12cde97a67d07505b60e07e
| 961
|
py
|
Python
|
lookup_inst_nodes.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 10
|
2015-06-13T17:27:18.000Z
|
2021-02-14T13:03:11.000Z
|
lookup_inst_nodes.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 2
|
2020-07-11T18:34:57.000Z
|
2021-03-07T02:27:46.000Z
|
lookup_inst_nodes.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 1
|
2016-03-23T22:26:23.000Z
|
2016-03-23T22:26:23.000Z
|
#!/usr/bin/env python
import sys
import os
import inst_header
def main():
(x, y) = map(float, sys.argv[1:3])
# Rotate coord 90 degrees clockwise (transpose then mirror x):
(x, y) = (inst_header.width - y, x)
print 'Rotated back to inst coordinates: %d x %d' % (x, y)
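	# Worked example (illustrative): with inst_header.width == 1024, a map
	# click at (x, y) == (10, 20) maps to inst coordinates (1024 - 20, 10),
	# i.e. (1004, 10).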
plot_point(x, y, (255, 255, 255), (230, 230, 230))
for (n, (x1, y1, z1, x2, y2, z2)) in inst_header.get_points():
assert(x2 > x1)
assert(y2 > y1)
if x >= int(x1) and x <= int(x2) and \
y >= int(y1) and y <= int(y2):
c = r = ''
exists = 64
if not os.path.exists('nodes/inst_node%d' % n):
c = '\x1b[31m'
r = '\x1b[0m'
exists = 0
print '%sinst_node%-6d | %8.3f %8.3f %9.3f x %8.3f %8.3f %8.3f | %4.0f x %-4.0f%s' % \
(c, n, x1, y1, z1, x2, y2, z2, x2-x1, y2-y1, r)
inst_header.plot_node(x1, y1, z1, x2, y2, z2, 128, 128, exists)
inst_header.save_image('lookup_nodes.png')
if __name__ == '__main__':
main()
# vi:noexpandtab:sw=8:ts=8
| 26.694444
| 93
| 0.579605
|
4a1c551403406fde8129cec8a4afdf18e2fadba7
| 1,017
|
py
|
Python
|
centreseq/bin/tree/wrappers.py
|
bfssi-forest-dussault/centreseq
|
6e2ba259d21336b6e610ea46be0d2d3414650ddc
|
[
"MIT"
] | 1
|
2019-07-20T02:00:33.000Z
|
2019-07-20T02:00:33.000Z
|
centreseq/bin/tree/wrappers.py
|
BFSSI-Bioinformatics-Lab/centreseq
|
6e2ba259d21336b6e610ea46be0d2d3414650ddc
|
[
"MIT"
] | null | null | null |
centreseq/bin/tree/wrappers.py
|
BFSSI-Bioinformatics-Lab/centreseq
|
6e2ba259d21336b6e610ea46be0d2d3414650ddc
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from centreseq.bin.core.accessories import run_subprocess
def call_snp_sites(aligned_multifasta: Path, outdir: Path) -> Path:
"""
Calls snp-sites on an aligned multiFASTA file and produces a VCF file as output.
Will only generate an output file if variants are detected.
https://github.com/sanger-pathogens/snp-sites
:param aligned_multifasta: Path to multi-FASTA containing alignment of a core gene
:param outdir: Path to desired output directory
:return: Path to VCF
"""
outvcf = outdir / aligned_multifasta.with_suffix(".vcf").name
cmd = f"snp-sites -v -o {outvcf} {aligned_multifasta}"
    run_subprocess(cmd, get_stdout=True)
return outvcf
def call_muscle(infile: Path) -> Path:
"""
Produces an aligned version of an input FASTA file (overwrites the original)
https://www.drive5.com/muscle/
"""
cmd = f"muscle -in {infile} -out {infile} -maxiters 1"
run_subprocess(cmd, get_stdout=True)
return infile
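# Usage sketch (illustrative paths, not from the source):
#
#     aligned = call_muscle(Path("core_gene.fasta"))  # aligns in place
#     vcf = call_snp_sites(aligned, Path("out"))      # -> out/core_gene.vcf
#
# Since snp-sites only writes the VCF when variants are detected, check the
# returned path with .exists() before consuming it.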
| 31.78125
| 86
| 0.711898
|
4a1c5603694030b5bf35c2c781cba3a88443eb91
| 4,075
|
py
|
Python
|
src/collectors/jolokia/cassandra_jolokia.py
|
prune998/Diamond-1
|
fc12f0d9291d90c4cb4ae4367857a2f6d777f687
|
[
"MIT"
] | null | null | null |
src/collectors/jolokia/cassandra_jolokia.py
|
prune998/Diamond-1
|
fc12f0d9291d90c4cb4ae4367857a2f6d777f687
|
[
"MIT"
] | null | null | null |
src/collectors/jolokia/cassandra_jolokia.py
|
prune998/Diamond-1
|
fc12f0d9291d90c4cb4ae4367857a2f6d777f687
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Collects Cassandra JMX metrics from the Jolokia Agent. Extends the JolokiaCollector to
interpret Histogram beans with information about the distribution of request latencies.
#### Example Configuration
CassandraJolokiaCollector uses a regular expression to determine which attributes represent histograms.
This regex can be overridden by providing a `histogram_regex` in your configuration. You can also override
`percentiles` to collect specific percentiles from the histogram statistics. The format is shown below
with the default values.
CassandraJolokiaCollector.conf
```
percentiles '50,95,99'
histogram_regex '.*HistogramMicros$'
```
"""
from jolokia import JolokiaCollector
import math
import string
import re
class CassandraJolokiaCollector(JolokiaCollector):
# override to allow setting which percentiles will be collected
def get_default_config_help(self):
config_help = super(CassandraJolokiaCollector, self).get_default_config_help()
config_help.update({
'percentiles': 'Comma separated list of percentiles to be collected (e.g., "50,95,99").',
'histogram_regex': 'Filter to only process attributes that match this regex'
})
return config_help
# override to allow setting which percentiles will be collected
def get_default_config(self):
config = super(CassandraJolokiaCollector, self).get_default_config()
config.update({
'percentiles': '50,95,99',
'histogram_regex': '.*HistogramMicros$'
})
return config
def __init__(self, config, handlers):
super(CassandraJolokiaCollector, self).__init__(config, handlers)
self.offsets = self.create_offsets(91)
self.update_config(self.config)
def update_config(self, config):
if config.has_key('percentiles'):
self.percentiles = map(int, string.split(config['percentiles'], ','))
if config.has_key('histogram_regex'):
self.histogram_regex = re.compile(config['histogram_regex'])
# override: Interpret beans that match the `histogram_regex` as histograms, and collect
# percentiles from them.
def interpret_bean_with_list(self, prefix, values):
if not self.histogram_regex.match(prefix):
return
buckets = values
for percentile in self.percentiles:
percentile_value = self.compute_percentile(self.offsets, buckets, percentile)
self.publish("%s.p%s" % (prefix, percentile), percentile_value)
# Adapted from Cassandra docs: http://www.datastax.com/documentation/cassandra/2.0/cassandra/tools/toolsCFhisto.html
# The index corresponds to the x-axis in a histogram. It represents buckets of values, which are
# a series of ranges. Each offset includes the range of values greater than the previous offset
# and less than or equal to the current offset. The offsets start at 1 and each subsequent offset
# is calculated by multiplying the previous offset by 1.2, rounding up, and removing duplicates. The
# offsets can range from 1 to approximately 25 million, with less precision as the offsets get larger.
def compute_percentile(self, offsets, buckets, percentile_int):
non_zero_points_sum = sum(buckets)
        if non_zero_points_sum == 0:
return 0
middle_point_index = math.floor(non_zero_points_sum * (percentile_int / float(100)))
points_seen = 0
for index, bucket in enumerate(buckets):
points_seen += bucket
if points_seen >= middle_point_index:
return round((offsets[index] - offsets[index - 1]) / 2)
# Returns a list of offsets for `n` buckets.
def create_offsets(self, bucket_count):
last_num = 1
offsets = [last_num]
for index in range(bucket_count):
next_num = round(last_num * 1.2)
if next_num == last_num:
next_num += 1
offsets.append(next_num)
last_num = next_num
return offsets
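    # Worked example (illustrative): create_offsets(5) yields [1, 2, 3, 4, 5, 6].
    # Each step takes round(last * 1.2) and bumps by one whenever rounding
    # would repeat the previous offset, per the bucketing rule documented
    # above compute_percentile.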
| 41.581633
| 120
| 0.69546
|
4a1c5923637e4fcec45e723278650465503813dd
| 1,953
|
py
|
Python
|
core/minimax_lua.py
|
GeeksIncorporated/umka
|
aa1800543f197f882fad912808169f25478815ba
|
[
"MIT"
] | 1
|
2019-09-25T20:28:23.000Z
|
2019-09-25T20:28:23.000Z
|
core/minimax_lua.py
|
GeeksIncorporated/umka
|
aa1800543f197f882fad912808169f25478815ba
|
[
"MIT"
] | 1
|
2019-03-22T11:05:11.000Z
|
2019-03-22T11:05:11.000Z
|
core/minimax_lua.py
|
GeeksIncorporated/umka
|
aa1800543f197f882fad912808169f25478815ba
|
[
"MIT"
] | null | null | null |
import time
import chess
from lupa import LuaRuntime
class MiniMaxLua:
def __init__(self, umka):
self.umka = umka
self.lua = LuaRuntime(unpack_returned_tuples=True)
    def run(self, board, depth):
        self.nodes = 0
        self.st = time.time()
        # Repaired Lua snippet (the original mixed `board`/`tree`/`node`,
        # declared a non-expression `local function` inside lua.eval, and was
        # called with mismatched arguments). Assumes the wrapped object
        # exposes children(node) as a Lua-indexable sequence and
        # heuristic(node) as a number.
        minimax = self.lua.eval("""
        function(tree, root, depth)
            local function minimax(node, d, maximize)
                local children = tree.children(node)
                if d <= 0 or #children == 0 then
                    return tree.heuristic(node)
                end
                local bestScore
                if maximize then
                    bestScore = -math.huge
                    for i, child in ipairs(children) do
                        bestScore = math.max(bestScore, minimax(child, d - 1, false))
                    end
                else
                    bestScore = math.huge
                    for i, child in ipairs(children) do
                        bestScore = math.min(bestScore, minimax(child, d - 1, true))
                    end
                end
                return bestScore
            end
            return minimax(root, depth, true)
        end""")
        return minimax(self.umka, board, depth)
if __name__ == "__main__":
# umka = Umka(path="model/model.pth.tar", training_enabled=False)
# brain = MiniMaxLua(umka)show
# play(brain)
board = chess.Board()
st = time.time()
lua = LuaRuntime(unpack_returned_tuples=True)
res = []
is_over = lua.eval("""
function (board, bulk)
        for i=1,bulk do
board.push_uci('e2e4')
board.pop()
end
end""")
bulk_size = 1000
is_over(board, bulk_size)
print(bulk_size / (time.time() - st))
st = time.time()
lua = LuaRuntime(unpack_returned_tuples=True)
res = []
for i in range(bulk_size):
board.push_uci("e2e4")
board.pop()
    print(bulk_size / (time.time() - st))
| 28.304348
| 91
| 0.549923
|
4a1c595416b40ec2ac83fd04967d681b90d0c49c
| 4,238
|
py
|
Python
|
service-mgmt-client/sm-client/sm_client/openstack/common/rootwrap/cmd.py
|
SidneyAn/ha
|
cdac11bc63e02c70b7bb533e17cf981bb2c71aef
|
[
"Apache-2.0"
] | null | null | null |
service-mgmt-client/sm-client/sm_client/openstack/common/rootwrap/cmd.py
|
SidneyAn/ha
|
cdac11bc63e02c70b7bb533e17cf981bb2c71aef
|
[
"Apache-2.0"
] | null | null | null |
service-mgmt-client/sm-client/sm_client/openstack/common/rootwrap/cmd.py
|
SidneyAn/ha
|
cdac11bc63e02c70b7bb533e17cf981bb2c71aef
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
"""
Root wrapper for OpenStack services
"""
from __future__ import print_function
from six.moves import configparser
import logging
import os
import pwd
import signal
import subprocess
import sys
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
RC_NOEXECFOUND = 96
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _exit_error(execname, message, errorcode, log=True):
print("%s: %s" % (execname, message))
if log:
logging.error(message)
sys.exit(errorcode)
def main():
# Split arguments, require at least a command
execname = sys.argv.pop(0)
if len(sys.argv) < 2:
_exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
configfile = sys.argv.pop(0)
userargs = sys.argv[:]
# Add ../ to sys.path to allow running from branch
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "sm_client",
"__init__.py")):
sys.path.insert(0, possible_topdir)
from sm_client.openstack.common.rootwrap import wrapper
# Load configuration
try:
rawconfig = configparser.RawConfigParser()
rawconfig.read(configfile)
config = wrapper.RootwrapConfig(rawconfig)
except ValueError as exc:
msg = "Incorrect value in %s: %s" % (configfile, str(exc))
_exit_error(execname, msg, RC_BADCONFIG, log=False)
except configparser.Error:
_exit_error(execname, "Incorrect configuration file: %s" % configfile,
RC_BADCONFIG, log=False)
if config.use_syslog:
wrapper.setup_syslog(execname,
config.syslog_log_facility,
config.syslog_log_level)
# Execute command if it matches any of the loaded filters
filters = wrapper.load_filters(config.filters_path)
try:
filtermatch = wrapper.match_filter(filters, userargs,
exec_dirs=config.exec_dirs)
if filtermatch:
command = filtermatch.get_command(userargs,
exec_dirs=config.exec_dirs)
if config.use_syslog:
logging.info("(%s > %s) Executing %s (filter match = %s)" % (
os.getlogin(), pwd.getpwuid(os.getuid())[0],
command, filtermatch.name))
obj = subprocess.Popen(command,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
preexec_fn=_subprocess_setup,
env=filtermatch.get_environment(userargs))
obj.wait()
sys.exit(obj.returncode)
except wrapper.FilterMatchNotExecutable as exc:
msg = ("Executable not found: %s (filter match = %s)"
% (exc.match.exec_path, exc.match.name))
_exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
except wrapper.NoFilterMatched:
msg = ("Unauthorized command: %s (no filter matched)"
% ' '.join(userargs))
_exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
| 34.455285
| 78
| 0.619632
|
4a1c5a5e74e865e19042296554cb37c0e27035ee
| 688
|
py
|
Python
|
test/rml-star/RMLSTARTC006b/test_RMLSTARTC006b_CSV.py
|
ArenasGuerreroJulian/morph-kgc
|
d14a773ff95922918daf32a447833e012bbde660
|
[
"Apache-2.0"
] | 24
|
2021-10-07T21:53:22.000Z
|
2022-01-05T13:19:48.000Z
|
test/rml-star/RMLSTARTC006b/test_RMLSTARTC006b_CSV.py
|
ArenasGuerreroJulian/morph-kgc
|
d14a773ff95922918daf32a447833e012bbde660
|
[
"Apache-2.0"
] | 16
|
2021-09-30T16:50:25.000Z
|
2022-01-02T11:20:41.000Z
|
test/rml-star/RMLSTARTC006b/test_RMLSTARTC006b_CSV.py
|
ArenasGuerreroJulian/morph-kgc
|
d14a773ff95922918daf32a447833e012bbde660
|
[
"Apache-2.0"
] | 2
|
2022-02-02T12:14:45.000Z
|
2022-03-03T13:05:22.000Z
|
__author__ = "Julián Arenas-Guerrero"
__credits__ = ["Julián Arenas-Guerrero"]
__license__ = "Apache-2.0"
__maintainer__ = "Julián Arenas-Guerrero"
__email__ = "arenas.guerrero.julian@outlook.com"
import os
import morph_kgc
from pyoxigraph import Store
def test_RMLSTARTC006b():
g = Store()
g.bulk_load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'output.nq'), 'application/n-quads')
mapping_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mapping.ttl')
config = f'[CONFIGURATION]\noutput_format=N-QUADS\n[DataSource]\nmappings={mapping_path}'
g_morph = morph_kgc.materialize_oxigraph(config)
assert set(g) == set(g_morph)
| 27.52
| 110
| 0.739826
|
4a1c5af24cfdec7cbc0265d2b43ee68ea5dabb8a
| 6,063
|
py
|
Python
|
sdk/python/pulumi_azure_native/cdn/v20150601/get_origin.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/cdn/v20150601/get_origin.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/cdn/v20150601/get_origin.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetOriginResult',
'AwaitableGetOriginResult',
'get_origin',
]
@pulumi.output_type
class GetOriginResult:
"""
CDN origin is the source of the content being delivered via CDN. When the edge nodes represented by an endpoint do not have the requested content cached, they attempt to fetch it from one or more of the configured origins.
"""
def __init__(__self__, host_name=None, http_port=None, https_port=None, id=None, name=None, provisioning_state=None, resource_state=None, type=None):
if host_name and not isinstance(host_name, str):
raise TypeError("Expected argument 'host_name' to be a str")
pulumi.set(__self__, "host_name", host_name)
if http_port and not isinstance(http_port, int):
raise TypeError("Expected argument 'http_port' to be a int")
pulumi.set(__self__, "http_port", http_port)
if https_port and not isinstance(https_port, int):
raise TypeError("Expected argument 'https_port' to be a int")
pulumi.set(__self__, "https_port", https_port)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses are supported.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[int]:
"""
The value of the HTTP port. Must be between 1 and 65535.
"""
return pulumi.get(self, "http_port")
@property
@pulumi.getter(name="httpsPort")
def https_port(self) -> Optional[int]:
"""
The value of the https port. Must be between 1 and 65535.
"""
return pulumi.get(self, "https_port")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning status of the origin.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the origin.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetOriginResult(GetOriginResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOriginResult(
host_name=self.host_name,
http_port=self.http_port,
https_port=self.https_port,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
type=self.type)
def get_origin(endpoint_name: Optional[str] = None,
origin_name: Optional[str] = None,
profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOriginResult:
"""
CDN origin is the source of the content being delivered via CDN. When the edge nodes represented by an endpoint do not have the requested content cached, they attempt to fetch it from one or more of the configured origins.
:param str endpoint_name: Name of the endpoint within the CDN profile.
:param str origin_name: Name of the origin, an arbitrary value but it needs to be unique under endpoint
:param str profile_name: Name of the CDN profile within the resource group.
:param str resource_group_name: Name of the resource group within the Azure subscription.
"""
__args__ = dict()
__args__['endpointName'] = endpoint_name
__args__['originName'] = origin_name
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:cdn/v20150601:getOrigin', __args__, opts=opts, typ=GetOriginResult).value
return AwaitableGetOriginResult(
host_name=__ret__.host_name,
http_port=__ret__.http_port,
https_port=__ret__.https_port,
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
type=__ret__.type)
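# A minimal usage sketch for the invoke above, assuming a Pulumi program with
# the azure-native provider configured; the resource names are placeholders.
def _get_origin_sketch():
    origin = get_origin(
        endpoint_name="my-endpoint",
        origin_name="my-origin",
        profile_name="my-profile",
        resource_group_name="my-rg")
    pulumi.export("origin_host", origin.host_name)
    pulumi.export("origin_https_port", origin.https_port)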
| 37.196319
| 226
| 0.652647
|
4a1c5b9c7080af9758031f0e4c02fb7608cac079
| 7,432
|
py
|
Python
|
core/models/event.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13
|
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
core/models/event.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23
|
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
core/models/event.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11
|
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
import logging
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ..utils import (
format_date_range,
pick_attrs,
SLUG_FIELD_PARAMS,
slugify,
event_meta_property,
)
logger = logging.getLogger('kompassi')
class Event(models.Model):
slug = models.CharField(**SLUG_FIELD_PARAMS)
name = models.CharField(max_length=63, verbose_name='Tapahtuman nimi')
organization = models.ForeignKey('core.Organization', on_delete=models.CASCADE, verbose_name='Järjestäjätaho', related_name='events')
name_genitive = models.CharField(
max_length=63,
verbose_name='Tapahtuman nimi genetiivissä',
help_text='Esimerkki: Susiconin',
)
name_illative = models.CharField(
max_length=63,
verbose_name='Tapahtuman nimi illatiivissä',
help_text='Esimerkki: Susiconiin',
)
name_inessive = models.CharField(
max_length=63,
verbose_name='Tapahtuman nimi inessiivissä',
help_text='Esimerkki: Susiconissa',
)
venue = models.ForeignKey('core.Venue', on_delete=models.CASCADE,
verbose_name='Tapahtumapaikka',
)
start_time = models.DateTimeField(
null=True,
blank=True,
verbose_name='Alkamisaika',
)
end_time = models.DateTimeField(
null=True,
blank=True,
verbose_name='Päättymisaika',
)
homepage_url = models.CharField(
blank=True,
max_length=255,
verbose_name='Tapahtuman kotisivu',
)
public = models.BooleanField(
default=True,
verbose_name='Julkinen',
help_text='Julkiset tapahtumat näytetään etusivulla.'
)
cancelled = models.BooleanField(
default=False,
verbose_name=_("Cancelled"),
)
logo_file = models.FileField(
upload_to='event_logos',
blank=True,
verbose_name='Tapahtuman logo',
help_text='Näkyy tapahtumasivulla. Jos sekä tämä että logon URL -kenttä on täytetty, käytetään tätä.'
)
logo_url = models.CharField(
blank=True,
max_length=255,
default='',
verbose_name='Tapahtuman logon URL',
help_text='Voi olla paikallinen (alkaa /-merkillä) tai absoluuttinen (alkaa http/https)',
)
description = models.TextField(
blank=True,
default='',
verbose_name='Tapahtuman kuvaus',
help_text='Muutaman kappaleen mittainen kuvaus tapahtumasta. Näkyy tapahtumasivulla.',
)
panel_css_class = models.CharField(
blank=True,
max_length=255,
default='panel-default',
verbose_name='Etusivun paneelin väri',
choices=[
('panel-default', 'Harmaa'),
('panel-primary', 'Kompassi (turkoosi)'),
('panel-success', 'Desucon (vihreä)'),
('panel-info', 'Yukicon (vaaleansininen)'),
('panel-warning', 'Popcult (oranssi)'),
('panel-danger', 'Tracon (punainen)'),
]
)
created_at = models.DateTimeField(null=True, blank=True, auto_now_add=True)
updated_at = models.DateTimeField(null=True, blank=True, auto_now=True)
class Meta:
verbose_name = 'Tapahtuma'
verbose_name_plural = 'Tapahtumat'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.name:
for field, suffix in [
('name_genitive', 'in'),
('name_illative', 'iin'),
('name_inessive', 'issa'),
]:
if not getattr(self, field, None):
setattr(self, field, self.name + suffix)
return super(Event, self).save(*args, **kwargs)
@property
def name_and_year(self):
return "{name} ({year})".format(
name=self.name,
year=self.start_time.year,
)
@property
def formatted_start_and_end_date(self):
return format_date_range(self.start_time, self.end_time)
@property
def headline(self):
headline_parts = [
(self.venue.name_inessive if self.venue else None),
(self.formatted_start_and_end_date if self.start_time and self.end_time else None),
]
headline_parts = [part for part in headline_parts if part]
return ' '.join(headline_parts)
@property
def venue_name(self):
return self.venue.name if self.venue else None
@classmethod
def get_or_create_dummy(cls, name='Dummy event'):
from .venue import Venue
from .organization import Organization
# TODO not the best place for this, encap. see also admin command core_update_maysendinfo
from django.contrib.auth.models import Group
Group.objects.get_or_create(name=settings.KOMPASSI_MAY_SEND_INFO_GROUP_NAME)
venue, unused = Venue.get_or_create_dummy()
organization, unused = Organization.get_or_create_dummy()
t = timezone.now()
return cls.objects.get_or_create(
name=name,
defaults=dict(
venue=venue,
start_time=t + timedelta(days=60),
end_time=t + timedelta(days=61),
slug=slugify(name),
organization=organization,
),
)
@property
def people(self):
"""
Returns people associated with this event
"""
from .person import Person
# have signups
q = Q(signups__event=self)
# or programmes
q |= Q(programme_roles__programme__category__event=self)
return Person.objects.filter(q).distinct()
@property
def either_logo_url(self):
if self.logo_file:
return self.logo_file.url
else:
return self.logo_url
labour_event_meta = event_meta_property('labour')
programme_event_meta = event_meta_property('programme')
badges_event_meta = event_meta_property('badges')
tickets_event_meta = event_meta_property('tickets')
sms_event_meta = event_meta_property('sms')
enrollment_event_meta = event_meta_property('enrollment')
intra_event_meta = event_meta_property('intra')
def get_app_event_meta(self, app_label: str):
return getattr(self, '{}_event_meta'.format(app_label))
def as_dict(self, format='default'):
if format == 'default':
return pick_attrs(self,
'slug',
'name',
'homepage_url',
'headline',
organization=self.organization.as_dict(),
)
elif format == 'listing':
return pick_attrs(self,
'slug',
'name',
'headline',
'venue_name',
'homepage_url',
'start_time',
'end_time',
'cancelled',
)
else:
raise NotImplementedError(format)
def get_claims(self, **extra_claims):
"""
Shorthand for commonly used CBAC claims.
"""
return dict(
organization=self.organization.slug,
event=self.slug,
**extra_claims
)
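# A hedged usage sketch of the helpers above; assumes a configured Django
# environment with migrations applied, so the printed values depend on data.
def _event_sketch():
    event, unused = Event.get_or_create_dummy(name='Testcon')
    print(event.name_and_year)                 # e.g. "Testcon (2024)"
    print(event.formatted_start_and_end_date)
    print(event.get_claims(scope='admin'))     # organization/event slugs plus extras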
| 29.03125
| 137
| 0.608854
|
4a1c5cec3f134469672854321e6e0144068750b3
| 206
|
py
|
Python
|
dota2website/models.py
|
theLambda/DBH-project1
|
9b7b1c9bd9f6629724c53872c60b1171e9ba1fa2
|
[
"MIT"
] | null | null | null |
dota2website/models.py
|
theLambda/DBH-project1
|
9b7b1c9bd9f6629724c53872c60b1171e9ba1fa2
|
[
"MIT"
] | null | null | null |
dota2website/models.py
|
theLambda/DBH-project1
|
9b7b1c9bd9f6629724c53872c60b1171e9ba1fa2
|
[
"MIT"
] | null | null | null |
from django.db import models
class Topic(models.Model):
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.text
| 25.75
| 56
| 0.728155
|
4a1c5d0e859fda3bc3ec5a73562bb74257ea74d4
| 4,229
|
py
|
Python
|
analyzer/random-process-name/src/fr_model.py
|
Foundstone/OpenCNA
|
ff2ff08b9f6439cd1176bf020ea428cc03710d17
|
[
"Apache-2.0"
] | 7
|
2017-11-29T23:24:12.000Z
|
2021-02-01T02:33:51.000Z
|
analyzer/random-process-name/src/fr_model.py
|
Foundstone/OpenCNA
|
ff2ff08b9f6439cd1176bf020ea428cc03710d17
|
[
"Apache-2.0"
] | null | null | null |
analyzer/random-process-name/src/fr_model.py
|
Foundstone/OpenCNA
|
ff2ff08b9f6439cd1176bf020ea428cc03710d17
|
[
"Apache-2.0"
] | 4
|
2017-11-29T16:29:16.000Z
|
2018-01-19T17:00:29.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2017 McAfee Inc. - All Rights Reserved.
################################################################################
__author__ = "Jorge Couchet"
import traceback
import cPickle as pickle
from urlparse import urlparse
import pandas as pd
import ng_model as mdl
import parse as prs
import utils
_SCALING_FACTOR_UNSEEN = 5
_PREDICTOR_VARIABLES = ['ENT', 'MK2_1', 'MK2_2', 'MK3_1', 'MK3_2']
def calculate_features(line, entr_model, ng_models, is_filter, is_epsilon,
epsilon, scaling_unseen, is_label=False, label=None):
features = []
features.append(mdl.calculate_str_entropy(line, entr_model))
for ngm in ng_models:
        if is_epsilon:
            line_aux = epsilon + line + epsilon
        else:
            line_aux = line
        prob1, prob2 = mdl.compute_query_string_prob_helper(
            line_aux, ngm[1], ngm[2], ngm[0], scaling_unseen)
features.append(prob1)
features.append(prob2)
if is_label and (label is not None):
features.append(label)
return features
def predict_query_string(query_str, model, entr_model, ng_models, is_filter,
is_folder=True, is_proc=False, is_fqdn=False,
is_fqdn_ready=True, is_epsilon=True,
epsilon=utils.EPSILON_CHARACTER,
folder_separator=utils.FOLDER_SEPARATOR,
scaling_unseen=_SCALING_FACTOR_UNSEEN):
explanations = []
observations = []
is_random = False
query_str_split = []
all_features = []
df_ft = {}
dfl = []
if is_folder:
query_str_split = query_str.split(folder_separator)
# The drive names (as 'c:') are not processed
first_sf = query_str_split[0]
if first_sf.endswith(':'):
query_str_split = query_str_split[1:]
else:
if is_proc:
query_str_split = [query_str]
else:
if is_fqdn:
if not is_fqdn_ready:
query_str = urlparse(query_str).netloc
is_punycode, is_non_ascii = prs.test_domain(query_str)
if is_punycode or is_non_ascii:
if is_punycode:
observations.append('punycode')
if is_non_ascii:
observations.append('non ascii')
else:
query_str_split = query_str.split('.')
if query_str_split:
for fd in query_str_split:
# if is_epsilon:
# fd_aux = epsilon + fd + epsilon
features = calculate_features(
fd, entr_model, ng_models, is_filter, is_epsilon, epsilon, scaling_unseen)
all_features.append(features)
for fts in all_features:
for ft, col in zip(fts, _PREDICTOR_VARIABLES):
if col not in df_ft:
df_ft[col] = []
df_ft[col].append(ft)
for col in _PREDICTOR_VARIABLES:
dfl.append((col, df_ft[col]))
df = pd.DataFrame.from_items(dfl)
predictions = model.predict(df)
for idx, pred in enumerate(predictions):
if pred == 1:
is_random = True
explanations.append(query_str_split[idx])
return is_random, explanations, observations
def load_all(ng_2_model_name, ng_3_model_name, fr_model_name):
ng_model_2 = mdl.load_model(ng_2_model_name)
ng_model_3 = mdl.load_model(ng_3_model_name)
ng_models = []
ng_models.append(
(2, ng_model_2['transition_probs'], ng_model_2['min_prob']))
ng_models.append(
(3, ng_model_3['transition_probs'], ng_model_3['min_prob']))
entr_model = ng_model_3['entr_model']
fr_model = load_model(fr_model_name)
return entr_model, ng_models, fr_model
def load_model(file_name):
model = None
try:
with open(file_name, 'rb') as handler:
model = pickle.load(handler)
except:
print 'There was a problem loading the model from the file: ' + str(file_name)
traceback.print_exc()
finally:
return model
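# A hedged Python 2 usage sketch (matching the module); the pickle file names
# and the query string are placeholders.
def demo_predict():
    entr_model, ng_models, fr_model = load_all(
        'ng2.pickle', 'ng3.pickle', 'random_forest.pickle')
    is_random, explanations, observations = predict_query_string(
        'c:/users/foo/xkqzjw', fr_model, entr_model, ng_models, False)
    print is_random, explanations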
| 35.537815
| 90
| 0.584062
|
4a1c5de8ac64f8d2cf36a1efaff8678c68403b49
| 55
|
py
|
Python
|
scripts/show_release.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/show_release.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | 11
|
2021-12-29T21:28:50.000Z
|
2022-01-17T08:09:38.000Z
|
scripts/show_release.py
|
mardiros/purgatory
|
5905619c0f153eae090c46ed5cd7f165c86eafd5
|
[
"BSD-3-Clause"
] | null | null | null |
import purgatory
print(purgatory.__version__, end="")
| 13.75
| 36
| 0.781818
|
4a1c5e663d3a303ffe7d31c4f2fa267a5dd77293
| 402
|
py
|
Python
|
sdk/python/pulumi_azure/bot/__init__.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/bot/__init__.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/bot/__init__.py
|
apollo2030/pulumi-azure
|
034665c61665f4dc7e291b8813747012d34fa044
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .channel_email import *
from .channel_teams import *
from .channel_slack import *
from .channels_registration import *
from .connection import *
from .web_app import *
| 33.5
| 87
| 0.741294
|
4a1c5e866985ea1105348a2f37966dd28dc08c1e
| 7,396
|
py
|
Python
|
Tools/python/Linux-Like/cp.py
|
walogo/Pythonista-scripts
|
760451a0cdbe5dd76008a4e616d74191385bbd8b
|
[
"MIT"
] | 2
|
2019-04-24T19:25:55.000Z
|
2019-05-04T11:27:54.000Z
|
Tools/python/Linux-Like/cp.py
|
walogo/Tools
|
760451a0cdbe5dd76008a4e616d74191385bbd8b
|
[
"MIT"
] | 4
|
2019-04-08T02:02:56.000Z
|
2019-05-04T10:30:26.000Z
|
Tools/python/Linux-Like/cp.py
|
walogo/Tools
|
760451a0cdbe5dd76008a4e616d74191385bbd8b
|
[
"MIT"
] | 1
|
2020-10-07T17:49:09.000Z
|
2020-10-07T17:49:09.000Z
|
from shutil import make_archive, copy2, copytree, move, rmtree, copyfile
from tempfile import mkdtemp
from logging import info, basicConfig, INFO
from os.path import join
from os import mkdir
from argparse import ArgumentParser
# Check whether the wanted archive format can be used and return it
def getFormat(path):
# Reference for any ztar format
    reference = {'.tar.gz': 'gztar', '.tar.bz': 'bztar', '.tar.xz': 'xztar'}
if path[-4:] in ('.zip', '.tar'):
archive_format = path[-3:]
else:
if reference.setdefault(path[-7:], False):
archive_format = reference[path[-7:]]
else:
archive_format = 'zip'
# Debug
info(f'Using: {archive_format}')
return archive_format
# Handler for the cp command
class CPHandler:
def __init__(self):
# Source of data
self.__source = None
# Dest of data
self.__dest = None
# Verbose mode
self.__verbose = None
# Preserve metadata
self.__preserve = copyfile
# Copy sources to directory (target)
self.__targetAsDirectory = None
# Set the destination file
def Dest(self, dest):
dest = dest.replace('\\', '/')
if '/' not in dest:
dest = './' + dest
self.__dest = dest
# Set the source file
def Source(self, source):
source = source.replace('\\', '/')
if '/' not in source:
source = './' + source
self.__source = source
# Set preserve metadata
# --preserve[=ATTR_LIST]
# preserve the specified attributes (default:
# mode,ownership,timestamps), if possible additional attributes:
# context, links, xattr, all
def setPreserve(self, value):
if value:
self.__preserve = copy2
# Set verbose
# -v, --verbose
# explain what is being done
def setVerbose(self, value):
if value:
basicConfig(level=INFO, format='%(message)s')
# Copy all the source files to the folder destination
# -t, --target-directory=DIRECTORY
# copy all SOURCE arguments into DIRECTORY
def setTargetDirectory(self, value):
if value:
self.__targetAsDirectory = value
## Copy functions
# Normal copy function for files
def normal_copy(self):
sources = self.__source.split(';')
if self.__targetAsDirectory:
dest = self.__dest
try:
mkdir(dest)
except Exception as e:
info(str(e))
for file in sources:
try:
print(file)
self.__preserve(file, join(dest, file.split('/')[-1]))
info(f'File {file} copied to {dest}')
except Exception as e:
info(str(e))
else:
dest = self.__dest.split(';')
if len(dest) != len(sources):
info('Wrong number of destinations or sources')
exit(-1)
for number, file in enumerate(sources):
try:
self.__preserve(file, dest[number])
info(f'File {file} copied to {dest}')
except Exception as e:
info(str(e))
    # Create archive of a folder
# -a, --archive
# same as -dR --preserve=all
def archive(self):
        # Create a temporary location to hold the archive for a moment
        # This way we avoid having an archive inside another (infinite loop)
temporal_location = mkdtemp()
try:
# Get the archive format
archive_format = getFormat(self.__dest)
# Temporary archive name
temporary_name = str(hash(self.__dest) - hash(self.__source))[1:]
            # Temporary archive path
archive_name = join(temporal_location, temporary_name)
splited_source = self.__source.split('/')
# Root directory
root_dir = self.__source[:len(self.__source) - len(splited_source[-1])]
# base Directory
base_dir = splited_source[-1]
            # Check for any error in root and base
if not len(root_dir):
root_dir = '.'
if not len(base_dir):
base_dir = '.'
            # Make an archive of the source directory
make_archive(archive_name, archive_format, root_dir, base_dir, verbose=self.__verbose, )
# When the format wanted is not zip or tar
if len(archive_format) > 3:
archive_format = f'tar.{archive_format[:2]}'
# Move the archive created to the wanted destination
move(f'{archive_name}.{archive_format}', self.__dest)
info('Done, archive successfully created')
except Exception as e:
info(str(e))
# Always remove the temporary file
finally:
            # remove the temporary folder created and everything inside it
rmtree(temporal_location)
info('Temporary file removed')
# Recursive copy of a folder
# -R, -r, --recursive
# copy directories recursively
def recursive(self):
copytree(self.__source, self.__dest, copy_function=self.__preserve)
def main(args=None):
if not args:
parser = ArgumentParser()
parser.add_argument('Source', help='Source1;Source2;..SourceN')
parser.add_argument('Dest', help='Dest1;Dest2;..DestN')
parser.add_argument('-R', '-r', '--recursive', help='Recursive copy of a folder', action='store_const',
const=True, default=False, dest='recursive')
parser.add_argument('-a', '--archive',
help='Create archive of a folder in any of this formats (zip,tar,tar.gz,tar.xz,tar.bz)',
action='store_const', const=True, default=False, dest='archive')
parser.add_argument('-t', '--target-directory', help='Use the destination as a folder for all the source files',
action='store_const',
const=True, default=False, dest='setTargetDirectory')
parser.add_argument('-v', '--verbose', dest='setVerbose', help='Set verbose mode', action='store_const',
const=True, default=False)
parser.add_argument('--preserve', dest='setPreserve', help='Preserve all metadata of source[s]',
action='store_const',
const=True, default=False)
args = vars(parser.parse_args())
# Handler
cp_command = CPHandler()
# Source[s]
cp_command.Source(args['Source'])
# Dest[s]
cp_command.Dest(args['Dest'])
# Delete non necessary keys
del args['Source'], args['Dest']
    # Set the copy function to use
if args['archive']:
copy_function = getattr(cp_command, 'archive')
elif args['recursive']:
        copy_function = getattr(cp_command, 'recursive')
else:
copy_function = getattr(cp_command, 'normal_copy')
# Set variables
for key in ('setVerbose', 'setPreserve', 'setTargetDirectory'):
if args[key]:
getattr(cp_command, key)(args[key])
# Set all variables
copy_function()
if __name__ == '__main__':
main()
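# Programmatic sketch: main() also accepts a pre-built args dict, bypassing
# argparse; the paths below are placeholders.
def _demo():
    main({
        'Source': 'notes.txt;todo.txt',
        'Dest': 'backup',
        'recursive': False,
        'archive': False,
        'setTargetDirectory': True,  # copy both sources into ./backup
        'setVerbose': True,
        'setPreserve': False,
    })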
| 37.543147
| 120
| 0.570443
|
4a1c601c2eeaf45c7d495a35c85e7b4271d1fc16
| 2,344
|
py
|
Python
|
src/model/DiceEnum.py
|
slavi010/random_dice_bot
|
68742314dddcc06b03f961b7da66a6cd65e01c2e
|
[
"MIT"
] | 1
|
2020-05-28T20:31:36.000Z
|
2020-05-28T20:31:36.000Z
|
src/model/DiceEnum.py
|
slavi010/random_dice_bot
|
68742314dddcc06b03f961b7da66a6cd65e01c2e
|
[
"MIT"
] | 8
|
2020-05-28T14:15:00.000Z
|
2022-01-13T02:47:35.000Z
|
src/model/DiceEnum.py
|
slavi010/random_dice_bot
|
68742314dddcc06b03f961b7da66a6cd65e01c2e
|
[
"MIT"
] | 1
|
2021-10-13T21:47:00.000Z
|
2021-10-13T21:47:00.000Z
|
#################################################################################
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#################################################################################
#
# Contributors :
# Copyright (c) 2020 slavi010 pro@slavi.dev
#
from enum import Enum
# 47 dice
class DiceEnum(Enum):
FIRE = 0
ELECTRIC = 1
WIND = 2
POISON = 3
ICE = 4
IRON = 5
BROKEN = 6
GAMBLE = 7
LOCK = 8
MINE = 9
LIGHT = 10
THORN = 11
CRACK = 12
CRITICAL = 13
ENERGY = 14
SACRIFICIAL = 15
BOW = 16
DEATH = 17
TELEPORT = 18
LASER = 19
MIMIC = 20
INFECT = 21
MODIFIED_ELECTRIC = 22
ABSORB = 23
MIGHTY_WIND = 24
SWITCH = 25
GEAR = 26
WAVE = 27
NUCLEAR = 28
LANDMINE = 29
SAND_SWAMP = 30
JOKER = 31
HOLY8SWORD = 32
HELL = 33
SHIELD = 34
BLIZZARD = 35
GROWTH = 36
SUMMONER = 37
SOLAR = 38
ASSASSIN = 39
GUN = 40
ELEMENT = 41
SUPPLEMENT = 42
METASTASIS = 43
TYPHOON = 44
TIME = 45
COMBO = 46
class DiceColorEnum(Enum):
FIRE = (0, [(48, 39, 210)])
ELECTRIC = (1, [(15, 177, 254)])
WIND = (2, [(162, 201, 1)])
POISON = (3, [(28, 191, 45)])
ICE = (4, [(243, 145, 61)])
IRON = (5, [(177, 177, 177)])
BROKEN = (6, [(251, 8, 143)])
GAMBLE = (7, [(255, 8, 90)])
LOCK = (8, [(74, 75, 75)])
MINE = (9, [(251, 232, 1)])
LIGHT = (10, [])
THORN = (11, [(0, 0, 0)])
CRACK = (12, [(0, 0, 0)])
CRITICAL = (13, [])
ENERGY = (14, [(205, 234, 27)])
SACRIFICIAL = (15, [(235, 34, 60)])
BOW = (16, [(0, 0, 0)])
MIMIC = (20, [])
JOKER = (31, [])
GROWTH = (36, [(145, 41, 58)])
COMBO = (46, [(145, 32, 162)])
    METASTASIS = (43, [(108, 35, 73)])
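# Quick lookup sketch: the enums map names to ids, and DiceColorEnum pairs
# each id with reference pixel colours (presumably BGR, OpenCV-style).
assert DiceEnum(13) is DiceEnum.CRITICAL
assert DiceColorEnum.FIRE.value == (0, [(48, 39, 210)])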
| 24.416667
| 81
| 0.486348
|
4a1c6116123dc95e370d80e93ddc5ccb13a4039c
| 1,558
|
py
|
Python
|
ml-agents-envs/mlagents/envs/side_channel/side_channel.py
|
robertnoneman/ml-agents
|
797b0e880f4db61ab36783357bf555621affce2a
|
[
"Apache-2.0"
] | 1
|
2019-01-20T19:57:46.000Z
|
2019-01-20T19:57:46.000Z
|
ml-agents-envs/mlagents/envs/side_channel/side_channel.py
|
ruairidhcumming/ml-agents
|
d4205fed06b5ac5c2cac6c594bbd25dfe128103f
|
[
"Apache-2.0"
] | null | null | null |
ml-agents-envs/mlagents/envs/side_channel/side_channel.py
|
ruairidhcumming/ml-agents
|
d4205fed06b5ac5c2cac6c594bbd25dfe128103f
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from enum import IntEnum
class SideChannelType(IntEnum):
FloatProperties = 1
EngineSettings = 2
# Raw bytes channels should start here to avoid conflicting with other
# Unity ones.
RawBytesChannelStart = 1000
# custom side channels should start here to avoid conflicting with Unity
# ones.
UserSideChannelStart = 2000
class SideChannel(ABC):
"""
    The side channel just gets access to a bytes buffer that will be shared
    between C# and Python. For example, we will create a specific side channel
    for properties that maps fixed-size strings to float numbers and can be
    modified by both C# and Python. All side channels are passed
to the Env object at construction.
"""
def __init__(self):
self.message_queue = []
def queue_message_to_send(self, data: bytearray) -> None:
"""
Queues a message to be sent by the environment at the next call to
step.
"""
self.message_queue.append(data)
@abstractmethod
def on_message_received(self, data: bytearray) -> None:
"""
        Called by the environment on the side channel. Can be called
multiple times per step if multiple messages are meant for that
SideChannel.
"""
pass
@property
@abstractmethod
def channel_type(self) -> int:
"""
:return:The type of side channel used. Will influence how the data is
processed in the environment.
"""
pass
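# A minimal custom channel sketch built on the base class above; the UTF-8
# string encoding is an illustrative choice, not part of the API.
class StringLogChannel(SideChannel):
    def __init__(self):
        super().__init__()
        self.received = []
    @property
    def channel_type(self) -> int:
        # Custom channels should use ids at or above UserSideChannelStart.
        return SideChannelType.UserSideChannelStart + 1
    def on_message_received(self, data: bytearray) -> None:
        self.received.append(data.decode("utf-8"))
    def send_string(self, text: str) -> None:
        self.queue_message_to_send(bytearray(text, "utf-8"))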
| 29.961538
| 78
| 0.661746
|
4a1c616f89b39a0f7a8ee3f3e61b3a4fb60eeab2
| 14,534
|
py
|
Python
|
src/wagtailtrans/models.py
|
ahwebd/wagtailtrans
|
00125e7f5118b9730b3d5181b58f041190dbb908
|
[
"BSD-3-Clause"
] | null | null | null |
src/wagtailtrans/models.py
|
ahwebd/wagtailtrans
|
00125e7f5118b9730b3d5181b58f041190dbb908
|
[
"BSD-3-Clause"
] | 1
|
2019-04-02T14:45:04.000Z
|
2019-04-02T14:45:04.000Z
|
src/wagtailtrans/models.py
|
ahwebd/wagtailtrans
|
00125e7f5118b9730b3d5181b58f041190dbb908
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T13:43:41.000Z
|
2020-06-16T13:43:41.000Z
|
from operator import itemgetter
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.http import Http404
from django.shortcuts import redirect
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.translation import activate
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel, PageChooserPanel
from wagtail.admin.forms import WagtailAdminModelForm, WagtailAdminPageForm
from wagtail.contrib.settings.models import BaseSetting
from wagtail.contrib.settings.registry import register_setting
from wagtail.core.models import Page
from wagtail.search.index import FilterField
from .conf import get_wagtailtrans_setting
from .edit_handlers import CanonicalPageWidget, ReadOnlyWidget
from .managers import LanguageManager
from .permissions import TranslatableUserPagePermissionsProxy
class WagtailAdminLanguageForm(WagtailAdminModelForm):
"""Custom wagtailadmin form so we can make use of the panels
property, used by ``wagtail.contrib.modeladmin``.
"""
code = forms.ChoiceField(
label=_("Language"), choices=settings.LANGUAGES,
help_text=_("One of the languages defined in LANGUAGES"))
class Meta:
fields = [
'code',
'is_default',
'position',
'live',
]
def __init__(self, *args, **kwargs):
super(WagtailAdminLanguageForm, self).__init__(*args, **kwargs)
sorted_choices = sorted(self.fields['code'].choices, key=itemgetter(1))
self.fields['code'].choices = sorted_choices
def clean_is_default(self):
is_default = self.cleaned_data['is_default']
if self.initial.get('is_default') and not is_default:
raise ValidationError(_(
"You can not remove is_default from a language. To change the "
"default language, select is_default on a different language"))
return is_default
def save(self, commit=True):
is_default = self.cleaned_data.get('is_default', False)
if (
not self.initial.get('is_default') == is_default and
is_default and
not get_wagtailtrans_setting('LANGUAGES_PER_SITE')
):
from wagtailtrans.utils.language_switch import change_default_language # noqa
change_default_language(self.instance)
return super(WagtailAdminLanguageForm, self).save(commit=commit)
def get_language_panels():
children = [
FieldPanel('code'),
FieldPanel('position'),
FieldPanel('live'),
]
if not get_wagtailtrans_setting('LANGUAGES_PER_SITE'):
children.insert(1, FieldPanel('is_default'))
return [
MultiFieldPanel(heading=_("Language details"), children=children),
]
class Language(models.Model):
"""User defined language."""
code = models.CharField(max_length=12, unique=True)
is_default = models.BooleanField(
default=False, help_text="""Visitors with no language preference will see the site in this language""")
position = models.IntegerField(
default=0, help_text="""Language choices and translations will be displayed in this order""")
live = models.BooleanField(default=True, help_text="Is this language available for visitors to view?")
objects = LanguageManager()
base_form_class = WagtailAdminLanguageForm
panels = get_language_panels()
class Meta:
ordering = ['position']
verbose_name = _('Language')
verbose_name_plural = _('Languages')
def __str__(self):
return force_text(dict(settings.LANGUAGES).get(self.code))
def has_pages_in_site(self, site):
return self.pages.filter(path__startswith=site.root_page.path).exists()
class AdminTranslatablePageForm(WagtailAdminPageForm):
"""Form to be used in the wagtail admin."""
def __init__(self, *args, **kwargs):
super(AdminTranslatablePageForm, self).__init__(*args, **kwargs)
self.fields['canonical_page'].widget = CanonicalPageWidget(
canonical_page=self.instance.specific.canonical_page)
language_display = Language.objects.filter(pk=self.initial['language']).first()
if self.instance.specific.is_canonical and language_display:
language_display = "{} - {}".format(language_display, "canonical")
self.fields['language'].widget = ReadOnlyWidget(text_display=language_display if language_display else '')
def _language_default():
# Let the default return a PK, so migrations can also work with this value.
# The FakeORM model in the migrations differ from this Django model.
default_language = Language.objects.default()
if default_language is None:
return None
else:
return default_language.pk
class TranslatablePage(Page):
    #: Defined with a unique name, to prevent field clashes.
translatable_page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
canonical_page = models.ForeignKey(
'self', related_name='translations', blank=True, null=True, on_delete=models.SET_NULL)
language = models.ForeignKey(Language, related_name='pages', on_delete=models.PROTECT, default=_language_default)
is_creatable = False
search_fields = Page.search_fields + [
FilterField('language_id'),
]
settings_panels = Page.settings_panels + [
MultiFieldPanel(
heading=_("Translations"),
children=[
FieldPanel('language'),
PageChooserPanel('canonical_page'),
]
)
]
base_form_class = AdminTranslatablePageForm
def get_admin_display_title(self):
return "{} ({})".format(super(TranslatablePage, self).get_admin_display_title(), self.language)
def serve(self, request, *args, **kwargs):
activate(self.language.code)
return super(TranslatablePage, self).serve(request, *args, **kwargs)
def move(self, target, pos=None, suppress_sync=False):
"""Move the page to another target.
:param target: the new target to move the page to
:param pos: position of the page in the new target
:param suppress_sync: suppress syncing the translated pages
"""
super(TranslatablePage, self).move(target, pos)
if get_wagtailtrans_setting('LANGUAGES_PER_SITE'):
site = self.get_site()
lang_settings = SiteLanguages.for_site(site)
is_default = lang_settings.default_language == self.language
else:
is_default = self.language.is_default
if not suppress_sync and get_wagtailtrans_setting('SYNC_TREE') and is_default:
self.move_translated_pages(canonical_target=target, pos=pos)
def move_translated_pages(self, canonical_target, pos=None):
"""Move only the translated pages of this instance (not self).
This is only called when WAGTAILTRANS_SYNC_TREE is enabled
:param canonical_target: Parent of the canonical page
:param pos: position
"""
translations = self.get_translations(only_live=False)
if getattr(canonical_target, 'canonical_page', False):
canonical_target = canonical_target.canonical_page
for page in translations:
# get target because at this point we assume the tree is in sync.
target = TranslatablePage.objects.filter(
Q(language=page.language),
Q(canonical_page=canonical_target) | Q(pk=canonical_target.pk)
).get()
page.move(target=target, pos=pos, suppress_sync=True)
def get_translations(self, only_live=True, include_self=False):
"""Get all translations of this page.
This page itself is not included in the result, all pages
are sorted by the language position.
:param only_live: Boolean to filter on live pages & languages.
:return: TranslatablePage instance
"""
canonical_page_id = self.canonical_page_id or self.pk
translations = TranslatablePage.objects.filter(Q(canonical_page=canonical_page_id) | Q(pk=canonical_page_id))
if not include_self:
translations = translations.exclude(pk=self.pk)
if only_live:
translations = translations.live().filter(language__live=True)
return translations
def has_translation(self, language):
"""Check if page isn't already translated in given language.
:param language: Language instance
:return: Boolean
"""
return language.pages.filter(canonical_page=self).exists()
def get_translation_parent(self, language):
site = self.get_site()
if not language.has_pages_in_site(site):
return site.root_page
translation_parent = (
TranslatablePage.objects
.filter(canonical_page=self.get_parent(), language=language, path__startswith=site.root_page.path)
.first()
)
return translation_parent
def create_translation(self, language, copy_fields=False, parent=None):
"""Create a translation for this page. If tree syncing is enabled the
copy will also be moved to the corresponding language tree.
:param language: Language instance
:param copy_fields: Boolean specifying if the content should be copied
:param parent: Parent page instance for the translation
:return: new Translated page (or subclass) instance
"""
if self.has_translation(language):
raise Exception("Translation already exists")
if not parent:
parent = self.get_translation_parent(language)
if self.slug == self.language.code:
slug = language.code
else:
slug = '%s-%s' % (self.slug, language.code)
update_attrs = {
'title': self.title,
'slug': slug,
'language': language,
'live': False,
'canonical_page': self,
}
if copy_fields:
kwargs = {'update_attrs': update_attrs}
if parent != self.get_parent():
kwargs['to'] = parent
new_page = self.copy(**kwargs)
else:
model_class = self.content_type.model_class()
new_page = model_class(**update_attrs)
parent.add_child(instance=new_page)
return new_page
@cached_property
def has_translations(self):
return self.translations.exists()
@cached_property
def is_canonical(self):
return not self.canonical_page_id and self.has_translations
class Meta:
verbose_name = _('Translatable page')
verbose_name_plural = _('Translatable pages')
def get_user_language(request):
"""Get the Language corresponding to a request.
return default language if Language does not exist in site
:param request: Request object
:return: Language instance
"""
if hasattr(request, 'LANGUAGE_CODE'):
language = Language.objects.live().filter(code=request.LANGUAGE_CODE).first()
if language:
return language
return Language.objects.default_for_site(site=request.site)
class TranslatableSiteRootPage(Page):
"""Root page of any translatable site.
This page should be used as the root page because it will
route the requests to the right language.
"""
parent_page_types = ['wagtailcore.Page']
def serve(self, request, *args, **kwargs):
"""Serve TranslatablePage in the correct language
:param request: request object
:return: Http302 or Http404
"""
language = get_user_language(request)
candidates = TranslatablePage.objects.live().specific().child_of(self)
try:
translation = candidates.filter(language=language).get()
return redirect(translation.url)
except TranslatablePage.DoesNotExist:
raise Http404
def page_permissions_for_user(self, user):
"""Patch for the page permissions adding our custom proxy
Note: Since wagtail doesn't call this method on the
specific page we need to patch the default page
implementation for this.
:param user: User instance
:return: user permissions for page
"""
user_perms = TranslatableUserPagePermissionsProxy(user)
return user_perms.for_page(self)
Page.permissions_for_user = page_permissions_for_user
class SiteLanguagesForm(WagtailAdminModelForm):
"""Form to be used in the wagtail admin."""
def clean_other_languages(self):
if (
'default_language' in self.cleaned_data and
self.cleaned_data['default_language'] in self.cleaned_data['other_languages']
):
raise forms.ValidationError(_("Default language cannot be in other_languages"))
return self.cleaned_data['other_languages']
def save(self, commit=True):
data = self.cleaned_data
if not data['default_language'].pk == self.initial['default_language']:
from wagtailtrans.utils.language_switch import change_default_language # noqa
change_default_language(data['default_language'], self.instance.site)
return super(SiteLanguagesForm, self).save(commit=commit)
def register_site_languages():
def decorate(func):
if get_wagtailtrans_setting('LANGUAGES_PER_SITE'):
return register_setting(func)
return func
return decorate
@register_site_languages()
class SiteLanguages(BaseSetting):
"""Site specific settings are stored in the database"""
default_language = models.ForeignKey(
Language, related_name="site_default_language", null=True, on_delete=models.PROTECT)
other_languages = models.ManyToManyField(Language, blank=True)
panels = [
MultiFieldPanel(
heading=_("Languages"),
children=[
FieldPanel('default_language'),
FieldPanel(
'other_languages', widget=forms.CheckboxSelectMultiple),
]
),
]
base_form_class = SiteLanguagesForm
class Meta:
verbose_name = _("Site languages")
verbose_name_plural = _("Site languages")
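# Hedged sketch: register a language and translate an existing page; assumes
# a migrated database and a tree rooted at a TranslatableSiteRootPage.
def _translation_sketch():
    french = Language.objects.create(code='fr', position=1, live=True)
    page = TranslatablePage.objects.live().first()
    if page is not None and not page.has_translation(french):
        # The copy starts unpublished and its slug gets a '-fr' suffix.
        return page.create_translation(french, copy_fields=True)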
| 34.359338
| 117
| 0.676483
|
4a1c623bdccf1958d9facaea8b77fa37f88f2504
| 147
|
py
|
Python
|
tests/asserts/comprehension.py
|
liwt31/NPython
|
1159715bb6d4ff9502e9fa4466ddc6f36a8b63d2
|
[
"CNRI-Python"
] | 18
|
2019-01-29T16:14:42.000Z
|
2021-02-11T07:34:11.000Z
|
tests/asserts/comprehension.py
|
liwt31/NPython
|
1159715bb6d4ff9502e9fa4466ddc6f36a8b63d2
|
[
"CNRI-Python"
] | 1
|
2018-12-28T10:52:17.000Z
|
2018-12-29T14:21:07.000Z
|
tests/asserts/comprehension.py
|
liwt31/NPython
|
1159715bb6d4ff9502e9fa4466ddc6f36a8b63d2
|
[
"CNRI-Python"
] | null | null | null |
import xfail
l = [i for i in range(10)]
def foo():
print(i)
xfail.xfail(foo, NameError)
for i in range(10):
assert l[i] == i
print("ok")
| 13.363636
| 27
| 0.605442
|
4a1c628e3a4d34abc18fd6b5ee0313752b58b6a9
| 385
|
py
|
Python
|
ykdl/extractors/netease/__init__.py
|
shakenetwork/ykdl
|
68711756290980b78d63cbd9ff7e3e8a5457d504
|
[
"MIT"
] | 3
|
2018-09-04T09:33:51.000Z
|
2021-11-01T09:03:27.000Z
|
ykdl/extractors/netease/__init__.py
|
hpuyj/ykdl
|
7933263435d380b6b12538afc58a42d7a927c8f3
|
[
"MIT"
] | null | null | null |
ykdl/extractors/netease/__init__.py
|
hpuyj/ykdl
|
7933263435d380b6b12538afc58a42d7a927c8f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
def get_extractor(url):
if re.search("cc.163", url):
from . import live as s
elif re.search("open.163", url):
from . import openc as s
elif re.search("music.163", url):
from . import music as s
return s.get_extractor(url)
else:
from . import video as s
return s.site
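# Hedged dispatch sketch: music.163 URLs resolve through the music
# sub-extractor, while other hosts return the matching module's `site` object.
def _dispatch_sketch():
    return get_extractor('http://music.163.com/#/song?id=123456')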
| 22.647059
| 37
| 0.581818
|
4a1c632d754b23558aff4551f357a1024d603ea5
| 2,686
|
py
|
Python
|
scripts/elnaz/calc_apd_specific_cells/plot_apd_over_a_line.py
|
ElnazP/MonoAlg3D_C
|
3e81952771e8747f8fb713c31225b50117c61a2d
|
[
"MIT"
] | null | null | null |
scripts/elnaz/calc_apd_specific_cells/plot_apd_over_a_line.py
|
ElnazP/MonoAlg3D_C
|
3e81952771e8747f8fb713c31225b50117c61a2d
|
[
"MIT"
] | null | null | null |
scripts/elnaz/calc_apd_specific_cells/plot_apd_over_a_line.py
|
ElnazP/MonoAlg3D_C
|
3e81952771e8747f8fb713c31225b50117c61a2d
|
[
"MIT"
] | null | null | null |
# =================================================================
# Author: Lucas Berg
#
# Program that plots the APD over a line
# This script only works if the grid is a plain tissue
# =================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy import interpolate
def plot_apd_over_a_line(x, apd, f):
xnew = np.linspace(min(x), max(x), num=1000, endpoint=True)
plt.grid()
#plt.plot(x, apd, 'o')
#plt.plot(x, apd, label="APD", c="black", linewidth=1.0)
plt.plot(xnew, f(xnew), c="black", linewidth=1.0)
plt.xlabel("x (um)",fontsize=15)
plt.ylabel("APD (ms)",fontsize=15)
plt.title("Action potential duration (APD)",fontsize=14)
plt.legend(loc=0,fontsize=14)
#plt.show()
plt.savefig("ap.pdf")
def sort_apd_by_x(x, apd):
n = x.shape[0]
# TODO: Change this bubblesort ...
for i in range(n):
for j in range(n):
if (x[i] < x[j]):
aux = x[i]
x[i] = x[j]
x[j] = aux
aux = apd[i]
apd[i] = apd[j]
apd[j] = aux
def interpolate_data(x, y):
#f = interp1d(x, y)
f = interp1d(x, y, kind='cubic')
#tck = interpolate.splrep(x, y, s=0)
#ynew = interpolate.splev(x, tck, der=0)
return f
#return ynew
def main():
if len(sys.argv) != 3:
print("-------------------------------------------------------------------------")
print("Usage:> python %s <cells_positions_filename> <cells_apd_filename>" % sys.argv[0])
print("-------------------------------------------------------------------------")
print("<cells_positions_filename> = Input file with the positions of each cell")
print("<cells_apd_filename> = Input file with the APD of each cell")
print("-------------------------------------------------------------------------")
print("Example:> python %s inputs/cells_positions_inside_region.txt outputs/cells-apd-inside-region.txt" % sys.argv[0])
return 1
cells_position_filename = sys.argv[1]
cells_apd_filename = sys.argv[2]
# Read the input files as Numpy arrays
cells_indexes_positions = np.genfromtxt(cells_position_filename)
cells_apd = np.genfromtxt(cells_apd_filename)
# Get the x position from each cell
x = cells_indexes_positions[:,1]*1.0e-04
# TODO: Find a way to store (x,cells_apd) as a structure and sort it by the 'x' value
sort_apd_by_x(x,cells_apd)
xnew = []
ynew = []
for i in range(len(x)):
if (i % 8 == 0):
xnew.append(x[i])
ynew.append(cells_apd[i])
#f = interpolate_data(x,cells_apd)
f = interpolate_data(xnew,ynew)
#ynew = interpolate_data(x,cells_apd)
plot_apd_over_a_line(xnew,ynew,f)
#plot_apd_over_a_line(x,ynew)
if __name__ == "__main__":
main()
| 29.195652
| 121
| 0.592703
|
4a1c633a67f9fbcc44a5b71a054f8082c192040e
| 687
|
py
|
Python
|
examples/python/json-get-plugins.py
|
alexanderfefelov/bgbilling-http-examples
|
25b4cce6e8c7f855ecc8a9a697805253c1723ef8
|
[
"MIT"
] | null | null | null |
examples/python/json-get-plugins.py
|
alexanderfefelov/bgbilling-http-examples
|
25b4cce6e8c7f855ecc8a9a697805253c1723ef8
|
[
"MIT"
] | null | null | null |
examples/python/json-get-plugins.py
|
alexanderfefelov/bgbilling-http-examples
|
25b4cce6e8c7f855ecc8a9a697805253c1723ef8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import configparser
import json
import urllib.request
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read('bgbilling.conf')
url = config.get('http.json', 'url_endpoint_plugincfg')
request_template = config.get('http.json', 'request_template')
method = 'getPlugins'
parameters = '{}'
request_json = request_template % (method, parameters)
request = urllib.request.Request(url)
request.add_header('Content-Type', 'application/json; charset=utf-8')
response = urllib.request.urlopen(request, json.dumps(json.loads(request_json)).encode('utf8'))
response_body = response.read().decode('utf8')
print(response_body)
| 28.625
| 95
| 0.778748
|
4a1c6575eec6ea0ab1dff1aca99ad1c037735dbd
| 1,132
|
py
|
Python
|
src/reward/get_reward.py
|
yulun-rayn/DGAPN
|
6d87376fa933a0a5efff180ebe1fe5772a060987
|
[
"MIT"
] | 5
|
2022-01-21T21:15:59.000Z
|
2022-01-24T20:02:46.000Z
|
src/reward/get_reward.py
|
yulun-rayn/DGAPN
|
6d87376fa933a0a5efff180ebe1fe5772a060987
|
[
"MIT"
] | null | null | null |
src/reward/get_reward.py
|
yulun-rayn/DGAPN
|
6d87376fa933a0a5efff180ebe1fe5772a060987
|
[
"MIT"
] | null | null | null |
from .logp.get_score import get_logp_score, get_penalized_logp
from .qed.get_score import get_qed_score
from .sa.get_score import get_sa_score
from .adtgpu.get_score import get_dock_score
def get_reward(states, reward_type, args=None):
if reward_type == 'logp':
return get_logp_score(states)
elif reward_type == 'plogp':
return get_penalized_logp(states)
elif reward_type == 'qed':
qed = get_qed_score(states)
# scale QED to 0-10
if isinstance(qed, list):
return [10.* (s-0.009)/0.939 for s in qed]
else:
return 10.* (qed-0.009)/0.939
elif reward_type == 'sa':
sa = get_sa_score(states)
# scale SA to 0-10
if isinstance(sa, list):
return [10.* (10.-s)/9. for s in sa]
else:
return 10.* (10.-sa)/9.
elif reward_type == 'dock':
dock = get_dock_score(states, args=args)
# negative dock
if isinstance(dock, list):
return [-s for s in dock]
else:
return -dock
else:
raise ValueError("Reward type not recognized.")
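# Worked scaling check for the qed/sa branches above (pure arithmetic):
# a near-best raw QED of 0.948 maps to 10 * (0.948 - 0.009) / 0.939 ≈ 10.0,
# and the easiest raw SA score of 1 maps to 10 * (10 - 1) / 9 = 10.0.
assert abs(10. * (0.948 - 0.009) / 0.939 - 10.) < 1e-9
assert 10. * (10. - 1.) / 9. == 10.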
| 28.3
| 62
| 0.595406
|
4a1c65d40590de1f657e5fde52bdf5d1414d0fa0
| 14,482
|
py
|
Python
|
google/cloud/forseti/scanner/audit/service_account_key_rules_engine.py
|
Sandesh36/forseti-security
|
e0a2b92485ab3f57a5034aaee375484c8647db68
|
[
"Apache-2.0"
] | 1
|
2018-10-06T23:16:59.000Z
|
2018-10-06T23:16:59.000Z
|
google/cloud/forseti/scanner/audit/service_account_key_rules_engine.py
|
Sandesh36/forseti-security
|
e0a2b92485ab3f57a5034aaee375484c8647db68
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/forseti/scanner/audit/service_account_key_rules_engine.py
|
Sandesh36/forseti-security
|
e0a2b92485ab3f57a5034aaee375484c8647db68
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for checking service account key age."""
from collections import namedtuple
import threading
from google.cloud.forseti.common.gcp_type import errors as resource_errors
from google.cloud.forseti.common.gcp_type import resource as resource_mod
from google.cloud.forseti.common.gcp_type import resource_util
from google.cloud.forseti.common.util import logger, date_time, string_formats
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors
LOGGER = logger.get_logger(__name__)
class ServiceAccountKeyRulesEngine(bre.BaseRulesEngine):
"""Rules engine for service account key scanner."""
def __init__(self, rules_file_path, snapshot_timestamp=None):
"""Initialize.
Args:
rules_file_path (str): file location of rules
snapshot_timestamp (str): snapshot timestamp. Defaults to None.
If set, this will be the snapshot timestamp
used in the engine.
"""
super(ServiceAccountKeyRulesEngine,
self).__init__(rules_file_path=rules_file_path)
self.rule_book = None
self.snapshot_timestamp = snapshot_timestamp
self._lock = threading.Lock()
def build_rule_book(self, global_configs=None):
"""Build ServiceAccountKeyRuleBook from the rules definition file.
Args:
global_configs (dict): Global configurations.
"""
with self._lock:
self.rule_book = ServiceAccountKeyRuleBook(
self._load_rule_definitions())
# TODO: The naming is confusing and needs to be fixed in all scanners.
def find_policy_violations(self, service_account, force_rebuild=False):
"""Determine whether service account key age violates rules.
Args:
service_account (ServiceAccount): A service account resource to
check.
force_rebuild (bool): If True, rebuilds the rule book. This will
reload the rules definition file and add the rules to the book.
Returns:
generator: A generator of rule violations.
"""
if self.rule_book is None or force_rebuild:
self.build_rule_book()
return self.rule_book.find_violations(service_account)
class ServiceAccountKeyRuleBook(bre.BaseRuleBook):
"""The RuleBook for service account key age rules."""
def __init__(self, rule_defs=None):
"""Initialization.
Args:
rule_defs (list): Serviceaccount keys rule definition dicts
"""
super(ServiceAccountKeyRuleBook, self).__init__()
self._lock = threading.Lock()
self.resource_rules_map = {}
if not rule_defs:
self.rule_defs = {}
else:
self.rule_defs = rule_defs
self.add_rules(rule_defs)
def add_rules(self, rule_defs):
"""Add rules to the rule book.
Args:
rule_defs (dict): rule definitions dictionary
"""
for (i, rule) in enumerate(rule_defs.get('rules', [])):
self.add_rule(rule, i)
def add_rule(self, rule_def, rule_index):
"""Add a rule to the rule book.
Args:
rule_def (dict): A dictionary containing rule definition
properties.
rule_index (int): The index of the rule from the rule definitions.
Assigned automatically when the rule book is built.
"""
with self._lock:
for resource in rule_def.get('resource'):
resource_ids = resource.get('resource_ids')
try:
resource_type = resource_mod.ResourceType.verify(
resource.get('type'))
except resource_errors.InvalidResourceTypeError:
raise audit_errors.InvalidRulesSchemaError(
'Missing resource type in rule {}'.format(rule_index))
if not resource_ids or len(resource_ids) < 1:
raise audit_errors.InvalidRulesSchemaError(
'Missing resource ids in rule {}'.format(rule_index))
key_max_age_str = rule_def.get('max_age', None)
try:
key_max_age = int(key_max_age_str)
except (ValueError, TypeError):
raise audit_errors.InvalidRulesSchemaError(
'Service account key "max_age" missing or not an '
'integer in rule {}'.format(rule_index))
# For each resource id associated with the rule, create a
# mapping of resource => rules.
for resource_id in resource_ids:
gcp_resource = resource_util.create_resource(
resource_id=resource_id,
resource_type=resource_type)
rule = Rule(
rule_def.get('name'),
rule_index,
key_max_age)
resource_rules = self.resource_rules_map.setdefault(
gcp_resource, ResourceRules(resource=gcp_resource))
if rule not in resource_rules.rules:
resource_rules.rules.add(rule)
# pylint: enable=invalid-name
def get_resource_rules(self, resource):
"""Get all the resource rules for resource.
Args:
resource (Resource): The gcp_type Resource find in the map.
Returns:
ResourceRules: A ResourceRules object.
"""
return self.resource_rules_map.get(resource)
def find_violations(self, service_account):
"""Find violations in the rule book.
Args:
service_account (ServiceAccount): service account resource.
Returns:
list: RuleViolation
"""
LOGGER.debug('Looking for service account key violations: %s',
service_account.full_name)
violations = []
resource_ancestors = resource_util.get_ancestors_from_full_name(
service_account.full_name)
LOGGER.debug('Ancestors of resource: %r', resource_ancestors)
checked_wildcards = set()
for curr_resource in resource_ancestors:
if not curr_resource:
# The leaf node in the hierarchy
continue
resource_rule = self.get_resource_rules(curr_resource)
if resource_rule:
violations.extend(
resource_rule.find_policy_violations(service_account))
wildcard_resource = resource_util.create_resource(
resource_id='*', resource_type=curr_resource.type)
if wildcard_resource in checked_wildcards:
continue
checked_wildcards.add(wildcard_resource)
resource_rule = self.get_resource_rules(wildcard_resource)
if resource_rule:
violations.extend(
resource_rule.find_policy_violations(service_account))
LOGGER.debug('Returning violations: %r', violations)
return violations
class ResourceRules(object):
"""An association of a resource to rules."""
def __init__(self,
resource=None,
rules=None):
"""Initialize.
Args:
resource (Resource): The resource to associate with the rule.
rules (set): rules to associate with the resource.
"""
if not isinstance(rules, set):
rules = set([])
self.resource = resource
self.rules = rules
def find_policy_violations(self, service_account):
"""Determine if the policy binding matches this rule's criteria.
Args:
service_account (ServiceAccount): service account resource.
Returns:
list: RuleViolation
"""
violations = []
for rule in self.rules:
rule_violations = rule.find_policy_violations(service_account)
if rule_violations:
violations.extend(rule_violations)
return violations
def __eq__(self, other):
"""Compare == with another object.
Args:
other (ResourceRules): object to compare with
Returns:
int: comparison result
"""
if not isinstance(other, type(self)):
return NotImplemented
return (self.resource == other.resource and
self.rules == other.rules)
def __ne__(self, other):
"""Compare != with another object.
Args:
other (object): object to compare with
Returns:
int: comparison result
"""
return not self == other
def __repr__(self):
"""String representation of this node.
Returns:
str: debug string
"""
return 'ServiceAccountKeyResourceRules<resource={}, rules={}>'.format(
self.resource, self.rules)
class Rule(object):
"""Rule properties from the rule definition file, also finds violations."""
def __init__(self, rule_name, rule_index,
key_max_age):
"""Initialize.
Args:
rule_name (str): Name of the loaded rule
rule_index (int): The index of the rule from the rule definitions
key_max_age (int): Max allowed age in days of service
account key
"""
self.rule_name = rule_name
self.rule_index = rule_index
self.key_max_age = key_max_age
def _is_more_than_max_age(self, created_time, scan_time):
"""Check if the key has been rotated: is the key creation time older
than max_age in the policy
Args:
created_time (str): The time at which the key was created (this
is the validAfterTime in the key API response (in
string_formats.DEFAULT_FORSETI_TIMESTAMP) format
scan_time (datetime): Snapshot timestamp.
Returns:
bool: Returns true if un_rotated
"""
created_time = date_time.get_datetime_from_string(
created_time, string_formats.DEFAULT_FORSETI_TIMESTAMP)
if (scan_time - created_time).days > self.key_max_age:
return True
return False
def find_policy_violations(self, service_account):
"""Find service account key age violations based on the max_age.
Args:
service_account (ServiceAccount): ServiceAccount object.
Returns:
list: Returns a list of RuleViolation named tuples
"""
# Note: We're checking the age as of "now", the scanner run time
# We could consider changing this to when the key was inventoried.
scan_time = date_time.get_utc_now_datetime()
violations = []
for key in service_account.keys:
key_id = key.get('key_id')
full_name = key.get('full_name')
LOGGER.debug('Checking key rotation for %s', full_name)
created_time = key.get('valid_after_time')
if self._is_more_than_max_age(created_time, scan_time):
violation_reason = ('Key ID %s not rotated since %s.' %
(key_id, created_time))
violations.append(RuleViolation(
resource_type=resource_mod.ResourceType.SERVICE_ACCOUNT_KEY,
resource_id=service_account.email,
resource_name=service_account.email,
service_account_name=service_account.display_name,
full_name=full_name,
rule_name='%s (older than %s days)' % (self.rule_name,
self.key_max_age),
rule_index=self.rule_index,
violation_type='SERVICE_ACCOUNT_KEY_VIOLATION',
violation_reason=violation_reason,
project_id=service_account.project_id,
key_id=key_id,
key_created_time=created_time,
resource_data=str(key)))
return violations
def __eq__(self, other):
"""Test whether Rule equals other Rule.
Args:
other (Rule): object to compare to
Returns:
int: comparison result
"""
if not isinstance(other, type(self)):
return NotImplemented
return (self.rule_name == other.rule_name and
self.rule_index == other.rule_index and
(self.key_max_age == other.key_max_age))
def __ne__(self, other):
"""Test whether Rule is not equal to another Rule.
Args:
other (object): object to compare to
Returns:
int: comparison result
"""
return not self == other
def __hash__(self):
"""Make a hash of the rule index.
For now, this will suffice since the rule index is assigned
automatically when the rules map is built, and the scanner
only handles one rule file at a time. Later on, we'll need to
revisit this hash method when we process multiple rule files.
Returns:
int: The hash of the rule index.
"""
return hash(self.rule_index)
RuleViolation = namedtuple('RuleViolation',
['resource_type', 'resource_id', 'resource_name',
'service_account_name', 'full_name', 'rule_name',
'rule_index', 'violation_type', 'violation_reason',
'project_id', 'key_id', 'key_created_time',
'resource_data'])
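# Hedged sketch of a rule definition shaped for add_rule() above and of
# running a scan; the rule values are placeholders and `service_account`
# is an inventoried ServiceAccount resource obtained elsewhere.
_example_rule_defs = {'rules': [{
    'name': 'Rotate service account keys every 100 days',
    'resource': [{'type': 'organization', 'resource_ids': ['*']}],
    'max_age': '100',
}]}
def _scan_sketch(service_account):
    book = ServiceAccountKeyRuleBook(_example_rule_defs)
    return book.find_violations(service_account)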
| 36.386935
| 80
| 0.603093
|
4a1c661e766e6f8c553b6da291d341ca99d1a4aa
| 2,025
|
py
|
Python
|
sequentia/lib/sequentia/classifiers/hmm/topologies/ergodic.py
|
eonu/inf4-hons
|
4b7372272860f19c0f5ea2910f122a62531d7d2e
|
[
"CC-BY-4.0"
] | null | null | null |
sequentia/lib/sequentia/classifiers/hmm/topologies/ergodic.py
|
eonu/inf4-hons
|
4b7372272860f19c0f5ea2910f122a62531d7d2e
|
[
"CC-BY-4.0"
] | null | null | null |
sequentia/lib/sequentia/classifiers/hmm/topologies/ergodic.py
|
eonu/inf4-hons
|
4b7372272860f19c0f5ea2910f122a62531d7d2e
|
[
"CC-BY-4.0"
] | null | null | null |
import numpy as np
from warnings import warn
from .topology import _Topology
class _ErgodicTopology(_Topology):
"""Represents the topology for an ergodic HMM, imposing non-zero probabilities in the transition matrix.
Parameters
----------
n_states: int
Number of states in the HMM.
random_state: numpy.random.RandomState
A random state object for reproducible randomness.
"""
def __init__(self, n_states: int, random_state: np.random.RandomState):
super().__init__(n_states, random_state)
def uniform_transitions(self) -> np.ndarray:
"""Sets the transition matrix as uniform (equal probability of transitioning
to all other possible states from each state) corresponding to the topology.
Returns
-------
transitions: numpy.ndarray
The uniform transition matrix of shape `(n_states, n_states)`.
"""
return np.ones((self._n_states, self._n_states)) / self._n_states
def random_transitions(self) -> np.ndarray:
"""Sets the transition matrix as random (random probability of transitioning
to all other possible states from each state) by sampling probabilities
from a Dirichlet distribution - according to the topology.
Returns
-------
transitions: numpy.ndarray
The random transition matrix of shape `(n_states, n_states)`.
"""
return self._random_state.dirichlet(np.ones(self._n_states), size=self._n_states)
def validate_transitions(self, transitions: np.ndarray) -> None:
"""Validates a transition matrix according to the topology's restrictions.
Parameters
----------
transitions: numpy.ndarray
The transition matrix to validate.
"""
super().validate_transitions(transitions)
if not np.all(transitions > 0):
warn('Zero probabilities in ergodic transition matrix - these transition probabilities will not be learned')
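# --- Illustrative usage sketch (added; not part of the original file) ---
if __name__ == '__main__':
    topology = _ErgodicTopology(n_states=3,
                                random_state=np.random.RandomState(0))
    transitions = topology.random_transitions()
    # Rows are sampled from a flat Dirichlet, so each row sums to one.
    assert np.allclose(transitions.sum(axis=1), 1.0)
    # A uniform matrix has no zero entries, so no warning is expected here.
    topology.validate_transitions(topology.uniform_transitions())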
| 38.207547
| 120
| 0.66963
|
4a1c6623fa9fd2df5e19cabdca184398aae53d0b
| 2,004
|
py
|
Python
|
settings/local.py
|
dcs-dev/sfswitch
|
b45c6e34d23cb735327d591363e5f321e2f023da
|
[
"Unlicense"
] | null | null | null |
settings/local.py
|
dcs-dev/sfswitch
|
b45c6e34d23cb735327d591363e5f321e2f023da
|
[
"Unlicense"
] | null | null | null |
settings/local.py
|
dcs-dev/sfswitch
|
b45c6e34d23cb735327d591363e5f321e2f023da
|
[
"Unlicense"
] | null | null | null |
import os
from settings.base import *
from decouple import config
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = os.environ.get('SECRET_KEY')
SECRET_KEY = 'test'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
THUMBNAIL_DEBUG = DEBUG
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = False
ALLOWED_HOSTS = ['*', '127.0.0.1', 'sftoolkit.test']
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# import dj_database_url
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres', # os.environ['PG_DB_NAME']
'USER': 'postgres', # os.environ['PG_USERNAME']
'PASSWORD': 'sFt007k1t', # os.environ['PG_PASSWORD']
'HOST': 'localhost', # os.environ['PG_HOST']
'PORT': '5432'
}
}
# Celery settings
CELERY_BROKER_POOL_LIMIT = 1
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
# REDISTOGO_URL = 'redis://localhost:6379/0'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = 'static'
STATICFILES_DIRS = [
# os.path.join(PROJECT_PATH, 'static'),
os.path.join(BASE_DIR, 'sfswitch', 'static')
]
# Replace this value with the URL from ngrok when running locally
# **NOTE: This must match the value in the connected app in Salesforce
# ngrok http 8000
DJANGO_APP_DOMAIN = 'dcs.ngrok.io'
SALESFORCE_API_VERSION = config('SALESFORCE_API_VERSION', default='50')
SALESFORCE_CONSUMER_KEY = config('SALESFORCE_CONSUMER_KEY', default='')
SALESFORCE_CONSUMER_SECRET = config('SALESFORCE_CONSUMER_SECRET', default='')
SALESFORCE_REDIRECT_URI = 'https://' + DJANGO_APP_DOMAIN + '/oauth_response'
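# Illustrative .env sketch for the decouple-backed settings above; the values
# are hypothetical placeholders read via config():
#   SALESFORCE_API_VERSION=50
#   SALESFORCE_CONSUMER_KEY=<connected-app consumer key>
#   SALESFORCE_CONSUMER_SECRET=<connected-app consumer secret>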
| 27.081081
| 78
| 0.718563
|
4a1c663c444f3c59c2622dc50b138baa975a23c2
| 75,333
|
py
|
Python
|
test/test_mpls.py
|
pogobanane/vpp-fork
|
fccc95ae06e98a1605c4600b1e441811b67abfa8
|
[
"Apache-2.0"
] | 2
|
2019-04-03T10:09:09.000Z
|
2020-09-23T08:56:24.000Z
|
test/test_mpls.py
|
pogobanane/vpp-fork
|
fccc95ae06e98a1605c4600b1e441811b67abfa8
|
[
"Apache-2.0"
] | 2
|
2021-03-20T05:38:00.000Z
|
2021-06-02T03:49:49.000Z
|
test/test_mpls.py
|
AbduSami-bK/vpp
|
ad3f68b9d8c27206ab5dfbf129352a1c844a7004
|
[
"Apache-2.0"
] | 1
|
2019-03-12T18:19:23.000Z
|
2019-03-12T18:19:23.000Z
|
#!/usr/bin/env python
import unittest
import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
VppMplsLabel, MplsLspMode, find_mpls_route
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS
def verify_filter(capture, sent):
if len(capture) != len(sent):
# filter out any IPv6 RAs from the capture
for p in capture:
if p.haslayer(IPv6):
capture.remove(p)
return capture
def verify_mpls_stack(tst, rx, mpls_labels):
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
tst.assertEqual(eth.type, 0x8847)
rx_mpls = rx[MPLS]
for ii in range(len(mpls_labels)):
tst.assertEqual(rx_mpls.label, mpls_labels[ii].value)
tst.assertEqual(rx_mpls.cos, mpls_labels[ii].exp)
tst.assertEqual(rx_mpls.ttl, mpls_labels[ii].ttl)
if ii == len(mpls_labels) - 1:
tst.assertEqual(rx_mpls.s, 1)
else:
# not end of stack
tst.assertEqual(rx_mpls.s, 0)
# pop the label to expose the next
rx_mpls = rx_mpls[MPLS].payload
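# --- Illustrative sketch (added; not part of the original test file) ---
# How a two-label MPLS stack is built with scapy, mirroring the streams
# created by the helpers below; MACs and IPs are hypothetical placeholders.
def _example_two_label_packet():
    p = Ether(dst='02:fe:00:00:00:01', src='02:fe:00:00:00:02')
    p = p / MPLS(label=32, cos=0, s=0, ttl=64)  # outer label, non-EOS (s=0)
    p = p / MPLS(label=33, cos=0, s=1, ttl=64)  # inner label, end-of-stack
    return (p / IP(src='10.0.0.1', dst='10.0.0.2') /
            UDP(sport=1234, dport=1234) / Raw('\xa5' * 100))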
class TestMPLS(VppTestCase):
""" MPLS Test Case """
@classmethod
def setUpClass(cls):
super(TestMPLS, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestMPLS, cls).tearDownClass()
def setUp(self):
super(TestMPLS, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
# setup both interfaces
# assign them different tables.
table_id = 0
self.tables = []
tbl = VppMplsTable(self, 0)
tbl.add_vpp_config()
self.tables.append(tbl)
for i in self.pg_interfaces:
i.admin_up()
if table_id != 0:
tbl = VppIpTable(self, table_id)
tbl.add_vpp_config()
self.tables.append(tbl)
tbl = VppIpTable(self, table_id, is_ip6=1)
tbl.add_vpp_config()
self.tables.append(tbl)
i.set_table_ip4(table_id)
i.set_table_ip6(table_id)
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
i.enable_mpls()
table_id += 1
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.ip6_disable()
i.set_table_ip4(0)
i.set_table_ip6(0)
i.disable_mpls()
i.admin_down()
super(TestMPLS, self).tearDown()
# the default of 64 matches the IP packet TTL default
def create_stream_labelled_ip4(
self,
src_if,
mpls_labels,
ping=0,
ip_itf=None,
dst_ip=None,
chksum=None,
ip_ttl=64,
n=257):
self.reset_packet_infos()
pkts = []
for i in range(0, n):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
for ii in range(len(mpls_labels)):
p = p / MPLS(label=mpls_labels[ii].value,
ttl=mpls_labels[ii].ttl,
cos=mpls_labels[ii].exp)
if not ping:
if not dst_ip:
p = (p / IP(src=src_if.local_ip4,
dst=src_if.remote_ip4,
ttl=ip_ttl) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
p = (p / IP(src=src_if.local_ip4, dst=dst_ip, ttl=ip_ttl) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
p = (p / IP(src=ip_itf.remote_ip4,
dst=ip_itf.local_ip4,
ttl=ip_ttl) /
ICMP())
if chksum:
p[IP].chksum = chksum
info.data = p.copy()
pkts.append(p)
return pkts
def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IP(src=src_if.remote_ip4, dst=dst_ip,
ttl=ip_ttl, tos=ip_dscp) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
def create_stream_ip6(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IPv6(src=src_if.remote_ip6, dst=dst_ip,
hlim=ip_ttl, tc=ip_dscp) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
def create_stream_labelled_ip6(self, src_if, mpls_labels,
hlim=64, dst_ip=None):
if dst_ip is None:
dst_ip = src_if.remote_ip6
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
for l in mpls_labels:
p = p / MPLS(label=l.value, ttl=l.ttl, cos=l.exp)
p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0,
ip_ttl=None, ip_dscp=0):
try:
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x800)
tx_ip = tx[IP]
rx_ip = rx[IP]
if not ping_resp:
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
self.assertEqual(rx_ip.tos, ip_dscp)
if not ip_ttl:
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
else:
self.assertEqual(rx_ip.ttl, ip_ttl)
else:
self.assertEqual(rx_ip.src, tx_ip.dst)
self.assertEqual(rx_ip.dst, tx_ip.src)
except:
raise
def verify_capture_labelled_ip4(self, src_if, capture, sent,
mpls_labels, ip_ttl=None):
try:
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
verify_mpls_stack(self, rx, mpls_labels)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
if not ip_ttl:
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
else:
self.assertEqual(rx_ip.ttl, ip_ttl)
except:
raise
def verify_capture_labelled_ip6(self, src_if, capture, sent,
mpls_labels, ip_ttl=None):
try:
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
verify_mpls_stack(self, rx, mpls_labels)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
if not ip_ttl:
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
else:
self.assertEqual(rx_ip.hlim, ip_ttl)
except:
raise
def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
try:
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
verify_mpls_stack(self, rx, mpls_labels)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
raise
def verify_capture_labelled(self, src_if, capture, sent,
mpls_labels):
try:
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
rx = capture[i]
verify_mpls_stack(self, rx, mpls_labels)
except:
raise
def verify_capture_ip6(self, src_if, capture, sent,
ip_hlim=None, ip_dscp=0):
try:
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x86DD)
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
self.assertEqual(rx_ip.tc, ip_dscp)
# IP processing post pop has decremented the TTL
if not ip_hlim:
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
else:
self.assertEqual(rx_ip.hlim, ip_hlim)
except:
raise
def verify_capture_ip6_icmp(self, src_if, capture, sent):
try:
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x86DD)
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
self.assertEqual(rx_ip.dst, tx_ip.src)
# ICMP sourced from the interface's address
self.assertEqual(rx_ip.src, src_if.local_ip6)
# hop-limit reset to 255 for IMCP packet
self.assertEqual(rx_ip.hlim, 255)
icmp = rx[ICMPv6TimeExceeded]
except:
raise
def test_swap(self):
""" MPLS label swap tests """
#
# A simple MPLS xconnect - eos label in label out
#
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(33)])])
route_32_eos.add_vpp_config()
self.assertTrue(
find_mpls_route(self, 0, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(33)])]))
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(32, ttl=32, exp=1)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=31, exp=1)])
self.assertEqual(route_32_eos.get_stats_to()['packets'], 257)
#
# A simple MPLS xconnect - non-eos label in label out
#
route_32_neos = VppMplsRoute(self, 32, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(33)])])
route_32_neos.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(32, ttl=21, exp=7),
VppMplsLabel(99)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=20, exp=7),
VppMplsLabel(99)])
self.assertEqual(route_32_neos.get_stats_to()['packets'], 257)
#
# A simple MPLS xconnect - non-eos label in label out, uniform mode
#
route_42_neos = VppMplsRoute(
self, 42, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(43, MplsLspMode.UNIFORM)])])
route_42_neos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(42, ttl=21, exp=7),
VppMplsLabel(99)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(43, ttl=20, exp=7),
VppMplsLabel(99)])
#
# An MPLS xconnect - EOS label in IP out
#
route_33_eos = VppMplsRoute(self, 33, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx)
#
# disposed packets have an invalid IPv4 checksum
#
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
dst_ip=self.pg0.remote_ip4,
n=65,
chksum=1)
self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
#
# An MPLS xconnect - EOS label in IP out, uniform mode
#
route_3333_eos = VppMplsRoute(
self, 3333, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])])
route_3333_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(
self.pg0,
[VppMplsLabel(3333, ttl=55, exp=3)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=54, ip_dscp=0x60)
tx = self.create_stream_labelled_ip4(
self.pg0,
[VppMplsLabel(3333, ttl=66, exp=4)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=65, ip_dscp=0x80)
#
# An MPLS xconnect - EOS label in IPv6 out
#
route_333_eos = VppMplsRoute(
self, 333, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
labels=[],
proto=DpoProto.DPO_PROTO_IP6)])
route_333_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx)
#
# disposed packets have an expired TTL
#
tx = self.create_stream_labelled_ip6(self.pg0,
[VppMplsLabel(333, ttl=64)],
dst_ip=self.pg1.remote_ip6,
hlim=1)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
# An MPLS xconnect - EOS label in IPv6 out w imp-null
#
route_334_eos = VppMplsRoute(
self, 334, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
labels=[VppMplsLabel(3)],
proto=DpoProto.DPO_PROTO_IP6)])
route_334_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0,
[VppMplsLabel(334, ttl=64)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx)
#
# An MPLS xconnect - EOS label in IPv6 out w imp-null in uniform mode
#
route_335_eos = VppMplsRoute(
self, 335, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
proto=DpoProto.DPO_PROTO_IP6)])
route_335_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(
self.pg0,
[VppMplsLabel(335, ttl=27, exp=4)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx, ip_hlim=26, ip_dscp=0x80)
#
# disposed packets have an expired TTL
#
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334)],
dst_ip=self.pg1.remote_ip6,
hlim=0)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
# An MPLS xconnect - non-EOS label in IP out - an invalid configuration
# so this traffic should be dropped.
#
route_33_neos = VppMplsRoute(self, 33, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_neos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(33),
VppMplsLabel(99)])
self.send_and_assert_no_replies(
self.pg0, tx,
"MPLS non-EOS packets popped and forwarded")
#
# A recursive EOS x-connect, which resolves through another x-connect
# in pipe mode
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[VppMplsLabel(44),
VppMplsLabel(45)])])
route_34_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34, ttl=3)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33),
VppMplsLabel(44),
VppMplsLabel(45, ttl=2)])
self.assertEqual(route_34_eos.get_stats_to()['packets'], 257)
self.assertEqual(route_32_neos.get_stats_via()['packets'], 257)
#
# A recursive EOS x-connect, which resolves through another x-connect
# in uniform mode
#
route_35_eos = VppMplsRoute(
self, 35, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=42,
labels=[VppMplsLabel(44)])])
route_35_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(35, ttl=3)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(43, ttl=2),
VppMplsLabel(44, ttl=2)])
#
# A recursive non-EOS x-connect, which resolves through another
# x-connect
#
route_34_neos = VppMplsRoute(self, 34, 0,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[VppMplsLabel(44),
VppMplsLabel(46)])])
route_34_neos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34, ttl=45),
VppMplsLabel(99)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
# it's the 2nd (counting from 0) label in the stack that is swapped
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33),
VppMplsLabel(44),
VppMplsLabel(46, ttl=44),
VppMplsLabel(99)])
#
# a recursive IP route that resolves through the recursive non-eos
# x-connect
#
ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=34,
labels=[VppMplsLabel(55)])])
ip_10_0_0_1.add_vpp_config()
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(33),
VppMplsLabel(44),
VppMplsLabel(46),
VppMplsLabel(55)])
self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257)
ip_10_0_0_1.remove_vpp_config()
route_34_neos.remove_vpp_config()
route_34_eos.remove_vpp_config()
route_33_neos.remove_vpp_config()
route_33_eos.remove_vpp_config()
route_32_neos.remove_vpp_config()
route_32_eos.remove_vpp_config()
def test_bind(self):
""" MPLS Local Label Binding test """
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(45)])])
route_10_0_0_1.add_vpp_config()
# bind a local label to the route
binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
binding.add_vpp_config()
# non-EOS stream
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(44),
VppMplsLabel(99)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(45, ttl=63),
VppMplsLabel(99)])
# EOS stream
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(44)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(45, ttl=63)])
# IP stream
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(45)])
#
# cleanup
#
binding.remove_vpp_config()
route_10_0_0_1.remove_vpp_config()
def test_imposition(self):
""" MPLS label imposition test """
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(32)])])
route_10_0_0_1.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(32)])
#
# Add a non-recursive route with 3 out labels
#
route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(32),
VppMplsLabel(33),
VppMplsLabel(34)])])
route_10_0_0_2.add_vpp_config()
tx = self.create_stream_ip4(self.pg0, "10.0.0.2",
ip_ttl=44, ip_dscp=0xff)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(32),
VppMplsLabel(33),
VppMplsLabel(34)],
ip_ttl=43)
#
# Add a non-recursive route with a single out label in uniform mode
#
route_10_0_0_3 = VppIpRoute(
self, "10.0.0.3", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(32,
mode=MplsLspMode.UNIFORM)])])
route_10_0_0_3.add_vpp_config()
tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
ip_ttl=54, ip_dscp=0xbe)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(32, ttl=53, exp=5)])
#
# Add an IPv6 non-recursive route with a single out label in
# uniform mode
#
route_2001_3 = VppIpRoute(
self, "2001::3", 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6,
labels=[VppMplsLabel(32,
mode=MplsLspMode.UNIFORM)])],
is_ip6=1)
route_2001_3.add_vpp_config()
tx = self.create_stream_ip6(self.pg0, "2001::3",
ip_ttl=54, ip_dscp=0xbe)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip6(self.pg0, rx, tx,
[VppMplsLabel(32, ttl=53, exp=5)])
#
# add a recursive path, with output label, via the 1 label route
#
route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
[VppRoutePath("10.0.0.1",
0xffffffff,
labels=[VppMplsLabel(44)])])
route_11_0_0_1.add_vpp_config()
#
# a stream that matches the route for 11.0.0.1, should pick up
# the label stack for 11.0.0.1 and 10.0.0.1
#
tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(32),
VppMplsLabel(44)])
self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257)
#
# add a recursive path, with 2 labels, via the 3 label route
#
route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
[VppRoutePath("10.0.0.2",
0xffffffff,
labels=[VppMplsLabel(44),
VppMplsLabel(45)])])
route_11_0_0_2.add_vpp_config()
#
# a stream that matches the route for 11.0.0.2, should pick up
# the label stack for 11.0.0.2 and 10.0.0.2
#
tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(32),
VppMplsLabel(33),
VppMplsLabel(34),
VppMplsLabel(44),
VppMplsLabel(45)])
self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_labelled_ip4(self.pg0, rx, tx,
[VppMplsLabel(32),
VppMplsLabel(33),
VppMplsLabel(34),
VppMplsLabel(44),
VppMplsLabel(45)])
self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514)
#
# cleanup
#
route_11_0_0_2.remove_vpp_config()
route_11_0_0_1.remove_vpp_config()
route_10_0_0_2.remove_vpp_config()
route_10_0_0_1.remove_vpp_config()
def test_tunnel_pipe(self):
""" MPLS Tunnel Tests - Pipe """
#
# Create a tunnel with a single out label
#
mpls_tun = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(44),
VppMplsLabel(46)])])
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# add an unlabelled route through the new tunnel
#
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
[VppMplsLabel(44),
VppMplsLabel(46)])
#
# add a labelled route through the new tunnel
#
route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index,
labels=[33])])
route_10_0_0_4.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
[VppMplsLabel(44),
VppMplsLabel(46),
VppMplsLabel(33, ttl=255)])
def test_tunnel_uniform(self):
""" MPLS Tunnel Tests - Uniform """
#
# Create a tunnel with a single out label
# The label stack is specified here from outer to inner
#
mpls_tun = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(44, ttl=32),
VppMplsLabel(46, MplsLspMode.UNIFORM)])])
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# add an unlabelled route through the new tunnel
#
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3", ip_ttl=24)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
[VppMplsLabel(44, ttl=32),
VppMplsLabel(46, ttl=23)])
#
# add a labelled route through the new tunnel
#
route_10_0_0_4 = VppIpRoute(
self, "10.0.0.4", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index,
labels=[VppMplsLabel(33, ttl=47)])])
route_10_0_0_4.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
[VppMplsLabel(44, ttl=32),
VppMplsLabel(46, ttl=47),
VppMplsLabel(33, ttl=47)])
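# Note (added for clarity): contrast with test_tunnel_pipe above - in
# uniform mode the inner label inherits the IP TTL minus one (ip_ttl=24
# in gives VppMplsLabel(46, ttl=23) out), while in pipe mode the label
# TTLs are independent of the payload's TTL.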
def test_mpls_tunnel_many(self):
""" Multiple Tunnels """
for ii in range(10):
mpls_tun = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(44, ttl=32),
VppMplsLabel(46, MplsLspMode.UNIFORM)])])
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
def test_v4_exp_null(self):
""" MPLS V4 Explicit NULL test """
#
# The first test case has an MPLS TTL of 0
# all packets should be dropped
#
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(0, ttl=0)])
self.send_and_assert_no_replies(self.pg0, tx,
"MPLS TTL=0 packets forwarded")
#
# a stream with a non-zero MPLS TTL
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(0)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx)
#
# a stream with a non-zero MPLS TTL
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
tx = self.create_stream_labelled_ip4(self.pg1, [VppMplsLabel(0)])
rx = self.send_and_expect(self.pg1, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx)
def test_v6_exp_null(self):
""" MPLS V6 Explicit NULL test """
#
# a stream with a non-zero MPLS TTL
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(2)])
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6(self.pg0, rx, tx)
#
# a stream with a non-zero MPLS TTL
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
tx = self.create_stream_labelled_ip6(self.pg1, [VppMplsLabel(2)])
rx = self.send_and_expect(self.pg1, tx, self.pg1)
self.verify_capture_ip6(self.pg1, rx, tx)
def test_deag(self):
""" MPLS Deagg """
#
# A de-agg route - next-hop lookup in default table
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=0)])
route_34_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34)],
ping=1,
ip_itf=self.pg0)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)
#
# A de-agg route - next-hop lookup in non-default table
#
route_35_eos = VppMplsRoute(self, 35, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
route_35_eos.add_vpp_config()
#
# ping an interface in the non-default table
# PG0 is in the default table. packets arrive labelled in the
# default table and egress unlabelled in the non-default table
#
tx = self.create_stream_labelled_ip4(
self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1)
rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
#
# Double pop
#
route_36_neos = VppMplsRoute(self, 36, 0,
[VppRoutePath("0.0.0.0",
0xffffffff)])
route_36_neos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(36),
VppMplsLabel(35)],
ping=1, ip_itf=self.pg1)
rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
route_36_neos.remove_vpp_config()
route_35_eos.remove_vpp_config()
route_34_eos.remove_vpp_config()
def test_interface_rx(self):
""" MPLS Interface Receive """
#
# Add a non-recursive route that will forward the traffic
# post-interface-rx
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
table_id=1,
paths=[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_10_0_0_1.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an interface-rx on pg1 and matching a route in table 1
# if the packet egresses, then we must have swapped to pg1
# so as to have matched the route in table 1
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_interface_rx=1)])
route_34_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34)],
dst_ip="10.0.0.1")
rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx)
def test_mcast_mid_point(self):
""" MPLS Multicast Mid Point """
#
# Add a non-recursive route that will forward the traffic
# post-interface-rx
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
table_id=1,
paths=[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_10_0_0_1.add_vpp_config()
#
# Add a mcast entry that replicate to pg2 and pg3
# and replicate to an interface-rx (like a bud node would)
#
route_3400_eos = VppMplsRoute(
self, 3400, 1,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index,
labels=[VppMplsLabel(3401)]),
VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index,
labels=[VppMplsLabel(3402)]),
VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_interface_rx=1)],
is_multicast=1)
route_3400_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(3400, ttl=64)],
n=257,
dst_ip="10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(257)
self.verify_capture_ip4(self.pg1, rx, tx)
rx = self.pg2.get_capture(257)
self.verify_capture_labelled(self.pg2, rx, tx,
[VppMplsLabel(3401, ttl=63)])
rx = self.pg3.get_capture(257)
self.verify_capture_labelled(self.pg3, rx, tx,
[VppMplsLabel(3402, ttl=63)])
def test_mcast_head(self):
""" MPLS Multicast Head-end """
#
# Create a multicast tunnel with two replications
#
mpls_tun = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index,
labels=[VppMplsLabel(42)]),
VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index,
labels=[VppMplsLabel(43)])],
is_multicast=1)
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# add an unlabelled route through the new tunnel
#
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg2.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
rx = self.pg3.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
#
# Add an IP multicast route via the tunnel
# A (*,G).
# one accepting interface, pg0, 1 forwarding interface via the tunnel
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(mpls_tun._sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg2.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
rx = self.pg3.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
def test_mcast_ip4_tail(self):
""" MPLS IPv4 Multicast Tail """
#
# Add a multicast route that will forward the traffic
# post-disposition
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an rpf-id and matching a route in table 1
# if the packet egresses, then we must have matched the route in
# table 1
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
nh_table_id=1,
rpf_id=55)],
is_multicast=1)
route_34_eos.add_vpp_config()
#
# Drop due to interface lookup miss
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1", n=1)
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
#
# set the RPF-ID of the entry to match the input packet's
#
route_232_1_1_1.update_rpf_id(55)
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1")
rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip4(self.pg1, rx, tx)
#
# disposed packets have an invalid IPv4 checksum
#
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1", n=65,
chksum=1)
self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
#
# set the RPF-ID of the entry to not match the input packet's
#
route_232_1_1_1.update_rpf_id(56)
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
def test_mcast_ip6_tail(self):
""" MPLS IPv6 Multicast Tail """
#
# Add a multicast route that will forward the traffic
# post-disposition
#
route_ff = VppIpMRoute(
self,
"::",
"ff01::1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an rpf-id and matching a route in table 1
# if the packet egresses, then we must have matched the route in
# table 1
#
route_34_eos = VppMplsRoute(
self, 34, 1,
[VppRoutePath("::",
self.pg1.sw_if_index,
nh_table_id=1,
rpf_id=55,
proto=DpoProto.DPO_PROTO_IP6)],
is_multicast=1)
route_34_eos.add_vpp_config()
#
# Drop due to interface lookup miss
#
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
dst_ip="ff01::1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")
#
# set the RPF-ID of the entry to match the input packet's
#
route_ff.update_rpf_id(55)
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
dst_ip="ff01::1")
rx = self.send_and_expect(self.pg0, tx, self.pg1)
self.verify_capture_ip6(self.pg1, rx, tx)
#
# disposed packets have hop-limit = 1
#
tx = self.create_stream_labelled_ip6(self.pg0,
[VppMplsLabel(34)],
dst_ip="ff01::1",
hlim=1)
rx = self.send_and_expect(self.pg0, tx, self.pg0)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
# set the RPF-ID of the entry to not match the input packet's
#
route_ff.update_rpf_id(56)
tx = self.create_stream_labelled_ip6(self.pg0,
[VppMplsLabel(34)],
dst_ip="ff01::1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
class TestMPLSDisabled(VppTestCase):
""" MPLS disabled """
@classmethod
def setUpClass(cls):
super(TestMPLSDisabled, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestMPLSDisabled, cls).tearDownClass()
def setUp(self):
super(TestMPLSDisabled, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
self.tbl = VppMplsTable(self, 0)
self.tbl.add_vpp_config()
# PG0 is MPLS enabled
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
# PG 1 is not MPLS enabled
self.pg1.admin_up()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
self.pg0.disable_mpls()
super(TestMPLSDisabled, self).tearDown()
def test_mpls_disabled(self):
""" MPLS Disabled """
tx = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
MPLS(label=32, ttl=64) /
IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# A simple MPLS xconnect - eos label in label out
#
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_eos.add_vpp_config()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")
#
# MPLS enable PG1
#
self.pg1.enable_mpls()
#
# Now we get packets through
#
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
#
# Disable PG1
#
self.pg1.disable_mpls()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
class TestMPLSPIC(VppTestCase):
""" MPLS PIC edge convergence """
@classmethod
def setUpClass(cls):
super(TestMPLSPIC, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestMPLSPIC, cls).tearDownClass()
def setUp(self):
super(TestMPLSPIC, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
mpls_tbl = VppMplsTable(self, 0)
mpls_tbl.add_vpp_config()
tbl4 = VppIpTable(self, 1)
tbl4.add_vpp_config()
tbl6 = VppIpTable(self, 1, is_ip6=1)
tbl6.add_vpp_config()
# core links
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
self.pg1.admin_up()
self.pg1.config_ip4()
self.pg1.resolve_arp()
self.pg1.enable_mpls()
# VRF (customer facing) link
self.pg2.admin_up()
self.pg2.set_table_ip4(1)
self.pg2.config_ip4()
self.pg2.resolve_arp()
self.pg2.set_table_ip6(1)
self.pg2.config_ip6()
self.pg2.resolve_ndp()
self.pg3.admin_up()
self.pg3.set_table_ip4(1)
self.pg3.config_ip4()
self.pg3.resolve_arp()
self.pg3.set_table_ip6(1)
self.pg3.config_ip6()
self.pg3.resolve_ndp()
def tearDown(self):
self.pg0.disable_mpls()
self.pg1.disable_mpls()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.set_table_ip4(0)
i.set_table_ip6(0)
i.admin_down()
super(TestMPLSPIC, self).tearDown()
def test_mpls_ibgp_pic(self):
""" MPLS iBGP PIC edge convergence
1) setup many iBGP VPN routes via a pair of iBGP peers.
2) Check ECMP forwarding to these peers
3) withdraw the IGP route to one of these peers.
4) check forwarding continues to the remaining peer
"""
#
# IGP+LDP core routes
#
core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[45])])
core_10_0_0_45.add_vpp_config()
core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
labels=[46])])
core_10_0_0_46.add_vpp_config()
#
# Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
#
vpn_routes = []
pkts = []
for ii in range(64):
dst = "192.168.1.%d" % ii
vpn_routes.append(VppIpRoute(self, dst, 32,
[VppRoutePath("10.0.0.45",
0xffffffff,
labels=[145],
is_resolve_host=1),
VppRoutePath("10.0.0.46",
0xffffffff,
labels=[146],
is_resolve_host=1)],
table_id=1))
vpn_routes[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg2.local_mac,
src=self.pg2.remote_mac) /
IP(src=self.pg2.remote_ip4, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# Send the packet stream (one pkt to each VPN route)
# - expect a 50-50 split of the traffic
#
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0._get_capture(1)
rx1 = self.pg1._get_capture(1)
# not testing the LB hashing algorithm so we're not concerned
# with the split ratio, just as long as neither is 0
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
# us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# Withdraw one of the IGP routes
#
core_10_0_0_46.remove_vpp_config()
#
# now all packets should be forwarded through the remaining peer
#
self.vapi.ppcli("clear trace")
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
#
# packets should still be forwarded through the remaining peer
#
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0.get_capture(64)
#
# Add the IGP route back and we return to load-balancing
#
core_10_0_0_46.add_vpp_config()
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0._get_capture(1)
rx1 = self.pg1._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
def test_mpls_ebgp_pic(self):
""" MPLS eBGP PIC edge convergence
1) setup many eBGP VPN routes via a pair of eBGP peers
2) Check ECMP forwarding to these peers
3) withdraw one eBGP path - expect LB across remaining eBGP
"""
#
# Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
#
vpn_routes = []
vpn_bindings = []
pkts = []
for ii in range(64):
dst = "192.168.1.%d" % ii
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(self, dst, 32,
[VppRoutePath(self.pg2.remote_ip4,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1),
VppRoutePath(self.pg3.remote_ip4,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1)],
table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
ip_table_id=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=local_label, ttl=64) /
IP(src=self.pg0.remote_ip4, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
# us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# withdraw the connected prefix on the interface.
#
self.pg2.unconfig_ip4()
#
# now all packets should be forwarded through the remaining peer
#
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# put the connected prefixes back
#
self.pg2.config_ip4()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
def test_mpls_v6_ebgp_pic(self):
""" MPLSv6 eBGP PIC edge convergence
1) setup many eBGP VPNv6 routes via a pair of eBGP peers
2) Check ECMP forwarding to these peers
3) withdraw one eBGP path - expect LB across remaining eBGP
"""
#
# Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
#
vpn_routes = []
vpn_bindings = []
pkts = []
for ii in range(64):
dst = "3000::%d" % ii
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(
self, dst, 128,
[VppRoutePath(self.pg2.remote_ip6,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1,
proto=DpoProto.DPO_PROTO_IP6),
VppRoutePath(self.pg3.remote_ip6,
0xffffffff,
nh_table_id=1,
proto=DpoProto.DPO_PROTO_IP6,
is_resolve_attached=1)],
table_id=1,
is_ip6=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
ip_table_id=1,
is_ip6=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=local_label, ttl=64) /
IPv6(src=self.pg0.remote_ip6, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
# us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# withdraw the connected prefix on the interface.
# and shutdown the interface so the ND cache is flushed.
#
self.pg2.unconfig_ip6()
self.pg2.admin_down()
#
# now all packets should be forwarded through the remaining peer
#
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# put the connected prefixes back
#
self.pg2.admin_up()
self.pg2.config_ip6()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
class TestMPLSL2(VppTestCase):
""" MPLS-L2 """
@classmethod
def setUpClass(cls):
super(TestMPLSL2, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestMPLSL2, cls).tearDownClass()
def setUp(self):
super(TestMPLSL2, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
# create the default MPLS table
self.tables = []
tbl = VppMplsTable(self, 0)
tbl.add_vpp_config()
self.tables.append(tbl)
# use pg0 as the core facing interface
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
# use the other 2 for customer facing L2 links
for i in self.pg_interfaces[1:]:
i.admin_up()
def tearDown(self):
for i in self.pg_interfaces[1:]:
i.admin_down()
self.pg0.disable_mpls()
self.pg0.unconfig_ip4()
self.pg0.admin_down()
super(TestMPLSL2, self).tearDown()
def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
capture = verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the MPLS TTL is 255 since it enters a new tunnel
verify_mpls_stack(self, rx, mpls_labels)
tx_eth = tx[Ether]
rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload))
self.assertEqual(rx_eth.src, tx_eth.src)
self.assertEqual(rx_eth.dst, tx_eth.dst)
def test_vpws(self):
""" Virtual Private Wire Service """
#
# Create an MPLS tunnel that pushes 1 label
# For Ethernet over MPLS the uniform mode is irrelevant since ttl/cos
# information is not in the packet, but we test it works anyway
#
mpls_tun_1 = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
is_l2=1)
mpls_tun_1.add_vpp_config()
mpls_tun_1.admin_up()
#
# Create a label entry for 55 that does L2 input into the tunnel
#
route_55_eos = VppMplsRoute(
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun_1.sw_if_index,
is_interface_rx=1,
proto=DpoProto.DPO_PROTO_ETHERNET)])
route_55_eos.add_vpp_config()
#
# Cross-connect the tunnel with one of the customer's L2 interfaces
#
self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
mpls_tun_1.sw_if_index,
enable=1)
self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
self.pg1.sw_if_index,
enable=1)
#
# inject a packet from the core
#
pcore = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=55, ttl=64) /
Ether(dst="00:00:de:ad:ba:be",
src="00:00:de:ad:be:ef") /
IP(src="10.10.10.10", dst="11.11.11.11") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
tx0 = pcore * 65
rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
payload = pcore[MPLS].payload
self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
#
# Inject a packet from the customer/L2 side
#
tx1 = pcore[MPLS].payload * 65
rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
def test_vpls(self):
""" Virtual Private LAN Service """
#
# Create an L2 MPLS tunnel
#
mpls_tun = VppMPLSTunnelInterface(
self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[VppMplsLabel(42)])],
is_l2=1)
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# Create a label entry for 55 that does L2 input into the tunnel
#
route_55_eos = VppMplsRoute(
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun.sw_if_index,
is_interface_rx=1,
proto=DpoProto.DPO_PROTO_ETHERNET)])
route_55_eos.add_vpp_config()
#
# add the tunnel to the customer's bridge-domain
#
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
#
# Packet from the customer interface and from the core
#
p_cust = (Ether(dst="00:00:de:ad:ba:be",
src="00:00:de:ad:be:ef") /
IP(src="10.10.10.10", dst="11.11.11.11") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
p_core = (Ether(src="00:00:de:ad:ba:be",
dst="00:00:de:ad:be:ef") /
IP(dst="10.10.10.10", src="11.11.11.11") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# The BD is learning, so send in one of each packet to learn
#
p_core_encap = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=55, ttl=64) /
p_core)
self.pg1.add_stream(p_cust)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.add_stream(p_core_encap)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# we've learnt this so expect it to be forwarded
rx0 = self.pg1.get_capture(1)
self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
#
# now a stream in each direction
#
self.pg1.add_stream(p_cust * 65)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0.get_capture(65)
self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
[VppMplsLabel(42)])
#
# remove interfaces from the customer's bridge-domain
#
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
self.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 36.410343
| 79
| 0.501122
|
4a1c66a85365616ffaf0b7864c775b1a402ab87c
| 172
|
py
|
Python
|
ccfd/__init__.py
|
brownc1995/credit-card-fraud-detection
|
3738aa4933811d544f269964c9f9e1fb0f8a94c5
|
[
"MIT"
] | 1
|
2020-07-03T08:51:19.000Z
|
2020-07-03T08:51:19.000Z
|
ccfd/__init__.py
|
brownc1995/credit-card-fraud-detection
|
3738aa4933811d544f269964c9f9e1fb0f8a94c5
|
[
"MIT"
] | 5
|
2020-01-28T21:53:42.000Z
|
2022-02-10T00:27:52.000Z
|
ccfd/__init__.py
|
brownc1995/credit-card-fraud-detection
|
3738aa4933811d544f269964c9f9e1fb0f8a94c5
|
[
"MIT"
] | null | null | null |
"""
Credit card fraud detection library
"""
__all__ = (
'BATCH_SIZE',
'CLASS',
'STEPS_PER_EPOCH',
)
BATCH_SIZE = 2048
CLASS = 'class'
STEPS_PER_EPOCH = 250
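# Illustrative usage sketch (added; a Keras-style fit() is assumed and the
# model/data names below are hypothetical):
#   model.fit(features, labels, batch_size=BATCH_SIZE,
#             steps_per_epoch=STEPS_PER_EPOCH)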
| 10.75
| 35
| 0.639535
|
4a1c66ab6b338e5a7ede16364ba4e1cc775daf5b
| 11,330
|
py
|
Python
|
src/uncertainty_forest.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | 1
|
2022-01-03T12:36:28.000Z
|
2022-01-03T12:36:28.000Z
|
src/uncertainty_forest.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | null | null | null |
src/uncertainty_forest.py
|
jdey4/progressive-learning
|
410b3525ab63e1f7c32e9838460b2c9af7b9d256
|
[
"Apache-2.0"
] | null | null | null |
'''
Primary Author: Will LeVine
Email: levinewill@icloud.com
'''
#Model
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
#Infrastructure
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import NotFittedError
#Data Handling
from sklearn.utils.validation import (
check_X_y,
check_array,
)
from sklearn.utils.multiclass import check_classification_targets
#Utils
from joblib import Parallel, delayed
import numpy as np
def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
'''
encourage posteriors to approach uniform when there is little data
'''
correction_constant = 1 / (num_classes * num_points_in_partition)
zero_posterior_idxs = np.where(posteriors == 0)[0]
posteriors[zero_posterior_idxs] = correction_constant
posteriors /= sum(posteriors)
return posteriors
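# Illustrative worked example (added; not in the original file): with 2
# classes and a 4-point partition in which class 1 was never observed, the
# zero entry becomes 1 / (2 * 4) = 0.125 and the vector is renormalised:
#   _finite_sample_correction(np.array([1., 0.]), 4, 2)
#   -> array([0.88888889, 0.11111111])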
class UncertaintyForest(BaseEstimator, ClassifierMixin):
'''
based on https://arxiv.org/pdf/1907.00325.pdf
'''
def __init__(
self,
max_depth=30,
min_samples_leaf=1,
max_samples = 0.63,
max_features_tree = "auto",
n_estimators=100,
bootstrap=False,
parallel=True,
n_jobs = None):
#Tree parameters.
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.max_features_tree = max_features_tree
#Bag parameters
self.n_estimators = n_estimators
self.bootstrap = bootstrap
self.max_samples = max_samples
#Model parameters.
self.parallel = parallel
if self.parallel and n_jobs is None:
self.n_jobs = self.n_estimators
else:
self.n_jobs = n_jobs
self.fitted = False
def _check_fit(self):
'''
raise a NotFittedError if the model isn't fit
'''
if not self.fitted:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
raise NotFittedError(msg % {"name": type(self).__name__})
def transform(self, X):
'''
get the estimated posteriors across trees
'''
X = check_array(X)
def worker(tree_idx, tree):
# get the nodes of X
# Drop each estimation example down the tree and record its leaf node.
return tree.apply(X)
if self.parallel:
return np.array(
Parallel(n_jobs=self.n_jobs)(
delayed(worker)(tree_idx, tree) for tree_idx, tree in enumerate(self.ensemble.estimators_)
)
)
else:
return np.array(
[worker(tree_idx, tree) for tree_idx, tree in enumerate(self.ensemble.estimators_)]
)
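# Note (added for clarity): transform() returns an array of shape
# (n_estimators, n_samples) whose entries are leaf-node ids - i.e. the
# partition cell each sample falls into, per tree.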
# function added to do partition mapping
    def _profile_leaf(self):
        # For every tree, walk recursively from the root and record, for each
        # leaf, the per-feature [lower, upper) interval that the leaf covers.
        self.tree_id_to_leaf_profile = {}
        leaf_profile = {}
def worker(node, children_left, children_right, feature, threshold, profile_mat):
if children_left[node] == children_right[node]:
profile_mat_ = profile_mat.copy()
leaf_profile[node] = profile_mat_
else:
feature_indx = feature[node]
profile_mat_ = profile_mat.copy()
profile_mat_[feature_indx,1] = threshold[node]
worker(
children_left[node],
children_left,
children_right,
feature,
threshold,
profile_mat_
)
profile_mat_ = profile_mat.copy()
profile_mat_[feature_indx,0] = threshold[node]
worker(
children_right[node],
children_left,
children_right,
feature,
threshold,
profile_mat_
)
profile_mat = np.concatenate(
(
np.zeros((self._feature_dimension,1),dtype=float),
np.ones((self._feature_dimension,1),dtype=float)
),
axis = 1
)
for tree_id, estimator in enumerate(self.ensemble.estimators_):
leaf_profile = {}
feature = estimator.tree_.feature
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
threshold = estimator.tree_.threshold
worker(
0,
children_left,
children_right,
feature,
threshold,
profile_mat.copy()
)
self.tree_id_to_leaf_profile[tree_id] = leaf_profile
def get_transformer(self):
return lambda X : self.transform(X)
def vote(self, nodes_across_trees):
return self.voter.predict(nodes_across_trees)
def get_voter(self):
return self.voter
def fit(self, X, y):
#format X and y
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
#define the ensemble
self.ensemble = BaggingClassifier(
DecisionTreeClassifier(
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
max_features=self.max_features_tree
),
n_estimators=self.n_estimators,
max_samples=self.max_samples,
bootstrap=self.bootstrap,
n_jobs = self.n_jobs
)
#fit the ensemble
self.ensemble.fit(X, y)
self._feature_dimension = X.shape[1]
#profile trees for partition mapping
self._profile_leaf()
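        # Note (added for clarity): the Voter class below is nested inside
        # fit() in the original source; it maps per-tree leaf ids to
        # finite-sample-corrected class posteriors and is instantiated after
        # its definition, further down in fit().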
class Voter(BaseEstimator):
def __init__(self, estimators_samples_, tree_id_to_leaf_profile, classes, parallel, n_jobs):
self.n_estimators = len(estimators_samples_)
self.classes_ = classes
self.parallel = parallel
self.estimators_samples_ = estimators_samples_
self.tree_id_to_leaf_profile = tree_id_to_leaf_profile
self.n_jobs = n_jobs
def fit(self, nodes_across_trees, y, tree_id_to_leaf_profile=None, fitting = False):
                if tree_id_to_leaf_profile is not None:
self.tree_id_to_leaf_profile = tree_id_to_leaf_profile
self.tree_idx_to_node_ids_to_posterior_map = {}
def worker(tree_idx):
nodes = nodes_across_trees[tree_idx]
                    oob_samples = np.delete(range(len(nodes)), self.estimators_samples_[tree_idx])
                    # NOTE: out-of-bag calibration is disabled; all samples are
                    # used. The commented expressions show the OOB variant.
                    cal_nodes = nodes  # [oob_samples] if fitting else nodes
                    y_cal = y  # [oob_samples] if fitting else y
all_nodes = np.array(list(self.tree_id_to_leaf_profile[tree_idx].keys()))
#create a map from the unique node ids to their classwise posteriors
node_ids_to_posterior_map = {}
#fill in the posteriors
for node_id in np.unique(all_nodes):
cal_idxs_of_node_id = np.where(cal_nodes == node_id)[0]
cal_ys_of_node = y_cal[cal_idxs_of_node_id]
                        class_counts = [len(np.where(cal_ys_of_node == cls)[0]) for cls in np.unique(y)]
sample_no = np.sum(class_counts)
if sample_no != 0:
posteriors = np.nan_to_num(np.array(class_counts) / sample_no)
else:
posteriors = np.zeros(len(self.classes_),dtype=float)
#finite sample correction
total_samples = len(cal_idxs_of_node_id)
if total_samples == 0:
total_samples = 1
posteriors_corrected = _finite_sample_correction(posteriors, total_samples, len(self.classes_))
node_ids_to_posterior_map[node_id] = posteriors_corrected
#add the node_ids_to_posterior_map to the overall tree_idx map
self.tree_idx_to_node_ids_to_posterior_map[tree_idx] = node_ids_to_posterior_map
for tree_idx in range(self.n_estimators):
worker(tree_idx)
return self
def predict_proba(self, nodes_across_trees):
def worker(tree_idx):
#get the node_ids_to_posterior_map for this tree
node_ids_to_posterior_map = self.tree_idx_to_node_ids_to_posterior_map[tree_idx]
#get the nodes of X
nodes = nodes_across_trees[tree_idx]
posteriors = []
node_ids = node_ids_to_posterior_map.keys()
#loop over nodes of X
for node in nodes:
#if we've seen this node before, simply get the posterior
if node in node_ids:
posteriors.append(node_ids_to_posterior_map[node])
#if we haven't seen this node before, simply use the uniform posterior
else:
posteriors.append(np.ones((len(np.unique(self.classes_)))) / len(self.classes_))
return posteriors
if self.parallel:
return np.mean(
Parallel(n_jobs=self.n_jobs)(
delayed(worker)(tree_idx) for tree_idx in range(self.n_estimators)
), axis = 0
)
else:
return np.mean(
[worker(tree_idx) for tree_idx in range(self.n_estimators)], axis = 0)
#get the nodes of the calibration set
nodes_across_trees = self.transform(X)
self.voter = Voter(estimators_samples_ = self.ensemble.estimators_samples_, tree_id_to_leaf_profile = self.tree_id_to_leaf_profile, classes = self.classes_, parallel = self.parallel, n_jobs = self.n_jobs)
self.voter.fit(nodes_across_trees, y, fitting = True)
self.fitted = True
def predict(self, X):
return self.classes_[np.argmax(self.predict_proba(X), axis=-1)]
def predict_proba(self, X):
return self.voter.predict_proba(self.transform(X))
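# Minimal usage sketch (added for illustration; assumes scikit-learn's
# make_classification for toy data, not part of the original file):
#
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=500, n_features=10, random_state=0)
#   uf = UncertaintyForest(n_estimators=10, parallel=False)
#   uf.fit(X, y)
#   proba = uf.predict_proba(X[:5])   # averaged, corrected posteriors
#   labels = uf.predict(X[:5])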
| 37.147541
| 212
| 0.549515
|
4a1c66f2efa811f4658113f4fc81412d0a932b9d
| 337
|
py
|
Python
|
apps/common/utils.py
|
laashub/OneStack
|
591046661efd5defbcee3473d2f7ba330f548f33
|
[
"MIT"
] | null | null | null |
apps/common/utils.py
|
laashub/OneStack
|
591046661efd5defbcee3473d2f7ba330f548f33
|
[
"MIT"
] | null | null | null |
apps/common/utils.py
|
laashub/OneStack
|
591046661efd5defbcee3473d2f7ba330f548f33
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class LoginRequiredMixin(object):
@method_decorator(login_required(login_url='/login'))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
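# Example usage (illustrative, not part of the original file): mix into any
# class-based view to require an authenticated user.
#
#   from django.views.generic import TemplateView
#
#   class DashboardView(LoginRequiredMixin, TemplateView):
#       template_name = 'dashboard.html'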
| 37.444444
| 81
| 0.774481
|
4a1c67cb54e2ba7fe7c15a182c58626d11ca20d8
| 218
|
py
|
Python
|
lab-513.py
|
ZaraTam/DAT208x
|
21b31d640e1f0e03525c3a18b6ef83a73bf2d644
|
[
"MIT"
] | 1
|
2016-06-09T18:54:16.000Z
|
2016-06-09T18:54:16.000Z
|
lab-513.py
|
ZaraTam/DAT208x
|
21b31d640e1f0e03525c3a18b6ef83a73bf2d644
|
[
"MIT"
] | null | null | null |
lab-513.py
|
ZaraTam/DAT208x
|
21b31d640e1f0e03525c3a18b6ef83a73bf2d644
|
[
"MIT"
] | null | null | null |
# matplotlib import added so the snippet runs standalone; gdp_cap and life_exp
# are lists supplied by the course exercise environment.
import matplotlib.pyplot as plt

# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
| 24.222222
| 66
| 0.706422
|
4a1c67dd0b162099208fa8a57b881ca5e7d5b861
| 441
|
py
|
Python
|
api/migrations/0004_profile_email.py
|
didoogan/ncube_test_back
|
7c96ebbe1a3b2a80740a711150d1dca6c3e22126
|
[
"MIT"
] | null | null | null |
api/migrations/0004_profile_email.py
|
didoogan/ncube_test_back
|
7c96ebbe1a3b2a80740a711150d1dca6c3e22126
|
[
"MIT"
] | 6
|
2020-06-05T18:36:33.000Z
|
2022-01-13T00:49:07.000Z
|
api/migrations/0004_profile_email.py
|
didoogan/ncube_test_back
|
7c96ebbe1a3b2a80740a711150d1dca6c3e22126
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-04-07 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_profile_role'),
]
operations = [
migrations.AddField(
model_name='profile',
name='email',
field=models.EmailField(default='example@gmail.com', max_length=254),
preserve_default=False,
),
]
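# To apply this migration (standard Django workflow, shown for illustration):
#   python manage.py migrate api 0004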
| 22.05
| 81
| 0.598639
|
4a1c696cb75ffb0c7d7d1b8f75d24291f4eed480
| 48,155
|
py
|
Python
|
tests/python_client/testcases/entity/test_insert.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
tests/python_client/testcases/entity/test_insert.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
tests/python_client/testcases/entity/test_insert.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
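# Note (added for clarity): in this query DSL, "topk" is the number of nearest
# neighbours to return, "metric_type" the distance metric (L2 here), and
# "nprobe" the number of inverted-list cells probed by IVF-family indexes.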
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
logging.getLogger().info(request.param)
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
"""
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
"""
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
"""
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
"""
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
"""
        target: test insert with a collection that does not exist
        method: insert entity into a randomly named collection
expected: raise a BaseException
"""
collection_name = gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
"""
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
"""
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.drop_collection(collection)
        assert not connect.has_collection(collection)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
"""
        target: test drop collection after inserting and flushing entities
        method: insert entities, flush, then drop the collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.flush([collection])
connect.drop_collection(collection)
        assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
"""
        target: test building index after inserting entities
method: insert entities and build index
expected: no error raised
"""
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
"""
        target: test inserting entities after building index
        method: build index, then insert entities
expected: no error raised
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
"""
        target: test searching entities after they are inserted
        method: insert entities, flush, then search the collection
expected: no error raised
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
result = connect.insert(collection, gen_entities(nb))
connect.flush([collection])
assert len(result.primary_keys) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
'''
        target: test insert entities in collection, using customized ids
        method: create collection and insert entities in it, check the ids returned and the collection length after entities are inserted
        expected: the returned ids match and the collection row count equals nb
'''
nb = insert_count
ids = [i for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
'''
        target: test insert vectors in collection, using identical customized ids
        method: create collection and insert vectors in it, check the ids returned and the collection length after vectors are inserted
        expected: the returned ids match and the collection row count equals nb
'''
nb = insert_count
ids = [1 for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
        target: test creating collections with different fields and inserting entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim, ids)
logging.getLogger().info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
'''
target: test insert entities in collection without ids
        method: create id_collection and insert entities without ids
expected: exception raised
'''
nb = insert_count
with pytest.raises(Exception) as e:
entities = gen_entities(nb)
del entities[0]
connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
        method: test insert vectors twice, with customized ids first, and then no ids
expected: BaseException raised
'''
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
        method: test insert vectors twice, without ids first, and then with customized ids
expected: error raised
'''
entities = copy.deepcopy(default_entities)
del entities[0]
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
'''
        target: test insert vectors in collection, using customized ids with len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
'''
        target: test insert vectors in collection, using customized ids with len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals nb
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(collection, default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
        expected: the returned ids match the customized ids
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_default_partition(self, connect, collection):
'''
target: test insert entities into default partition
        method: insert entities into the collection's default partition
expected: the collection row count equals to nb
'''
result = connect.insert(collection, default_entities, partition_name=default_partition_name)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
        method: create collection and insert entities in it, with a non-existent partition_name param
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_repeatedly(self, connect, collection):
'''
target: test insert entities in collection created before
        method: create collection and insert entities in it repeatedly, with the partition_name param
        expected: the collection row count equals 2 * nb
'''
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_name_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
'''
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
'''
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
'''
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
'''
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
'''
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
'''
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_value(self, connect, collection):
'''
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_type(self, connect, collection):
'''
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_name(self, connect, collection):
'''
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
        method: create collection and insert entities in it (idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
'''
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
'''
delete_nums = 500
disable_flush(connect)
result = connect.insert(collection, default_entities)
ids = result.primary_keys
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_entities(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self, connect, binary_collection):
'''
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
'''
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(binary_collection, default_tag)
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_multi_times(self, connect, binary_collection):
'''
        target: test inserting entities multiple times followed by a final flush
        method: create collection, insert a binary entity many times, then flush
expected: the collection row count equals to nb
'''
for i in range(default_nb):
result = connect.insert(binary_collection, default_binary_entity)
assert len(result.primary_keys) == 1
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
'''
connect.create_index(binary_collection, binary_field_name, get_binary_index)
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
'''
        target: test building index after inserting vectors
        method: insert vectors, then build index
expected: no error raised
'''
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_search(self, connect, binary_collection):
'''
        target: test searching vectors after they are inserted
        method: insert vectors, flush, then search the collection
expected: no error raised
'''
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
logging.getLogger().debug(res)
assert len(res[0]) == default_top_k
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check results")
assert result
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True)
ids = future.result().primary_keys
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
result = connect.insert(collection, gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(result.primary_keys) == nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result().primary_keys
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self, connect, collection):
'''
        target: test async insert with a large number of vectors
        method: insert 50000 entities asynchronously and wait for the result
        expected: length of ids equals nb and the row count matches
'''
nb = 50000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result.primary_keys) == nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self, connect, collection):
'''
        target: test async insert with a short timeout
        method: insert a large batch with timeout=1 and a status callback
        expected: fetching the result raises an exception
'''
nb = 100000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_invalid_params(self, connect):
'''
        target: test async insert into a non-existent collection
        method: insert asynchronously into a randomly named collection
        expected: fetching the result raises an exception
'''
collection_new = gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
result = future.result()
# 1339
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
        target: test async insert with invalid (empty) entities
        method: insert an empty entity list asynchronously
        expected: fetching the result raises an exception
'''
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_multi_collections(self, connect):
'''
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: row count
'''
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(result.primary_keys) == default_nb
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == default_nb
for i in range(collection_num):
connect.drop_collection(collection_list[i])
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_insert_entity_another(self, connect, collection):
'''
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
result = connect.insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(result.primary_keys) == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection_name, default_entity)
assert len(result.primary_keys) == 1
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
connect.drop_collection(collection_name)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection_name, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1 for a while
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_search_entity_insert_entity_another(self, connect, collection):
'''
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
connect.insert(collection_name, default_entity)
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_search_entity_another(self, connect, collection):
'''
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_search_entity_another(self, connect, collection):
'''
        target: test searching one collection after inserting into another
        method: insert entity, flush, then search the other collection
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
assert len(res[0]) == 0
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_entity_during_release_collection(self, connect, collection):
'''
target: test insert entity during release
method: release collection async, then do insert operation
expected: insert ok
'''
for i in range(10):
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def release():
connect.release_collection(collection)
t = threading.Thread(target=release, args=(collection,))
t.start()
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
"""
    Test inserting entities with invalid parameters (collection name, partition name, field name/type/value, and ids)
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
'''
        target: test insert using customized ids that are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(id_collection, default_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity, partition_name=tag_name)
else:
connect.insert(collection, default_entity, partition_name=tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
field_value = get_field_int_value
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
    Test inserting binary entities with invalid parameters (field names, types, values, and ids)
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
'''
        target: test insert using customized ids that are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
"""
target: test insert with invalid field type
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
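# These cases are typically run with pytest against a live Milvus deployment,
# e.g. (illustrative): pytest testcases/entity/test_insert.py::TestInsertBase -v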
| 40.19616
| 133
| 0.66803
|
4a1c6a7365fa145bbf8863a74d9b5c3dab5e75bc
| 58,766
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_vpnmgr_node.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_vpnmgr_node.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_vpnmgr_node.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_vpnmgr_node
short_description: VPN node for VPN Manager.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
  - Normally, running one module can fail when a non-zero rc is returned. You can also override
    the conditions to fail or succeed with parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: only set to True when the module schema differs from the FortiManager API structure; the module then continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
vpnmgr_node:
description: the top level parameters set
required: false
type: dict
suboptions:
add-route:
type: str
description: 'Add-Route.'
choices:
- 'disable'
- 'enable'
assign-ip:
type: str
description: 'Assign-Ip.'
choices:
- 'disable'
- 'enable'
assign-ip-from:
type: str
description: 'Assign-Ip-From.'
choices:
- 'range'
- 'usrgrp'
- 'dhcp'
- 'name'
authpasswd:
description: 'Authpasswd.'
type: str
authusr:
type: str
description: 'Authusr.'
authusrgrp:
type: str
description: 'Authusrgrp.'
auto-configuration:
type: str
description: 'Auto-Configuration.'
choices:
- 'disable'
- 'enable'
automatic_routing:
type: str
description: 'Automatic_Routing.'
choices:
- 'disable'
- 'enable'
banner:
type: str
description: 'Banner.'
default-gateway:
type: str
description: 'Default-Gateway.'
dhcp-server:
type: str
description: 'Dhcp-Server.'
choices:
- 'disable'
- 'enable'
dns-mode:
type: str
description: 'Dns-Mode.'
choices:
- 'auto'
- 'manual'
dns-service:
type: str
description: 'Dns-Service.'
choices:
- 'default'
- 'specify'
- 'local'
domain:
type: str
description: 'Domain.'
extgw:
type: str
description: 'Extgw.'
extgw_hubip:
type: str
description: 'Extgw_Hubip.'
extgw_p2_per_net:
type: str
description: 'Extgw_P2_Per_Net.'
choices:
- 'disable'
- 'enable'
extgwip:
type: str
description: 'Extgwip.'
hub_iface:
type: str
description: 'Hub_Iface.'
id:
type: int
description: 'Id.'
iface:
type: str
description: 'Iface.'
ip-range:
description: 'Ip-Range.'
type: list
suboptions:
end-ip:
type: str
description: 'End-Ip.'
id:
type: int
description: 'Id.'
start-ip:
type: str
description: 'Start-Ip.'
ipsec-lease-hold:
type: int
description: 'Ipsec-Lease-Hold.'
ipv4-dns-server1:
type: str
description: 'Ipv4-Dns-Server1.'
ipv4-dns-server2:
type: str
description: 'Ipv4-Dns-Server2.'
ipv4-dns-server3:
type: str
description: 'Ipv4-Dns-Server3.'
ipv4-end-ip:
type: str
description: 'Ipv4-End-Ip.'
ipv4-exclude-range:
description: 'Ipv4-Exclude-Range.'
type: list
suboptions:
end-ip:
type: str
description: 'End-Ip.'
id:
type: int
description: 'Id.'
start-ip:
type: str
description: 'Start-Ip.'
ipv4-netmask:
type: str
description: 'Ipv4-Netmask.'
ipv4-split-include:
type: str
description: 'Ipv4-Split-Include.'
ipv4-start-ip:
type: str
description: 'Ipv4-Start-Ip.'
ipv4-wins-server1:
type: str
description: 'Ipv4-Wins-Server1.'
ipv4-wins-server2:
type: str
description: 'Ipv4-Wins-Server2.'
local-gw:
type: str
description: 'Local-Gw.'
localid:
type: str
description: 'Localid.'
mode-cfg:
type: str
description: 'Mode-Cfg.'
choices:
- 'disable'
- 'enable'
mode-cfg-ip-version:
type: str
description: 'Mode-Cfg-Ip-Version.'
choices:
- '4'
- '6'
net-device:
type: str
description: 'Net-Device.'
choices:
- 'disable'
- 'enable'
peer:
type: str
description: 'Peer.'
peergrp:
type: str
description: 'Peergrp.'
peerid:
type: str
description: 'Peerid.'
peertype:
type: str
description: 'Peertype.'
choices:
- 'any'
- 'one'
- 'dialup'
- 'peer'
- 'peergrp'
protected_subnet:
description: 'Protected_Subnet.'
type: list
suboptions:
addr:
type: str
description: 'Addr.'
seq:
type: int
description: 'Seq.'
public-ip:
type: str
description: 'Public-Ip.'
role:
type: str
description: 'Role.'
choices:
- 'hub'
- 'spoke'
route-overlap:
type: str
description: 'Route-Overlap.'
choices:
- 'use-old'
- 'use-new'
- 'allow'
spoke-zone:
type: str
description: 'Spoke-Zone.'
summary_addr:
description: 'Summary_Addr.'
type: list
suboptions:
addr:
type: str
description: 'Addr.'
priority:
type: int
description: 'Priority.'
seq:
type: int
description: 'Seq.'
tunnel-search:
type: str
description: 'Tunnel-Search.'
choices:
- 'selectors'
- 'nexthop'
unity-support:
type: str
description: 'Unity-Support.'
choices:
- 'disable'
- 'enable'
usrgrp:
type: str
description: 'Usrgrp.'
vpn-interface-priority:
type: int
description: 'Vpn-Interface-Priority.'
vpn-zone:
type: str
description: 'Vpn-Zone.'
vpntable:
type: str
description: 'Vpntable.'
xauthtype:
type: str
description: 'Xauthtype.'
choices:
- 'disable'
- 'client'
- 'pap'
- 'chap'
- 'auto'
exchange-interface-ip:
type: str
description: 'Exchange-Interface-Ip.'
choices:
- 'disable'
- 'enable'
hub-public-ip:
type: str
description: 'Hub-Public-Ip.'
ipv4-split-exclude:
type: str
description: 'Ipv4-Split-Exclude.'
scope member:
description: 'The scope member array of vpnmgr node'
type: list
suboptions:
name:
type: str
description: 'name of scope member'
vdom:
type: str
description: 'vdom of scope member'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: VPN node for VPN Manager.
fmgr_vpnmgr_node:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
vpnmgr_node:
add-route: <value in [disable, enable]>
assign-ip: <value in [disable, enable]>
assign-ip-from: <value in [range, usrgrp, dhcp, ...]>
authpasswd: <value of string>
authusr: <value of string>
authusrgrp: <value of string>
auto-configuration: <value in [disable, enable]>
automatic_routing: <value in [disable, enable]>
banner: <value of string>
default-gateway: <value of string>
dhcp-server: <value in [disable, enable]>
dns-mode: <value in [auto, manual]>
dns-service: <value in [default, specify, local]>
domain: <value of string>
extgw: <value of string>
extgw_hubip: <value of string>
extgw_p2_per_net: <value in [disable, enable]>
extgwip: <value of string>
hub_iface: <value of string>
id: <value of integer>
iface: <value of string>
ip-range:
-
end-ip: <value of string>
id: <value of integer>
start-ip: <value of string>
ipsec-lease-hold: <value of integer>
ipv4-dns-server1: <value of string>
ipv4-dns-server2: <value of string>
ipv4-dns-server3: <value of string>
ipv4-end-ip: <value of string>
ipv4-exclude-range:
-
end-ip: <value of string>
id: <value of integer>
start-ip: <value of string>
ipv4-netmask: <value of string>
ipv4-split-include: <value of string>
ipv4-start-ip: <value of string>
ipv4-wins-server1: <value of string>
ipv4-wins-server2: <value of string>
local-gw: <value of string>
localid: <value of string>
mode-cfg: <value in [disable, enable]>
mode-cfg-ip-version: <value in [4, 6]>
net-device: <value in [disable, enable]>
peer: <value of string>
peergrp: <value of string>
peerid: <value of string>
peertype: <value in [any, one, dialup, ...]>
protected_subnet:
-
addr: <value of string>
seq: <value of integer>
public-ip: <value of string>
role: <value in [hub, spoke]>
route-overlap: <value in [use-old, use-new, allow]>
spoke-zone: <value of string>
summary_addr:
-
addr: <value of string>
priority: <value of integer>
seq: <value of integer>
tunnel-search: <value in [selectors, nexthop]>
unity-support: <value in [disable, enable]>
usrgrp: <value of string>
vpn-interface-priority: <value of integer>
vpn-zone: <value of string>
vpntable: <value of string>
xauthtype: <value in [disable, client, pap, ...]>
exchange-interface-ip: <value in [disable, enable]>
hub-public-ip: <value of string>
ipv4-split-exclude: <value of string>
scope member:
-
name: <value of string>
vdom: <value of string>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/vpnmgr/node',
'/pm/config/global/obj/vpnmgr/node'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/vpnmgr/node/{node}',
'/pm/config/global/obj/vpnmgr/node/{node}'
]
url_params = ['adom']
module_primary_key = 'id'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'vpnmgr_node': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'add-route': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'assign-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'assign-ip-from': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'range',
'usrgrp',
'dhcp',
'name'
],
'type': 'str'
},
'authpasswd': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'authusr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'authusrgrp': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'auto-configuration': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'automatic_routing': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'banner': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'default-gateway': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'dhcp-server': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dns-mode': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'auto',
'manual'
],
'type': 'str'
},
'dns-service': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'default',
'specify',
'local'
],
'type': 'str'
},
'domain': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'extgw': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'extgw_hubip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'extgw_p2_per_net': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'extgwip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'hub_iface': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'id': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'iface': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ip-range': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'end-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'start-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
},
'ipsec-lease-hold': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'ipv4-dns-server1': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-dns-server2': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-dns-server3': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-end-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-exclude-range': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'end-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'id': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'start-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
},
'ipv4-netmask': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-split-include': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-start-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-wins-server1': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-wins-server2': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'local-gw': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'localid': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'mode-cfg': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'mode-cfg-ip-version': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'4',
'6'
],
'type': 'str'
},
'net-device': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'peer': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'peergrp': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'peerid': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'peertype': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'any',
'one',
'dialup',
'peer',
'peergrp'
],
'type': 'str'
},
'protected_subnet': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'addr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'seq': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
}
}
},
'public-ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'role': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'hub',
'spoke'
],
'type': 'str'
},
'route-overlap': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'use-old',
'use-new',
'allow'
],
'type': 'str'
},
'spoke-zone': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'summary_addr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'list',
'options': {
'addr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'priority': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'seq': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
}
}
},
'tunnel-search': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'selectors',
'nexthop'
],
'type': 'str'
},
'unity-support': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'usrgrp': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'vpn-interface-priority': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'vpn-zone': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'vpntable': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'xauthtype': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'client',
'pap',
'chap',
'auto'
],
'type': 'str'
},
'exchange-interface-ip': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'hub-public-ip': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ipv4-split-exclude': {
'required': False,
'revision': {
'7.0.0': True
},
'type': 'str'
},
'scope member': {
'required': False,
'type': 'list',
'options': {
'name': {
'required': False,
'type': 'str'
},
'vdom': {
'required': False,
'type': 'str'
}
}
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'vpnmgr_node'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 34.875964
| 153
| 0.283497
|
4a1c6aa5726aba215439d0e1f56f44e33f0fe85f
| 608
|
py
|
Python
|
repetition/20.py
|
luan-gomes/python-basic-exercises
|
213844b421b27ab3e9c09be24d4efb37cc6fce08
|
[
"MIT"
] | null | null | null |
repetition/20.py
|
luan-gomes/python-basic-exercises
|
213844b421b27ab3e9c09be24d4efb37cc6fce08
|
[
"MIT"
] | null | null | null |
repetition/20.py
|
luan-gomes/python-basic-exercises
|
213844b421b27ab3e9c09be24d4efb37cc6fce08
|
[
"MIT"
] | null | null | null |
"""
Altere o programa de cálculo do fatorial, permitindo ao usuário calcular
o fatorial várias vezes e limitando o fatorial a números inteiros positivos e menores que 16.
"""
continuar = "sim"
while continuar == "sim":
while True:
number = int(input("Digite um número: "))
if 0 < number < 16:
break
else:
print("O número precisa ser maior do que zero!")
indice = number
while indice > 0:
if indice == number:
fatorial = number
indice-=1
else:
fatorial*=indice
indice-=1
print(f"O fatorial de {number} é: {fatorial}")
continuar = input("Deseja calcular outro fatorial?")
| 21.714286
| 93
| 0.685855
|
4a1c6c9806e5eb49ec3017e4a778b8215f33b960
| 872
|
py
|
Python
|
src/transformers/customs/label_smoothing_loss.py
|
iambotHQ/transformers
|
12f066742e8de6f31daee3a8fa7dd56237b361c1
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/customs/label_smoothing_loss.py
|
iambotHQ/transformers
|
12f066742e8de6f31daee3a8fa7dd56237b361c1
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/customs/label_smoothing_loss.py
|
iambotHQ/transformers
|
12f066742e8de6f31daee3a8fa7dd56237b361c1
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
class LabelSmoothingLoss(nn.Module):
def __init__(self, classes, smoothing: float = 0.0, dim: int = -1, weight=None):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.cls = classes
self.dim = dim
self.weight = weight
    def forward(self, pred, target):
        pred = pred.log_softmax(dim=self.dim)
        if self.weight is not None:
            pred = pred * self.weight.to(pred.device)
        with torch.no_grad():
            # Build the smoothed target distribution: every wrong class gets
            # smoothing / (classes - 1); the true class gets 1 - smoothing.
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.cls - 1))
            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
        # Cross-entropy between the smoothed targets and the log-probabilities.
        return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
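# Minimal usage sketch (hypothetical shapes, not from the original file):
#   criterion = LabelSmoothingLoss(classes=10, smoothing=0.1)
#   logits = torch.randn(4, 10)           # (batch, classes)
#   labels = torch.randint(0, 10, (4,))   # (batch,)
#   loss = criterion(logits, labels)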
| 32.296296
| 84
| 0.618119
|
4a1c6d54a84de0065ab37b517f8c693817aea4c3
| 494
|
py
|
Python
|
xibbaz/main.py
|
erik-stephens/xibbaz
|
5c245ee516dcd7e6dbffac364c6a434bd13a69a4
|
[
"MIT"
] | 1
|
2018-04-02T17:22:32.000Z
|
2018-04-02T17:22:32.000Z
|
xibbaz/main.py
|
erik-stephens/xibbaz
|
5c245ee516dcd7e6dbffac364c6a434bd13a69a4
|
[
"MIT"
] | 2
|
2018-08-22T22:46:48.000Z
|
2018-08-27T21:03:45.000Z
|
xibbaz/main.py
|
erik-stephens/xibbaz
|
5c245ee516dcd7e6dbffac364c6a434bd13a69a4
|
[
"MIT"
] | null | null | null |
"""
A wrapper around scripts to facilitate a single Dockerfile ENTRYPOINT.
usage: xibbaz.main <cmd> ...
Where <cmd> is one of the following. Use `-h, --help` for cmd specific usage.
- cli
- group
- template
- triggers
"""
import sys
import importlib
if len(sys.argv) >= 2:
if sys.argv[1] not in ('cli', 'group', 'template', 'triggers'):
print(__doc__)
else:
importlib.import_module('xibbaz.cmd.' + sys.argv[1]).main(sys.argv[2:])
else:
print(__doc__)
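# Example invocation (a sketch; assumes the xibbaz package is importable):
#   python -m xibbaz.main cli -h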
| 21.478261
| 79
| 0.637652
|
4a1c6e76cc0c16104673a81c2eb237a53c810c1d
| 1,574
|
py
|
Python
|
GearBot/Util/Emoji.py
|
AEnterprise/GearBot
|
96873483877c31659423ede73c1f8bc0c0a830c5
|
[
"MIT"
] | 20
|
2018-08-15T05:05:33.000Z
|
2019-05-02T16:48:59.000Z
|
GearBot/Util/Emoji.py
|
AEnterprise/GearBot
|
96873483877c31659423ede73c1f8bc0c0a830c5
|
[
"MIT"
] | 137
|
2018-08-12T10:01:30.000Z
|
2019-05-03T13:54:47.000Z
|
GearBot/Util/Emoji.py
|
AEnterprise/GearBot
|
96873483877c31659423ede73c1f8bc0c0a830c5
|
[
"MIT"
] | 42
|
2018-08-23T16:33:35.000Z
|
2019-04-24T03:18:50.000Z
|
from discord import utils
from Util import Configuration
emojis = dict()
BACKUPS = {
"1": "1⃣",
"2": "2⃣",
"3": "3⃣",
"4": "4⃣",
"5": "5⃣",
"6": "6⃣",
"7": "7⃣",
"8": "8⃣",
"9": "9⃣",
"10": "🔟",
"ALTER": "🛠",
"BAD_USER": "😶",
"BAN": "🚪",
"BEAN": "🌱",
"BOOT": "👢",
"BUG": "🐛",
"CATEGORY": "📚",
"CHANNEL": "📝",
"CLOCK": "⏰",
"CREATE": "🔨",
"DELETE": "⛏",
"DIAMOND": "⚙",
"DND": "❤",
"EDIT": "📝",
"EYES": "👀",
"GAMING": "🎮",
"GOLD": "⚙",
"IDLE": "💛",
"INNOCENT": "😇",
"IRON": "⚙",
"JOIN": "📥",
"LEAVE": "📤",
"LEFT": "⬅️",
"LOADING": "⏳",
"LOCK": "🔒",
"MUSIC": "🎵",
"MUTE": "😶",
"NAMETAG": "📛",
"NICKTAG": "📛",
"NO": "🚫",
"OFFLINE": "💙",
"ONLINE": "💚",
"PIN": "📌",
"QUESTION": "❓",
"REFRESH": "🔁",
"RIGHT": "➡️",
"ROLE_ADD": "🛫",
"ROLE_REMOVE": "🛬",
"SEARCH": "🔎",
"SINISTER": "😈",
"SPY": "🕵",
"STONE": "⚙",
"STREAMING": "💜",
"TACO": "🌮",
"THINK": "🤔",
"TODO": "📋",
"TRASH": "🗑",
"VOICE": "🔊",
"WARNING": "⚠",
"WATCHING": "📺",
"WHAT": "☹",
"WINK": "😉",
"WOOD": "⚙",
"WRENCH": "🔧",
"YES": "✅"
}
def initialize(bot):
for name, eid in Configuration.get_master_var("EMOJI", {}).items():
emojis[name] = utils.get(bot.emojis, id=eid)
def get_chat_emoji(name):
return str(get_emoji(name))
def get_emoji(name):
if name in emojis:
return emojis[name]
else:
return BACKUPS[name]
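# Minimal usage sketch (hypothetical bot instance; the name must exist in the
# EMOJI master var or in BACKUPS):
#   initialize(bot)        # resolve configured custom emoji once at startup
#   get_chat_emoji("YES")  # "✅" unless a custom YES emoji is configured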
| 17.488889
| 71
| 0.374841
|
4a1c6fc65342e637236d8132825eb0387b6070e5
| 9,170
|
py
|
Python
|
day18/day18.py
|
w-m/aoc2021
|
1d74dea32d3ac73261a019fdae9bfa3c63ee6d1a
|
[
"CC0-1.0"
] | null | null | null |
day18/day18.py
|
w-m/aoc2021
|
1d74dea32d3ac73261a019fdae9bfa3c63ee6d1a
|
[
"CC0-1.0"
] | null | null | null |
day18/day18.py
|
w-m/aoc2021
|
1d74dea32d3ac73261a019fdae9bfa3c63ee6d1a
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import annotations
from dataclasses import dataclass
import pandas as pd
from itertools import combinations
@dataclass
class TreeNode:
left: TreeNode
right: TreeNode
value: int
depth: int
parent: TreeNode
def tolist(self):
lst = []
if self.left:
if self.left.value != -1:
lst.append(self.left.value)
else:
lst.append(self.left.tolist())
if self.right:
if self.right.value != -1:
lst.append(self.right.value)
else:
lst.append(self.right.tolist())
return lst
def tonodelist(self):
lst = []
if self.left:
if self.left.value != -1:
lst.append(self.left)
else:
lst.append(self.left.tonodelist())
if self.right:
if self.right.value != -1:
lst.append(self.right)
else:
lst.append(self.right.tonodelist())
return lst
def __hash__(self):
return hash(id(self))
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.left == other.left and self.right == other.right and self.value == other.value
def build_tree(number, depth=0, parent=None):
node = TreeNode(None, None, -1, depth, parent)
left, right = number
if type(left) == list:
node.left = build_tree(left, depth + 1, node)
else:
node.left = TreeNode(left=None, right=None, value=left, depth=depth + 1, parent=node)
if type(right) == list:
node.right = build_tree(right, depth+1,node)
else:
node.right = TreeNode(left=None, right=None, value=right, depth=depth + 1, parent=node)
return node
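# e.g. build_tree([1, [2, 3]]) returns a root whose left child is the leaf 1
# and whose right child is the pair (2, 3); .tolist() round-trips the input.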
def magnitude(number):
left, right = number
mag = 0
if type(left) == list:
mag += 3 * magnitude(left)
else:
mag += 3 * left
if type(right) == list:
mag += 2 * magnitude(right)
else:
mag += 2 * right
return mag
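# e.g. magnitude([[1, 2], [[3, 4], 5]]) == 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5) == 143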
def add(a, b):
return [a, b]
def find_left(from_node):
cur_node = from_node
visited = set([cur_node])
while True:
        # go up, then take the first left subtree we did not come from
if parent := cur_node.parent:
cur_node = parent
visited.add(cur_node)
else:
return None
# first possible left
if cur_node.left and cur_node.left not in visited:
cur_node = cur_node.left
break
    # Descend to the rightmost leaf of that left subtree.
    while True:
        if cur_node.value != -1:
            return cur_node
        cur_node = cur_node.right
def find_right(from_node):
cur_node = from_node
visited = set([cur_node])
while True:
        # go up, then take the first right subtree we did not come from
if parent := cur_node.parent:
cur_node = parent
visited.add(cur_node)
else:
return None
# first possible right
if cur_node.right and cur_node.right not in visited:
cur_node = cur_node.right
break
    # Descend to the leftmost leaf of that right subtree.
    while True:
        if cur_node.value != -1:
            return cur_node
        cur_node = cur_node.left
def node_iterator(root_node):
return pd.core.common.flatten(root_node.tonodelist())
def explode(explosion_node):
    # add the pair's left value to the nearest regular number on the left, or drop it
if found_left := find_left(explosion_node):
assert found_left.value != -1
assert explosion_node.left.value != -1
found_left.value += explosion_node.left.value
    # add the pair's right value to the nearest regular number on the right, or drop it
if found_right := find_right(explosion_node):
assert found_right.value != -1
assert explosion_node.right.value != -1
found_right.value += explosion_node.right.value
    # replace explosion_node with a regular number 0
replace_node = TreeNode(None, None, 0, explosion_node.depth, explosion_node.parent)
if explosion_node == explosion_node.parent.left:
explosion_node.parent.left = replace_node
else:
explosion_node.parent.right = replace_node
def split(node):
    # replace the value node with the pair [value // 2, (value + 1) // 2]
    # (half rounded down on the left, half rounded up on the right), e.g. 11 -> [5, 6]
value = node.value
rleft = TreeNode(None, None, value // 2, node.depth + 1, None)
rright = TreeNode(None, None, (value + 1) // 2, node.depth + 1, None)
replacement_node = TreeNode(rleft, rright, -1, node.depth, node.parent)
replacement_node.left.parent = replacement_node
replacement_node.right.parent = replacement_node
if node.parent.left == node:
node.parent.left = replacement_node
else:
node.parent.right = replacement_node
def reduce_number(number):
    root = build_tree(number)
    has_reduced = True
    while has_reduced:
        has_reduced = False
        # Explode: the leftmost pair nested four levels deep goes first.
        # Leaf values sit at depth 5 when their parent pair is at depth 4.
        for node in node_iterator(root):
            if node.depth > 4 and node.value != -1:
                explode(node.parent)
                has_reduced = True
                break
        if has_reduced:
            # Explosions always take priority over splits, so rescan.
            continue
        # Split: the leftmost regular number greater than 9 splits in two.
        for node in node_iterator(root):
            if node.value != -1 and node.value > 9:
                split(node)
                has_reduced = True
                break
    return root.tolist()
def add_and_reduce(lines):
number = None
for line in lines:
line_number = eval(line.strip())
if not number:
number = line_number
else:
number = add(number, line_number)
number = reduce_number(number)
return number
def day18a(lines):
number = add_and_reduce(lines)
return magnitude(number)
def day18b(lines):
numbers = [eval(line.strip()) for line in lines]
    def mradd(a, b):
        return magnitude(reduce_number(add(a, b)))
    # Snailfish addition is not commutative, so try both orders of each pair.
    max_magn = max(max(mradd(a, b), mradd(b, a)) for a, b in combinations(numbers, 2))
    return max_magn
if __name__ == "__main__":
assert magnitude([[1,2],[[3,4],5]]) == 143
assert magnitude([[[[0,7],4],[[7,8],[6,0]]],[8,1]]) == 1384
assert magnitude([[[[1,1],[2,2]],[3,3]],[4,4]]) == 445
assert magnitude([[[[3,0],[5,3]],[4,4]],[5,5]]) == 791
assert magnitude([[[[5,0],[7,4]],[5,5]],[6,6]]) == 1137
assert magnitude([[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]) == 3488
assert build_tree([[1,2],[[3,4],5]]).tolist() == [[1,2],[[3,4],5]]
assert build_tree([[[[0,7],4],[[7,8],[6,0]]],[8,1]]).tolist() == [[[[0,7],4],[[7,8],[6,0]]],[8,1]]
assert build_tree([[[[1,1],[2,2]],[3,3]],[4,4]]).tolist() == [[[[1,1],[2,2]],[3,3]],[4,4]]
assert build_tree([[[[3,0],[5,3]],[4,4]],[5,5]]).tolist() == [[[[3,0],[5,3]],[4,4]],[5,5]]
assert build_tree([[[[5,0],[7,4]],[5,5]],[6,6]]).tolist() == [[[[5,0],[7,4]],[5,5]],[6,6]]
assert build_tree([[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]).tolist() == [[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]
assert reduce_number([[6,[5,[4,[3,2]]]],1]) == [[6,[5,[7,0]]],3]
assert reduce_number([[[[[9,8],1],2],3],4]) == [[[[0,9],2],3],4]
assert reduce_number([7,[6,[5,[4,[3,2]]]]]) == [7,[6,[5,[7,0]]]]
assert reduce_number([[6,[5,[4,[3,2]]]],1]) == [[6,[5,[7,0]]],3]
assert reduce_number([[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]) == [[[[0,7],4],[[7,8],[6,0]]],[8,1]]
assert add_and_reduce("[1,1]\n[2,2]\n[3,3]\n[4,4]".splitlines()) == [[[[1,1],[2,2]],[3,3]],[4,4]]
assert add_and_reduce("[1,1]\n[2,2]\n[3,3]\n[4,4]\n[5,5]".splitlines()) == [[[[3,0],[5,3]],[4,4]],[5,5]]
assert add_and_reduce("[1,1]\n[2,2]\n[3,3]\n[4,4]\n[5,5]\n[6,6]".splitlines()) == [[[[5,0],[7,4]],[5,5]],[6,6]]
assert reduce_number([[[[0, [4, 5]], [0, 0]], [[[4, 5], [2, 6]], [9, 5]]], [7, [[[3, 7], [4, 3]], [[6, 3], [8, 8]]]]]) == [[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]
with open("test_input_reduce.txt", "r") as f:
assert add_and_reduce(f.readlines()) == [[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]
with open("test_input.txt", "r") as f:
assert day18a(f.readlines()) == 4140
with open("test_input.txt", "r") as f:
assert day18b(f.readlines()) == 3993
with open("input.txt", "r") as f:
print(day18a(f.readlines()))
with open("input.txt", "r") as f:
print(day18b(f.readlines()))
| 30.065574
| 183
| 0.542094
|
4a1c7007175552bd40dc1e87d78d128f172267d3
| 32,841
|
py
|
Python
|
env/lib/python3.7/site-packages/sklearn/preprocessing/label.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 25
|
2019-03-08T01:03:03.000Z
|
2022-02-14T17:38:32.000Z
|
env/lib/python3.7/site-packages/sklearn/preprocessing/label.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 9
|
2020-09-25T22:32:02.000Z
|
2022-02-09T23:45:10.000Z
|
env/lib/python3.7/site-packages/sklearn/preprocessing/label.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import min_max_axis
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _encode_numpy(values, uniques=None, encode=False):
# only used in _encode below, see docstring there for details
if uniques is None:
if encode:
uniques, encoded = np.unique(values, return_inverse=True)
return uniques, encoded
else:
# unique sorts
return np.unique(values)
if encode:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError("y contains previously unseen labels: %s"
% str(diff))
encoded = np.searchsorted(uniques, values)
return uniques, encoded
else:
return uniques
def _encode_python(values, uniques=None, encode=False):
# only used in _encode below, see docstring there for details
if uniques is None:
uniques = sorted(set(values))
uniques = np.array(uniques, dtype=values.dtype)
if encode:
table = {val: i for i, val in enumerate(uniques)}
try:
encoded = np.array([table[v] for v in values])
except KeyError as e:
raise ValueError("y contains previously unseen labels: %s"
% str(e))
return uniques, encoded
else:
return uniques
def _encode(values, uniques=None, encode=False):
"""Helper function to factorize (find uniques) and encode values.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : array
Values to factorize or encode.
uniques : array, optional
If passed, uniques are not determined from passed values (this
can be because the user specified categories, or because they
already have been determined in fit).
encode : bool, default False
If True, also encode the values into integer codes based on `uniques`.
Returns
-------
uniques
If ``encode=False``. The unique values are sorted if the `uniques`
parameter was None (and thus inferred from the data).
(uniques, encoded)
If ``encode=True``.
"""
if values.dtype == object:
try:
res = _encode_python(values, uniques, encode)
except TypeError:
raise TypeError("argument must be a string or number")
return res
else:
return _encode_numpy(values, uniques, encode)
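# Small illustration of _encode with hypothetical values (not part of the
# public API):
#   _encode(np.array(["b", "a", "b"]), encode=True)
#   -> (array(['a', 'b'], dtype='<U1'), array([1, 0, 1]))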
def _encode_check_unknown(values, uniques, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
uniques : array
Allowed uniques values.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `uniques` (the
unknown values).
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
if values.dtype == object:
uniques_set = set(uniques)
diff = list(set(values) - uniques_set)
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
else:
unique_values = np.unique(values)
diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True))
if return_mask:
if diff:
valid_mask = np.in1d(values, uniques)
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
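# Illustration with hypothetical values: labels present in `values` but
# absent from `uniques` are reported as the unknowns.
#   _encode_check_unknown(np.array(['a', 'c']), np.array(['a', 'b']))
#   -> ['c']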
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OrdinalEncoder : encode categorical features
using a one-hot or ordinal encoding scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = _encode(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = _encode(y, encode=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
_, y = _encode(y, uniques=self.classes_, encode=True)
return y
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError(
"y contains previously unseen labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
def _more_tags(self):
return {'X_types': ['1dlabels']}
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def _more_tags(self):
return {'X_types': ['1dlabels']}
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = min_max_axis(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
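# Illustration with hypothetical scores: the column holding the maximal score
# decides the class.
#   _inverse_binarize_multiclass(np.array([[0.2, 0.5, 0.3]]), classes=[1, 2, 6])
#   -> array([2])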
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels.
All entries should be unique (cannot contain duplicate classes).
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
self._cached_dict = None
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
elif len(set(self.classes)) < len(self.classes):
raise ValueError("The classes argument contains duplicate "
"classes. Remove these duplicates before passing "
"them to MultiLabelBinarizer.")
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
self._cached_dict = None
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = self._build_cache()
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _build_cache(self):
if self._cached_dict is None:
self._cached_dict = dict(zip(self.classes_,
range(len(self.classes_))))
return self._cached_dict
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
unknown = set()
for labels in y:
index = set()
for label in labels:
try:
index.add(class_mapping[label])
except KeyError:
unknown.add(label)
indices.extend(index)
indptr.append(len(indices))
if unknown:
warnings.warn('unknown class(es) {0} will be ignored'
.format(sorted(unknown, key=str)))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
def _more_tags(self):
return {'X_types': ['2dlabels']}
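# Hedged usage sketch (illustrative, not part of scikit-learn): the
# fit_transform / inverse_transform round trip described in the docstrings
# above, with an explicit class ordering.
if __name__ == "__main__":
    mlb = MultiLabelBinarizer(classes=[3, 2, 1])
    yt = mlb.fit_transform([(1, 2), (3,)])
    print(yt)                          # columns follow the given order 3, 2, 1
    print(mlb.inverse_transform(yt))   # [(2, 1), (3,)]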
| 33.072508
| 79
| 0.587345
|
4a1c705da5f50a0d47a32f135e7e7689fc708966
| 1,125
|
py
|
Python
|
bank_api/api/v1/__init__.py
|
robinstauntoncollins/bank-api
|
b19cadf5a65f5e66ca14688af8774f400d4fb0f8
|
[
"Unlicense"
] | null | null | null |
bank_api/api/v1/__init__.py
|
robinstauntoncollins/bank-api
|
b19cadf5a65f5e66ca14688af8774f400d4fb0f8
|
[
"Unlicense"
] | null | null | null |
bank_api/api/v1/__init__.py
|
robinstauntoncollins/bank-api
|
b19cadf5a65f5e66ca14688af8774f400d4fb0f8
|
[
"Unlicense"
] | null | null | null |
from flask import Blueprint
from flask_restful import Api
API_VERSION_V1 = 1
api_v1_bp = Blueprint('api', __name__)
api = Api(api_v1_bp)
from . import accounts, customers, transactions
api.add_resource(accounts.AccountListAPI, '/accounts', '/accounts/', endpoint='accounts')
api.add_resource(accounts.AccountAPI, '/accounts/<int:id>', '/accounts/<int:id>/', endpoint='account')
api.add_resource(customers.CustomerListAPI, '/customers', '/customers/', endpoint='customers')
api.add_resource(customers.CustomerAPI, '/customers/<int:id>', '/customers/<int:id>/', endpoint='customer')
api.add_resource(transactions.TransactionListAPI, '/transactions', '/transactions/', endpoint='transactions')
api.add_resource(
transactions.TransactionAPI,
'/transactions/<int:id>',
    '/transactions/<int:id>/',
endpoint='transaction'
)
def get_catelog():
return {
'accounts_url': api.url_for(accounts.AccountListAPI, _external=True),
'customers_url': api.url_for(customers.CustomerListAPI, _external=True),
'transactions_url': api.url_for(transactions.TransactionListAPI, _external=True)
}
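# Hedged usage sketch (illustrative; the real project presumably registers
# this blueprint from its own app factory, which lives elsewhere):
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(api_v1_bp, url_prefix='/api/v%d' % API_VERSION_V1)
    print(app.url_map)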
| 34.090909
| 109
| 0.740444
|
4a1c7060370cc70694d461ed0fc2584ef1de0a48
| 778
|
py
|
Python
|
DSA/recursion/02_divide_and_Conquer/merge_sort/merge_sort.py
|
kaka-lin/Notes
|
c22e1d5ba14b94653c644448edb1d775913b05ec
|
[
"MIT"
] | null | null | null |
DSA/recursion/02_divide_and_Conquer/merge_sort/merge_sort.py
|
kaka-lin/Notes
|
c22e1d5ba14b94653c644448edb1d775913b05ec
|
[
"MIT"
] | 6
|
2020-06-10T02:56:37.000Z
|
2022-02-27T10:13:12.000Z
|
DSA/recursion/02_divide_and_Conquer/merge_sort/merge_sort.py
|
kaka-lin/Notes
|
c22e1d5ba14b94653c644448edb1d775913b05ec
|
[
"MIT"
] | null | null | null |
def merge_sort(nums):
# Base case
    if len(nums) <= 1:  # also handles an empty list, avoiding infinite recursion
return nums
# Recursive case
## 1. divide
mid = len(nums) // 2
## 2. Conquer
prev_list = merge_sort(nums[:mid])
post_list = merge_sort(nums[mid:])
## 3. Combine
return merge(prev_list, post_list)
def merge(prev_list, post_list):
result = []
while prev_list and post_list:
if prev_list[0] < post_list[0]:
result.append(prev_list.pop(0))
else:
result.append(post_list.pop(0))
if prev_list:
result += prev_list
if post_list:
result += post_list
return result
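# Hedged alternative (illustrative, not part of the original file): pop(0)
# is O(n) on a Python list, so the merge above costs O(n^2) in the worst
# case. An index-based merge keeps the combine step linear and leaves its
# inputs untouched.
def merge_linear(left, right):
    result, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])   # at most one of these two extends is non-empty
    result.extend(right[j:])
    return result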
if __name__ == "__main__":
test = [6, 3, 5, 1, 8, 7, 2, 4]
print("original:", test)
print("Sorted:", merge_sort(test))
| 22.882353
| 43
| 0.569409
|
4a1c708b06e35fd142068082ae299ed999da51d6
| 4,535
|
py
|
Python
|
prepare_data/gen_RNet_tfrecords.py
|
JackieLeeTHU11/mtcnn
|
5a2970e2a18441783cec8c7733685333d1e0ef9c
|
[
"MIT"
] | null | null | null |
prepare_data/gen_RNet_tfrecords.py
|
JackieLeeTHU11/mtcnn
|
5a2970e2a18441783cec8c7733685333d1e0ef9c
|
[
"MIT"
] | null | null | null |
prepare_data/gen_RNet_tfrecords.py
|
JackieLeeTHU11/mtcnn
|
5a2970e2a18441783cec8c7733685333d1e0ef9c
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import os
import random
import sys
import time
import tensorflow as tf
from tfrecord_utils import _process_image_withoutcoder, _convert_to_example_simple
def _add_to_tfrecord(filename, image_example, tfrecord_writer):
"""Loads data from image and annotations files and add them to a TFRecord.
Args:
dataset_dir: Dataset directory;
name: Image name to add to the TFRecord;
tfrecord_writer: The TFRecord writer to use for writing.
"""
print('---', filename)
    #image_data: image array encoded as a string
#height:original image's height
#width:original image's width
#image_example dict contains image's info
image_data, height, width = _process_image_withoutcoder(filename)
example = _convert_to_example_simple(image_example, image_data)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(output_dir, name, net):
#st = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#return '%s/%s_%s_%s.tfrecord' % (output_dir, name, net, st)
#return '%s/train_PNet_landmark.tfrecord' % (output_dir)
# return '%s/landmark_landmark.tfrecord' % (output_dir)
return '%s/neg_landmark.tfrecord' % (output_dir)
def run(dataset_dir, net, output_dir, name='MTCNN', shuffling=False):
"""Runs the conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
output_dir: Output directory.
"""
#tfrecord name
tf_filename = _get_output_filename(output_dir, name, net)
if tf.gfile.Exists(tf_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
# GET Dataset, and shuffling.
dataset = get_dataset(dataset_dir, net=net)
# filenames = dataset['filename']
if shuffling:
tf_filename = tf_filename + '_shuffle'
        #random.seed(12345454)
random.shuffle(dataset)
# Process dataset files.
# write the data to tfrecord
    print('Writing dataset to tfrecord: %s' % tf_filename)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
for i, image_example in enumerate(dataset):
sys.stdout.write('\r>> Converting image %d/%d' % (i + 1, len(dataset)))
sys.stdout.flush()
filename = image_example['filename']
_add_to_tfrecord(filename, image_example, tfrecord_writer)
# tfrecord_writer.close()
# Finally, write the labels file:
# labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
# dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
print('\nFinished converting the MTCNN dataset!')
def get_dataset(dir, net='PNet'):
#item = 'imglists/PNet/train_%s_raw.txt' % net
#item = 'imglists/PNet/train_%s_landmark.txt' % net
# item = '%s/landmark_%s_aug.txt' % (net,net)
item = '%s/neg_%s.txt' % (net,net)
    print(item)
dataset_dir = os.path.join(dir, item)
imagelist = open(dataset_dir, 'r')
dataset = []
for line in imagelist.readlines():
info = line.strip().split(' ')
data_example = dict()
bbox = dict()
data_example['filename'] = info[0]
data_example['label'] = int(info[1])
bbox['xmin'] = 0
bbox['ymin'] = 0
bbox['xmax'] = 0
bbox['ymax'] = 0
bbox['xlefteye'] = 0
bbox['ylefteye'] = 0
bbox['xrighteye'] = 0
bbox['yrighteye'] = 0
bbox['xnose'] = 0
bbox['ynose'] = 0
bbox['xleftmouth'] = 0
bbox['yleftmouth'] = 0
bbox['xrightmouth'] = 0
bbox['yrightmouth'] = 0
if len(info) == 6:
bbox['xmin'] = float(info[2])
bbox['ymin'] = float(info[3])
bbox['xmax'] = float(info[4])
bbox['ymax'] = float(info[5])
if len(info) == 12:
bbox['xlefteye'] = float(info[2])
bbox['ylefteye'] = float(info[3])
bbox['xrighteye'] = float(info[4])
bbox['yrighteye'] = float(info[5])
bbox['xnose'] = float(info[6])
bbox['ynose'] = float(info[7])
bbox['xleftmouth'] = float(info[8])
bbox['yleftmouth'] = float(info[9])
bbox['xrightmouth'] = float(info[10])
bbox['yrightmouth'] = float(info[11])
data_example['bbox'] = bbox
dataset.append(data_example)
return dataset
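# Hedged example (illustrative) of the two annotation line layouts parsed
# above; the label conventions are an assumption, only the field counts are
# taken from the code:
#   "img/000001.jpg 1 0.1 0.2 0.8 0.9"   -> 6 fields: path, label, 4 bbox floats
#   "img/000002.jpg -2 x1 y1 x2 y2 xn yn xlm ylm xrm yrm"
#                                        -> 12 fields: path, label, 10 landmark floats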
if __name__ == '__main__':
dir = '.'
net = '24'
output_directory = 'imglists/RNet'
run(dir, net, output_directory, shuffling=True)
| 34.356061
| 83
| 0.619184
|
4a1c70b594fbc387fdde6504ab7b82c9c2c745cf
| 2,309
|
py
|
Python
|
12. Paint Program/Painting Program.py
|
Azfarbakht/Python-Games
|
21b893573f3dc8fb65ef09d782b5b7aaf922a687
|
[
"MIT"
] | 1
|
2021-08-21T08:12:11.000Z
|
2021-08-21T08:12:11.000Z
|
12. Paint Program/Painting Program.py
|
Azfarbakht/Python-Games
|
21b893573f3dc8fb65ef09d782b5b7aaf922a687
|
[
"MIT"
] | null | null | null |
12. Paint Program/Painting Program.py
|
Azfarbakht/Python-Games
|
21b893573f3dc8fb65ef09d782b5b7aaf922a687
|
[
"MIT"
] | null | null | null |
#Let us import the Turtle library once again. * here means all.
from turtle import *
#Setting up screen
screen = Screen()
screenMinX = -screen.window_width()/2
screenMinY = -screen.window_height()/2
screenMaxX = screen.window_width()/2
screenMaxY = screen.window_height()/2
screen.setworldcoordinates(screenMinX,screenMinY,screenMaxX,screenMaxY)
#Setting up turtle
t = Turtle()
t.goto(0, 0)
t.speed(10)
# Set up event handler to have the t draw a line
# to the point that the user clicks on
def on_screen_click(x, y):
if y < screenMaxY - 40: # only draw if clicked below color squares
t.goto(x, y)
#Register the click handler: when you click on the screen, this function is executed
screen.onclick(on_screen_click)
#What we're about to make right now is a class. A class is a building block of
#object-oriented programming: it groups related data (attributes) with the
#functions (methods) that operate on that data. Making a class for the
#ColorPicker lets us keep all of its attributes and behaviour together.
class ColorPicker(Turtle):
def __init__(self, color="red",num=0):
Turtle.__init__(self)
self.num = num
self.color_name = color
self.speed(0)
self.shape("circle")
self.color("black", color)
self.penup()
# hack to register click handler to instance method
self.onclick(lambda x, y: self.handle_click(x, y))
def draw(self):
self.setx(screenMinX+110+self.num*30)
self.sety(screenMaxY - 20)
def handle_click(self, x, y):
if self.color_name == "#F9F9F9":
t.penup()
t.color("black")
else:
t.pendown()
t.color(self.color_name)
# Suppress animations while interface is being drawn
screen.tracer(0)
#This turtle creates the UI elements of our program
ui_turtle = Turtle()
ui_turtle.ht()
ui_turtle.penup()
ui_turtle.goto(screenMinX, screenMaxY - 23)
ui_turtle.write("TurtleDraw!", align="left", font=("Courier", 10, "bold"))
# Create color choosing squares at the top of screen
colors = ["red", "orange", "yellow", "green", "blue", "indigo", "violet", "black", "#F9F9F9"]
color_pickers = [ColorPicker(color=c, num=i) for i, c in enumerate(colors)]
for picker in color_pickers:
picker.draw()
# Resume animations now that main interface has been drawn
screen.tracer(1)
| 31.630137
| 305
| 0.714595
|
4a1c70b7a24d4af690c8e4315e52db4f8a63f245
| 17,759
|
py
|
Python
|
src/generative_playground/codec/hypergraph_grammar.py
|
ZmeiGorynych/generative_playground
|
5c336dfbd14235e4fd97b21778842a650e733275
|
[
"MIT"
] | 9
|
2018-09-23T17:34:23.000Z
|
2021-07-29T09:48:55.000Z
|
src/generative_playground/codec/hypergraph_grammar.py
|
ZmeiGorynych/generative_playground
|
5c336dfbd14235e4fd97b21778842a650e733275
|
[
"MIT"
] | 2
|
2020-04-15T17:52:18.000Z
|
2020-04-15T18:26:27.000Z
|
src/generative_playground/codec/hypergraph_grammar.py
|
ZmeiGorynych/generative_playground
|
5c336dfbd14235e4fd97b21778842a650e733275
|
[
"MIT"
] | 6
|
2019-04-30T22:01:43.000Z
|
2021-11-22T02:20:18.000Z
|
from collections import OrderedDict
from generative_playground.molecules.lean_settings import molecules_root_location
from generative_playground.codec.parent_codec import GenericCodec
from generative_playground.codec.hypergraph import (
HyperGraph, HypergraphTree, replace_nonterminal, to_mol, MolToSmiles, MolFromSmiles, hypergraphs_are_equivalent,
put_parent_node_first)
from generative_playground.codec.hypergraph_parser import hypergraph_parser, tree_with_rule_inds_to_list_of_tuples
from generative_playground.molecules.data_utils.zinc_utils import get_smiles_from_database
import pickle
import os, copy
import numpy as np
import math
from functools import lru_cache
grammar_data_location = molecules_root_location + 'data/grammar/'
def full_location(filename):
return os.path.realpath(grammar_data_location + filename)
class HypergraphGrammar(GenericCodec):
def __init__(self, cache_file='tmp.pickle', max_len=None, isomorphy=False):
self.id_by_parent = {'DONE': [0]} # from str(parent_node) to rule index
self.parent_by_id = {0: 'DONE'} # from rule index to str(parent_node)
        self.rules = [None]  # list of HyperGraphFragments
self.rule_frequency_dict = {}
self.node_data_index = OrderedDict()
self.rate_tracker = []
self.candidate_counter = 0
self.cache_file = full_location(cache_file)
self.terminal_distance_by_parent = {}
self._rule_term_dist_deltas = []
self.shortest_rule_by_parent = {}
self.last_tree_processed = None
self.MAX_LEN = max_len # only used to pad string_to_actions output, factor out?
self.PAD_INDEX = 0
self.conditional_frequencies = OrderedDict()
# self.isomorphy_match = isomorphy
def __len__(self):
return len(self.rules)
def check_attributes(self):
for rule in self.rules:
if rule is not None:
for this_node in rule.node.values():
assert hasattr(this_node, 'rule_id')
assert hasattr(this_node, 'node_index')
def feature_len(self):
return len(self)
def delete_cache(self):
if os.path.isfile(self.cache_file):
os.remove(self.cache_file)
@property
def grammar(self):
return self
@property
def rule_term_dist_deltas(self): # write-protect that one
if isinstance(self._rule_term_dist_deltas, tuple):
self._rule_term_dist_deltas = np.array(self._rule_term_dist_deltas)
return self._rule_term_dist_deltas
@classmethod
def load(Class, filename):
with open(full_location(filename), 'rb') as f:
self = pickle.load(f)
return self
def get_log_frequencies(self):
out = np.zeros(len(self.rules))
for ind, value in self.rule_frequency_dict.items():
out[ind] = math.log(value)
return out
@lru_cache()
def get_conditional_log_frequencies_single_query(self, x, default=-3):
out = default*np.ones(len(self.rules))
if x in self.conditional_frequencies:
for ind, value in self.conditional_frequencies[x].items():
out[ind] = math.log(value)
return out
@lru_cache()
def get_all_conditional_log_frequencies(self):
out = np.array([self.get_conditional_log_frequencies_single_query(x)
for x in self.conditional_frequencies.keys()])
return out
def decode_from_actions(self, actions):
'''
:param actions: batch_size x max_out_len longs
:return: batch_size of decoded SMILES strings
'''
# just loop through the batch
out = []
for action_seq in actions:
rules = [self.rules[i] for i in action_seq if not self.is_padding(i)]
graph = evaluate_rules(rules)
mol = to_mol(graph)
smiles = MolToSmiles(mol)
out.append(smiles)
return out
def _smiles_to_tree_gen(self, smiles):
assert type(smiles) == list or type(smiles) == tuple, "Input must be a list or a tuple"
for smile in smiles:
mol = MolFromSmiles(smile)
assert mol is not None, "SMILES String could not be parsed: " + smile
try:
tree = hypergraph_parser(mol)
except Exception as e:
print(str(e))
continue
yield self.normalize_tree(tree)
def raw_strings_to_actions(self, smiles):
'''
Convert a list of valid SMILES string to actions
:param smiles: a list of valid SMILES strings
:return:
'''
actions = []
for norm_tree in self._smiles_to_tree_gen(smiles):
these_actions = [rule.rule_id for rule in norm_tree.rules()]
actions.append(these_actions)
return actions
def strings_to_actions(self, smiles, MAX_LEN=100):
list_of_action_lists = self.raw_strings_to_actions(smiles)
actions = [a + [self.PAD_INDEX] * (MAX_LEN - len(a)) for a in list_of_action_lists ]
return np.array(actions)
def normalize_tree(self, tree):
# as we're replacing the original hypergraph from the parser with an equivalent node from our rules list,
# which could have a different order of nonterminals, need to reorder subtrees to match
new_subtrees = [self.normalize_tree(subtree) for subtree in tree]
child_id_to_subtree = {child_id: subtree for child_id, subtree in zip(tree.node.child_ids(), new_subtrees)}
rule_id, node_id_map = self.rule_to_index(tree.node)
reordered_subtrees = [child_id_to_subtree[node_id_map[child_id]] for child_id in self.rules[rule_id].child_ids()]
new_tree = HypergraphTree(node=self.rules[rule_id], children=reordered_subtrees)
new_tree.node.rule_id = rule_id
self.last_tree_processed = new_tree
return new_tree
def calc_terminal_distance(self):
self.terminal_distance_by_parent = {}
self._rule_term_dist_deltas = []
self.shortest_rule_by_parent = {}
self.terminal_distance_by_parent = {parent_str: float('inf') for parent_str in self.id_by_parent.keys()}
while True:
prev_terminal_distance = copy.deepcopy(self.terminal_distance_by_parent)
for rule in self.rules:
if rule is None: # after we're done expanding, padding resolves to this
term_dist_candidate = 0
this_hash = 'DONE'
else:
term_dist_candidate = 1 + sum([self.terminal_distance_by_parent[str(child)] for child in rule.children()])
this_hash = str(rule.parent_node())
if self.terminal_distance_by_parent[this_hash] > term_dist_candidate:
self.terminal_distance_by_parent[this_hash] = term_dist_candidate
self.shortest_rule_by_parent[this_hash] = rule
if self.terminal_distance_by_parent == prev_terminal_distance:
break
for r, rule in enumerate(self.rules):
if rule is None:
rule_term_dist_delta = float('-inf') # the padding rule
else:
rule_term_dist_delta = 1 + sum([self.terminal_distance_by_parent[str(child)] for child in rule.children()])\
- self.terminal_distance_by_parent[str(rule.parent_node())]
assert rule_term_dist_delta >= 0
self._rule_term_dist_deltas.append(rule_term_dist_delta)
self._rule_term_dist_deltas = np.array(self._rule_term_dist_deltas)
assert min(self._rule_term_dist_deltas[1:]) >= 0
assert len(self._rule_term_dist_deltas) == len(self.rules)
print('terminal distance calculated!')
def terminal_distance(self, graph):
if graph is None:
return 0 #TODO: should really be the min of term distance of parent-less rules
else:
my_sum = 0
for child in graph.children():
my_sum += self.terminal_distance_by_parent[str(child)]
return my_sum
def get_mask(self, next_rule_string, max_term_dist):
out = np.zeros((len(self)))
out[self.id_by_parent[next_rule_string]] = 1
out[self.rule_term_dist_deltas > max_term_dist] = 0
# for i, rule in enumerate(self.rules):
# if i in self.id_by_parent[next_rule_string] and self.rule_term_dist_deltas[i] <= max_term_dist:
# out.append(1)
# else:
# out.append(0)
# assert any(out), "Mask must allow at least one rule"
return out
def process_candidates(self, rules):
for rule in rules:
self.rule_to_index(rule)
def rule_to_index(self, rule: HyperGraph, no_new_rules=False):
        self.candidate_counter += 1
parent_node = rule.parent_node()
if str(parent_node) not in self.id_by_parent:
self.id_by_parent[str(parent_node)] = []
# only check the equivalence against graphs with matching parent node
for rule_id in self.id_by_parent[str(parent_node)]:
mapping = hypergraphs_are_equivalent(self.rules[rule_id], rule)
if mapping is not None:
# if we found a match, we're done!
return rule_id, mapping
# if got this far, no match so this is a new rule
if no_new_rules:
raise ValueError("Unknown rule hypergraph " + str(rule))
self.add_rule(rule)
return (len(self.rules)-1), {i: i for i in rule.node.keys()}
def add_rule(self, rule):
rule = put_parent_node_first(rule)
parent_node = rule.parent_node()
# add more information to the rule nodes, to be used later
for n, node in enumerate(rule.node.values()):
node.rule_id = len(self.rules)
node.node_index = n
self.rules.append(rule)
for node in rule.node.values():
self.index_node_data(node)
new_rule_index = len(self.rules)-1
self.id_by_parent[str(parent_node)].append(new_rule_index)
self.parent_by_id[new_rule_index] = str(parent_node)
self.rate_tracker.append((self.candidate_counter, len(self.rules)))
# print(self.rate_tracker[-1])
if self.cache_file is not None:
with open(self.cache_file, 'wb') as f:
pickle.dump(self, f)
@lru_cache(maxsize=10000)
def condition_pair_to_nonterminal_string(self, pair):
rule_ind, nt_ind = pair
if rule_ind is None:
node = None # starting graphs, rules with no parent node
else:
rule = self.rules[rule_ind]
node = rule.node[rule.nonterminal_ids()[nt_ind]]
assert node.node_index == nt_ind
return str(node)
def index_node_data(self, node):
for fn in node.data.keys():
if fn not in self.node_data_index:
self.node_data_index[fn] = OrderedDict()
if node.data[fn] not in self.node_data_index[fn]:
self.node_data_index[fn][node.data[fn]] = len(self.node_data_index[fn])
def node_data_index_length(self):
# an extra slot needed for 'other' for each fieldname
return len(self.node_data_index) + sum([len(x) for x in self.node_data_index.values()])
def reset_rule_frequencies(self):
self.conditional_frequencies = {}
self.rule_frequency_dict = {}
def count_rule_frequencies(self, trees):
for tree in trees:
these_tuples = tree_with_rule_inds_to_list_of_tuples(tree)
for p, nt, c in these_tuples:
if (p, nt) not in self.conditional_frequencies:
self.grammar.conditional_frequencies[(p, nt)] = {}
if c not in self.conditional_frequencies[(p, nt)]:
self.conditional_frequencies[(p, nt)][c] = 1
else:
self.conditional_frequencies[(p, nt)][c] += 1
these_actions = [rule.rule_id for rule in tree.rules()]
for a in these_actions:
if a not in self.rule_frequency_dict:
self.rule_frequency_dict[a] = 0
self.rule_frequency_dict[a] += 1
def normalize_conditional_frequencies(self):
for key, values in self.conditional_frequencies.items():
self.conditional_frequencies[key] = normalize_frequencies(values)
print('conditional frequencies normalized')
def normalize_frequencies(x: dict):
return x
# total = sum(x.values())
# out = {key: value/total for key, value in x.items()}
# return out
def check_full_equivalence(graph1, graph2):
for node1, node2 in zip(graph1.node.values(), graph2.node.values()):
assert str(node1) == str(node2)
for edge_id_1, edge_id_2 in zip(node1.edge_ids, node2.edge_ids):
assert graph1.edges[edge_id_1].type == graph2.edges[edge_id_2].type
def apply_rule(start_graph, rule, loc=None):
    if loc is None:
child_ids = start_graph.child_ids()
if len(child_ids):
loc = child_ids[-1]
else:
raise ValueError("Start graph has no children!")
start_graph = replace_nonterminal(start_graph, loc, rule)
return start_graph
def evaluate_rules(rules):
start_graph = rules[0].clone()
for num, rule in enumerate(rules[1:]):
if rule is not None: # None are the padding rules
start_graph = apply_rule(start_graph, rule)
return start_graph
class GrammarInitializer:
def __init__(self, filename, grammar_class=HypergraphGrammar):
self.grammar_filename = self.full_grammar_filename(filename)
self.own_filename = self.full_own_filename(filename)
self.max_len = 0 # maximum observed number of rules so far
self.last_processed = -1
self.new_rules = []
self.frequency_dict = {}
self.total_len = 0
self.stats = {}
if os.path.isfile(self.grammar_filename):
self.grammar = grammar_class.load(filename)
else:
self.grammar = grammar_class(cache_file=filename)
def full_grammar_filename(self, filename):
return grammar_data_location + filename
def full_own_filename(self, filename):
return grammar_data_location + 'init_' + filename
def save(self):
with open(self.own_filename, 'wb') as f:
pickle.dump(self, f)
with open(self.grammar_filename, 'wb') as f:
pickle.dump(self.grammar, f)
@classmethod
def load(Class, filename):
with open(filename, 'rb') as f:
out = pickle.load(f)
assert type(out) == Class
return out
def delete_cache(self):
if os.path.isfile(self.own_filename):
os.remove(self.own_filename)
if os.path.isfile(self.grammar_filename):
os.remove(self.grammar_filename)
self.grammar.delete_cache()
def init_grammar(self, max_num_mols):
L = get_smiles_from_database(max_num_mols)
for ind, smiles in enumerate(L):
if ind >= max_num_mols:
break
if ind > self.last_processed: # don't repeat
try:
# this causes g to remember all the rules occurring in these molecules
these_actions = self.grammar.raw_strings_to_actions([smiles])
this_tree = self.grammar.last_tree_processed
these_tuples = tree_with_rule_inds_to_list_of_tuples(this_tree)
for p, nt, c in these_tuples:
if (p,nt) not in self.grammar.conditional_frequencies:
self.grammar.conditional_frequencies[(p, nt)] = {}
if c not in self.grammar.conditional_frequencies[(p, nt)]:
self.grammar.conditional_frequencies[(p, nt)][c] = 1
else:
self.grammar.conditional_frequencies[(p, nt)][c] += 1
# count the frequency of the occurring rules
for aa in these_actions:
for a in aa:
if a not in self.grammar.rule_frequency_dict:
self.grammar.rule_frequency_dict[a] = 0
self.grammar.rule_frequency_dict[a] += 1
lengths = [len(x) for x in these_actions]
new_max_len = max(lengths)
self.total_len += sum(lengths)
if new_max_len > self.max_len:
self.max_len = new_max_len
print("Max len so far:", self.max_len)
except Exception as e: #TODO: fix this, make errors not happen ;)
print(e)
self.last_processed = ind
# if we discovered a new rule, remember that
if not len(self.new_rules) or self.grammar.rate_tracker[-1][-1] > self.new_rules[-1][-1]:
self.new_rules.append((ind,*self.grammar.rate_tracker[-1]))
print(self.new_rules[-1])
if ind % 10 == 9:
self.save()
if ind % 100 == 0 and ind > 0:
self.stats[ind] = {
'max_len': self.max_len,
'avg_len': self.total_len / ind,
'num_rules': len(self.grammar.rules),
}
self.grammar.normalize_conditional_frequencies()
self.grammar.calc_terminal_distance()
return self.max_len # maximum observed molecule length
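# Hedged usage sketch (illustrative; assumes a molecule database is
# available to get_smiles_from_database):
if __name__ == '__main__':
    gi = GrammarInitializer('tmp_grammar.pickle')
    max_rule_len = gi.init_grammar(max_num_mols=100)
    print('longest rule sequence observed:', max_rule_len)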
| 41.492991
| 126
| 0.621657
|
4a1c712d58ba1f141aab1a3133f3d901957a07a7
| 12,366
|
py
|
Python
|
dissect/utils/database_handler.py
|
adamivora/DiSSECT
|
73c2e657eab859a78c8a2b13959763b99ed865be
|
[
"MIT"
] | null | null | null |
dissect/utils/database_handler.py
|
adamivora/DiSSECT
|
73c2e657eab859a78c8a2b13959763b99ed865be
|
[
"MIT"
] | null | null | null |
dissect/utils/database_handler.py
|
adamivora/DiSSECT
|
73c2e657eab859a78c8a2b13959763b99ed865be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
from pathlib import Path
from typing import Optional, Tuple, Iterable, Dict, Any
from pymongo import MongoClient
from pymongo.database import Database
from pymongo.errors import DuplicateKeyError
from sage.all import Integer
from dissect.utils.custom_curve import CustomCurve
from dissect.traits.trait_info import TRAIT_INFO
def connect(database: Optional[str] = None) -> Database:
client = MongoClient(database, connect=False)
return client["dissect"]
def create_curves_index(db: Database) -> None:
db["curves"].create_index([("name", 1)], unique=True)
def create_trait_index(db: Database, trait: str) -> None:
db[f"trait_{trait}"].create_index([("curve", 1), ("params", 1)], unique=True)
def _format_curve(curve):
c = dict()
c["name"] = curve["name"]
c["category"] = curve["category"]
if curve.get("aliases"):
c["aliases"] = curve["aliases"]
if curve.get("oid"):
c["oid"] = curve["oid"]
if curve.get("desc"):
c["desc"] = curve["desc"]
c["form"] = curve["form"]
c["field"] = curve["field"]
c["params"] = curve["params"]
try:
if (curve["generator"]["x"]["raw"] or curve["generator"]["x"]["poly"]) and (
curve["generator"]["y"]["raw"] or curve["generator"]["y"]["poly"]):
c["generator"] = curve["generator"]
    except Exception:  # generator block may be missing or partially specified
pass
if isinstance(curve["order"], int):
c["order"] = hex(curve["order"])
elif isinstance(curve["order"], str): # Workaround for std database
c["order"] = hex(int(curve["order"], base=16))
if isinstance(curve["cofactor"], int):
c["cofactor"] = hex(curve["cofactor"])
elif isinstance(curve["cofactor"], str): # Workaround for std database
c["cofactor"] = hex(int(curve["cofactor"], base=16))
c["standard"] = False if "sim" in curve["category"] else True
c["example"] = curve.get("example", False)
if curve.get("simulation"):
sim = curve["simulation"]
else:
sim = {}
if "seed" in curve and curve["seed"]:
sim["seed"] = hex(int(curve["seed"], base=16))
elif "characteristics" in curve and "seed" in curve["characteristics"] and curve["characteristics"]["seed"]:
sim["seed"] = hex(int(curve["characteristics"]["seed"], base=16))
if sim:
c["simulation"] = sim
if curve.get("properties"):
properties = curve["properties"]
else:
properties = {}
if properties:
c["properties"] = properties
return c
def upload_curves(db: Database, path: str) -> Tuple[int, int]:
try:
with open(path, "r") as f:
curves = json.load(f)
if not isinstance(
curves, list
): # inconsistency between simulated and standard format
curves = curves["curves"]
except Exception: # invalid format
return 0, 0
success = 0
for curve in curves:
try:
if db["curves"].insert_one(_format_curve(curve)):
success += 1
except DuplicateKeyError:
pass
return success, len(curves)
def upload_results(db: Database, trait_name: str, path: str) -> Tuple[int, int]:
try:
with open(path, "r") as f:
results = json.load(f)
except Exception: # invalid format
return 0, 0
success = 0
total = 0
for result in results:
total += 1
record = {}
try:
if isinstance(result["curve"], str):
curve = db["curves"].find_one({"name": result["curve"]})
record["curve"] = {}
record["curve"]["name"] = curve["name"]
record["curve"]["standard"] = curve["standard"]
record["curve"]["example"] = curve["example"]
record["curve"]["category"] = curve["category"]
record["curve"]["bits"] = curve["field"]["bits"]
record["curve"]["field_type"] = curve["field"]["type"]
record["curve"]["cofactor"] = curve["cofactor"]
else:
record["curve"] = result["curve"]
record["params"] = result["params"]
record["result"] = result["result"]
if db[f"trait_{trait_name}"].insert_one(record):
success += 1
except Exception:
pass
return success, total
def get_curves(db: Database, query: Any = None) -> Iterable[CustomCurve]:
aggregate_pipeline = []
aggregate_pipeline.append({"$match": format_curve_query(query) if query else dict()})
aggregate_pipeline.append({"$unset": "_id"})
curves = list(db["curves"].aggregate(aggregate_pipeline))
return map(_decode_ints, curves)
def get_curves_count(db: Database, query: Any = None) -> int:
return db["curves"].count_documents(format_curve_query(query) if query else dict())
def get_curve_categories(db: Database) -> Iterable[str]:
return db["curves"].distinct("category")
def format_curve_query(query: Dict[str, Any]) -> Dict[str, Any]:
result = {}
def helper(key, cast, db_key = None):
if key not in query:
return
db_key = db_key if db_key else key
if isinstance(query[key], list):
if len(query[key]) == 0 or "all" in query[key]:
return
if len(query[key]) == 1:
result[db_key] = cast(query[key][0])
else:
result[db_key] = { "$in": list(map(cast, query[key])) }
elif query[key] != "all":
result[db_key] = cast(query[key])
helper("name", str)
helper("standard", bool)
helper("example", bool)
helper("category", str)
helper("bits", int, "field.bits")
helper("cofactor", int)
helper("field_type", str, "field.type")
return result
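# Hedged example (illustrative) of the mapping performed above:
#   format_curve_query({"category": ["nist", "brainpool"], "bits": 256})
#   -> {"category": {"$in": ["nist", "brainpool"]}, "field.bits": 256}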
def _cast_sage_types(result: Any) -> Any:
if isinstance(result, Integer):
return int(result)
if isinstance(result, dict):
for key, value in result.items():
result[key] = _cast_sage_types(value)
elif isinstance(result, list):
for idx, value in enumerate(result):
result[idx] = _cast_sage_types(value)
return result
def _encode_ints(result: Any) -> Any:
if isinstance(result, Integer) or isinstance(result, int):
return hex(result)
if isinstance(result, dict):
for key, value in result.items():
result[key] = _encode_ints(value)
elif isinstance(result, list):
for idx, value in enumerate(result):
result[idx] = _encode_ints(value)
return result
def store_trait_result(
db: Database,
curve: CustomCurve,
trait: str,
params: Dict[str, Any],
result: Dict[str, Any],
) -> bool:
trait_result = {}
trait_result["curve"] = {}
trait_result["curve"]["name"] = curve.name()
trait_result["curve"]["standard"] = curve.standard()
trait_result["curve"]["example"] = curve.example()
trait_result["curve"]["category"] = curve.category()
trait_result["curve"]["bits"] = curve.q().nbits()
trait_result["curve"]["cofactor"] = hex(curve.cofactor())
trait_result["curve"] = _cast_sage_types(trait_result["curve"])
trait_result["params"] = _cast_sage_types(params)
trait_result["result"] = _encode_ints(result)
try:
return db[f"trait_{trait}"].insert_one(trait_result).acknowledged
except DuplicateKeyError:
return False
def is_solved(
db: Database, curve: CustomCurve, trait: str, params: Dict[str, Any]
) -> bool:
trait_result = { "curve.name": curve.name() }
trait_result["params"] = _cast_sage_types(params)
return db[f"trait_{trait}"].find_one(trait_result) is not None
def get_trait_results(
db: Database,
trait: str,
query: Dict[str, Any] = None,
limit: int = None
):
aggregate_pipeline = []
aggregate_pipeline.append({"$match": format_trait_query(trait, query) if query else dict()})
aggregate_pipeline.append({"$unset": "_id"})
if limit:
aggregate_pipeline.append({"$limit": limit})
aggregated = list(db[f"trait_{trait}"].aggregate(aggregate_pipeline))
return map(_decode_ints, map(_flatten_trait_result, aggregated))
def get_trait_results_count(db: Database, trait: str, query: Dict[str, Any] = None):
return db[f"trait_{trait}"].count_documents(format_trait_query(trait, query) if query else dict())
def format_trait_query(trait_name: str, query: Dict[str, Any]) -> Dict[str, Any]:
result = {}
def helper(key, cast, db_key = None):
if key not in query:
return
db_key = db_key if db_key else key
if isinstance(query[key], list):
if len(query[key]) == 0 or "all" in query[key]:
return
if len(query[key]) == 1:
result[db_key] = cast(query[key][0])
else:
result[db_key] = { "$in": list(map(cast, query[key])) }
elif query[key] != "all":
result[db_key] = cast(query[key])
helper("name", str, "curve.name")
helper("standard", bool, "curve.standard")
helper("example", bool, "curve.example")
helper("category", str, "curve.category")
helper("bits", int, "curve.bits")
helper("cofactor", lambda x: hex(int(x)), "curve.cofactor")
helper("field_type", str, "curve.field_type")
for key in TRAIT_INFO[trait_name]["input"]:
helper(key, TRAIT_INFO[trait_name]["input"][key][0], f"params.{key}")
for key in TRAIT_INFO[trait_name]["output"]:
helper(key, lambda x: _encode_ints(TRAIT_INFO[trait_name]["output"][key][0](x)), f"result.{key}")
return result
# TODO move to data_processing?
def _flatten_trait_result(record: Dict[str, Any]):
output = dict()
_flatten_trait_result_rec(record["curve"], "", output)
_flatten_trait_result_rec(record["params"], "", output)
_flatten_trait_result_rec(record["result"], "", output)
output["curve"] = output["name"]
del output["name"]
return output
def _flatten_trait_result_rec(
record: Dict[str, Any], prefix: str, output: Dict[str, Any]
):
for key in record:
if isinstance(record[key], dict):
_flatten_trait_result_rec(record[key], key + "_", output)
else:
output[prefix + key] = record[key]
def _decode_ints(source: Any) -> Any:
if isinstance(source, str) and (source[:2].lower() == "0x" or source[:3].lower() == "-0x"):
return int(source, base=16)
if isinstance(source, dict):
for key, value in source.items():
source[key] = _decode_ints(value)
elif isinstance(source, list):
for idx, value in enumerate(source):
source[idx] = _decode_ints(value)
return source
if __name__ == "__main__":
import sys
if len(sys.argv) < 3 or not sys.argv[1] in ("curves", "results"):
print(
f"USAGE: python3 {sys.argv[0]} curves [database_uri] <curve_files...>",
file=sys.stderr,
)
print(
f" OR: python3 {sys.argv[0]} results [database_uri] <trait_name> <results_file>",
file=sys.stderr,
)
sys.exit(1)
database_uri = "mongodb://localhost:27017/"
args = sys.argv[2:]
for idx, arg in enumerate(args):
if "mongodb://" in arg:
database_uri = arg
del args[idx]
break
print(f"Connecting to database {database_uri}")
db = connect(database_uri)
def upload_curves_from_files(curve_files_list):
for curves_file in curve_files_list:
print(f"Loading curves from file {curves_file}")
create_curves_index(db)
uploaded, total = upload_curves(db, curves_file)
print(f"Successfully uploaded {uploaded} out of {total}")
def upload_results_from_file(trait_name, results_file):
print(f"Loading trait {trait_name} results from file {results_file}")
create_trait_index(db, trait_name)
uploaded, total = upload_results(db, trait_name, results_file)
print(f"Successfully uploaded {uploaded} out of {total}")
if sys.argv[1] == "curves":
upload_curves_from_files(args)
elif sys.argv[1] == "results":
upload_results_from_file(args[0], args[1])
| 32.203125
| 116
| 0.601084
|
4a1c72d12fa9ba71c8414ed2ecb2cb278e452845
| 2,547
|
py
|
Python
|
hasher-matcher-actioner/hmalib/common/models/count.py
|
mengyangwang/ThreatExchange
|
0712a6234b81f336367c81ee7c1fdfe70de152d1
|
[
"BSD-3-Clause"
] | null | null | null |
hasher-matcher-actioner/hmalib/common/models/count.py
|
mengyangwang/ThreatExchange
|
0712a6234b81f336367c81ee7c1fdfe70de152d1
|
[
"BSD-3-Clause"
] | null | null | null |
hasher-matcher-actioner/hmalib/common/models/count.py
|
mengyangwang/ThreatExchange
|
0712a6234b81f336367c81ee7c1fdfe70de152d1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
""" Refer to hmalib.lambdas.ddb_stream_counter.lambda_handler's doc string to
understand how these models are used. """
import typing as t
from mypy_boto3_dynamodb.service_resource import Table
class BaseCount:
"""
Defines a single count value.
"""
def get_pkey(self) -> str:
"""Get partition key for this count."""
raise NotImplementedError
def get_skey(self) -> str:
"""Get sort key for this count."""
raise NotImplementedError
def get_value(self, table: Table) -> int:
"""Get current value for the counter."""
return t.cast(
int,
table.get_item(Key={"PK": self.get_pkey(), "SK": self.get_skey()})
.get("Item", {})
.get("CurrentCount", 0),
)
def inc(self, table: Table, by=1):
"""Increment count. Default by 1, unless specified."""
table.update_item(
Key={"PK": self.get_pkey(), "SK": self.get_skey()},
UpdateExpression="SET CurrentCount = if_not_exists(CurrentCount, :zero) + :by",
ExpressionAttributeValues={":by": by, ":zero": 0},
)
def dec(self, table: Table, by=1):
"""Increment count. Default by 1, unless specified."""
table.update_item(
Key={"PK": self.get_pkey(), "SK": self.get_skey()},
UpdateExpression="SET CurrentCount = if_not_exists(CurrentCount, :zero) - :by",
ExpressionAttributeValues={":by": by, ":zero": 0},
)
class AggregateCount(BaseCount):
"""
A "total" count. It is possible for some entities to have TBD hourly as well as
aggregate counts. eg. Give me all matches today, but also keep track of the
total number of matches we have ever done.
"""
class PipelineNames:
# How many pieces of content were submitted?
submits = "hma.pipeline.submits"
# How many pieces of content created a hash record?
hashes = "hma.pipeline.hashes"
        # How many match objects were recorded?
matches = "hma.pipeline.matches"
def __init__(self, of: str):
self.of = of
@staticmethod
def _get_pkey_for_aggregate(of: str) -> str:
return f"aggregate#{of}"
@staticmethod
def _get_skey_for_aggregate() -> str:
return "aggregate_count"
def get_pkey(self) -> str:
return self._get_pkey_for_aggregate(self.of)
def get_skey(self) -> str:
return self._get_skey_for_aggregate()
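# Hedged sketch (illustrative, not part of the module): a concrete counter
# only needs to fix its partition and sort keys; the key layout below is
# hypothetical.
class ExampleDailyCount(BaseCount):
    """Counts occurrences of an event per day."""
    def __init__(self, of: str, day: str):
        self.of = of
        self.day = day
    def get_pkey(self) -> str:
        return f"day#{self.day}"
    def get_skey(self) -> str:
        return self.of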
| 31.060976
| 91
| 0.616019
|
4a1c73fcf82f5a05844ac9673964c9863041075f
| 11,532
|
py
|
Python
|
mojo/public/tools/mojom/mojom/generate/generator.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
mojo/public/tools/mojom/mojom/generate/generator.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
mojo/public/tools/mojom/mojom/generate/generator.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code shared by the various language-specific code generators."""
from __future__ import print_function
from functools import partial
import os.path
import re
from mojom import fileutil
from mojom.generate import module as mojom
from mojom.generate import pack
def ExpectedArraySize(kind):
if mojom.IsArrayKind(kind):
return kind.length
return None
def SplitCamelCase(identifier):
"""Splits a camel-cased |identifier| and returns a list of lower-cased
strings.
"""
# Add underscores after uppercase letters when appropriate. An uppercase
# letter is considered the end of a word if it is followed by an upper and a
# lower. E.g. URLLoaderFactory -> URL_LoaderFactory
identifier = re.sub('([A-Z][0-9]*)(?=[A-Z][0-9]*[a-z])', r'\1_', identifier)
# Add underscores after lowercase letters when appropriate. A lowercase letter
# is considered the end of a word if it is followed by an upper.
# E.g. URLLoaderFactory -> URLLoader_Factory
identifier = re.sub('([a-z][0-9]*)(?=[A-Z])', r'\1_', identifier)
return [x.lower() for x in identifier.split('_')]
def ToCamel(identifier, lower_initial=False, digits_split=False, delimiter='_'):
"""Splits |identifier| using |delimiter|, makes the first character of each
word uppercased (but makes the first character of the first word lowercased
if |lower_initial| is set to True), and joins the words. Please note that for
each word, all the characters except the first one are untouched.
"""
result = ''
capitalize_next = True
for i in range(len(identifier)):
if identifier[i] == delimiter:
capitalize_next = True
elif digits_split and identifier[i].isdigit():
capitalize_next = True
result += identifier[i]
elif capitalize_next:
capitalize_next = False
result += identifier[i].upper()
else:
result += identifier[i]
if lower_initial and result:
result = result[0].lower() + result[1:]
return result
def _ToSnakeCase(identifier, upper=False):
"""Splits camel-cased |identifier| into lower case words, removes the first
word if it's "k" and joins them using "_" e.g. for "URLLoaderFactory", returns
"URL_LOADER_FACTORY" if upper, otherwise "url_loader_factory".
"""
words = SplitCamelCase(identifier)
if words[0] == 'k' and len(words) > 1:
words = words[1:]
# Variables cannot start with a digit
  if words[0][0].isdigit():
words[0] = '_' + words[0]
if upper:
words = map(lambda x: x.upper(), words)
return '_'.join(words)
def ToUpperSnakeCase(identifier):
"""Splits camel-cased |identifier| into lower case words, removes the first
word if it's "k" and joins them using "_" e.g. for "URLLoaderFactory", returns
"URL_LOADER_FACTORY".
"""
return _ToSnakeCase(identifier, upper=True)
def ToLowerSnakeCase(identifier):
"""Splits camel-cased |identifier| into lower case words, removes the first
word if it's "k" and joins them using "_" e.g. for "URLLoaderFactory", returns
"url_loader_factory".
"""
return _ToSnakeCase(identifier, upper=False)
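# Hedged examples (illustrative) of the conversions above:
#   ToCamel('url_loader_factory') == 'UrlLoaderFactory'
#   ToCamel('url_loader_factory', lower_initial=True) == 'urlLoaderFactory'
#   ToUpperSnakeCase('URLLoaderFactory') == 'URL_LOADER_FACTORY'
#   ToLowerSnakeCase('URLLoaderFactory') == 'url_loader_factory'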
class Stylizer(object):
"""Stylizers specify naming rules to map mojom names to names in generated
code. For example, if you would like method_name in mojom to be mapped to
MethodName in the generated code, you need to define a subclass of Stylizer
and override StylizeMethod to do the conversion."""
def StylizeConstant(self, mojom_name):
return mojom_name
def StylizeField(self, mojom_name):
return mojom_name
def StylizeStruct(self, mojom_name):
return mojom_name
def StylizeUnion(self, mojom_name):
return mojom_name
def StylizeParameter(self, mojom_name):
return mojom_name
def StylizeMethod(self, mojom_name):
return mojom_name
def StylizeInterface(self, mojom_name):
return mojom_name
def StylizeEnumField(self, mojom_name):
return mojom_name
def StylizeEnum(self, mojom_name):
return mojom_name
def StylizeModule(self, mojom_namespace):
return mojom_namespace
def WriteFile(contents, full_path):
# If |contents| is same with the file content, we skip updating.
if not isinstance(contents, bytes):
data = contents.encode('utf8')
else:
data = contents
if os.path.isfile(full_path):
with open(full_path, 'rb') as destination_file:
if destination_file.read() == data:
return
# Make sure the containing directory exists.
full_dir = os.path.dirname(full_path)
fileutil.EnsureDirectoryExists(full_dir)
# Dump the data to disk.
with open(full_path, 'wb') as f:
f.write(data)
def AddComputedData(module):
"""Adds computed data to the given module. The data is computed once and
used repeatedly in the generation process."""
def _AddStructComputedData(exported, struct):
struct.packed = pack.PackedStruct(struct)
struct.bytes = pack.GetByteLayout(struct.packed)
struct.versions = pack.GetVersionInfo(struct.packed)
struct.exported = exported
def _AddInterfaceComputedData(interface):
interface.version = 0
for method in interface.methods:
# this field is never scrambled
method.sequential_ordinal = method.ordinal
if method.min_version is not None:
interface.version = max(interface.version, method.min_version)
method.param_struct = _GetStructFromMethod(method)
if interface.stable:
method.param_struct.attributes[mojom.ATTRIBUTE_STABLE] = True
if method.explicit_ordinal is None:
raise Exception(
'Stable interfaces must declare explicit method ordinals. The '
'method %s on stable interface %s does not declare an explicit '
'ordinal.' % (method.mojom_name, interface.qualified_name))
interface.version = max(interface.version,
method.param_struct.versions[-1].version)
if method.response_parameters is not None:
method.response_param_struct = _GetResponseStructFromMethod(method)
if interface.stable:
method.response_param_struct.attributes[mojom.ATTRIBUTE_STABLE] = True
interface.version = max(
interface.version,
method.response_param_struct.versions[-1].version)
else:
method.response_param_struct = None
def _GetStructFromMethod(method):
"""Converts a method's parameters into the fields of a struct."""
params_class = "%s_%s_Params" % (method.interface.mojom_name,
method.mojom_name)
struct = mojom.Struct(params_class,
module=method.interface.module,
attributes={})
for param in method.parameters:
struct.AddField(
param.mojom_name,
param.kind,
param.ordinal,
attributes=param.attributes)
_AddStructComputedData(False, struct)
return struct
def _GetResponseStructFromMethod(method):
"""Converts a method's response_parameters into the fields of a struct."""
params_class = "%s_%s_ResponseParams" % (method.interface.mojom_name,
method.mojom_name)
struct = mojom.Struct(params_class,
module=method.interface.module,
attributes={})
for param in method.response_parameters:
struct.AddField(
param.mojom_name,
param.kind,
param.ordinal,
attributes=param.attributes)
_AddStructComputedData(False, struct)
return struct
for struct in module.structs:
_AddStructComputedData(True, struct)
for interface in module.interfaces:
_AddInterfaceComputedData(interface)
class Generator(object):
# Pass |output_dir| to emit files to disk. Omit |output_dir| to echo all
# files to stdout.
def __init__(self,
module,
output_dir=None,
typemap=None,
variant=None,
bytecode_path=None,
for_blink=False,
js_bindings_mode="new",
js_generate_struct_deserializers=False,
export_attribute=None,
export_header=None,
generate_non_variant_code=False,
support_lazy_serialization=False,
disallow_native_types=False,
disallow_interfaces=False,
generate_message_ids=False,
generate_fuzzing=False,
enable_kythe_annotations=False,
extra_cpp_template_paths=None,
generate_extra_cpp_only=False):
self.module = module
self.output_dir = output_dir
self.typemap = typemap or {}
self.variant = variant
self.bytecode_path = bytecode_path
self.for_blink = for_blink
self.js_bindings_mode = js_bindings_mode
self.js_generate_struct_deserializers = js_generate_struct_deserializers
self.export_attribute = export_attribute
self.export_header = export_header
self.generate_non_variant_code = generate_non_variant_code
self.support_lazy_serialization = support_lazy_serialization
self.disallow_native_types = disallow_native_types
self.disallow_interfaces = disallow_interfaces
self.generate_message_ids = generate_message_ids
self.generate_fuzzing = generate_fuzzing
self.enable_kythe_annotations = enable_kythe_annotations
self.extra_cpp_template_paths = extra_cpp_template_paths
self.generate_extra_cpp_only = generate_extra_cpp_only
def Write(self, contents, filename):
if self.output_dir is None:
print(contents)
return
full_path = os.path.join(self.output_dir, filename)
WriteFile(contents, full_path)
def OptimizeEmpty(self, contents):
# Look for .cc files that contain no actual code. There are many of these
# and they collectively take a while to compile.
lines = contents.splitlines()
for line in lines:
if line.startswith('#') or line.startswith('//'):
continue
if re.match(r'namespace .* {', line) or re.match(r'}.*//.*namespace',
line):
continue
if line.strip():
# There is some actual code - return the unmodified contents.
return contents
# If we reach here then we have a .cc file with no actual code. The
# includes are therefore unneeded and can be removed.
new_lines = [line for line in lines if not line.startswith('#include')]
if len(new_lines) < len(lines):
new_lines.append('')
new_lines.append('// Includes removed due to no code being generated.')
return '\n'.join(new_lines)
def WriteWithComment(self, contents, filename):
generator_name = "mojom_bindings_generator.py"
comment = r"// %s is auto generated by %s, do not edit" % (filename,
generator_name)
    contents = comment + '\n\n' + contents
if filename.endswith('.cc'):
contents = self.OptimizeEmpty(contents)
self.Write(contents, filename)
def GenerateFiles(self, args):
raise NotImplementedError("Subclasses must override/implement this method")
def GetJinjaParameters(self):
"""Returns default constructor parameters for the jinja environment."""
return {}
def GetGlobals(self):
"""Returns global mappings for the template generation."""
return {}
| 35.158537
| 80
| 0.683316
|
4a1c747f4a5eac79e079a0e24fddb7b0e74d5a07
| 2,623
|
py
|
Python
|
palo_alto_pan_os/unit_test/test_set.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
palo_alto_pan_os/unit_test/test_set.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
palo_alto_pan_os/unit_test/test_set.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.path.abspath('../'))
from unittest import TestCase
from komand_palo_alto_pan_os.connection.connection import Connection
from komand_palo_alto_pan_os.actions.set import Set
import json
import logging
class TestSet(TestCase):
def test_integration_set(self):
"""
TODO: Implement assertions at the end of this test case
This is an integration test that will connect to the services your plugin uses. It should be used
as the basis for tests below that can run independent of a "live" connection.
This test assumes a normal plugin structure with a /tests directory. In that /tests directory should
be json samples that contain all the data needed to run this test. To generate samples run:
icon-plugin generate samples
"""
log = logging.getLogger("Test")
test_conn = Connection()
test_action = Set()
test_conn.logger = log
test_action.logger = log
try:
with open("../tests/set.json") as file:
test_json = json.loads(file.read()).get("body")
connection_params = test_json.get("connection")
action_params = test_json.get("input")
except Exception as e:
message = """
Could not find or read sample tests from /tests directory
An exception here likely means you didn't fill out your samples correctly in the /tests directory
Please use 'icon-plugin generate samples', and fill out the resulting test files in the /tests directory
"""
self.fail(message)
test_conn.connect(connection_params)
test_action.connection = test_conn
results = test_action.run(action_params)
# TODO: Remove this line
self.fail("Unimplemented test case")
# TODO: The following assert should be updated to look for data from your action
        # For example: self.assertEqual({"success": True}, results)
        self.assertEqual({}, results)
def test_set(self):
"""
TODO: Implement test cases here
Here you can mock the connection with data returned from the above integration test.
For information on mocking and unit testing please go here:
https://docs.google.com/document/d/1PifePDG1-mBcmNYE8dULwGxJimiRBrax5BIDG_0TFQI/edit?usp=sharing
You can either create a formal Mock for this, or you can create a fake connection class to pass to your
action for testing.
"""
self.fail("Unimplemented Test Case")
| 36.430556
| 116
| 0.661456
|
4a1c74869a45e92a459a5e6ed5d9235a0957faee
| 2,684
|
py
|
Python
|
examples/chat_stats.py
|
w311ang/python-telegram
|
e1c72167ba63bedef5144c0e8f5fd5918ed00edc
|
[
"MIT"
] | null | null | null |
examples/chat_stats.py
|
w311ang/python-telegram
|
e1c72167ba63bedef5144c0e8f5fd5918ed00edc
|
[
"MIT"
] | null | null | null |
examples/chat_stats.py
|
w311ang/python-telegram
|
e1c72167ba63bedef5144c0e8f5fd5918ed00edc
|
[
"MIT"
] | null | null | null |
import string
import logging
import argparse
from collections import Counter
from utils import setup_logging
from telegram.client import Telegram
"""
Prints most popular words in the chat.
Usage:
python examples/chat_stats.py api_id api_hash phone chat_id --limit 500
"""
def retrieve_messages(telegram, chat_id, receive_limit):
receive = True
from_message_id = 0
stats_data = {}
while receive:
response = telegram.get_chat_history(
chat_id=chat_id,
limit=1000,
from_message_id=from_message_id,
)
response.wait()
for message in response.update['messages']:
if message['content']['@type'] == 'messageText':
stats_data[message['id']] = message['content']['text']['text']
from_message_id = message['id']
total_messages = len(stats_data)
if total_messages > receive_limit or not response.update['total_count']:
receive = False
print(f'[{total_messages}/{receive_limit}] received')
return stats_data
def print_stats(stats_data, most_common_count):
words = Counter()
translator = str.maketrans('', '', string.punctuation)
for _, message in stats_data.items():
for word in message.split(' '):
word = word.translate(translator).lower()
if len(word) > 3:
words[word] += 1
for word, count in words.most_common(most_common_count):
print(f'{word}: {count}')
if __name__ == '__main__':
setup_logging(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('api_id', help='API id') # https://my.telegram.org/apps
parser.add_argument('api_hash', help='API hash')
parser.add_argument('phone', help='Phone')
parser.add_argument('chat_id', help='Chat ID')
parser.add_argument('--limit', help='Messages to retrieve', type=int, default=1000)
parser.add_argument('--most-common', help='Most common count', type=int, default=30)
args = parser.parse_args()
tg = Telegram(
api_id=args.api_id,
api_hash=args.api_hash,
phone=args.phone,
database_encryption_key='changeme1234',
)
tg.call_method(
'setOption',
{
'name': 'prefer_ipv6',
'value': {'@type': 'optionValueBoolean', 'value': False},
},
)
# you must call login method before others
tg.login()
    stats_data = retrieve_messages(
telegram=tg,
chat_id=args.chat_id,
receive_limit=args.limit,
)
print_stats(
stats_data=stats_data,
most_common_count=args.most_common,
)
tg.stop()
| 27.387755
| 88
| 0.630775
|
4a1c74b455d66bf76e8356708e55d916be2a177d
| 5,701
|
py
|
Python
|
rpi_camera_code.py
|
AGarcia-20/Python_practice
|
805647401970ae90e1e27f28cb489d3a7b0f3f2e
|
[
"MIT"
] | 1
|
2020-03-04T22:33:48.000Z
|
2020-03-04T22:33:48.000Z
|
rpi_camera_code.py
|
AGarcia-20/Python_practice
|
805647401970ae90e1e27f28cb489d3a7b0f3f2e
|
[
"MIT"
] | null | null | null |
rpi_camera_code.py
|
AGarcia-20/Python_practice
|
805647401970ae90e1e27f28cb489d3a7b0f3f2e
|
[
"MIT"
] | 1
|
2020-03-04T22:28:32.000Z
|
2020-03-04T22:28:32.000Z
|
import time
import RPi.GPIO as GPIO
from picamera import PiCamera
import os
from subprocess import check_output
from datetime import datetime
from PIL import Image
GPIO.setwarnings(False)
i = 0
light = 12
wifi = 7
ping_hub = "ping 192.168.0.1 -c 1"
subp = "sudo pkill -9 -f ADXL345_Sampler_100Hz.py"
#Looping through frame rate:
fps_top=30 #fps_top is the max(top) frame rate limit
fps_bottom=15 #fps_bottom is the min(bottom) frame rate limit
fps_increment=12 #fps_increment is the increment value
fps_lst=[fps_bottom] #fps_lst the list in which frame rates will go, starting with the lower limit
while fps_bottom < fps_top: #Conditions set for the while loop: while top limit < bottom limit
fps_bottom=fps_bottom+fps_increment # addition of fps_increment + fps_bottom= fps_bottom
fps_lst.append(fps_bottom) # appending the new fps_bottom value to fps_lst
if fps_lst[len(fps_lst)-1] > fps_top: #If the last number is greater than the top limit
fps_lst.pop() #Then it will be popped out (won't be included in final list)
#Looping though ISO:
iso_top=800 #iso_top is the max(top) iso limit
iso_bottom=100 #iso_bottom is the min(bottom) iso limit
iso_increment=250 #iso_increment is the increment value
iso_lst=[iso_bottom] #iso_lst the list in which ISO values will go, starting with the lower limit
while iso_bottom < iso_top: # Conditions for the while loop: while the iso bottom limit is < iso top limit
    iso_bottom=iso_bottom+iso_increment # add the increment to iso_bottom (replaces the old iso_bottom value)
iso_lst.append(iso_bottom) # append the new iso_bottom value to iso_lst
if iso_lst[len(iso_lst)-1] > iso_top: # if the last number is greater than top limit it will be popped out and it won't be included in final list
iso_lst.pop()
#Combining both lists to get all possible permutations
#Total permutations saved on total_per
combo=[]
total_per=0
for a in fps_lst: #for a variable (a) in list 1
for b in iso_lst: #for a variable (b) in list 2
combo.append([a,b]) #append variables a and b into list called combo
total_per=total_per+1
#Store every permutation in permu_array (a list of [fps, iso] pairs)
permu_array=combo
#Image naming using for loop
image = Image.open('dino1.jpg') #placeholder image, presumably used to test the naming format below
for i in range(total_per):
condition=permu_array[i]
fps=condition[0]
iso=condition[1]
#print('Condition:',condition,' fps:',str(fps),' iso:',str(iso))
#image.save('my_dino_FR%s_ISO%s.jpg' %(fps,iso))
#Camera Functions:
def off(): #Camera off
GPIO.output(light, 0)
def on(): #Camera on
GPIO.output(light, 1)
def picture(fr,iso):
    camera.resolution = (2592, 1944) #Camera resolution
    camera.framerate = fr #fr assigned to camera.framerate in picture function
    camera.iso = iso #iso assigned to camera.iso in picture function
    camera.start_preview()
    pictime = datetime.now().strftime('%Y_%m_%d_%H-%M-%S.%f')[:-4] #pictime assigned to time photo was taken, displayed as Years_month_day_hour-minute-seconds
    time.sleep(10)
    filename = '%s_FR%s_ISO%s.jpg' % (pictime, fr, iso) #naming format for the saved photo
    camera.capture('/home/pi/Documents/minion_pics/' + filename) #Directory where photo is saved
    camera.stop_preview()
    return filename #returned so send() can locate the file; pictime is local to this function
def send(filename): #filename comes from picture(); the old version referenced pictime, which was out of scope here
    who = check_output("who",shell=True).decode() #check_output returns bytes on Python 3
    who = who.split('(')[1]
    ip = who.split(')')[0]
    # print(ip)
    scp = "sudo sshpass -p 'ramboat' scp /home/pi/Documents/minion_pics/%s jack@%s:/home/jack/minion_pics/" % (filename, ip)
    os.system(scp)
    # print(scp)
if __name__ == '__main__':
status = os.system(ping_hub)
if status == 0:
status = "Connected"
os.system(subp)
quit()
else:
status = "Not Connected"
camera = PiCamera()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(light, GPIO.OUT)
GPIO.setup(wifi, GPIO.OUT)
GPIO.output(wifi, 1)
# on()
for i in fps_lst: #loop through i in fps_lst and j in iso_lst and call the function picture.
for j in iso_lst: #This will result in camera.framerate and camera.iso cycling through a different value, taking a photo and going to the next value.
            filename = picture(i, j) #keep the returned filename in case send() is re-enabled below
# off()
time.sleep(5)
# status = os.system(ping_hub)
#
# if status == 0:
# status = "Connected"
# else:
# status = "Not Connected"
#
# print(status)
if status == "Connected":
        # send(filename)
os.system(subp)
# GPIO.output(wifi, 1)
# quit()
else:
GPIO.output(wifi, 0)
time.sleep(6)
os.system('sudo shutdown now')
| 37.506579
| 183
| 0.559376
|
4a1c74b8627943c72d29c5a68626d9cfa665ed2b
| 705
|
py
|
Python
|
001 - Exercicios Condicionais.py/010 - Equacao de 2o grau.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | 2
|
2021-07-22T23:26:54.000Z
|
2021-07-22T23:27:27.000Z
|
001 - Exercicios Condicionais.py/010 - Equacao de 2o grau.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | null | null | null |
001 - Exercicios Condicionais.py/010 - Equacao de 2o grau.py
|
rodrigoviannini/meus_Primeiros_Codigos
|
828dec1c4ce06889efd491145e631c30a45e858f
|
[
"MIT"
] | null | null | null |
# IF -> SOLVE A QUADRATIC EQUATION
a = float(input("Enter the value of a: ")) #CASE 1 -> not a quadratic equation
if a == 0:
    print("The value of a cannot be ZERO!!!")
else: # CASE 2 -> it is a quadratic equation
    b = float(input("Enter the value of b: "))
    c = float(input("Enter the value of c: "))
    delta = (b**2) - (4 * a * c)
    print(f"The value of delta is: {delta}")
    if delta < 0:
        print("Complex roots, cannot be calculated here")
    elif delta == 0:
        x = -b/(2*a)
        print(f"Single root, x = {x}")
    else:
        x1 = (-b + delta**(1/2)) / (2 * a)
        x2 = (-b - delta**(1/2)) / (2 * a)
        print(f"Roots: x1 = {x1} and x2 = {x2}")
| 27.115385
| 81
| 0.503546
|
4a1c75be30bb4b805434e3d8e2e98a54c9fc3df2
| 3,611
|
py
|
Python
|
airflow/sensors/http_sensor.py
|
shrutimantri/airflow
|
61eaaacd20ab0f743786df895cf8f232b3b2a48c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15
|
2017-04-06T09:01:50.000Z
|
2021-10-02T13:54:31.000Z
|
airflow/sensors/http_sensor.py
|
shrutimantri/airflow
|
61eaaacd20ab0f743786df895cf8f232b3b2a48c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 26
|
2019-08-05T13:44:11.000Z
|
2022-03-30T10:06:18.000Z
|
airflow/sensors/http_sensor.py
|
shrutimantri/airflow
|
61eaaacd20ab0f743786df895cf8f232b3b2a48c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 21
|
2017-08-20T03:01:05.000Z
|
2021-09-07T06:47:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class HttpSensor(BaseSensorOperator):
"""
    Executes an HTTP request and returns False on failure:
    a 404 Not Found response, or the response_check function returning False.
:param http_conn_id: The connection to run the sensor against
:type http_conn_id: str
:param method: The HTTP request method to use
:type method: str
:param endpoint: The relative part of the full url
:type endpoint: str
:param request_params: The parameters to be added to the GET url
:type request_params: a dictionary of string key/value pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
template_fields = ('endpoint', 'request_params')
@apply_defaults
def __init__(self,
endpoint,
http_conn_id='http_default',
method='GET',
request_params=None,
headers=None,
response_check=None,
extra_options=None, *args, **kwargs):
super(HttpSensor, self).__init__(*args, **kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.request_params = request_params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.hook = HttpHook(
method=method,
http_conn_id=http_conn_id)
def poke(self, context):
self.log.info('Poking: %s', self.endpoint)
try:
response = self.hook.run(self.endpoint,
data=self.request_params,
headers=self.headers,
extra_options=self.extra_options)
if self.response_check:
# run content check on response
return self.response_check(response)
except AirflowException as ae:
if str(ae).startswith("404"):
return False
raise ae
return True
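# A minimal usage sketch (illustrative values, not part of this module): the
# connection id, endpoint, and check below are placeholders.
#
#     wait_for_api = HttpSensor(
#         task_id='wait_for_api',
#         http_conn_id='http_default',
#         endpoint='health',
#         response_check=lambda response: 'ok' in response.text,
#         poke_interval=60,
#         dag=dag,
#     )
#
# poke() returns False on a 404 (so the sensor keeps waiting); any other
# AirflowException is re-raised, and response_check decides success otherwise.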
| 38.827957
| 79
| 0.665467
|
4a1c75fc723e870b529c353f32772aecfd0dc549
| 3,208
|
py
|
Python
|
src/harrastuspassi/harrastuspassi/tests/test_location_api.py
|
savilmik/harrastuspassi-backend
|
885c6903a753c7eea29d23f98275747edcdab892
|
[
"MIT"
] | 2
|
2019-07-29T09:34:47.000Z
|
2020-08-24T18:15:06.000Z
|
src/harrastuspassi/harrastuspassi/tests/test_location_api.py
|
savilmik/harrastuspassi-backend
|
885c6903a753c7eea29d23f98275747edcdab892
|
[
"MIT"
] | 72
|
2019-08-08T10:36:21.000Z
|
2022-03-11T23:55:56.000Z
|
src/harrastuspassi/harrastuspassi/tests/test_location_api.py
|
savilmik/harrastuspassi-backend
|
885c6903a753c7eea29d23f98275747edcdab892
|
[
"MIT"
] | 6
|
2019-08-08T13:26:07.000Z
|
2021-05-03T06:09:10.000Z
|
import pytest
from django.urls import reverse
from rest_framework.exceptions import ErrorDetail
from harrastuspassi import settings
from harrastuspassi.models import Location
@pytest.mark.django_db
def test_location_list_returns_only_editable_for_authenticated_user(user_api_client, user2_api_client, api_client):
""" Location endpoint should return only editable locations for authenticated user """
api_url = reverse('location-list')
response = user_api_client.get(api_url)
assert response.status_code == 200
assert len(response.data) == 0
data_for_user = {'name': 'location for user'}
response = user_api_client.post(api_url, data_for_user)
data_for_user2 = {'name': 'location for user 2'}
response = user2_api_client.post(api_url, data_for_user2)
    # user should not receive the location created by user2
response = user_api_client.get(api_url)
assert len(response.data) == 1
response_json = response.json()[0]
assert response_json['name'] == data_for_user['name']
# user2 should not receive location created by user
response = user2_api_client.get(api_url)
assert len(response.data) == 1
response_json = response.json()[0]
assert response_json['name'] == data_for_user2['name']
# unauthenticated user should receive both locations
response = api_client.get(api_url)
assert len(response.data) == 2
@pytest.mark.skipif(not settings.GOOGLE_GEOCODING_API_KEY, reason="GOOGLE_GEOCODING_API_KEY not provided!")
@pytest.mark.django_db
def test_geocoding_functionality(user_api_client, user2_api_client, api_client, location_data_without_coordinates):
""" Posting a location to API without coordinates should fetch the coordinates from Google Geolocoding API """
api_url = reverse('location-list')
response = user_api_client.post(api_url, data=location_data_without_coordinates, format='json')
assert response.status_code == 201
assert response.data['name'] == location_data_without_coordinates['name']
assert response.data['coordinates']
# Creating a location with user provided coordinates should still be possible
Location.objects.all().delete()
location_data_with_coordinates = location_data_without_coordinates.copy()
location_data_with_coordinates['coordinates'] = {
'type': 'Point',
'coordinates': [1, 1]
}
response = user_api_client.post(api_url, data=location_data_with_coordinates, format='json')
assert response.status_code == 201
assert response.data['coordinates']['coordinates'] == [1.0, 1.0]
# Geocoding a faulty address should fail gracefully
location_data_without_coordinates['address'] = 'Sangen kelvoton osoite'
location_data_without_coordinates['city'] = 'Tuskin kaupunki'
location_data_without_coordinates['zip_code'] = '00000'
response = user_api_client.post(api_url, data=location_data_without_coordinates, format='json')
assert response.status_code == 400
expected_error = [
ErrorDetail(
string='This address could not be geocoded. Please confirm your address is right, or try again later.',
code='invalid')
]
assert response.data == expected_error
| 43.945205
| 115
| 0.74813
|
4a1c768cf4148e8292f66a0ca57a26fd3ca8d62e
| 1,579
|
py
|
Python
|
musictaxonomy/spotify/service.py
|
akurihara/music-taxonomy
|
53eb2112e67b3ec9591411bc8117463af760adf8
|
[
"MIT"
] | 3
|
2019-05-06T04:10:47.000Z
|
2020-02-20T01:26:32.000Z
|
musictaxonomy/spotify/service.py
|
akurihara/music-taxonomy
|
53eb2112e67b3ec9591411bc8117463af760adf8
|
[
"MIT"
] | 3
|
2020-02-06T01:52:49.000Z
|
2022-02-12T07:41:24.000Z
|
musictaxonomy/spotify/service.py
|
akurihara/musictaxonomy
|
53eb2112e67b3ec9591411bc8117463af760adf8
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List
from musictaxonomy.spotify import client as spotify_client
from musictaxonomy.spotify.models import SpotifyArtist, SpotifyUser
__all__ = ["get_spotify_user", "get_all_top_artists_for_user"]
SpotifyArtistDocument = Dict[str, Any]
async def get_spotify_user(access_token: str) -> SpotifyUser:
user_profile_response = await spotify_client.get_current_user_profile(access_token)
spotify_user = SpotifyUser(
id=user_profile_response["id"],
display_name=user_profile_response["display_name"],
)
return spotify_user
async def get_all_top_artists_for_user(access_token: str) -> List[SpotifyArtist]:
futures = [
spotify_client.get_top_artists_in_time_range(access_token, time_range)
for time_range in ("short_term", "medium_term", "long_term")
]
responses = [await future for future in futures]
return [
spotify_artist
for response in responses
for spotify_artist in _parse_spotify_artists_from_top_artists_response(response)
]
def _parse_spotify_artists_from_top_artists_response(
response: Dict,
) -> List[SpotifyArtist]:
artist_documents = response["items"]
return [
_parse_spotify_artist_from_artist_document(document)
for document in artist_documents
]
def _parse_spotify_artist_from_artist_document(
artist_document: SpotifyArtistDocument,
) -> SpotifyArtist:
return SpotifyArtist(
id=artist_document["id"],
name=artist_document["name"],
genres=artist_document["genres"],
)
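# A minimal usage sketch (assumes an asyncio-compatible event loop and a valid
# Spotify OAuth access token; the token below is a placeholder):
#
#     import asyncio
#     artists = asyncio.run(get_all_top_artists_for_user("BQ...access-token"))
#     print([artist.name for artist in artists])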
| 29.792453
| 88
| 0.743509
|
4a1c775a93a237cff01565d0980789d493ec62ac
| 2,601
|
py
|
Python
|
tests/test_visitors/test_ast/test_complexity/test_function/conftest.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 1,931
|
2018-03-17T13:52:45.000Z
|
2022-03-27T09:39:17.000Z
|
tests/test_visitors/test_ast/test_complexity/test_function/conftest.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 2,231
|
2018-03-09T21:19:05.000Z
|
2022-03-31T08:35:37.000Z
|
tests/test_visitors/test_ast/test_complexity/test_function/conftest.py
|
cdhiraj40/wemake-python-styleguide
|
7cef9be081d594c30045b7a98cae77a9be46e1aa
|
[
"MIT"
] | 492
|
2018-05-18T21:20:28.000Z
|
2022-03-20T14:11:50.000Z
|
import pytest
from wemake_python_styleguide.compat.constants import PY38
function_with_single_argument = 'def function(arg1): ...'
function_with_arguments = 'def function(arg1, arg2): ...'
function_with_args_kwargs = 'def function(*args, **kwargs): ...'
function_with_kwonly = 'def function(*, kwonly1, kwonly2=True): ...'
function_with_posonly = 'def function(arg1, arg2, /): ...'
method_without_arguments = """
class Test(object):
def method(self): ...
"""
method_with_single_argument = """
class Test(object):
def method(self, arg): ...
"""
method_with_single_args = """
class Test(object):
def method(self, *args): ...
"""
method_with_single_posonly_arg = """
class Test(object):
def method(self, arg, /): ...
"""
method_with_single_kwargs = """
class Test(object):
def method(self, **kwargs): ...
"""
method_with_single_kwonly = """
class Test(object):
def method(self, *, kwonly=True): ...
"""
classmethod_without_arguments = """
class Test(object):
@classmethod
def method(cls): ...
"""
classmethod_with_single_argument = """
class Test(object):
@classmethod
def method(cls, arg1): ...
"""
new_method_without_arguments = """
class Test(object):
def __new__(cls): ...
"""
new_method_single_argument = """
class Test(object):
def __new__(cls, arg1): ...
"""
metaclass_without_arguments = """
class TestMeta(type):
def method(cls): ...
"""
metaclass_with_single_argument = """
class TestMeta(type):
def method(cls, arg1): ...
"""
# Actual fixtures:
@pytest.fixture(params=[
function_with_single_argument,
method_without_arguments,
classmethod_without_arguments,
new_method_without_arguments,
metaclass_without_arguments,
])
def single_argument(request):
"""Fixture that returns different code examples that have one arg."""
return request.param
@pytest.fixture(params=[
function_with_arguments,
function_with_args_kwargs,
function_with_kwonly,
pytest.param(
function_with_posonly,
marks=pytest.mark.skipif(not PY38, reason='posonly appeared in 3.8'),
),
method_with_single_argument,
method_with_single_args,
method_with_single_kwargs,
method_with_single_kwonly,
pytest.param(
method_with_single_posonly_arg,
marks=pytest.mark.skipif(not PY38, reason='posonly appeared in 3.8'),
),
classmethod_with_single_argument,
new_method_single_argument,
metaclass_with_single_argument,
])
def two_arguments(request):
"""Fixture that returns different code examples that have two args."""
return request.param
| 23.432432
| 77
| 0.700884
|
4a1c77abfd998b4b0c50fb0dcc3c2a960f5cf340
| 2,683
|
py
|
Python
|
CIM14/CDPSM/Connectivity/IEC61970/Core/ConnectivityNodeContainer.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM14/CDPSM/Connectivity/IEC61970/Core/ConnectivityNodeContainer.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM14/CDPSM/Connectivity/IEC61970/Core/ConnectivityNodeContainer.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Connectivity.IEC61970.Core.PowerSystemResource import PowerSystemResource
class ConnectivityNodeContainer(PowerSystemResource):
"""A base class for all objects that may contain ConnectivityNodes or TopologicalNodes.
"""
def __init__(self, ConnectivityNodes=None, *args, **kw_args):
"""Initialises a new 'ConnectivityNodeContainer' instance.
@param ConnectivityNodes: Connectivity nodes contained by this container.
"""
self._ConnectivityNodes = []
self.ConnectivityNodes = [] if ConnectivityNodes is None else ConnectivityNodes
super(ConnectivityNodeContainer, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ConnectivityNodes"]
_many_refs = ["ConnectivityNodes"]
def getConnectivityNodes(self):
"""Connectivity nodes contained by this container.
"""
return self._ConnectivityNodes
def setConnectivityNodes(self, value):
for x in self._ConnectivityNodes:
x.ConnectivityNodeContainer = None
for y in value:
y._ConnectivityNodeContainer = self
self._ConnectivityNodes = value
ConnectivityNodes = property(getConnectivityNodes, setConnectivityNodes)
def addConnectivityNodes(self, *ConnectivityNodes):
for obj in ConnectivityNodes:
obj.ConnectivityNodeContainer = self
def removeConnectivityNodes(self, *ConnectivityNodes):
for obj in ConnectivityNodes:
obj.ConnectivityNodeContainer = None
| 40.651515
| 91
| 0.733135
|
4a1c77f3e447e9e1c331d1f7b3a419d058217bff
| 695
|
py
|
Python
|
setup.py
|
CHC278Cao/template_cv
|
05e8728bcdc441be8f9cfc4da7f605e2701fe4a7
|
[
"MIT"
] | null | null | null |
setup.py
|
CHC278Cao/template_cv
|
05e8728bcdc441be8f9cfc4da7f605e2701fe4a7
|
[
"MIT"
] | null | null | null |
setup.py
|
CHC278Cao/template_cv
|
05e8728bcdc441be8f9cfc4da7f605e2701fe4a7
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
@author: ccj
@contact:
"""
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name="template_cv",
version="0.0.1",
author="ccj",
author_email='changjian1026@gmail.com',
description="A template cv model for kaggle",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/CHC278Cao/template_cv",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 23.965517
| 51
| 0.656115
|
4a1c781d2ec2ee5b743eae08117690d429bae568
| 1,150
|
py
|
Python
|
bitbar/scripts/watcher.py
|
schrecka/techromancer
|
406e0861ee9f1c0a50e8e5f0a69f72411923c030
|
[
"MIT"
] | null | null | null |
bitbar/scripts/watcher.py
|
schrecka/techromancer
|
406e0861ee9f1c0a50e8e5f0a69f72411923c030
|
[
"MIT"
] | null | null | null |
bitbar/scripts/watcher.py
|
schrecka/techromancer
|
406e0861ee9f1c0a50e8e5f0a69f72411923c030
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
import sys
import time
import logging
import os
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
if __name__ == "__main__":
run()
def run():
logging.basicConfig(filename='logger.txt', filemode='a', level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
#CHANGE PATH to directory you want to observe
path = "test_dir_test"
# sys.argv[1] if len(sys.argv) > 1 else '.'
event_handler = LoggingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
    files = os.listdir(os.curdir) # note: this mtime poll watches the current directory, independent of the observer on 'path'
mtime_last = {}
for file in files:
mtime_last[file] = 0
try:
while True:
time.sleep(1)
mtime_cur = {}
for file in files:
mtime_cur[file] = os.path.getmtime(file)
                if mtime_cur[file] != mtime_last[file]: #compare against this file's last mtime, not the whole dict
                    print("File Opened")
                mtime_last[file] = mtime_cur[file]
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
if __name__ == "__main__": #moved below run() so the function exists when the guard executes
    run()
| 29.487179
| 80
| 0.596522
|
4a1c783f3be0d7af5ce39410f224a43d53624265
| 8,020
|
py
|
Python
|
tests/integration/api/v2010/account/incoming_phone_number/test_toll_free.py
|
ashish-s/twilio-python
|
5462b05af0906a1464b1e95a56a1f15afddc3b8c
|
[
"MIT"
] | 1
|
2020-10-29T19:28:25.000Z
|
2020-10-29T19:28:25.000Z
|
tests/integration/api/v2010/account/incoming_phone_number/test_toll_free.py
|
CostantiniMatteo/twilio-python
|
9eee1ca9e73790b12678e9a5660206ea44948d00
|
[
"MIT"
] | 1
|
2021-08-21T22:54:01.000Z
|
2021-08-23T19:39:42.000Z
|
tests/integration/api/v2010/account/incoming_phone_number/test_toll_free.py
|
team-telnyx/twexit-python
|
69e11c5c2b5681f9bc410795dda0cf8942219e6f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TollFreeTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.incoming_phone_numbers \
.toll_free.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/IncomingPhoneNumbers/TollFree.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1&Page=0",
"incoming_phone_numbers": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address_requirements": "none",
"address_sid": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"beta": null,
"capabilities": {
"mms": true,
"sms": false,
"voice": true
},
"date_created": "Thu, 30 Jul 2015 23:19:04 +0000",
"date_updated": "Thu, 30 Jul 2015 23:19:04 +0000",
"friendly_name": "(808) 925-5327",
"identity_sid": "RIaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+18089255327",
"origin": "origin",
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_application_sid": "",
"sms_fallback_method": "POST",
"sms_fallback_url": "",
"sms_method": "POST",
"sms_url": "",
"status_callback": "",
"status_callback_method": "POST",
"trunk_sid": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_application_sid": "",
"voice_caller_id_lookup": false,
"voice_fallback_method": "POST",
"voice_fallback_url": null,
"voice_method": "POST",
"voice_url": null,
"emergency_status": "Active",
"emergency_address_sid": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bundle_sid": "BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1&Page=2",
"next_page_uri": null,
"num_pages": 3,
"page": 0,
"page_size": 1,
"previous_page_uri": null,
"start": 0,
"total": 3,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.incoming_phone_numbers \
.toll_free.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1&Page=0",
"incoming_phone_numbers": [],
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1&Page=2",
"next_page_uri": null,
"num_pages": 3,
"page": 0,
"page_size": 1,
"previous_page_uri": null,
"start": 0,
"total": 3,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/TollFree.json?PageSize=1"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.incoming_phone_numbers \
.toll_free.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.incoming_phone_numbers \
.toll_free.create(phone_number="+15017122661")
values = {'PhoneNumber': "+15017122661", }
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/IncomingPhoneNumbers/TollFree.json',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"address_requirements": "none",
"address_sid": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"beta": false,
"capabilities": {
"mms": true,
"sms": false,
"voice": true
},
"date_created": "Thu, 30 Jul 2015 23:19:04 +0000",
"date_updated": "Thu, 30 Jul 2015 23:19:04 +0000",
"friendly_name": "(808) 925-5327",
"identity_sid": "RIaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+18089255327",
"origin": "origin",
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_application_sid": "",
"sms_fallback_method": "POST",
"sms_fallback_url": "",
"sms_method": "POST",
"sms_url": "",
"status_callback": "",
"status_callback_method": "POST",
"trunk_sid": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
"voice_application_sid": "",
"voice_caller_id_lookup": false,
"voice_fallback_method": "POST",
"voice_fallback_url": null,
"voice_method": "POST",
"voice_url": null,
"emergency_status": "Active",
"emergency_address_sid": "ADaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bundle_sid": "BUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.incoming_phone_numbers \
.toll_free.create(phone_number="+15017122661")
self.assertIsNotNone(actual)
| 42.887701
| 150
| 0.509726
|
4a1c7851989fcecdcd5a735ce3bb408157c13eda
| 1,401
|
py
|
Python
|
examples/configs/algorithm.py
|
hrayrhar/wilds
|
099db63bf7323c5c9e42e66893e84202595f8860
|
[
"MIT"
] | null | null | null |
examples/configs/algorithm.py
|
hrayrhar/wilds
|
099db63bf7323c5c9e42e66893e84202595f8860
|
[
"MIT"
] | null | null | null |
examples/configs/algorithm.py
|
hrayrhar/wilds
|
099db63bf7323c5c9e42e66893e84202595f8860
|
[
"MIT"
] | null | null | null |
algorithm_defaults = {
'ERM': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
},
'ERM_HSIC': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'hsic_beta': 1.0,
},
'ERM_HSIC_GradPenalty': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'hsic_beta': 1.0,
'grad_penalty_lamb': 1.0,
'params_regex': '.*',
'label_cond': False
},
'groupDRO': {
'train_loader': 'standard',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'group_dro_step_size': 0.01,
},
'deepCORAL': {
'train_loader': 'group',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'coral_penalty_weight': 1.,
},
'IRM': {
'train_loader': 'group',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'irm_lambda': 100.,
'irm_penalty_anneal_iters': 500,
},
'DANN': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'dann_lamb': 1.0,
'dann_dc_name': 'unspecified'
},
}
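# A minimal sketch of how these defaults might be consumed (the `config` object
# and helper below are illustrative; the real wiring lives elsewhere in examples/):
#
#     def populate_algorithm_defaults(config):
#         for key, value in algorithm_defaults.get(config.algorithm, {}).items():
#             if getattr(config, key, None) is None:
#                 setattr(config, key, value)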
| 27.470588
| 40
| 0.533191
|
4a1c78cfb0b0b74091d325be6b18ada6fe35aa1c
| 1,370
|
py
|
Python
|
src/seedsigner/helpers/mnemonic_generation.py
|
NachE/seedsigner
|
43abe96f2f2191a3a42c72704a4df8cab5f8e440
|
[
"MIT"
] | 245
|
2020-12-14T15:26:25.000Z
|
2022-03-31T19:57:08.000Z
|
src/seedsigner/helpers/mnemonic_generation.py
|
btcpuertorico/seedsigner
|
9750c927bb8a3363d4c607d037a877221a5ffc95
|
[
"MIT"
] | 119
|
2020-12-15T18:55:28.000Z
|
2022-03-30T19:28:54.000Z
|
src/seedsigner/helpers/mnemonic_generation.py
|
btcpuertorico/seedsigner
|
9750c927bb8a3363d4c607d037a877221a5ffc95
|
[
"MIT"
] | 69
|
2020-12-25T17:48:50.000Z
|
2022-03-31T22:27:03.000Z
|
import hashlib
from embit import bip39
from embit.bip39 import mnemonic_to_bytes, mnemonic_from_bytes
def calculate_checksum(partial_mnemonic: list):
# Provide 11- or 23-word mnemonic, returns complete mnemonic w/checksum
if len(partial_mnemonic) not in [11, 23]:
raise Exception("Pass in a 11- or 23-word mnemonic")
# Work on a copy of the input list
mnemonic_copy = partial_mnemonic.copy()
mnemonic_copy.append("abandon")
# Ignores the final checksum word and recalcs
mnemonic_bytes = bip39.mnemonic_to_bytes(" ".join(mnemonic_copy), ignore_checksum=True)
# Return as a list
return bip39.mnemonic_from_bytes(mnemonic_bytes).split()
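# Illustrative example (any 11-word prefix of valid BIP-39 words works; the
# repeated word below is only for demonstration):
#   calculate_checksum("zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo".split())
#   -> the same 11 words plus a 12th word that makes the checksum valid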
def generate_mnemonic_from_bytes(entropy_bytes):
# Return as a list
return bip39.mnemonic_from_bytes(entropy_bytes).split()
def generate_mnemonic_from_dice(roll_data: str):
entropyinteger = int(roll_data, 6)
entropy_bytes = entropyinteger.to_bytes(32, byteorder="little")
# Return as a list
return bip39.mnemonic_from_bytes(entropy_bytes).split()
# Note: This currently isn't being used since we're now chaining hashed bytes for the
# image-based entropy and aren't just ingesting a single image.
def generate_mnemonic_from_image(image):
    image_hash = hashlib.sha256(image.tobytes()) #avoid shadowing the built-in hash()
    # Return as a list
    return bip39.mnemonic_from_bytes(image_hash.digest()).split()
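# Minimal usage sketch (assumes embit is installed; the zeroed entropy below is
# illustrative only -- never use predictable entropy for a real seed):
if __name__ == "__main__":
    words = generate_mnemonic_from_bytes(bytes(32)) # 32 bytes of entropy -> a 24-word mnemonic
    print(len(words), "words:", " ".join(words))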
| 30.444444
| 91
| 0.748175
|
4a1c79330cc95885ad828e1bf9e14ec32ba53367
| 46
|
py
|
Python
|
game/__init__.py
|
MarsRaptor/battleships
|
81e0a595c05f627de568dad49904be99f0cbf6ac
|
[
"MIT"
] | null | null | null |
game/__init__.py
|
MarsRaptor/battleships
|
81e0a595c05f627de568dad49904be99f0cbf6ac
|
[
"MIT"
] | null | null | null |
game/__init__.py
|
MarsRaptor/battleships
|
81e0a595c05f627de568dad49904be99f0cbf6ac
|
[
"MIT"
] | null | null | null |
from .components import *
from .util import *
| 15.333333
| 25
| 0.73913
|
4a1c7af62ab2de76de458dfb6bd82c63c27e55b1
| 1,844
|
py
|
Python
|
regexlib/python_re_test_file/regexlib_2766.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | 1
|
2022-01-24T14:43:23.000Z
|
2022-01-24T14:43:23.000Z
|
regexlib/python_re_test_file/regexlib_2766.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
regexlib/python_re_test_file/regexlib_2766.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
# 2766
# (?:@[A-Z]\w*\s+)*(?:(?:public|private|protected)\s+)?(?:(?:(?:abstract|final|native|transient|static|synchronized)\s+)*(?:<(?:\?|[A-Z]\w*)(?:\s+(?:extends|super)\s+[A-Z]\w*)?(?:(?:,\s*(?:\?|[A-Z]\w*))(?:\s+(?:extends|super)\s+[A-Z]\w*)?)*>\s+)?(?:(?:(?:[A-Z]\w*(?:<[A-Z]\w*>)?|int|float|double|char|byte|long|short|boolean)(?:(?:\[\]))*)|void)+)\s+(([a-zA-Z]\w*)\s*\(\s*(((?:[A-Z]\w*(?:<(?:\?|[A-Z]\w*)(?:\s+(?:extends|super)\s+[A-Z]\w*)?(?:(?:,\s*(?:\?|[A-Z]\w*))(?:\s+(?:extends|super)\s+[A-Z]\w*)?)*>)?|int|float|double|char|boolean|byte|long|short)(?:(?:\[\])|\.\.\.)?\s+[a-z]\w*)(?:,\s*((?:[A-Z]\w*(?:<[A-Z]\w*>)?|int|float|double|char|byte|long|short|boolean)(?:(?:\[\])|\.\.\.)?\s+[a-z]\w*))*)?\s*\))
# EXPONENT
# nums:4
# EXPONENT AttackString:""+"A"*32+"! _1_EOA(iii)"
import re
from time import perf_counter
regex = """(?:@[A-Z]\w*\s+)*(?:(?:public|private|protected)\s+)?(?:(?:(?:abstract|final|native|transient|static|synchronized)\s+)*(?:<(?:\?|[A-Z]\w*)(?:\s+(?:extends|super)\s+[A-Z]\w*)?(?:(?:,\s*(?:\?|[A-Z]\w*))(?:\s+(?:extends|super)\s+[A-Z]\w*)?)*>\s+)?(?:(?:(?:[A-Z]\w*(?:<[A-Z]\w*>)?|int|float|double|char|byte|long|short|boolean)(?:(?:\[\]))*)|void)+)\s+(([a-zA-Z]\w*)\s*\(\s*(((?:[A-Z]\w*(?:<(?:\?|[A-Z]\w*)(?:\s+(?:extends|super)\s+[A-Z]\w*)?(?:(?:,\s*(?:\?|[A-Z]\w*))(?:\s+(?:extends|super)\s+[A-Z]\w*)?)*>)?|int|float|double|char|boolean|byte|long|short)(?:(?:\[\])|\.\.\.)?\s+[a-z]\w*)(?:,\s*((?:[A-Z]\w*(?:<[A-Z]\w*>)?|int|float|double|char|byte|long|short|boolean)(?:(?:\[\])|\.\.\.)?\s+[a-z]\w*))*)?\s*\))"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "A" * i * 1 + "! _1_EOA(iii)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!")
| 97.052632
| 721
| 0.474512
|
4a1c7af6d7a450000a21824c631a7b6007964589
| 2,604
|
py
|
Python
|
zvt/domain/trader_info.py
|
stone64/zvt
|
19360b3f29992bc759709adfa90e32843147a807
|
[
"MIT"
] | 2
|
2020-09-04T03:24:03.000Z
|
2020-11-27T20:57:55.000Z
|
zvt/domain/trader_info.py
|
stone64/zvt
|
19360b3f29992bc759709adfa90e32843147a807
|
[
"MIT"
] | null | null | null |
zvt/domain/trader_info.py
|
stone64/zvt
|
19360b3f29992bc759709adfa90e32843147a807
|
[
"MIT"
] | 1
|
2021-01-24T15:44:53.000Z
|
2021-01-24T15:44:53.000Z
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, DateTime, Boolean, Float, Integer, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from zvt.contract import Mixin
from zvt.contract.register import register_schema
TraderBase = declarative_base()
# Trader info
class TraderInfo(TraderBase, Mixin):
__tablename__ = 'trader_info'
    # Trader (bot) name
trader_name = Column(String(length=128))
entity_ids = Column(String(length=1024))
entity_type = Column(String(length=128))
exchanges = Column(String(length=128))
codes = Column(String(length=128))
start_timestamp = Column(DateTime)
end_timestamp = Column(DateTime)
provider = Column(String(length=32))
level = Column(String(length=32))
real_time = Column(Boolean)
kdata_use_begin_time = Column(Boolean)
# account stats of every day
class AccountStats(TraderBase, Mixin):
__tablename__ = 'account_stats'
input_money = Column(Float)
    # Trader (bot) name
trader_name = Column(String(length=128))
    # Available cash
cash = Column(Float)
    # Detailed positions
positions = relationship("Position", back_populates="account_stats")
    # Market value
value = Column(Float)
    # Market value + cash
all_value = Column(Float)
    # Calculated at market close
closing = Column(Boolean)
# the position for specific entity of every day
class Position(TraderBase, Mixin):
__tablename__ = 'position'
    # Trader (bot) name
trader_name = Column(String(length=128))
    # Account id
account_stats_id = Column(Integer, ForeignKey('account_stats.id'))
account_stats = relationship("AccountStats", back_populates="positions")
    # Long amount
long_amount = Column(Float)
    # Long amount available to close
available_long = Column(Float)
    # Average long price
average_long_price = Column(Float)
    # Short amount
short_amount = Column(Float)
    # Short amount available to close
available_short = Column(Float)
    # Average short price
average_short_price = Column(Float)
profit = Column(Float)
    # Market value, or the margin occupied (for simplicity, always 100%)
value = Column(Float)
    # Trading type (0 means T+0, 1 means T+1)
trading_t = Column(Integer)
# Order record
class Order(TraderBase, Mixin):
__tablename__ = 'order'
    # Trader (bot) name
trader_name = Column(String(length=128))
    # Order price
order_price = Column(Float)
    # Order amount
order_amount = Column(Float)
    # Order type
order_type = Column(String(length=64))
    # Order status
status = Column(String(length=64))
    # The selector/factor level that generated the order
level = Column(String(length=32))
register_schema(providers=['zvt'], db_name='trader_info', schema_base=TraderBase)
__all__ = ['TraderInfo', 'AccountStats', 'Position', 'Order']
| 25.038462
| 84
| 0.691628
|
4a1c7ca3df498c76d5bd5a5ca29a955a1d5efaf2
| 3,451
|
py
|
Python
|
PS1/ps1b.py
|
HollisHolmes/MIT_6.0002
|
5978df16f27963d93f0ef99b6b6d2518f3569e37
|
[
"MIT"
] | null | null | null |
PS1/ps1b.py
|
HollisHolmes/MIT_6.0002
|
5978df16f27963d93f0ef99b6b6d2518f3569e37
|
[
"MIT"
] | null | null | null |
PS1/ps1b.py
|
HollisHolmes/MIT_6.0002
|
5978df16f27963d93f0ef99b6b6d2518f3569e37
|
[
"MIT"
] | null | null | null |
###########################
# 6.0002 Problem Set 1b: Space Change
# Name:
# Collaborators:
# Time:
# Author: charz, cdenise
#================================
# Part B: Golden Eggs
#================================
# Problem 1
def dp_make_weight(egg_weights, target_weight, memo=None):
"""
Find number of eggs to bring back, using the smallest number of eggs. Assumes there is
an infinite supply of eggs of each weight, and there is always a egg of value 1.
Parameters:
egg_weights - tuple of integers, available egg weights sorted from smallest to largest value (1 = d1 < d2 < ... < dk)
target_weight - int, amount of weight we want to find eggs to fit
memo - dictionary, OPTIONAL parameter for memoization (you may not need to use this parameter depending on your implementation)
Returns: int, smallest number of eggs needed to make target weight
    Key idea: the state is the least number of eggs needed for a given remaining
    weight. No state depends on another with a larger weight, so the states are
    topologically sorted by weight and can be filled bottom-up with DP.
"""
    if memo is None: # avoid a shared mutable default; stale entries from a prior call would be wrong
        memo = {}
    # memoize min eggs for each weight; base case: 0 eggs reach weight 0
    memo[0] = 0
    for weight in range(1, target_weight+1):
turns_at_this_weight = []
# bottom up approach starting at weight 1
for egg_weight in egg_weights:
# weight remaing respective after subtracting each possible egg weight
after_adding = weight - egg_weight
# if we can get to this state
if after_adding in memo:
# we can get to 0 in 1 + how many it takes in the new state by optimal substructure
turns_at_this_weight.append(1 + memo[after_adding])
# we have # turns for each egg weight, only store the best option
memo[weight] = min(turns_at_this_weight)
    # return the minimum egg count for the target weight (an O(1) lookup)
return memo[target_weight]
# EXAMPLE TESTING CODE, feel free to add more if you'd like
if __name__ == '__main__':
    egg_weights = (1, 5, 10, 25) # match the expected output printed below
n = 99
print("Egg weights = (1, 5, 10, 25)")
print("n = 99")
print("Expected ouput: 9 (3 * 25 + 2 * 10 + 4 * 1 = 99)")
print("Actual output:", dp_make_weight(egg_weights, n))
print()
#### Greedy algorithm implementation that could return incorrect values ####
# def dp_make_weight(egg_weights, target_weight, memo = {}):
# """
# Find number of eggs to bring back, using the smallest number of eggs. Assumes there is
# an infinite supply of eggs of each weight, and there is always a egg of value 1.
#
# Parameters:
# egg_weights - tuple of integers, available egg weights sorted from smallest to largest value (1 = d1 < d2 < ... < dk)
# target_weight - int, amount of weight we want to find eggs to fit
# memo - dictionary, OPTIONAL parameter for memoization (you may not need to use this parameter depending on your implementation)
#
# Returns: int, smallest number of eggs needed to make target weight
# """
#
# weightRemaining = target_weight
# numEggs = 0
#
# for i in range(-1, -len(egg_weights)-1, -1):
# while weightRemaining >= egg_weights[i]:
# weightRemaining -= egg_weights[i]
# numEggs += 1
# return numEggs
#
| 38.344444
| 134
| 0.625036
|
4a1c7cb624a1a608384b7d6eb4969c28bd6960d1
| 668
|
py
|
Python
|
trax/format/__init__.py
|
dsully/trax
|
55e16d54f3805f2a5fa3eebe2faf810c45a62088
|
[
"MIT"
] | 1
|
2018-04-02T14:37:20.000Z
|
2018-04-02T14:37:20.000Z
|
trax/format/__init__.py
|
dsully/trax
|
55e16d54f3805f2a5fa3eebe2faf810c45a62088
|
[
"MIT"
] | null | null | null |
trax/format/__init__.py
|
dsully/trax
|
55e16d54f3805f2a5fa3eebe2faf810c45a62088
|
[
"MIT"
] | null | null | null |
""" Abstraction for audio file formats. """
import logging
from trax.format.flac import FLAC
from trax.format.mp3 import MP3
from trax.format.mp4 import MP4
log = logging.getLogger(__name__)
FORMAT_MAP = {
'flac': FLAC,
'mp3' : MP3,
'mp4' : MP4,
'm4a' : MP4,
}
EXTENSION_MAP = {
'flac': 'flac',
'alac': 'm4a',
'aac' : 'm4a',
'mp3' : 'mp3',
'ogg' : 'ogg',
}
def load(track, filetype=None):
""" Load a Format class for a given track. """
if filetype is None:
filetype = track.filetype
if filetype in FORMAT_MAP:
return FORMAT_MAP[filetype](track)
def extension_for_codec(extension):
return EXTENSION_MAP.get(extension, None)
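# A minimal usage sketch (`track` is illustrative: any object with a `filetype`
# attribute will do):
#
#     fmt = load(track) # e.g. an MP3 instance, or None for unsupported types
#     ext = extension_for_codec('alac') # -> 'm4a'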
| 18.054054
| 48
| 0.658683
|
4a1c7d7c33b780b5b6b1ee840d285097e0ea3603
| 8,553
|
py
|
Python
|
nitroml/automl/metalearning/metalearner/executor.py
|
google/nitroml
|
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
|
[
"Apache-2.0"
] | 43
|
2020-09-13T18:07:15.000Z
|
2022-01-05T19:05:28.000Z
|
nitroml/automl/metalearning/metalearner/executor.py
|
google/nitroml
|
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
|
[
"Apache-2.0"
] | 4
|
2020-09-14T13:15:09.000Z
|
2021-11-21T11:21:13.000Z
|
nitroml/automl/metalearning/metalearner/executor.py
|
google/nitroml
|
5eabdbe6de85ff7fdae4fefda7547c0c031f9431
|
[
"Apache-2.0"
] | 5
|
2020-09-14T13:03:04.000Z
|
2021-10-21T01:55:48.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Executor for MetaLearner."""
import collections
import json
import os
from typing import Any, Dict, List
from absl import logging
import kerastuner
from nitroml.automl.metalearning import artifacts
import numpy as np
import tensorflow as tf
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
from tfx.utils import io_utils
from tfx.utils import path_utils
_DEFAULT_FILE_NAME = 'meta_hyperparameters.txt'
MAX_INPUTS = 10
OUTPUT_MODEL = 'metamodel'
OUTPUT_HYPERPARAMS = 'output_hyperparameters'
MAJORITY_VOTING = 'majority_voting'
NEAREST_NEIGHBOR = 'nearest_neighbor'
METALEARNING_ALGORITHMS = [
MAJORITY_VOTING,
NEAREST_NEIGHBOR,
]
class MetaLearnerExecutor(base_executor.BaseExecutor):
"""Executor for MetaLearnerExecutor."""
def _convert_to_kerastuner_hyperparameters(
self,
candidate_hparams: List[Dict[str,
Any]]) -> List[kerastuner.HyperParameters]:
"""Convert list of HSpace to a list of search space each with cardinality 1.
Args:
candidate_hparams: List of Dict of HParams with same keys.
Returns:
The list of hparams in the search space.
"""
if not candidate_hparams:
raise ValueError(
f'Expected a non-empty list of candidate_hparams. Got {candidate_hparams}'
)
simple_search_space_list = []
for candidate_hparam in candidate_hparams:
simple_search_space = kerastuner.HyperParameters()
for key in candidate_hparam:
simple_search_space.Choice(key, [candidate_hparam[key]])
simple_search_space_list.append(simple_search_space)
return simple_search_space_list
def _create_search_space_using_voting(
self, candidate_hparams: List[Dict[str,
Any]]) -> kerastuner.HyperParameters:
"""Convert List of HParams to kerastuner.HyperaParameters based on voting.
Args:
candidate_hparams: List of Dict of HParams with same keys.
Returns:
discrete_search_space: A kerastuner.HyperParameters object representing
a discrete search
space created using voting. For example, when
`candidate_hparams` is [{`learning_rate`: 0.01, `num_nodes`: 32},
{`learning_rate`: 0.001, `num_nodes`: 128},
{`learning_rate`: 0.01, `num_nodes`: 128},
{`learning_rate`: 0.001, `num_nodes`: 128}]
then, `discrete_search_space` depicts the following discrete search
space
{`learning_rate`: [0.01, 0.001], `num_nodes`: [128]}
Raises:
ValueError: An error occured when `candidate_hparams` is empty or None.
"""
if not candidate_hparams:
raise ValueError(
f'Expected a non-empty list of candidate_hparams. Got {candidate_hparams}'
)
hparams = candidate_hparams[0].keys()
search_space = {}
for key in hparams:
search_space[key] = collections.Counter(
[candidate[key] for candidate in candidate_hparams]).most_common()
discrete_search_space = kerastuner.HyperParameters()
for key, value_list in search_space.items():
max_vote = -1
candidate_values = []
for value, count in value_list:
if count >= max_vote:
candidate_values.append(value)
max_vote = count
else:
break
discrete_search_space.Choice(
key, candidate_values, default=candidate_values[0])
return discrete_search_space
def _create_knn_model_from_metafeatures(
self, metafeatures_list: List[List[float]]) -> tf.keras.Model:
"""Creates a Model that stores metafeatures as a Layer for nearest neighbor.
The function creates a keras model with a dense layer. The weight kernel of
the layer is formed of metafeatures of training datasets. One can find the
nearest neighbor for a new dataset by doing a forward pass of the model. The
output of the forward pass represents the similarity scores based on the
inner product of metafeatures. The keras model is intended to be used in
metalearning initialized tuner, which receives the metafeatures of a new
dataset.
Args:
metafeatures_list: List of metafeatures of training datasets.
Returns:
A tf.keras.Model with a single dense layer having metafeatures as weights.
"""
n = len(metafeatures_list[0])
k = len(metafeatures_list)
inputs = tf.keras.layers.Input(shape=(n,))
outputs = tf.keras.layers.Dense(
k, activation=None, use_bias=False, name='metafeatures')(
inputs)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
weights = np.array(metafeatures_list, dtype=np.float32).T
# Normalize weights to lie in a unit ball.
weights = weights / np.linalg.norm(weights, axis=1, keepdims=True)
model.get_layer('metafeatures').set_weights([weights])
return model
def Do(self, input_dict: Dict[str, List[Artifact]],
output_dict: Dict[str, List[Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Recommends a tuner config.
Args:
input_dict: Input dict from input key to a list of artifacts, including:
- meta_train_features_N: MetaFeatures for Nth train dataset.
        - hparams_train_N: HParams for Nth train dataset. The maximum value of
          `N` is MAX_INPUTS.
output_dict: Output dict from key to a list of artifacts.
exec_properties: A dict of execution properties.
    Raises:
      NotImplementedError: If `algorithm` is not a supported metalearning algorithm.
    """
algorithm = exec_properties['algorithm']
metafeatures_list = []
# This should be agnostic to meta-feature type.
for ix in range(MAX_INPUTS):
metafeature_key = f'meta_train_features_{ix}'
if metafeature_key in input_dict:
metafeature_uri = os.path.join(
artifact_utils.get_single_uri(input_dict[metafeature_key]),
artifacts.MetaFeatures.DEFAULT_FILE_NAME)
logging.info('Found %s at %s.', metafeature_key, metafeature_uri)
metafeatures = json.loads(io_utils.read_string_file(metafeature_uri))
metafeatures_list.append(metafeatures['metafeature'])
all_hparams = []
for ix in range(MAX_INPUTS):
hparam_key = f'hparams_train_{ix}'
if hparam_key in input_dict:
hyperparameters_file = io_utils.get_only_uri_in_dir(
artifact_utils.get_single_uri(input_dict[hparam_key]))
logging.info('Found %s at %s.', hparam_key, hyperparameters_file)
hparams_json = json.loads(
io_utils.read_string_file(hyperparameters_file))
all_hparams.append(hparams_json['values'])
if algorithm == MAJORITY_VOTING:
discrete_search_space = self._create_search_space_using_voting(
all_hparams)
hparams_config_list = [discrete_search_space.get_config()]
elif algorithm == NEAREST_NEIGHBOR:
# Build nearest_neighbor model
output_path = artifact_utils.get_single_uri(output_dict[OUTPUT_MODEL])
serving_model_dir = path_utils.serving_model_dir(output_path)
model = self._create_knn_model_from_metafeatures(metafeatures_list)
# TODO(nikhilmehta): Consider adding signature here.
model.save(serving_model_dir)
# Collect all Candidate HParams
hparams_list = self._convert_to_kerastuner_hyperparameters(all_hparams)
hparams_config_list = [hparam.get_config() for hparam in hparams_list]
else:
raise NotImplementedError(
f'The algorithm "{algorithm}" is not supported.')
meta_hparams_path = os.path.join(
artifact_utils.get_single_uri(output_dict[OUTPUT_HYPERPARAMS]),
_DEFAULT_FILE_NAME)
io_utils.write_string_file(meta_hparams_path,
json.dumps(hparams_config_list))
logging.info('Meta HParams saved at %s', meta_hparams_path)
| 37.678414
| 84
| 0.697182
|
4a1c7e529884f71c6ca1b1cc56b84a3d9d5d6b1a
| 5,164
|
py
|
Python
|
generate.py
|
Bhaskers-Blu-Org1/risk-pddl
|
10708bfa44df69876adea532ce68c4e25770b7f7
|
[
"MIT"
] | 5
|
2020-05-08T19:07:17.000Z
|
2021-11-04T11:24:05.000Z
|
generate.py
|
IBM/risk-pddl
|
10708bfa44df69876adea532ce68c4e25770b7f7
|
[
"MIT"
] | null | null | null |
generate.py
|
IBM/risk-pddl
|
10708bfa44df69876adea532ce68c4e25770b7f7
|
[
"MIT"
] | 1
|
2020-06-29T15:23:28.000Z
|
2020-06-29T15:23:28.000Z
|
#! /usr/bin/env python
import sys
import networkx as nx
import random
def start_cost_function(degree):
return degree
def random_edge_cost():
if random.random() < 0.8:
return 10
return 30
class Task(object):
def __init__(self, num_nodes, num_start, num_end, num_poi, discard_cost, start_cost, edge_cost):
self.discard_cost = discard_cost
self.start_cost_function = start_cost
self.edge_cost_function = edge_cost
self.graph = self.generate_connected_graph(num_nodes)
self.poi = self.generate_poi(num_poi)
self.end = self.generate_end_nodes(num_end)
self.start = self.generate_start_nodes(num_start)
def generate_directed_graph(self, num_nodes):
# return nx.gn_graph(num_nodes)
graph = nx.barabasi_albert_graph(num_nodes, 2)
directed = graph.to_directed()
for s, t in graph.edges():
if random.random() < 0.01:
continue
if random.random() < 0.5:
directed.remove_edge(s,t)
else:
directed.remove_edge(t,s)
return directed
def generate_connected_graph(self, num_nodes):
is_connected = False
while not is_connected:
graph = self.generate_directed_graph(num_nodes)
is_connected = nx.is_weakly_connected(graph)
return graph
def generate_poi(self, num_poi):
return random.sample(self.graph.nodes(), num_poi)
def generate_start_nodes(self, num_nodes):
startnodes = [ (n, 0) for n, degree in self.graph.in_degree(self.graph.nodes()) if degree == 0]
nonstartnodes = [ n for n, degree in self.graph.in_degree(self.graph.nodes()) if degree > 0]
if len(startnodes) >= num_nodes:
return random.sample(startnodes, num_nodes)
num_remaining_nodes = num_nodes - len(startnodes)
remaining_nodes = random.sample(nonstartnodes, num_remaining_nodes)
remaining_nodes_with_cost = [ (n, self.start_cost_function(degree)) for n, degree in self.graph.in_degree(remaining_nodes)]
startnodes.extend(remaining_nodes_with_cost)
return startnodes
def generate_end_nodes(self, num_nodes):
endnodes = [ n for n, degree in self.graph.out_degree(self.graph.nodes()) if degree == 0]
nonendnodes = [ n for n, degree in self.graph.out_degree(self.graph.nodes()) if degree > 0]
if len(endnodes) >= num_nodes:
return random.sample(endnodes, num_nodes)
num_remaining_nodes = num_nodes - len(endnodes)
endnodes.extend(random.sample(nonendnodes, num_remaining_nodes))
return endnodes
def _write_costs(self, fh):
fh.write(" (= (total-cost) 0)\n")
fh.write(" (= (discard-cost) %s)\n" % self.discard_cost)
for s, cost in self.start:
fh.write(" (= (starting-cost o%s) %s)\n" % (s, cost))
for s, d in self.graph.edges():
fh.write(" (= (connected-cost o%s o%s) %s)\n" % (s,d, self.edge_cost_function()))
def _write_static(self, fh):
for s, _ in self.start:
fh.write(" (SOURCE o%s)\n" % s)
for s in self.end:
fh.write(" (TARGET o%s)\n" % s)
for s in self.poi:
fh.write(" (POI o%s)\n" % s)
for s, d in self.graph.edges():
fh.write(" (CONNECTED o%s o%s)\n" % (s,d))
last_discard = "dummy"
for s in self.poi:
curr = "o%s" % s
fh.write(" (DISCARD_AFTER %s %s)\n" % (curr, last_discard))
last_discard = curr
def write_pddl(self, filename, domain_name, problem_name):
with open(filename, "w") as f:
f.write("(define (problem %s)\n" % problem_name)
f.write(" (:domain %s)\n" % domain_name)
f.write(" (:objects %s dummy)\n" % " ".join(["o%s" %s for s in self.graph.nodes()]))
f.write(" (:init\n")
self._write_static(f)
self._write_costs(f)
f.write(" (considered dummy)\n")
f.write(" )\n")
f.write(" (:goal (and\n")
f.write(" (__goal-achieved)\n")
for s in self.poi:
f.write(" (considered o%s)\n" % s)
f.write(" )\n")
f.write(" )\n")
f.write(" (:metric minimize (total-cost))\n")
f.write(")\n")
def main(prefix):
#
counter = 1
for num_nodes in [1000, 2500, 5000, 10000, 20000]:
for start in [2, 5]:
for end in [10, 100]:
for pois in [3, 5, 10, 25, 50]:
task = Task(num_nodes, start, end, pois, 100, start_cost_function,random_edge_cost)
prob_name = "rm-%s-%s-%s-%s" % (num_nodes, start, end, pois)
fname = "%s%03d.pddl" % (prefix, counter)
print("Generating task: %s" % fname)
task.write_pddl(fname, "risk", prob_name)
counter += 1
if __name__ == "__main__":
main(sys.argv[1])
| 36.366197
| 131
| 0.562742
|
4a1c7f134bcbb1227cbed372a1f507d61c8a6530
| 2,099
|
py
|
Python
|
generate_questions.py
|
daveshap/NLCA_Question_Generator
|
2a2d407df0c1d770193bc7a16f9da8bba3830a6f
|
[
"MIT"
] | 16
|
2021-08-18T04:07:55.000Z
|
2022-03-23T03:05:36.000Z
|
generate_questions.py
|
daveshap/NLCA_Question_Generator
|
2a2d407df0c1d770193bc7a16f9da8bba3830a6f
|
[
"MIT"
] | null | null | null |
generate_questions.py
|
daveshap/NLCA_Question_Generator
|
2a2d407df0c1d770193bc7a16f9da8bba3830a6f
|
[
"MIT"
] | 6
|
2021-08-18T04:07:58.000Z
|
2022-03-23T10:29:04.000Z
|
import openai
from random import seed,sample
import os
import re
with open('openaiapikey.txt', 'r') as infile:
    open_ai_api_key = infile.read().strip()  # strip trailing newline from the key file
openai.api_key = open_ai_api_key
seed()
ctxdir = 'C:/RavenFinetune/contexts/'
outdir = 'C:/RavenFinetune/questions/'
files = os.listdir(ctxdir)
files = [i for i in files if 'news' in i] # filter list: dialog, medical, reddit, stack, news
#prompt_name = 'p_questions_task.txt'
prompt_name = 'p_questions_moral.txt'
#prompt_name = 'p_questions_important.txt'
files = sample(files, 50)
print(files)
# davinci-instruct
# temp = 0.9
# top_p = 0.95
# freq pen = 0.5
# pres pen = 0.5
def load_prompt(filename, payload):
with open('C:/RavenFinetune/%s' % filename, 'r', encoding='utf-8') as infile:
body = infile.read()
body = body.replace('<<TEXT>>', payload)
return body
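# Illustrative template mechanic (the real p_questions_*.txt prompt files aren't shown here):
# a prompt file might read "Write questions about the following article:\n<<TEXT>>\nQuestions:"
# and load_prompt substitutes the context document for the <<TEXT>> placeholder.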
def completion(prompt, engine='davinci-instruct-beta', temp=0.9, top_p=0.95, tokens=200, freq_pen=0.5, pres_pen=0.5, stop=['\n\n']):
try:
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip().splitlines()
questions = ''
for t in text:
            questions += re.sub(r'^-', '', t).strip() + '\n'  # strip leading list dashes
questions = questions.strip()
return questions
except Exception as oops:
print('ERROR in completion function:', oops)
for f in files:
try:
with open(ctxdir + f, 'r', encoding='utf-8') as infile:
context = infile.read()
prompt = load_prompt(prompt_name, context)
print('\n---------------------\n', prompt)
questions = completion(prompt)
print('\n---------------------\n', questions)
with open(outdir + f, 'w', encoding='utf-8') as outfile:
outfile.write(questions)
except Exception as oops:
print('ERROR in main loop:', f, oops)
| 30.867647
| 132
| 0.603144
|
4a1c806ba652b8af7f19f244248408e30f9c5521
| 14,856
|
py
|
Python
|
accelbyte_py_sdk/api/platform/operations/payment_dedicated/refund_payment_order_by_309df5.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/platform/operations/payment_dedicated/refund_payment_order_by_309df5.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/platform/operations/payment_dedicated/refund_payment_order_by_309df5.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-platform-service (4.10.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ErrorEntity
from ...models import PaymentOrderRefund
from ...models import PaymentOrderRefundResult
from ...models import ValidationErrorEntity
class RefundPaymentOrderByDedicated(Operation):
"""Refund payment order by dedicated server (refundPaymentOrderByDedicated)
    This API is used to refund a payment order by paymentOrderNo from a non-Justice service, e.g. a dedicated server.
     * if the status field of the response JSON is "REFUNDED" (usually a wallet payment), the payment order has already been refunded
     * if the status field of the response JSON is "REFUNDING" (usually a real-money payment), the platform will send a notification to the registered notify url once the refund succeeds
Path Parameter:
Parameter | Type | Required | Description
---------------|--------|----------|-----------------------------------------
namespace | String | Yes | Namespace that payment order resides in
paymentOrderNo | String | Yes | Payment order number
Request Body Parameters:
Parameter | Type | Required | Description
------------|--------|----------|--------------------
description | String | Yes | Refund description
Request Body Example:
{
"description": "Repeated item."
}
#### Refund Notification:
    A notification is sent to the configured web hook after a successful refund; your endpoint should return HTTP status 200 or 204 once it handles the notification, otherwise the payment system will retry the notification at intervals
Refund notification parameter:
Parameter | Type | Required | Description
-----------|--------|----------|------------------------------------------------
payload | String | Yes | Refund notification payload in json string
sign | String | Yes | sha1 hex signature for payload and private key
Refund notification Example:
{
"payload": "{
"type": "payment",
"nonceStr": "34c1dcf3eb58455eb161465bbfc0b590",
"paymentOrderNo": "18081239088",
"namespace": "accelbyte",
"targetNamespace": "game1",
"targetUserId": "94451623768940d58416ca33ca767ec3",
"extOrderNo": "123456789",
"sku": "sku",
"extUserId": "678",
"price": 100,
"paymentProvider": "XSOLLA",
"vat": 0,
"salesTax": 0,
"paymentProviderFee": 0,
"paymentMethodFee": 0,
"currency": {
"currencyCode": "USD",
"currencySymbol": "$",
"currencyType": "REAL",
"namespace": "accelbyte",
"decimals": 2
},
"status": "REFUNDED",
"createdTime": "2018-07-28T00:39:16.274Z",
"chargedTime": "2018-07-28T00:39:16.274Z",
"refundedTime": "2018-07-28T00:39:16.274Z"
}",
"sign":"e31fb92516cc9faaf50ad70343e1293acec6f3d5"
}
Refund notification payload parameter list:
Parameter | Type | Required | Description
-------------------|----------|----------|--------------------------------------------------------------------------------------
type | String | Yes | Notification type: 'payment'
paymentOrderNo | String | Yes | Payment system generated order number
extOrderNo | String | No | External order number that passed by invoker
namespace | String | Yes | Namespace that related payment order resides in
targetNamespace | String | Yes | The game namespace
targetUserId | String | Yes | The user id in game namespace
sku | String | No | Item identify, it will return if pass it when create payment
extUserId | String | No | External user id, can be character id, it will return if pass it when create payment
price | int | Yes | Price of item
paymentProvider | String | Yes | Payment provider: xsolla/alipay/wxpay/wallet
vat | int | Yes | Payment order VAT
salesTax | int | Yes | Payment order sales tax
paymentProviderFee | int | Yes | Payment provider fee
paymentMethodFee | int | Yes | Payment method fee
currency | Map | Yes | Payment order currency info
status | String | Yes | Payment order status
statusReason | String | No | Payment order refund status reason
createdTime | Datetime | No | The time of the order created
chargedTime | Datetime | No | The time of the order charged
refundedTime | Datetime | No | The time of the order refunded
customParameters | Map | No | custom parameters, will return if pass it when create payment
    nonceStr           | String   | Yes      | Random string, max length is 32
Currency info parameter list:
Parameter | Type | Required | Description
---------------|--------|----------|-----------------------------
currencyCode | String | Yes | Currency Code
currencySymbol | String | Yes | Currency Symbol
currencyType | String | Yes | Currency type(REAL/VIRTUAL)
namespace | String | Yes | Currency namespace
decimals | int | Yes | Currency decimals
#### Encryption Rule:
Concat payload json string and private key and then do sha1Hex.
#### Other detail info:
* Token type : client token
* Required permission : resource="ADMIN:NAMESPACE:{namespace}:PAYMENT", action=4 (UPDATE)
* cross namespace allowed
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:PAYMENT [UPDATE]
Properties:
url: /platform/admin/namespaces/{namespace}/payment/orders/{paymentOrderNo}/refund
method: PUT
tags: ["Payment(Dedicated)"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
body: (body) OPTIONAL PaymentOrderRefund in body
namespace: (namespace) REQUIRED str in path
payment_order_no: (paymentOrderNo) REQUIRED str in path
Responses:
200: OK - PaymentOrderRefundResult (successful operation)
204: No Content - (Refund successfully)
404: Not Found - ErrorEntity (33141: Payment Order [{paymentOrderNo}] does not exist)
409: Conflict - ErrorEntity (33172: Payment order [{paymentOrderNo}] is not refundable)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
"""
# region fields
_url: str = "/platform/admin/namespaces/{namespace}/payment/orders/{paymentOrderNo}/refund"
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
body: PaymentOrderRefund # OPTIONAL in [body]
namespace: str # REQUIRED in [path]
payment_order_no: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "payment_order_no"):
result["paymentOrderNo"] = self.payment_order_no
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: PaymentOrderRefund) -> RefundPaymentOrderByDedicated:
self.body = value
return self
def with_namespace(self, value: str) -> RefundPaymentOrderByDedicated:
self.namespace = value
return self
def with_payment_order_no(self, value: str) -> RefundPaymentOrderByDedicated:
self.payment_order_no = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = PaymentOrderRefund()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "payment_order_no") and self.payment_order_no:
result["paymentOrderNo"] = str(self.payment_order_no)
elif include_empty:
result["paymentOrderNo"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, PaymentOrderRefundResult], Union[None, ErrorEntity, HttpResponse, ValidationErrorEntity]]:
"""Parse the given response.
200: OK - PaymentOrderRefundResult (successful operation)
204: No Content - (Refund successfully)
404: Not Found - ErrorEntity (33141: Payment Order [{paymentOrderNo}] does not exist)
409: Conflict - ErrorEntity (33172: Payment order [{paymentOrderNo}] is not refundable)
422: Unprocessable Entity - ValidationErrorEntity (20002: validation error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return PaymentOrderRefundResult.create_from_dict(content), None
if code == 204:
return None, None
if code == 404:
return None, ErrorEntity.create_from_dict(content)
if code == 409:
return None, ErrorEntity.create_from_dict(content)
if code == 422:
return None, ValidationErrorEntity.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
payment_order_no: str,
body: Optional[PaymentOrderRefund] = None,
) -> RefundPaymentOrderByDedicated:
instance = cls()
instance.namespace = namespace
instance.payment_order_no = payment_order_no
if body is not None:
instance.body = body
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> RefundPaymentOrderByDedicated:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = PaymentOrderRefund.create_from_dict(dict_["body"], include_empty=include_empty)
elif include_empty:
instance.body = PaymentOrderRefund()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "paymentOrderNo" in dict_ and dict_["paymentOrderNo"] is not None:
instance.payment_order_no = str(dict_["paymentOrderNo"])
elif include_empty:
instance.payment_order_no = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"paymentOrderNo": "payment_order_no",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": False,
"namespace": True,
"paymentOrderNo": True,
}
# endregion static methods
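# A minimal sketch (not part of the generated SDK) of verifying the refund notification
# "sign" field per the Encryption Rule documented above: concatenate the payload JSON
# string with the private key configured for your webhook, then compare sha1 hex digests.
# `payload`, `sign`, and `private_key` are assumed inputs from your notification handler.
def _verify_refund_notification_sign(payload: str, sign: str, private_key: str) -> bool:
    import hashlib  # local import so this sketch stays self-contained

    # sha1Hex(payload + private_key) must equal the notification's "sign" field
    return hashlib.sha1((payload + private_key).encode("utf-8")).hexdigest() == sign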
| 35.287411
| 226
| 0.587574
|
4a1c817d1cefd9f39f099a0c59586fd8f3c4d1b1
| 1,585
|
py
|
Python
|
tests/unit/mkeyboard/test_btn_release.py
|
dwmoffatt/macro-zero
|
1684415c592c8ea646649ae5dde81ddcc00da7d0
|
[
"MIT"
] | null | null | null |
tests/unit/mkeyboard/test_btn_release.py
|
dwmoffatt/macro-zero
|
1684415c592c8ea646649ae5dde81ddcc00da7d0
|
[
"MIT"
] | 43
|
2021-05-03T04:32:29.000Z
|
2021-10-01T03:29:32.000Z
|
tests/unit/mkeyboard/test_btn_release.py
|
dwmoffatt/macro-zero
|
1684415c592c8ea646649ae5dde81ddcc00da7d0
|
[
"MIT"
] | null | null | null |
import pytest
from src.modules import (
MK_B1_PIN,
MK_B2_PIN,
MK_B3_PIN,
MK_B4_PIN,
MK_B5_PIN,
MK_B6_PIN,
MK_B7_PIN,
MK_B8_PIN,
RE_SW_PIN,
)
from src.modules.mkeyboard import (
MK_COMMAND_MK_B1,
MK_COMMAND_MK_B2,
MK_COMMAND_MK_B3,
MK_COMMAND_MK_B4,
MK_COMMAND_MK_B5,
MK_COMMAND_MK_B6,
MK_COMMAND_MK_B7,
MK_COMMAND_MK_B8,
)
class TestBtnRelease:
# @classmethod
# def setup_class(cls):
# def setup_method(self, method):
@pytest.mark.parametrize(
"test_input,expected",
[
(MK_B1_PIN, MK_COMMAND_MK_B1),
(MK_B2_PIN, MK_COMMAND_MK_B2),
(MK_B3_PIN, MK_COMMAND_MK_B3),
(MK_B4_PIN, MK_COMMAND_MK_B4),
(MK_B5_PIN, MK_COMMAND_MK_B5),
(MK_B6_PIN, MK_COMMAND_MK_B6),
(MK_B7_PIN, MK_COMMAND_MK_B7),
(MK_B8_PIN, MK_COMMAND_MK_B8),
],
)
def test_btn_release_valid_channel(self, test_input, expected, app):
"""
        Tests that every button pin mapping puts the correct command value on the queue
:return:
"""
app.mkeyboard.btn_release(test_input)
que_value = app.input_que.get_nowait()
assert que_value == expected
def test_btn_release_invalid_channel(self, app):
"""
        Tests that an invalid channel input puts nothing on the queue
:return:
"""
app.mkeyboard.btn_release(RE_SW_PIN)
assert app.input_que.qsize() == 0
# def teardown_method(self, method):
# @classmethod
# def teardown_class(cls):
| 24.384615
| 72
| 0.623975
|
4a1c826cbae2520ecb1ea31c47bc9246032265a6
| 463
|
py
|
Python
|
test_app/utils.py
|
jamespeacock/django-request-profiler
|
777543cf8a1f0e3d091636d52a6690b024eb9ca3
|
[
"MIT"
] | null | null | null |
test_app/utils.py
|
jamespeacock/django-request-profiler
|
777543cf8a1f0e3d091636d52a6690b024eb9ca3
|
[
"MIT"
] | null | null | null |
test_app/utils.py
|
jamespeacock/django-request-profiler
|
777543cf8a1f0e3d091636d52a6690b024eb9ca3
|
[
"MIT"
] | null | null | null |
from unittest import skipIf
from django.conf import settings
def skipIfDefaultUser(test_func):
"""
Skip a test if a default user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL == 'auth.User', 'Default user model in use')(test_func)
def skipIfCustomUser(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != 'auth.User', 'Custom user model in use')(test_func)
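# A minimal usage sketch (hypothetical test module; assumes Django settings are configured):
#
#   from django.test import TestCase
#   from test_app.utils import skipIfCustomUser
#
#   class DefaultUserTests(TestCase):
#       @skipIfCustomUser
#       def test_profile_page(self):
#           ...  # runs only when AUTH_USER_MODEL == 'auth.User'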
| 25.722222
| 98
| 0.697624
|
4a1c83775585d7153288e240f92db93fd4b95168
| 18,865
|
py
|
Python
|
examples/cartpole_dqn.py
|
Alexanders101/TorchSpread
|
27cb9c6ead97d8b92284f6eff016bd6e406505e9
|
[
"MIT"
] | 1
|
2019-08-15T11:16:05.000Z
|
2019-08-15T11:16:05.000Z
|
examples/cartpole_dqn.py
|
Alexanders101/TorchSpread
|
27cb9c6ead97d8b92284f6eff016bd6e406505e9
|
[
"MIT"
] | null | null | null |
examples/cartpole_dqn.py
|
Alexanders101/TorchSpread
|
27cb9c6ead97d8b92284f6eff016bd6e406505e9
|
[
"MIT"
] | null | null | null |
from threading import Thread
import gym
import numpy as np
import torch
import ctypes
from typing import Tuple, List
from torch import nn
from torch.nn import functional as F
from torch_spread import NetworkClient, NetworkManager, SpreadModule, Buffer, mp_ctx
from torch_spread.buffer_queue import BufferRing
from torch_spread.buffer import raw_buffer_and_size
from argparse import ArgumentParser, Namespace
from scipy import signal
process_type = mp_ctx.Process
Value = mp_ctx.Value
JoinableQueue = mp_ctx.JoinableQueue
# process_type = Thread
class DuelingNetwork(SpreadModule):
""" A simple feed forward neural network for training a q-value on cartpole. """
def __init__(self, worker: bool, state_shape: Tuple[int], num_actions: int):
super(DuelingNetwork, self).__init__(worker)
self.input_shape = int(np.prod(state_shape))
self.encoder = nn.Sequential(
nn.Linear(self.input_shape, 16),
nn.PReLU(16),
nn.Linear(16, 32),
nn.PReLU(32),
)
self.value_output = nn.Linear(32, 1)
self.advantage_output = nn.Linear(32, num_actions)
def forward(self, input_buffer):
x = self.encoder(input_buffer.view(-1, self.input_shape))
value = self.value_output(x)
advantage = self.advantage_output(x)
return value + advantage - advantage.mean(dim=-1, keepdim=True)
def q_values(self, states, actions):
return self.forward(states).gather(1, actions.unsqueeze(1)).squeeze()
class Episode:
def __init__(self, n_step: int = 1, discount: float = 0.99):
self.states: List[np.ndarray] = []
self.actions: List[int] = []
self.rewards: List[float] = []
self.action_probabilities: List[float] = []
self.length: int = 0
self.n_step = n_step
self.discount = discount
if n_step > 1:
self.discount_filter = np.arange(n_step, dtype=np.float32)
self.discount_filter = discount ** self.discount_filter
def add(self, state: np.ndarray, action: int, reward: float, action_probability: float):
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
self.action_probabilities.append(action_probability)
self.length += 1
def clear(self):
self.states.clear()
self.actions.clear()
self.rewards.clear()
self.action_probabilities.clear()
self.length = 0
@property
def total_reward(self):
return sum(self.rewards)
@property
def observations(self):
states = torch.as_tensor(self.states, dtype=torch.float32)
actions = torch.as_tensor(self.actions, dtype=torch.long)
rewards = torch.as_tensor(self.rewards, dtype=torch.float32)
action_probabilities = torch.as_tensor(self.action_probabilities, dtype=torch.float32)
# Priorities are calculated when adding to replay buffer
priorities = torch.zeros(self.length, dtype=torch.float32)
# Full Monte Carlo discounts
if self.n_step < 1:
terminals = torch.ones(self.length, dtype=torch.uint8)
results = torch.zeros_like(states)
            # reverse in numpy: torch tensors do not support negative-step slicing,
            # and from_numpy needs a contiguous (non-negative-stride) array
            discount_rewards = signal.lfilter([1], [1, -self.discount], x=rewards.numpy()[::-1])
            discount_rewards = torch.from_numpy(discount_rewards[::-1].copy())
# TD(n) discounts
else:
# Compute terminals as a binary mask for states that hit the terminal state during n-step
terminals = torch.zeros(self.length, dtype=torch.uint8)
terminals[-self.n_step:] = 1
# Compute the next-states as the n-offset of the states with zero padding
results = torch.zeros_like(states)
if self.length > self.n_step:
results[:self.length - self.n_step] = states[self.n_step:]
# Compute the n-step discount returns
discount_rewards = rewards
if self.n_step > 1:
discount_rewards = signal.correlate(rewards.numpy(), self.discount_filter[:self.length], 'full')
discount_rewards = torch.from_numpy(discount_rewards[-self.length:])
return {
"states": states,
"actions": actions,
"results": results,
"rewards": rewards,
"terminals": terminals,
"priorities": priorities,
"discount_rewards": discount_rewards,
"action_probabilities": action_probabilities
}
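# Worked example of the TD(n) correlation trick above (a sketch, not used by the classes here):
# with rewards [1, 1, 1], n_step=2 and discount=0.9 the filter is [1.0, 0.9], and taking the
# last `len(rewards)` entries of the full correlation yields G_t = r_t + 0.9 * r_{t+1},
# truncated at the episode end: [1.9, 1.9, 1.0].
def _nstep_return_demo():
    rewards = np.array([1.0, 1.0, 1.0])
    discount_filter = 0.9 ** np.arange(2)  # [1.0, 0.9]
    return signal.correlate(rewards, discount_filter, 'full')[-len(rewards):]  # [1.9, 1.9, 1.0]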
class PrioritizedReplayBuffer(BufferRing):
def __init__(self, state_shape: Tuple[int], max_size: int, alpha: float = 0.6, beta: float = 0.4):
""" A ring buffer for storing a prioritized replay buffer. Used for Deep Q Learning.
Parameters
----------
state_shape: tuple
The numpy shape of a single state
max_size: int
Maximum number of unique samples to hold in this buffer.
alpha: float
Prioritized Experience Replay alpha parameter
beta: float
Prioritized Experience Replay beta parameter
"""
buffer_shapes = {
"states": state_shape,
"results": state_shape,
"actions": tuple(),
"rewards": tuple(),
"terminals": tuple(),
"priorities": tuple(),
"discount_rewards": tuple(),
"action_probabilities": tuple()
}
buffer_types = {
"states": torch.float32,
"results": torch.float32,
"actions": torch.long,
"rewards": torch.float32,
"terminals": torch.uint8,
"priorities": torch.float32,
"discount_rewards": torch.float32,
"action_probabilities": torch.float32
}
super(PrioritizedReplayBuffer, self).__init__(buffer_shapes, buffer_types, max_size)
self.alpha = alpha
self.beta = beta
self.max_priority = Value(ctypes.c_float, lock=False)
self.max_priority.value = 1.0
@property
def priorities(self):
current_size = self.size
return self.buffer[:current_size]('priorities').numpy()
def update_priority(self, idx: np.ndarray, delta: torch.Tensor):
self.buffer('priorities')[idx] = torch.abs(delta.detach().cpu())
def update_max_priority(self):
self.max_priority.value = float(np.max(self.priorities, initial=1))
def put(self, buffer, size: int = None):
buffer, size = raw_buffer_and_size(buffer, size)
# Compute the priority for an incoming sample
buffer['priorities'][:] = self.max_priority.value
# Put it into the buffer
super(PrioritizedReplayBuffer, self).put(buffer, size)
def add_episode(self, episode: Episode):
self.put(episode.observations, episode.length)
def sample(self, num_samples: int = 32):
current_size = self.size
assert num_samples <= current_size, f"Buffer is not large enough to provide {num_samples} samples"
# Calculate probabilities
P = self.buffer[:current_size]('priorities').numpy() ** self.alpha
P /= P.sum()
# Calculate IS weights
w = (current_size * P) ** -self.beta
# Normalize weights
max_w = np.max(w)
if max_w > 1e-5:
w /= max_w
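        # Worked example of this weighting (approximate; alpha=0.6, beta=0.4, N=3):
        # priorities [1, 2, 4] give P ~ p**0.6 normalized ~ [0.208, 0.315, 0.477];
        # raw weights (N * P)**-0.4 ~ [1.208, 1.023, 0.866] normalize by max(w) to
        # ~ [1.000, 0.847, 0.717], so rarely-sampled transitions keep the largest weight.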
# Sample based on probabilities
idx = np.random.choice(current_size, size=num_samples, replace=True, p=P)
idx = np.sort(idx)
# Get the sampled batch
batch = self.buffer[idx]
weights = torch.from_numpy(w[idx])
return idx, batch, weights
class EpsilonGreedyClient(NetworkClient):
def __init__(self, config: dict, batch_size: int, epsilon: float, epsilon_update=None):
""" An extension to the regular NetworkClient that provides Q-learning policy functions.
Parameters
----------
config: dict
Client configuration from the network manager.
batch_size: int
Maximum number of states you're planning on predicting at once.
epsilon: float
Probability of performing a random action
epsilon_update: float -> float, optional
A function for updating the epsilon value every time you sample
"""
super().__init__(config, batch_size)
self.epsilon = epsilon
self.epsilon_update = epsilon_update
def update_epsilon(self):
""" Perform a single call to the epsilon update function. """
if self.epsilon_update is not None:
self.epsilon = self.epsilon_update(self.epsilon)
def sample_actions(self, states):
""" Sample many actions at once (for vectorized environment). """
q_values = self.predict(states)
num_states, num_actions = q_values.shape
greedy_actions = q_values.max(dim=1)[1]
greedy_actions = greedy_actions.numpy()
random_actions = np.random.randint(low=0, high=num_actions, size=num_states, dtype=greedy_actions.dtype)
epsilons = np.random.rand(num_states)
epsilons = (epsilons < self.epsilon).astype(np.int64)
return np.choose(epsilons, (greedy_actions, random_actions))
def sample_action(self, state, num_actions):
""" Sample a single action. """
q_values = self.predict(torch.from_numpy(np.expand_dims(state.astype(np.float32), axis=0)))
action = greedy_action = q_values.max(dim=1).indices[0].item()
if np.random.rand() < self.epsilon:
action = np.random.randint(low=0, high=num_actions)
if action == greedy_action:
return action, (1 - self.epsilon) + self.epsilon / num_actions
else:
return action, self.epsilon / num_actions
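# Note on the returned probabilities: with epsilon=0.1 and num_actions=2, the greedy action
# has probability (1 - 0.1) + 0.1 / 2 = 0.95 (greedy branch plus the random branch landing
# on it), while any other action has probability 0.1 / 2 = 0.05.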
class EpisodeCollector(process_type):
def __init__(self,
parameters: Namespace,
client_config: dict,
replay_buffer: PrioritizedReplayBuffer,
request_queue: JoinableQueue):
super(EpisodeCollector, self).__init__()
self.epsilon_initial: float = parameters.epsilon_initial
self.epsilon_final: float = parameters.epsilon_final
self.epsilon_episodes: int = parameters.epsilon_decay_episodes // parameters.num_collectors
self.environment_name: str = parameters.environment_name
self.discount: float = parameters.discount
self.n_step: int = parameters.n_step
self.client_config = client_config
self.request_queue = request_queue
self.replay_buffer = replay_buffer
self.average_reward = Value(ctypes.c_float, lock=False)
self.average_length = Value(ctypes.c_float, lock=False)
self.total_episodes = Value(ctypes.c_int64, lock=False)
@staticmethod
def create(parameters: Namespace, client_config: dict, replay_buffer: PrioritizedReplayBuffer):
request_queue: JoinableQueue = JoinableQueue(parameters.num_collectors)
collectors: List[EpisodeCollector] = []
for _ in range(parameters.num_collectors):
collector = EpisodeCollector(parameters, client_config, replay_buffer, request_queue)
collector.start()
collectors.append(collector)
return request_queue, collectors
@staticmethod
def kill_collectors(request_queue: JoinableQueue, collectors: List["EpisodeCollector"]):
for _ in range(len(collectors)):
request_queue.put(-1)
for collector in collectors:
collector.join(timeout=None)
@staticmethod
def collect(request_queue: JoinableQueue, collectors: List["EpisodeCollector"], total_states: int):
num_workers = len(collectors)
states_per_worker = total_states // num_workers
for _ in range(num_workers):
request_queue.put(states_per_worker)
try:
request_queue.join()
except KeyboardInterrupt:
EpisodeCollector.kill_collectors(request_queue, collectors)
raise KeyboardInterrupt
average_reward = sum(collector.average_reward.value for collector in collectors) / num_workers
total_episodes = sum(collector.total_episodes.value for collector in collectors)
return total_episodes, average_reward
def run(self):
torch.manual_seed(self.pid)
epsilon_factor = (self.epsilon_final / self.epsilon_initial) ** (1 / self.epsilon_episodes)
def epsilon_update(epsilon):
return max(epsilon * epsilon_factor, self.epsilon_final)
env = gym.make(self.environment_name)
num_actions = env.action_space.n
state = env.reset()
episode = Episode(self.n_step, self.discount)
with EpsilonGreedyClient(self.client_config, 1, self.epsilon_initial, epsilon_update) as client:
while True:
num_states = self.request_queue.get()
self.average_reward.value = 0
self.average_length.value = 0
num_episodes = 0
# Kill Switch
if num_states < 0:
return
for sample in range(num_states):
# Sample an action from our network
action, probability = client.sample_action(state, num_actions)
# Perform action and get the results from the environment
next_state, reward, terminal, _ = env.step(action)
# Add all of the observations to the current episode
episode.add(state, action, reward, probability)
# Set variables for next step
state = next_state
# If the environment has terminated, then we add our observations to the replay buffer and reset.
if terminal:
self.replay_buffer.add_episode(episode)
num_episodes += 1
self.average_reward.value += (episode.total_reward - self.average_reward.value) / num_episodes
self.average_length.value += (episode.length - self.average_length.value) / num_episodes
self.total_episodes.value += 1
episode.clear()
state = env.reset()
client.update_epsilon()
self.request_queue.task_done()
def main():
params = Namespace()
params.environment_name = "CartPole-v0"
params.num_collectors = 4
params.replay_buffer_size = 200_000
params.total_episodes = 30_000
params.warmup_episodes = 1_000
params.states_per_epoch = 20_000
params.train_iterations = 40
params.train_batch_size = 1024
params.target_network_update_epochs = 1
params.epsilon_initial = 1.0
params.epsilon_final = 0.001
params.epsilon_decay_episodes = 10_000
params.discount = 0.99
params.n_step = 20
env = gym.make(params.environment_name)
state_shape = env.observation_space.shape
num_actions = env.action_space.n
manager = NetworkManager(input_shape=state_shape,
input_type=torch.float32,
output_shape=num_actions,
output_type=torch.float32,
batch_size=params.num_collectors,
network_class=DuelingNetwork,
network_args=[state_shape, num_actions],
placement={'cuda:0': 1},
worker_amp=True)
with manager:
optimizer = torch.optim.Adam(manager.training_parameters, lr=0.005)
# Create the dynamically changing target network
device = manager.training_placement
target_network = DuelingNetwork(False, state_shape, num_actions).to(device)
# Prioritized replay buffer for collecting states
replay_buffer = PrioritizedReplayBuffer(manager.input_shape, params.replay_buffer_size)
# Create the worker processes for collecting observations
request_queue, collectors = EpisodeCollector.create(params, manager.client_config, replay_buffer)
# Loop variables
num_episodes = 0
num_states = 0
iteration = 0
while num_episodes < params.total_episodes:
num_episodes, average_reward = EpisodeCollector.collect(request_queue, collectors, params.states_per_epoch)
num_states += params.states_per_epoch
iteration += 1
print(f"{iteration}")
print("=" * 60)
print(f"Average Reward: {average_reward}")
print(f"Number of Episodes: {num_episodes}")
print(f"Number of States: {num_states}")
print()
if num_episodes < params.warmup_episodes:
continue
with manager.training_network as policy_network:
for train_iteration in range(params.train_iterations):
sample_index, sample, weights = replay_buffer.sample(params.train_batch_size)
sample = sample.to(device)
weights = weights.to(device)
# Generate n-step double dqn target
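                # i.e. y = G_n + discount**n * Q_target(s', argmax_a Q_policy(s', a)) * (1 - terminal),
                # where G_n is the stored n-step discounted reward and s' is the state n steps ahead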
with torch.no_grad():
next_states = sample("results")
discount = params.discount ** params.n_step
policy_actions = policy_network(next_states).max(dim=1).indices
q_max = target_network.q_values(next_states, policy_actions)
targets = sample("discount_rewards") + q_max * discount * (1 - sample("terminals"))
# Generate current estimates for the q-value
states, actions = sample("states"), sample("actions")
all_q_values = policy_network(states)
q_values = all_q_values.gather(1, actions.unsqueeze(1)).squeeze()
# Compute bellman loss term
delta = targets - q_values
loss = delta * delta * weights
loss = loss.sum()
# Perform gradient descent step
optimizer.zero_grad()
loss.backward()
optimizer.step()
replay_buffer.update_priority(sample_index, delta)
if iteration % params.target_network_update_epochs == 0:
target_network.load_state_dict(manager.state_dict)
# Update the incoming priority once before sampling
replay_buffer.update_max_priority()
EpisodeCollector.kill_collectors(request_queue, collectors)
if __name__ == '__main__':
main()
| 36.990196
| 119
| 0.623165
|
4a1c84857d075356fe12a2249f12f7e98192689b
| 6,839
|
py
|
Python
|
projects/PointRend/point_rend/point_head.py
|
MargeryLab/BMaskR-CNN
|
41f63d301d6be7fa30ba281a5a0f727fbca6ad2a
|
[
"Apache-2.0"
] | null | null | null |
projects/PointRend/point_rend/point_head.py
|
MargeryLab/BMaskR-CNN
|
41f63d301d6be7fa30ba281a5a0f727fbca6ad2a
|
[
"Apache-2.0"
] | null | null | null |
projects/PointRend/point_rend/point_head.py
|
MargeryLab/BMaskR-CNN
|
41f63d301d6be7fa30ba281a5a0f727fbca6ad2a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import BitMasks
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from .point_features import point_sample
POINT_HEAD_REGISTRY = Registry("POINT_HEAD")
POINT_HEAD_REGISTRY.__doc__ = """
Registry for point heads, which makes prediction for a given set of per-point features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
def roi_mask_point_loss(mask_logits, instances, points_coord):
"""
Compute the point-based loss for instance segmentation mask predictions.
Args:
mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images, C is the
number of foreground classes, and P is the number of points sampled for each mask.
The values are logits.
instances (list[Instances]): A list of N Instances, where N is the number of images
in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th
            element of the list contains R_i objects and R_1 + ... + R_N is equal to R.
The ground-truth labels (class, box, mask, ...) associated with each instance are stored
in fields.
        points_coord (Tensor): A tensor of shape (R, P, 2), where R is the total number of
predicted masks and P is the number of points for each mask. The coordinates are in
the image pixel coordinate space, i.e. [0, H] x [0, W].
Returns:
point_loss (Tensor): A scalar tensor containing the loss.
"""
with torch.no_grad():
cls_agnostic_mask = mask_logits.size(1) == 1
total_num_masks = mask_logits.size(0)
gt_classes = []
gt_mask_logits = []
idx = 0
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
assert isinstance(
instances_per_image.gt_masks, BitMasks
), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'."
if not cls_agnostic_mask:
gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64)
gt_classes.append(gt_classes_per_image)
gt_bit_masks = instances_per_image.gt_masks.tensor
h, w = instances_per_image.gt_masks.image_size
scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device)
points_coord_grid_sample_format = (
points_coord[idx : idx + len(instances_per_image)] / scale
)
idx += len(instances_per_image)
gt_mask_logits.append(
point_sample(
gt_bit_masks.to(torch.float32).unsqueeze(1),
points_coord_grid_sample_format,
align_corners=False,
).squeeze(1)
)
if len(gt_mask_logits) == 0:
return mask_logits.sum() * 0
gt_mask_logits = cat(gt_mask_logits)
assert gt_mask_logits.numel() > 0, gt_mask_logits.shape
if cls_agnostic_mask:
mask_logits = mask_logits[:, 0]
else:
indices = torch.arange(total_num_masks)
gt_classes = cat(gt_classes, dim=0)
mask_logits = mask_logits[indices, gt_classes]
# Log the training accuracy (using gt classes and 0.0 threshold for the logits)
mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8)
mask_accuracy = mask_accurate.nonzero().size(0) / mask_accurate.numel()
get_event_storage().put_scalar("point_rend/accuracy", mask_accuracy)
point_loss = F.binary_cross_entropy_with_logits(
mask_logits, gt_mask_logits.to(dtype=torch.float32), reduction="mean"
)
return point_loss
@POINT_HEAD_REGISTRY.register()
class StandardPointHead(nn.Module):
"""
A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head
takes both fine-grained and coarse prediction features as its input.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
fc_dim: the output dimension of each FC layers
num_fc: the number of FC layers
coarse_pred_each_layer: if True, coarse prediction features are concatenated to each
layer's input
"""
super(StandardPointHead, self).__init__()
# fmt: off
num_classes = cfg.MODEL.POINT_HEAD.NUM_CLASSES
fc_dim = cfg.MODEL.POINT_HEAD.FC_DIM
num_fc = cfg.MODEL.POINT_HEAD.NUM_FC
cls_agnostic_mask = cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK
self.coarse_pred_each_layer = cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER
input_channels = input_shape.channels
# fmt: on
fc_dim_in = input_channels + num_classes
self.fc_layers = []
for k in range(num_fc):
fc = nn.Conv1d(fc_dim_in, fc_dim, kernel_size=1, stride=1, padding=0, bias=True)
self.add_module("fc{}".format(k + 1), fc)
self.fc_layers.append(fc)
fc_dim_in = fc_dim
fc_dim_in += num_classes if self.coarse_pred_each_layer else 0
num_mask_classes = 1 if cls_agnostic_mask else num_classes
self.predictor = nn.Conv1d(fc_dim_in, num_mask_classes, kernel_size=1, stride=1, padding=0)
for layer in self.fc_layers:
weight_init.c2_msra_fill(layer)
# use normal distribution initialization for mask prediction layer
nn.init.normal_(self.predictor.weight, std=0.001)
if self.predictor.bias is not None:
nn.init.constant_(self.predictor.bias, 0)
def forward(self, fine_grained_features, coarse_features):
x = torch.cat((fine_grained_features, coarse_features), dim=1)
for layer in self.fc_layers:
x = F.relu(layer(x))
if self.coarse_pred_each_layer:
x = cat((x, coarse_features), dim=1)
return self.predictor(x)
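# Shape sketch for StandardPointHead.forward: fine_grained_features is (R, C_fine, P) and
# coarse_features is (R, num_classes, P) for R boxes and P sampled points. They are
# concatenated along the channel dimension and passed through 1x1 conv1d layers over P,
# producing point logits of shape (R, num_mask_classes, P).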
def build_point_head(cfg, input_channels):
"""
Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`.
"""
head_name = cfg.MODEL.POINT_HEAD.NAME
return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
| 43.28481
| 101
| 0.64176
|
4a1c84895f3a052786d61f342c536609442b44a4
| 349
|
pyde
|
Python
|
Processing Py_!/listing_10/listing_10.pyde
|
GermogenovAs/2019-fall-polytech-cs
|
3d02ed149b5d0468e213d96b5a1aa8263880a3e1
|
[
"MIT"
] | null | null | null |
Processing Py_!/listing_10/listing_10.pyde
|
GermogenovAs/2019-fall-polytech-cs
|
3d02ed149b5d0468e213d96b5a1aa8263880a3e1
|
[
"MIT"
] | null | null | null |
Processing Py_!/listing_10/listing_10.pyde
|
GermogenovAs/2019-fall-polytech-cs
|
3d02ed149b5d0468e213d96b5a1aa8263880a3e1
|
[
"MIT"
] | null | null | null |
def setup():
size(500, 500)
smooth()
    noLoop()
def draw():
background(100)
stroke( 0xFF881DCB)
strokeWeight(110)
line(100, 150, 400, 150)
stroke( 0xFFCB1DA3)
strokeWeight(60)
line(100, 250, 400, 250)
stroke( 0xFFCB1D1D)
strokeWeight(110)
line(100, 350, 400, 350)
| 16.619048
| 28
| 0.544413
|
4a1c84aafd74572c6eda4b91925717d47b8a7fb3
| 55,654
|
py
|
Python
|
simba/features_scripts/extract_features_16bp.py
|
KonradDanielewski/simba
|
d7a448222e33dcb9880b65c14b5b676933cc6fd7
|
[
"MIT"
] | 172
|
2019-12-18T22:19:42.000Z
|
2022-03-29T01:58:25.000Z
|
simba/features_scripts/extract_features_16bp.py
|
KonradDanielewski/simba
|
d7a448222e33dcb9880b65c14b5b676933cc6fd7
|
[
"MIT"
] | 165
|
2020-01-10T19:05:16.000Z
|
2022-03-31T16:08:36.000Z
|
simba/features_scripts/extract_features_16bp.py
|
KonradDanielewski/simba
|
d7a448222e33dcb9880b65c14b5b676933cc6fd7
|
[
"MIT"
] | 80
|
2019-12-20T00:01:43.000Z
|
2022-03-29T16:20:10.000Z
|
from __future__ import division
import os, glob
import pandas as pd
import math
import numpy as np
from scipy.spatial import ConvexHull
import scipy
from configparser import ConfigParser, NoOptionError, NoSectionError
from numba import jit
from simba.rw_dfs import *
from simba.features_scripts.unit_tests import *
def extract_features_wotarget_16(inifile):
config = ConfigParser()
configFile = str(inifile)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in, csv_dir_out = os.path.join(projectPath, 'csv', 'outlier_corrected_movement_location'), os.path.join(projectPath,'csv', 'features_extracted')
vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
vidinfDf = pd.read_csv(vidInfPath)
    # cast video names to strings so they match lookups against the video info log
vidinfDf.Video = vidinfDf.Video.astype('str')
def count_values_in_range(series, values_in_range_min, values_in_range_max):
return series.between(left=values_in_range_min, right=values_in_range_max).sum()
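    # e.g. count_values_in_range(pd.Series([1, 5, 10]), 0, 6) -> 2 (1 and 5 fall within [0, 6])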
def angle3pt(ax, ay, bx, by, cx, cy):
ang = math.degrees(
math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))
return ang + 360 if ang < 0 else ang
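    # e.g. angle3pt(1, 0, 0, 0, 0, 1) -> 90.0: the angle at b=(0, 0) between a=(1, 0) and c=(0, 1)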
@jit(nopython=True, cache=True)
def EuclidianDistCald(bp1xVals, bp1yVals, bp2xVals, bp2yVals, currPixPerMM):
series = (np.sqrt((bp1xVals - bp2xVals) ** 2 + (bp1yVals - bp2yVals) ** 2)) / currPixPerMM
return series
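    # e.g. EuclidianDistCald(np.array([0.]), np.array([0.]), np.array([3.]), np.array([4.]), 2.0)
    # -> array([2.5]): a 3-4-5 pixel triangle scaled to mm at 2 pixels per mm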
roll_windows, loopy = [], 0
roll_windows_values = [2, 5, 6, 7.5, 15]
#REMOVE WINDOWS THAT ARE TOO SMALL
roll_windows_values = check_minimum_roll_windows(roll_windows_values, vidinfDf['fps'].min())
########### FIND CSV FILES ###########
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
print('Extracting features from ' + str(len(filesFound)) + ' files...')
########### CREATE PD FOR RAW DATA AND PD FOR MOVEMENT BETWEEN FRAMES ###########
for currentFile in filesFound:
M1_hull_large_euclidean_list, M1_hull_small_euclidean_list, M1_hull_mean_euclidean_list, M1_hull_sum_euclidean_list, M2_hull_large_euclidean_list, M2_hull_small_euclidean_list, M2_hull_mean_euclidean_list, M2_hull_sum_euclidean_list = [], [], [], [], [], [], [], []
currVidName = os.path.basename(currentFile).replace('.' +wfileType, '')
currVideoSettings, currPixPerMM, fps = read_video_info(vidinfDf, currVidName)
print('Processing ' + '"' + str(currVidName) + '".' + ' Fps: ' + str(fps) + ". mm/ppx: " + str(currPixPerMM))
for i in range(len(roll_windows_values)):
roll_windows.append(int(fps / roll_windows_values[i]))
loopy += 1
columnHeaders = ["Ear_left_1_x", "Ear_left_1_y", "Ear_left_1_p", "Ear_right_1_x", "Ear_right_1_y",
"Ear_right_1_p", "Nose_1_x", "Nose_1_y", "Nose_1_p", "Center_1_x", "Center_1_y", "Center_1_p",
"Lat_left_1_x", "Lat_left_1_y",
"Lat_left_1_p", "Lat_right_1_x", "Lat_right_1_y", "Lat_right_1_p", "Tail_base_1_x",
"Tail_base_1_y", "Tail_base_1_p", "Tail_end_1_x", "Tail_end_1_y", "Tail_end_1_p",
"Ear_left_2_x",
"Ear_left_2_y", "Ear_left_2_p", "Ear_right_2_x", "Ear_right_2_y", "Ear_right_2_p",
"Nose_2_x", "Nose_2_y", "Nose_2_p", "Center_2_x", "Center_2_y", "Center_2_p", "Lat_left_2_x",
"Lat_left_2_y",
"Lat_left_2_p", "Lat_right_2_x", "Lat_right_2_y", "Lat_right_2_p", "Tail_base_2_x",
"Tail_base_2_y", "Tail_base_2_p", "Tail_end_2_x", "Tail_end_2_y", "Tail_end_2_p"]
csv_df = read_df(currentFile, wfileType)
try:
csv_df = csv_df.set_index('scorer')
except KeyError:
pass
csv_df.columns = columnHeaders
csv_df = csv_df.fillna(0)
#csv_df = csv_df.drop(csv_df.index[[0]])
csv_df = csv_df.apply(pd.to_numeric)
csv_df = csv_df.reset_index()
csv_df = csv_df.reset_index(drop=True)
print('Evaluating convex hulls...')
########### MOUSE AREAS ###########################################
try:
csv_df['Mouse_1_poly_area'] = csv_df.apply(lambda x: ConvexHull(np.array(
[[x['Ear_left_1_x'], x["Ear_left_1_y"]],
[x['Ear_right_1_x'], x["Ear_right_1_y"]],
[x['Nose_1_x'], x["Nose_1_y"]],
[x['Lat_left_1_x'], x["Lat_left_1_y"]], \
[x['Lat_right_1_x'], x["Lat_right_1_y"]],
[x['Tail_base_1_x'], x["Tail_base_1_y"]],
[x['Center_1_x'], x["Center_1_y"]]])).area, axis=1)
except scipy.spatial.qhull.QhullError as e:
print(e)
print('ERROR: For more information, go to https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features')
csv_df['Mouse_1_poly_area'] = csv_df.eval('Mouse_1_poly_area / @currPixPerMM')
try:
csv_df['Mouse_2_poly_area'] = csv_df.apply(lambda x: ConvexHull(np.array(
[[x['Ear_left_2_x'], x["Ear_left_2_y"]],
[x['Ear_right_2_x'], x["Ear_right_2_y"]],
[x['Nose_2_x'], x["Nose_2_y"]],
[x['Lat_left_2_x'], x["Lat_left_2_y"]], \
[x['Lat_right_2_x'], x["Lat_right_2_y"]],
[x['Tail_base_2_x'], x["Tail_base_2_y"]],
[x['Center_2_x'], x["Center_2_y"]]])).area, axis=1)
except scipy.spatial.qhull.QhullError as e:
print(e)
print('ERROR: For more information, check https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features')
########### CREATE SHIFTED DATAFRAME FOR DISTANCE CALCULATIONS ###########################################
csv_df_shifted = csv_df.shift(periods=1)
csv_df_shifted = csv_df_shifted.rename(
columns={'Ear_left_1_x': 'Ear_left_1_x_shifted', 'Ear_left_1_y': 'Ear_left_1_y_shifted',
'Ear_left_1_p': 'Ear_left_1_p_shifted', 'Ear_right_1_x': 'Ear_right_1_x_shifted', \
'Ear_right_1_y': 'Ear_right_1_y_shifted', 'Ear_right_1_p': 'Ear_right_1_p_shifted',
'Nose_1_x': 'Nose_1_x_shifted', 'Nose_1_y': 'Nose_1_y_shifted', \
'Nose_1_p': 'Nose_1_p_shifted', 'Center_1_x': 'Center_1_x_shifted',
'Center_1_y': 'Center_1_y_shifted', 'Center_1_p': 'Center_1_p_shifted', 'Lat_left_1_x': \
'Lat_left_1_x_shifted', 'Lat_left_1_y': 'Lat_left_1_y_shifted',
'Lat_left_1_p': 'Lat_left_1_p_shifted', 'Lat_right_1_x': 'Lat_right_1_x_shifted',
'Lat_right_1_y': 'Lat_right_1_y_shifted', \
'Lat_right_1_p': 'Lat_right_1_p_shifted', 'Tail_base_1_x': 'Tail_base_1_x_shifted',
'Tail_base_1_y': 'Tail_base_1_y_shifted', \
'Tail_base_1_p': 'Tail_base_1_p_shifted', 'Tail_end_1_x': 'Tail_end_1_x_shifted',
'Tail_end_1_y': 'Tail_end_1_y_shifted', 'Tail_end_1_p': 'Tail_end_1_p_shifted',
'Ear_left_2_x': 'Ear_left_2_x_shifted', 'Ear_left_2_y': 'Ear_left_2_y_shifted',
'Ear_left_2_p': 'Ear_left_2_p_shifted', 'Ear_right_2_x': 'Ear_right_2_x_shifted', \
'Ear_right_2_y': 'Ear_right_2_y_shifted', 'Ear_right_2_p': 'Ear_right_2_p_shifted',
'Nose_2_x': 'Nose_2_x_shifted', 'Nose_2_y': 'Nose_2_y_shifted', \
'Nose_2_p': 'Nose_2_p_shifted', 'Center_2_x': 'Center_2_x_shifted',
'Center_2_y': 'Center_2_y_shifted', 'Center_2_p': 'Center_2_p_shifted', 'Lat_left_2_x': \
'Lat_left_2_x_shifted', 'Lat_left_2_y': 'Lat_left_2_y_shifted',
'Lat_left_2_p': 'Lat_left_2_p_shifted', 'Lat_right_2_x': 'Lat_right_2_x_shifted',
'Lat_right_2_y': 'Lat_right_2_y_shifted', \
'Lat_right_2_p': 'Lat_right_2_p_shifted', 'Tail_base_2_x': 'Tail_base_2_x_shifted',
'Tail_base_2_y': 'Tail_base_2_y_shifted', \
'Tail_base_2_p': 'Tail_base_2_p_shifted', 'Tail_end_2_x': 'Tail_end_2_x_shifted',
'Tail_end_2_y': 'Tail_end_2_y_shifted', 'Tail_end_2_p': 'Tail_end_2_p_shifted',
'Mouse_1_poly_area': 'Mouse_1_poly_area_shifted',
'Mouse_2_poly_area': 'Mouse_2_poly_area_shifted'})
csv_df_combined = pd.concat([csv_df, csv_df_shifted], axis=1, join='inner')
csv_df_combined = csv_df_combined.fillna(0)
csv_df_combined = csv_df_combined.reset_index(drop=True)
print('Calculating euclidean distances...')
########### EUCLIDEAN DISTANCES ###########################################
csv_df['Mouse_1_nose_to_tail'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values, csv_df['Tail_base_1_x'].values, csv_df['Tail_base_1_y'].values, currPixPerMM)
csv_df['Mouse_2_nose_to_tail'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values, csv_df['Tail_base_2_x'].values, csv_df['Tail_base_2_y'].values, currPixPerMM)
csv_df['Mouse_1_width'] = EuclidianDistCald(csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_width'] = EuclidianDistCald(csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Ear_distance'] = EuclidianDistCald(csv_df['Ear_left_1_x'].values, csv_df['Ear_left_1_y'].values, csv_df['Ear_right_1_x'].values, csv_df['Ear_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Ear_distance'] = EuclidianDistCald(csv_df['Ear_left_2_x'].values, csv_df['Ear_left_2_y'].values, csv_df['Ear_right_2_x'].values, csv_df['Ear_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_centroid'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values, csv_df['Center_1_x'].values, csv_df['Center_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_centroid'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Center_2_x'].values, csv_df['Center_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_lateral_left'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Nose_to_lateral_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_left'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['Mouse_1_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_1_x'].values, csv_df['Center_1_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['Mouse_2_Centroid_to_lateral_right'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['Centroid_distance'] = EuclidianDistCald(csv_df['Center_2_x'].values, csv_df['Center_2_y'].values,csv_df['Center_1_x'].values, csv_df['Center_1_y'].values, currPixPerMM)
csv_df['Nose_to_nose_distance'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_left'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_left_2_x'].values, csv_df['Lat_left_2_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_lat_right'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Lat_right_2_x'].values, csv_df['Lat_right_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_left'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_left_1_x'].values, csv_df['Lat_left_1_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_lat_right'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Lat_right_1_x'].values, csv_df['Lat_right_1_y'].values, currPixPerMM)
csv_df['M1_Nose_to_M2_tail_base'] = EuclidianDistCald(csv_df['Nose_1_x'].values, csv_df['Nose_1_y'].values,csv_df['Tail_base_2_x'].values, csv_df['Tail_base_2_y'].values, currPixPerMM)
csv_df['M2_Nose_to_M1_tail_base'] = EuclidianDistCald(csv_df['Nose_2_x'].values, csv_df['Nose_2_y'].values,csv_df['Tail_base_1_x'].values, csv_df['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_centroid'] = EuclidianDistCald(csv_df_combined['Center_1_x_shifted'].values, csv_df_combined['Center_1_y_shifted'].values,csv_df_combined['Center_1_x'].values, csv_df_combined['Center_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_centroid'] = EuclidianDistCald(csv_df_combined['Center_2_x_shifted'].values, csv_df_combined['Center_2_y_shifted'].values,csv_df_combined['Center_2_x'].values, csv_df_combined['Center_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_nose'] = EuclidianDistCald(csv_df_combined['Nose_1_x_shifted'].values, csv_df_combined['Nose_1_y_shifted'].values,csv_df_combined['Nose_1_x'].values, csv_df_combined['Nose_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_nose'] = EuclidianDistCald(csv_df_combined['Nose_2_x_shifted'].values, csv_df_combined['Nose_2_y_shifted'].values,csv_df_combined['Nose_2_x'].values, csv_df_combined['Nose_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_1_x_shifted'].values, csv_df_combined['Tail_base_1_y_shifted'].values,csv_df_combined['Tail_base_1_x'].values, csv_df_combined['Tail_base_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_base'] = EuclidianDistCald(csv_df_combined['Tail_base_2_x_shifted'].values, csv_df_combined['Tail_base_2_y_shifted'].values,csv_df_combined['Tail_base_2_x'].values, csv_df_combined['Tail_base_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_1_x_shifted'].values, csv_df_combined['Tail_end_1_y_shifted'].values,csv_df_combined['Tail_end_1_x'].values, csv_df_combined['Tail_end_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_tail_end'] = EuclidianDistCald(csv_df_combined['Tail_end_2_x_shifted'].values, csv_df_combined['Tail_end_2_y_shifted'].values,csv_df_combined['Tail_end_2_x'].values, csv_df_combined['Tail_end_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_1_x_shifted'].values, csv_df_combined['Ear_left_1_y_shifted'].values,csv_df_combined['Ear_left_1_x'].values, csv_df_combined['Ear_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_left_ear'] = EuclidianDistCald(csv_df_combined['Ear_left_2_x_shifted'].values, csv_df_combined['Ear_left_2_y_shifted'].values,csv_df_combined['Ear_left_2_x'].values, csv_df_combined['Ear_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_1_x_shifted'].values, csv_df_combined['Ear_right_1_y_shifted'].values,csv_df_combined['Ear_right_1_x'].values, csv_df_combined['Ear_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_right_ear'] = EuclidianDistCald(csv_df_combined['Ear_right_2_x_shifted'].values, csv_df_combined['Ear_right_2_y_shifted'].values,csv_df_combined['Ear_right_2_x'].values, csv_df_combined['Ear_right_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_1_x_shifted'].values, csv_df_combined['Lat_left_1_y_shifted'].values,csv_df_combined['Lat_left_1_x'].values, csv_df_combined['Lat_left_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_left'] = EuclidianDistCald(csv_df_combined['Lat_left_2_x_shifted'].values, csv_df_combined['Lat_left_2_y_shifted'].values,csv_df_combined['Lat_left_2_x'].values, csv_df_combined['Lat_left_2_y'].values, currPixPerMM)
csv_df['Movement_mouse_1_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_1_x_shifted'].values, csv_df_combined['Lat_right_1_y_shifted'].values,csv_df_combined['Lat_right_1_x'].values, csv_df_combined['Lat_right_1_y'].values, currPixPerMM)
csv_df['Movement_mouse_2_lateral_right'] = EuclidianDistCald(csv_df_combined['Lat_right_2_x_shifted'].values, csv_df_combined['Lat_right_2_y_shifted'].values,csv_df_combined['Lat_right_2_x'].values, csv_df_combined['Lat_right_2_y'].values, currPixPerMM)
csv_df['Mouse_1_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_1_poly_area_shifted - csv_df_combined.Mouse_1_poly_area")
csv_df['Mouse_2_polygon_size_change'] = pd.eval("csv_df_combined.Mouse_2_poly_area_shifted - csv_df_combined.Mouse_2_poly_area")
print('Calculating hull variables...')
########### HULL - EUCLIDEAN DISTANCES ###########################################
for index, row in csv_df.iterrows():
M1_np_array = np.array(
[[row['Ear_left_1_x'], row["Ear_left_1_y"]], [row['Ear_right_1_x'], row["Ear_right_1_y"]],
[row['Nose_1_x'], row["Nose_1_y"]], [row['Center_1_x'], row["Center_1_y"]],
[row['Lat_left_1_x'], row["Lat_left_1_y"]], [row['Lat_right_1_x'], row["Lat_right_1_y"]],
[row['Tail_base_1_x'], row["Tail_base_1_y"]]]).astype(int)
M2_np_array = np.array(
[[row['Ear_left_2_x'], row["Ear_left_2_y"]], [row['Ear_right_2_x'], row["Ear_right_2_y"]],
[row['Nose_2_x'], row["Nose_2_y"]], [row['Center_2_x'], row["Center_2_y"]],
[row['Lat_left_2_x'], row["Lat_left_2_y"]], [row['Lat_right_2_x'], row["Lat_right_2_y"]],
[row['Tail_base_2_x'], row["Tail_base_2_y"]]]).astype(int)
M1_dist_euclidean = scipy.spatial.distance.cdist(M1_np_array, M1_np_array, metric='euclidean')
M1_dist_euclidean = M1_dist_euclidean[M1_dist_euclidean != 0]
M1_hull_large_euclidean = np.amax(M1_dist_euclidean)
M1_hull_small_euclidean = np.min(M1_dist_euclidean)
M1_hull_mean_euclidean = np.mean(M1_dist_euclidean)
M1_hull_sum_euclidean = np.sum(M1_dist_euclidean)
M1_hull_large_euclidean_list.append(M1_hull_large_euclidean)
M1_hull_small_euclidean_list.append(M1_hull_small_euclidean)
M1_hull_mean_euclidean_list.append(M1_hull_mean_euclidean)
M1_hull_sum_euclidean_list.append(M1_hull_sum_euclidean)
M2_dist_euclidean = scipy.spatial.distance.cdist(M2_np_array, M2_np_array, metric='euclidean')
M2_dist_euclidean = M2_dist_euclidean[M2_dist_euclidean != 0]
M2_hull_large_euclidean = np.amax(M2_dist_euclidean)
M2_hull_small_euclidean = np.min(M2_dist_euclidean)
M2_hull_mean_euclidean = np.mean(M2_dist_euclidean)
M2_hull_sum_euclidean = np.sum(M2_dist_euclidean)
M2_hull_large_euclidean_list.append(M2_hull_large_euclidean)
M2_hull_small_euclidean_list.append(M2_hull_small_euclidean)
M2_hull_mean_euclidean_list.append(M2_hull_mean_euclidean)
M2_hull_sum_euclidean_list.append(M2_hull_sum_euclidean)
csv_df['M1_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_large_euclidean_list))
csv_df['M1_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_small_euclidean_list))
csv_df['M1_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_mean_euclidean_list))
csv_df['M1_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M1_hull_sum_euclidean_list))
csv_df['M2_largest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_large_euclidean_list))
csv_df['M2_smallest_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_small_euclidean_list))
csv_df['M2_mean_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_mean_euclidean_list))
csv_df['M2_sum_euclidean_distance_hull'] = list(map(lambda x: x / currPixPerMM, M2_hull_sum_euclidean_list))
csv_df['Sum_euclidean_distance_hull_M1_M2'] = (csv_df['M1_sum_euclidean_distance_hull'] + csv_df['M2_sum_euclidean_distance_hull'])
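# The hull features summarise, per frame, the 7x7 matrix of pairwise Euclidean distances
# between one animal's tracked body parts; dropping the zeros removes the diagonal
# (self-distances), and amax/min/mean/sum then describe how stretched out or compact the
# animal is, with division by currPixPerMM converting pixels to mm. A minimal sketch of one
# frame (made-up coordinates):
#     pts = np.array([[0, 0], [3, 4], [6, 8]])
#     d = scipy.spatial.distance.cdist(pts, pts)  # 3x3 matrix with a zero diagonal
#     d = d[d != 0]                               # -> [5., 10., 5., 5., 10., 5.]
#     d.max(), d.min(), d.mean(), d.sum()         # the four hull summary statistics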
########### COLLAPSED MEASURES ###########################################
csv_df['Total_movement_centroids'] = csv_df.eval("Movement_mouse_1_centroid + Movement_mouse_2_centroid")
csv_df['Total_movement_tail_ends'] = csv_df.eval('Movement_mouse_1_tail_end + Movement_mouse_2_tail_end')
csv_df['Total_movement_all_bodyparts_M1'] = csv_df.eval('Movement_mouse_1_nose + Movement_mouse_1_tail_end + Movement_mouse_1_tail_base + Movement_mouse_1_left_ear + Movement_mouse_1_right_ear + Movement_mouse_1_lateral_left + Movement_mouse_1_lateral_right')
csv_df['Total_movement_all_bodyparts_M2'] = csv_df.eval('Movement_mouse_2_nose + Movement_mouse_2_tail_end + Movement_mouse_2_tail_base + Movement_mouse_2_left_ear + Movement_mouse_2_right_ear + Movement_mouse_2_lateral_left + Movement_mouse_2_lateral_right')
csv_df['Total_movement_all_bodyparts_both_mice'] = csv_df.eval('Total_movement_all_bodyparts_M1 + Total_movement_all_bodyparts_M2')
########### CALC ROLLING WINDOWS MEDIANS AND MEANS ###########################################
print('Calculating rolling windows: medians, means, and sums...')
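# roll_windows is assumed to hold window lengths in frames (typically derived from the video
# fps elsewhere in the pipeline), while roll_windows_values holds the labels used in the
# column names. min_periods=1 makes the first frames average over however many frames are
# available instead of producing NaNs, e.g.:
#     pd.Series([1, 2, 3]).rolling(2, min_periods=1).mean()  # -> [1.0, 1.5, 2.5]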
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Sum_euclid_distances_hull_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Sum_euclidean_distance_hull_M1_M2'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Movement_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Movement_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Distance_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Distance_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Centroid_distance'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_width_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Mouse1_width_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse1_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_1_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_width_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Mouse2_width_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Mouse2_width_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Mouse_2_width'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_mean_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_mean_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_mean_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_mean_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_smallest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_smallest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_smallest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse1_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M1_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Mouse2_largest_euclid_distances_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Mouse2_largest_euclid_distances_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Mouse2_largest_euclid_distances_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['M2_largest_euclidean_distance_hull'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_all_bodyparts_both_mice_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).mean()
currentColName = 'Total_movement_all_bodyparts_both_mice_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_all_bodyparts_both_mice'].rolling(roll_windows[i],
min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_centroids_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Total_movement_centroids_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Total_movement_centroids_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_movement_centroids'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_base_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_base_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_base'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Centroid_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Centroid_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_centroid'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Tail_end_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i],
min_periods=1).median()
currentColName = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Tail_end_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_tail_end'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M1_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M1_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M1_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_1_nose'].rolling(roll_windows[i], min_periods=1).sum()
for i in range(len(roll_windows_values)):
currentColName = 'Nose_movement_M2_median_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).median()
currentColName = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).mean()
currentColName = 'Nose_movement_M2_sum_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Movement_mouse_2_nose'].rolling(roll_windows[i], min_periods=1).sum()
########### BODY PARTS RELATIVE TO EACH OTHER ##################
csv_df['Tail_end_relative_to_tail_base_centroid_nose'] = csv_df['Movement_mouse_1_tail_end'] - (
csv_df['Movement_mouse_1_tail_base'] + csv_df['Movement_mouse_1_centroid'] + csv_df[
'Movement_mouse_1_nose'])
for i in range(len(roll_windows_values)):
currentColName_M1 = 'Tail_end_relative_to_tail_base_centroid_nose_M1_' + str(roll_windows_values[i])
tail_end_col_name = 'Tail_end_movement_M1_mean_' + str(roll_windows_values[i])
tail_base_col_name = 'Tail_base_movement_M1_mean_' + str(roll_windows_values[i])
centroid_col_name = 'Centroid_movement_M1_mean_' + str(roll_windows_values[i])
nose_col_name = 'Nose_movement_M1_mean_' + str(roll_windows_values[i])
currentColName_M2 = 'Tail_end_relative_to_tail_base_centroid_nose_M2_mean_' + str(roll_windows_values[i])
tail_end_col_name_M2 = 'Tail_end_movement_M2_mean_' + str(roll_windows_values[i])
tail_base_col_name_M2 = 'Tail_base_movement_M2_mean_' + str(roll_windows_values[i])
centroid_col_name_M2 = 'Centroid_movement_M2_mean_' + str(roll_windows_values[i])
nose_col_name_M2 = 'Nose_movement_M2_mean_' + str(roll_windows_values[i])
csv_df[currentColName_M1] = csv_df[tail_end_col_name] - (
csv_df[tail_base_col_name] + csv_df[centroid_col_name] + csv_df[nose_col_name])
csv_df[currentColName_M2] = csv_df[tail_end_col_name_M2] - (
csv_df[tail_base_col_name_M2] + csv_df[centroid_col_name_M2] + csv_df[nose_col_name_M2])
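# The "relative" features above contrast tail-tip movement against the combined movement of
# tail base, centroid and nose; large positive values plausibly flag frames where the tail
# moves while the rest of the body stays put, though the behavioural reading ultimately
# depends on the downstream classifier.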
########### ANGLES ###########################################
print('Calculating angles...')
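# angle3pt is assumed to return the angle at the middle point of the nose -> center ->
# tail-base triplet, giving a per-frame measure of how bent the body axis is. A common
# three-point-angle formulation (the exact convention depends on the helper defined earlier
# in this script) is:
#     ang = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx)) % 360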
csv_df['Mouse_1_angle'] = csv_df.apply(
lambda x: angle3pt(x['Nose_1_x'], x['Nose_1_y'], x['Center_1_x'], x['Center_1_y'], x['Tail_base_1_x'],
x['Tail_base_1_y']), axis=1)
csv_df['Mouse_2_angle'] = csv_df.apply(
lambda x: angle3pt(x['Nose_2_x'], x['Nose_2_y'], x['Center_2_x'], x['Center_2_y'], x['Tail_base_2_x'],
x['Tail_base_2_y']), axis=1)
csv_df['Total_angle_both_mice'] = csv_df['Mouse_1_angle'] + csv_df['Mouse_2_angle']
for i in range(len(roll_windows_values)):
currentColName = 'Total_angle_both_mice_' + str(roll_windows_values[i])
csv_df[currentColName] = csv_df['Total_angle_both_mice'].rolling(roll_windows[i], min_periods=1).sum()
########### DEVIATIONS ###########################################
print('Calculating deviations...')
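# Each "deviation" feature below is the column's global mean minus the per-frame value, so
# frames moving more than average come out negative and unusually still or compact frames
# stand out as large positive values.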
csv_df['Total_movement_all_bodyparts_both_mice_deviation'] = csv_df.eval('Total_movement_all_bodyparts_both_mice.mean() - Total_movement_all_bodyparts_both_mice')
csv_df['Sum_euclid_distances_hull_deviation'] = csv_df.eval('Sum_euclidean_distance_hull_M1_M2.mean() - Sum_euclidean_distance_hull_M1_M2')
csv_df['M1_smallest_euclid_distances_hull_deviation'] = csv_df.eval('M1_smallest_euclidean_distance_hull.mean() - M1_smallest_euclidean_distance_hull')
csv_df['M1_largest_euclid_distances_hull_deviation'] = csv_df.eval('M1_largest_euclidean_distance_hull.mean() - M1_largest_euclidean_distance_hull')
csv_df['M1_mean_euclid_distances_hull_deviation'] = csv_df.eval('M1_mean_euclidean_distance_hull.mean() - M1_mean_euclidean_distance_hull')
csv_df['Centroid_distance_deviation'] = csv_df.eval('Centroid_distance.mean() - Centroid_distance')
csv_df['Total_angle_both_mice_deviation'] = csv_df.eval('Total_angle_both_mice.mean() - Total_angle_both_mice')
csv_df['Movement_mouse_1_deviation_centroid'] = csv_df.eval('Movement_mouse_1_centroid.mean() - Movement_mouse_1_centroid')
csv_df['Movement_mouse_2_deviation_centroid'] = csv_df.eval('Movement_mouse_2_centroid.mean() - Movement_mouse_2_centroid')
csv_df['Mouse_1_polygon_deviation'] = csv_df.eval('Mouse_1_poly_area.mean() - Mouse_1_poly_area')
csv_df['Mouse_2_polygon_deviation'] = csv_df.eval('Mouse_2_poly_area.mean() - Mouse_2_poly_area')
for i in roll_windows_values:
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(i)
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
for i in range(len(roll_windows_values)):
currentColName = 'Total_angle_both_mice_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_deviation'
csv_df[currentDev_colName] = (csv_df[currentColName].mean() - csv_df[currentColName])
########### PERCENTILE RANK ###########################################
print('Calculating percentile ranks...')
csv_df['Movement_percentile_rank'] = csv_df['Total_movement_centroids'].rank(pct=True)
csv_df['Distance_percentile_rank'] = csv_df['Centroid_distance'].rank(pct=True)
csv_df['Movement_mouse_1_percentile_rank'] = csv_df['Movement_mouse_1_centroid'].rank(pct=True)
csv_df['Movement_mouse_2_percentile_rank'] = csv_df['Movement_mouse_2_centroid'].rank(pct=True)
csv_df['Movement_mouse_1_deviation_percentile_rank'] = csv_df['Movement_mouse_1_deviation_centroid'].rank(
pct=True)
csv_df['Movement_mouse_2_deviation_percentile_rank'] = csv_df['Movement_mouse_2_deviation_centroid'].rank(
pct=True)
csv_df['Centroid_distance_percentile_rank'] = csv_df['Centroid_distance'].rank(pct=True)
csv_df['Centroid_distance_deviation_percentile_rank'] = csv_df['Centroid_distance_deviation'].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Total_movement_all_bodyparts_both_mice_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Sum_euclid_distances_hull_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_mean_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_smallest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Mouse1_largest_euclid_distances_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Movement_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
for i in range(len(roll_windows_values)):
currentColName = 'Distance_mean_' + str(roll_windows_values[i])
currentDev_colName = currentColName + '_percentile_rank'
csv_df[currentDev_colName] = csv_df[currentColName].rank(pct=True)
########### CALCULATE STRAIGHTNESS OF POLYLINE PATH: tortuosity ###########################################
print('Calculating path tortuosities...')
as_strided = np.lib.stride_tricks.as_strided
win_size = 3
centroidList_Mouse1_x = as_strided(csv_df.Center_1_x, (len(csv_df) - (win_size - 1), win_size),
(csv_df.Center_1_x.values.strides * 2))
centroidList_Mouse1_y = as_strided(csv_df.Center_1_y, (len(csv_df) - (win_size - 1), win_size),
(csv_df.Center_1_y.values.strides * 2))
centroidList_Mouse2_x = as_strided(csv_df.Center_2_x, (len(csv_df) - (win_size - 1), win_size),
(csv_df.Center_2_x.values.strides * 2))
centroidList_Mouse2_y = as_strided(csv_df.Center_2_y, (len(csv_df) - (win_size - 1), win_size),
(csv_df.Center_2_y.values.strides * 2))
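# as_strided builds overlapping length-3 windows over the centroid coordinates without
# copying: row i is [value_i, value_i+1, value_i+2], and repeating the 1-D stride for both
# axes is what makes the rows overlap. On NumPy >= 1.20 the same windows can be produced
# more safely with:
#     np.lib.stride_tricks.sliding_window_view(csv_df.Center_1_x.values, win_size)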
for k in range(len(roll_windows_values)):
start = 0
end = start + int(roll_windows_values[k])
tortuosity_M1 = []
tortuosity_M2 = []
for y in range(len(csv_df)):
tortuosity_List_M1 = []
tortuosity_List_M2 = []
CurrCentroidList_Mouse1_x = centroidList_Mouse1_x[start:end]
CurrCentroidList_Mouse1_y = centroidList_Mouse1_y[start:end]
CurrCentroidList_Mouse2_x = centroidList_Mouse2_x[start:end]
CurrCentroidList_Mouse2_y = centroidList_Mouse2_y[start:end]
for i in range(len(CurrCentroidList_Mouse1_x)):
currMovementAngle_mouse1 = (
angle3pt(CurrCentroidList_Mouse1_x[i][0], CurrCentroidList_Mouse1_y[i][0],
CurrCentroidList_Mouse1_x[i][1], CurrCentroidList_Mouse1_y[i][1],
CurrCentroidList_Mouse1_x[i][2], CurrCentroidList_Mouse1_y[i][2]))
currMovementAngle_mouse2 = (
angle3pt(CurrCentroidList_Mouse2_x[i][0], CurrCentroidList_Mouse2_y[i][0],
CurrCentroidList_Mouse2_x[i][1], CurrCentroidList_Mouse2_y[i][1],
CurrCentroidList_Mouse2_x[i][2], CurrCentroidList_Mouse2_y[i][2]))
tortuosity_List_M1.append(currMovementAngle_mouse1)
tortuosity_List_M2.append(currMovementAngle_mouse2)
tortuosity_M1.append(sum(tortuosity_List_M1) / (2 * math.pi))
tortuosity_M2.append(sum(tortuosity_List_M2) / (2 * math.pi))
start += 1
end += 1
currentColName1 = str('Tortuosity_Mouse1_') + str(roll_windows_values[k])
# Mouse 2 tortuosity is computed above but, as in the original pipeline, not stored as a feature:
#currentColName2 = str('Tortuosity_Mouse2_') + str(roll_windows_values[k])
csv_df[currentColName1] = tortuosity_M1
#csv_df[currentColName2] = tortuosity_M2
########### CALC THE NUMBER OF LOW PROBABILITY DETECTIONS & TOTAL PROBABILITY VALUE FOR ROW###########################################
print('Calculating pose probability scores...')
csv_df['Sum_probabilities'] = csv_df.eval('Ear_left_1_p + Ear_right_1_p + Nose_1_p + Center_1_p + Lat_left_1_p + Lat_right_1_p + Tail_base_1_p + Tail_end_1_p + Ear_left_2_p + Ear_right_2_p + Nose_2_p + Center_2_p + Lat_left_2_p + Lat_right_2_p + Tail_base_2_p + Tail_end_2_p')
csv_df['Sum_probabilities_deviation'] = csv_df.eval('Sum_probabilities.mean() - Sum_probabilities')
csv_df['Sum_probabilities_deviation_percentile_rank'] = csv_df['Sum_probabilities_deviation'].rank(pct=True)
csv_df['Sum_probabilities_percentile_rank'] = csv_df['Sum_probabilities'].rank(pct=True)
csv_df_probability = csv_df.filter(
['Ear_left_1_p', 'Ear_right_1_p', 'Nose_1_p', 'Center_1_p', 'Lat_left_1_p', 'Lat_right_1_p',
'Tail_base_1_p', 'Tail_end_1_p', 'Ear_left_2_p', 'Ear_right_2_p', 'Nose_2_p', 'Center_2_p', 'Lat_left_2_p',
'Lat_right_2_p', 'Tail_base_2_p', 'Tail_end_2_p'])
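# count_values_in_range is assumed to count, per frame, how many of the 16 body-part
# probability columns fall inside [values_in_range_min, values_in_range_max], i.e. how many
# pose detections were low-confidence at each of the three thresholds applied below.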
values_in_range_min, values_in_range_max = 0.0, 0.1
csv_df["Low_prob_detections_0.1"] = csv_df_probability.apply(func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
values_in_range_min, values_in_range_max = 0.0, 0.5
csv_df["Low_prob_detections_0.5"] = csv_df_probability.apply(
func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
values_in_range_min, values_in_range_max = 0.0, 0.75
csv_df["Low_prob_detections_0.75"] = csv_df_probability.apply(
func=lambda row: count_values_in_range(row, values_in_range_min, values_in_range_max), axis=1)
########### DROP COORDINATE COLUMNS ###########################################
csv_df = csv_df.reset_index(drop=True)
csv_df = csv_df.fillna(0)
csv_df = csv_df.drop(columns=['index'], errors='ignore')
fileName = os.path.basename(currentFile)
saveFN = os.path.join(csv_dir_out, fileName)
save_df(csv_df, wfileType, saveFN)
print('Feature extraction complete for ' + '"' + str(currVidName) + '".')
print('All feature extraction complete.')
| 85.097859
| 285
| 0.656988
|
4a1c84c202da0d1a30b8d57c853b37f05b580bf4
| 24,687
|
py
|
Python
|
miniwdl_aws/batch_job.py
|
miniwdl-ext/miniwdl-aws
|
604a1f1f79ba34d138e685eff9f686f57c587c39
|
[
"MIT"
] | 1
|
2022-03-16T18:42:23.000Z
|
2022-03-16T18:42:23.000Z
|
miniwdl_aws/batch_job.py
|
miniwdl-ext/miniwdl-aws
|
604a1f1f79ba34d138e685eff9f686f57c587c39
|
[
"MIT"
] | null | null | null |
miniwdl_aws/batch_job.py
|
miniwdl-ext/miniwdl-aws
|
604a1f1f79ba34d138e685eff9f686f57c587c39
|
[
"MIT"
] | 2
|
2021-11-03T14:11:09.000Z
|
2022-03-09T22:54:06.000Z
|
"""
BatchJob: implements miniwdl TaskContainer by submitting jobs to an AWS Batch queue and polling
their status. Assumes a shared filesystem (typically EFS) between the miniwdl host and the Batch
workers.
"""
import os
import math
import time
import json
import threading
import heapq
from contextlib import ExitStack, suppress
import boto3
import botocore
import WDL
import WDL.runtime.task_container
import WDL.runtime._statusbar
from WDL._util import PygtailLogger, rmtree_atomic, symlink_force, write_atomic
from WDL._util import StructuredLogMessage as _
from ._util import (
detect_aws_region,
randomize_job_name,
efs_id_from_access_point,
detect_sagemaker_studio_efs,
detect_studio_fsap,
detect_gwfcore_batch_queue,
)
class BatchJob(WDL.runtime.task_container.TaskContainer):
@classmethod
def global_init(cls, cfg, logger):
cls._set_config_defaults(cfg, logger)
cls._region_name = detect_aws_region(cfg)
assert (
cls._region_name
), "Failed to detect AWS region; configure AWS CLI or set environment AWS_DEFAULT_REGION"
# EFS configuration based on:
# - [aws] fsap / MINIWDL__AWS__FSAP
# - [aws] fs / MINIWDL__AWS__FS
# - SageMaker Studio metadata, if applicable
cls._fs_id = None
cls._fsap_id = None
cls._fs_mount = cfg.get("file_io", "root")
assert (
len(cls._fs_mount) > 1
), "misconfiguration, set [file_io] root / MINIWDL__FILE_IO__ROOT to EFS mount point"
if cfg.has_option("aws", "fs"):
cls._fs_id = cfg.get("aws", "fs")
if cfg.has_option("aws", "fsap"):
cls._fsap_id = cfg.get("aws", "fsap")
if not cls._fs_id:
cls._fs_id = efs_id_from_access_point(cls._region_name, cls._fsap_id)
cls._studio_efs_uid = None
sagemaker_studio_efs = detect_sagemaker_studio_efs(logger, region_name=cls._region_name)
if sagemaker_studio_efs:
(
studio_efs_id,
studio_efs_uid,
studio_efs_home,
studio_efs_mount,
) = sagemaker_studio_efs
assert (
not cls._fs_id or cls._fs_id == studio_efs_id
), "Configured EFS ([aws] fs / MINIWDL__AWS__FS, [aws] fsap / MINIWDL__AWS__FSAP) isn't associated with current SageMaker Studio domain EFS"
cls._fs_id = studio_efs_id
assert cls._fs_mount.rstrip("/") == studio_efs_mount.rstrip("/"), (
"misconfiguration, set [file_io] root / MINIWDL__FILE_IO__ROOT to "
+ studio_efs_mount.rstrip("/")
)
cls._studio_efs_uid = studio_efs_uid
if not cls._fsap_id:
cls._fsap_id = detect_studio_fsap(
logger,
studio_efs_id,
studio_efs_uid,
studio_efs_home,
region_name=cls._region_name,
)
assert (
cls._fsap_id
), "Unable to detect suitable EFS Access Point for use with SageMaker Studio; set [aws] fsap / MINIWDL__AWS__FSAP"
# TODO: else sanity-check that FSAP's root directory equals studio_efs_home
assert (
cls._fs_id
), "Missing EFS configuration ([aws] fs / MINIWDL__AWS__FS or [aws] fsap / MINIWDL__AWS__FSAP)"
if not cls._fsap_id:
logger.warning(
"AWS BatchJob plugin recommends using EFS Access Point to simplify permissions between containers (configure [aws] fsap / MINIWDL__AWS__FSAP to fsap-xxxx)"
)
logger.debug(
_(
"AWS BatchJob EFS configuration",
fs_id=cls._fs_id,
fsap_id=cls._fsap_id,
mount=cls._fs_mount,
)
)
# set AWS Batch job queue
if cfg.has_option("aws", "task_queue"):
cls._job_queue = cfg.get("aws", "task_queue")
elif sagemaker_studio_efs:
cls._job_queue = detect_gwfcore_batch_queue(
logger, sagemaker_studio_efs[0], region_name=cls._region_name
)
assert (
cls._job_queue
), "Missing AWS Batch job queue configuration ([aws] task_queue / MINIWDL__AWS__TASK_QUEUE)"
# TODO: query Batch compute environment for resource limits
cls._resource_limits = {"cpu": 9999, "mem_bytes": 999999999999999}
cls._submit_lock = threading.Lock()
cls._last_submit_time = [0.0]
cls._init_time = time.time()
cls._describer = BatchJobDescriber()
logger.info(
_(
"initialized AWS BatchJob plugin",
region_name=cls._region_name,
job_queue=cls._job_queue,
resource_limits=cls._resource_limits,
)
)
@classmethod
def _set_config_defaults(cls, cfg, logger):
# Set defaults in the cfg: ConfigLoader object so that our subsequent cfg.get* ops won't
# throw if the user's custom .cfg file, which we don't necessarily control, omits them.
#
# This repo's miniwdl_aws.cfg also sets these options, and takes precedence in typical
# operations using the auto-built Docker image. But consider the case where a user has
# edited/written their own .cfg file, and we release a new plugin version with a new
# config option. That'd break their existing .cfg file unless the option has a default
# of last resort set here.
cfg.plugin_defaults(
{
"aws": {
"job_timeout": 864000,
"describe_period": 1,
"submit_period": 1,
"submit_period_b": 0.0,
"submit_period_c": 0.0,
"boto3_retries": {"max_attempts": 5, "mode": "standard"},
"retry_wait": 20,
"container_sync": False,
"job_tags": dict(),
}
}
)
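# These defaults only apply when the user's .cfg omits an option; any of them can still be
# overridden per run, e.g. via an environment variable such as MINIWDL__AWS__RETRY_WAIT=60
# (following miniwdl's usual MINIWDL__SECTION__OPTION naming convention).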
@classmethod
def detect_resource_limits(cls, cfg, logger):
return cls._resource_limits
def __init__(self, cfg, run_id, host_dir):
super().__init__(cfg, run_id, host_dir)
self._logStreamName = None
self._inputs_copied = False
# We'll direct Batch to mount EFS inside the task container at the same location we have
# it mounted ourselves, namely /mnt/efs. Therefore container_dir will be the same as
# host_dir (unlike the default Swarm backend, which mounts it at a different virtualized
# location)
self.container_dir = self.host_dir
def copy_input_files(self, logger):
self._inputs_copied = True
return super().copy_input_files(logger)
def host_work_dir(self):
# Since we aren't virtualizing the in-container paths as noted above, always use the same
# working directory on task retries, instead of the base class behavior of appending the
# try counter (on the host side). This loses some robustness to a split-brain condition
# where the previous try is actually still running when we start the retry.
# (see also retry_wait)
return os.path.join(self.host_dir, "work")
def host_stdout_txt(self):
return os.path.join(self.host_dir, "stdout.txt")
def host_stderr_txt(self):
return os.path.join(self.host_dir, "stderr.txt")
def reset(self, logger) -> None:
cooldown = self.cfg.get_float("aws", "retry_wait")
if cooldown > 0.0:
logger.info(
_(
"waiting to retry per configuration [aws] retry_wait",
seconds=cooldown,
)
)
time.sleep(cooldown)
rmtree_atomic(self.host_work_dir())
with suppress(FileNotFoundError):
os.unlink(self.host_stderr_txt() + ".offset") # PygtailLogger state file
super().reset(logger)
def _run(self, logger, terminating, command):
"""
Run task
"""
self._observed_states = set()
boto3_retries = self.cfg.get_dict("aws", "boto3_retries")
try:
aws_batch = boto3.Session().client( # Session() needed for thread safety
"batch",
region_name=self._region_name,
config=botocore.config.Config(retries=boto3_retries),
)
with ExitStack() as cleanup:
# submit Batch job (with request throttling)
job_id = None
submit_period = self.cfg.get_float("aws", "submit_period")
while True:
with self._submit_lock:
if terminating():
raise WDL.runtime.Terminated(quiet=True)
if (
time.time() - self._last_submit_time[0]
>= submit_period * self._submit_period_multiplier()
):
job_id = self._submit_batch_job(logger, cleanup, aws_batch, command)
self._last_submit_time[0] = time.time()
break
time.sleep(submit_period / 4)
# poll Batch job status
return self._await_batch_job(logger, cleanup, aws_batch, job_id, terminating)
except botocore.exceptions.ClientError as exn:
wrapper = AWSError(exn)
logger.error(wrapper)
raise wrapper
def _submit_batch_job(self, logger, cleanup, aws_batch, command):
"""
Register & submit AWS batch job, leaving a cleanup callback to deregister the transient
job definition.
"""
job_name = self.run_id
if job_name.startswith("call-"):
job_name = job_name[5:]
if self.try_counter > 1:
job_name += f"-try{self.try_counter}"
# Append entropy to the job name to avoid race condition using identical job names in
# concurrent RegisterJobDefinition requests
job_name = randomize_job_name(job_name)
container_properties = self._prepare_container_properties(logger, command)
job_def = aws_batch.register_job_definition(
jobDefinitionName=job_name,
type="container",
containerProperties=container_properties,
)
job_def_handle = f"{job_def['jobDefinitionName']}:{job_def['revision']}"
logger.debug(
_(
"registered Batch job definition",
jobDefinition=job_def_handle,
**container_properties,
)
)
self._cleanup_job_definition(logger, cleanup, aws_batch, job_def_handle)
job_tags = self.cfg.get_dict("aws", "job_tags")
if "AWS_BATCH_JOB_ID" in os.environ:
# If we find ourselves running inside an AWS Batch job, tag the new job identifying
# ourself as the "parent" job.
job_tags["AWS_BATCH_PARENT_JOB_ID"] = os.environ["AWS_BATCH_JOB_ID"]
# TODO: set a tag to indicate that this job is a retry of another
job = aws_batch.submit_job(
jobName=job_name,
jobQueue=self._job_queue,
jobDefinition=job_def_handle,
timeout={"attemptDurationSeconds": self.cfg.get_int("aws", "job_timeout")},
tags=job_tags,
)
logger.info(
_(
"AWS Batch job submitted",
jobQueue=self._job_queue,
jobId=job["jobId"],
tags=job_tags,
)
)
return job["jobId"]
def _prepare_container_properties(self, logger, command):
image_tag = self.runtime_values.get("docker", "ubuntu:20.04")
volumes, mount_points = self._prepare_mounts(logger, command)
vcpu = self.runtime_values.get("cpu", 1)
memory_mbytes = max(
math.ceil(self.runtime_values.get("memory_reservation", 0) / 1048576), 1024
)
commands = [
f"cd {self.container_dir}/work",
"exit_code=0",
"bash -l ../command >> ../stdout.txt 2> >(tee -a ../stderr.txt >&2) || exit_code=$?",
]
if self.cfg.get_bool("aws", "container_sync"):
commands.append("find . -type f | xargs sync")
commands.append("sync ../stdout.txt ../stderr.txt")
commands.append("exit $exit_code")
resource_requirements = [
{"type": "VCPU", "value": str(vcpu)},
{"type": "MEMORY", "value": str(memory_mbytes)},
]
if self.runtime_values.get("gpu", False):
resource_requirements += [{"type": "GPU", "value": "1"}]
container_properties = {
"image": image_tag,
"volumes": volumes,
"mountPoints": mount_points,
"command": ["/bin/bash", "-ec", "\n".join(commands)],
"environment": [
{"name": ev_name, "value": ev_value}
for ev_name, ev_value in self.runtime_values.get("env", dict())
],
"resourceRequirements": resource_requirements,
"privileged": self.runtime_values.get("privileged", False),
}
if self.cfg["task_runtime"].get_bool("as_user"):
user = (
f"{self._studio_efs_uid}:{self._studio_efs_uid}"
if self._studio_efs_uid is not None
else f"{os.geteuid()}:{os.getegid()}"
)
if user.startswith("0:"):
logger.warning(
"container command will run explicitly as root, since you are root and set --as-me"
)
container_properties["user"] = user
return container_properties
def _prepare_mounts(self, logger, command):
"""
Prepare the "volumes" and "mountPoints" for the Batch job definition, assembling the
in-container filesystem with the shared working directory, read-only input files, and
command/stdout/stderr files.
"""
# prepare control files
with open(os.path.join(self.host_dir, "command"), "w") as outfile:
outfile.write(command)
with open(self.host_stdout_txt(), "w"):
pass
with open(self.host_stderr_txt(), "w"):
pass
# EFS mount point
volumes = [
{
"name": "efs",
"efsVolumeConfiguration": {
"fileSystemId": self._fs_id,
"transitEncryption": "ENABLED",
},
}
]
if self._fsap_id:
volumes[0]["efsVolumeConfiguration"]["authorizationConfig"] = {
"accessPointId": self._fsap_id
}
mount_points = [{"containerPath": self._fs_mount, "sourceVolume": "efs"}]
if self._inputs_copied:
return volumes, mount_points
# Prepare symlinks to the input Files & Directories
container_prefix = os.path.join(self.container_dir, "work/_miniwdl_inputs/")
link_dirs_made = set()
for host_fn, container_fn in self.input_path_map.items():
assert container_fn.startswith(container_prefix) and len(container_fn) > len(
container_prefix
)
link_dn = os.path.dirname(container_fn)
if link_dn not in link_dirs_made:
os.makedirs(link_dn)
link_dirs_made.add(link_dn)
symlink_force(host_fn, container_fn)
return volumes, mount_points
def _cleanup_job_definition(self, logger, cleanup, aws_batch, job_def_handle):
def deregister(logger, aws_batch, job_def_handle):
try:
aws_batch.deregister_job_definition(jobDefinition=job_def_handle)
logger.debug(_("deregistered Batch job definition", jobDefinition=job_def_handle))
except botocore.exceptions.ClientError as exn:
# AWS expires job definitions after 6mo, so failing to delete them isn't fatal
logger.warning(
_(
"failed to deregister Batch job definition",
jobDefinition=job_def_handle,
error=str(AWSError(exn)),
)
)
cleanup.callback(deregister, logger, aws_batch, job_def_handle)
def _await_batch_job(self, logger, cleanup, aws_batch, job_id, terminating):
"""
Poll for Batch job success or failure & return exit code
"""
describe_period = self.cfg.get_float("aws", "describe_period")
cleanup.callback((lambda job_id: self._describer.unsubscribe(job_id)), job_id)
poll_stderr = cleanup.enter_context(
PygtailLogger(logger, self.host_stderr_txt(), callback=self.stderr_callback)
)
exit_code = None
while exit_code is None:
time.sleep(describe_period)
job_desc = self._describer.describe(aws_batch, job_id, describe_period)
write_atomic(
json.dumps(job_desc, indent=2, sort_keys=True),
os.path.join(self.host_dir, f"awsBatchJobDetail.{job_id}.json"),
)
job_status = job_desc["status"]
if "container" in job_desc and "logStreamName" in job_desc["container"]:
self._logStreamName = job_desc["container"]["logStreamName"]
if job_status not in self._observed_states:
self._observed_states.add(job_status)
logfn = (
logger.notice
if job_status in ("RUNNING", "SUCCEEDED", "FAILED")
else logger.info
)
logdetails = {"status": job_status, "jobId": job_id}
if self._logStreamName:
logdetails["logStreamName"] = self._logStreamName
logfn(_("AWS Batch job change", **logdetails))
if job_status == "STARTING" or (
job_status == "RUNNING" and "STARTING" not in self._observed_states
):
# TODO: base TaskContainer should handle this, for separation of concerns
cleanup.enter_context(
WDL.runtime._statusbar.task_running(
self.runtime_values.get("cpu", 1),
self.runtime_values.get("memory_reservation", 0),
)
)
if job_status not in (
"SUBMITTED",
"PENDING",
"RUNNABLE",
"STARTING",
"RUNNING",
"SUCCEEDED",
"FAILED",
):
logger.warning(_("unknown job status from AWS Batch", status=job_status))
if job_status == "SUCCEEDED":
exit_code = 0
elif job_status == "FAILED":
reason = job_desc.get("container", {}).get("reason", None)
status_reason = job_desc.get("statusReason", None)
self.failure_info = {"jobId": job_id}
if reason:
self.failure_info["reason"] = reason
if status_reason:
self.failure_info["statusReason"] = status_reason
if self._logStreamName:
self.failure_info["logStreamName"] = self._logStreamName
if status_reason and "Host EC2" in status_reason and "terminated" in status_reason:
raise WDL.runtime.Interrupted(
"AWS Batch job interrupted (likely spot instance termination)",
more_info=self.failure_info,
)
if "exitCode" not in job_desc.get("container", {}):
raise WDL.Error.RuntimeError(
"AWS Batch job failed", more_info=self.failure_info
)
exit_code = job_desc["container"]["exitCode"]
assert isinstance(exit_code, int) and exit_code != 0
if "RUNNING" in self._observed_states:
poll_stderr()
if terminating():
aws_batch.terminate_job(jobId=job_id, reason="terminated by miniwdl")
raise WDL.runtime.Terminated(
quiet=not self._observed_states.difference({"SUBMITTED", "PENDING", "RUNNABLE"})
)
for _root, _dirs, _files in os.walk(self.host_dir, followlinks=False):
# no-op traversal of working directory to refresh NFS metadata cache (speculative)
pass
poll_stderr()
return exit_code
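# _submit_period_multiplier below throttles submission early in a large run: with, say,
# submit_period_b=60 and submit_period_c=10 the effective period starts at 10x the
# configured submit_period and relaxes by 1x per minute of wall time, reaching 1x after
# about nine minutes; with the default b=0.0 the multiplier is always 1.0.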
def _submit_period_multiplier(self):
if self._describer.jobs:
b = self.cfg.get_float("aws", "submit_period_b")
if b > 0.0:
t = time.time() - self._init_time
c = self.cfg.get_float("aws", "submit_period_c")
return max(1.0, c - t / b)
return 1.0
class BatchJobDescriber:
"""
This singleton object handles calling the AWS Batch DescribeJobs API with up to 100 job IDs
per request, then dispensing each job description to the thread interested in it. This helps
avoid AWS API request rate limits when we're tracking many concurrent jobs.
"""
JOBS_PER_REQUEST = 100 # maximum jobs per DescribeJob request
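# self.job_queue is a min-heap of (last_described_time, job_id) tuples, so _update always
# refreshes the least-recently described jobs first, keeping every subscriber's view roughly
# equally fresh while staying under the 100-jobs-per-request API limit.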
def __init__(self):
self.lock = threading.Lock()
self.last_request_time = 0
self.job_queue = []
self.jobs = {}
def describe(self, aws_batch, job_id, period):
"""
Get the latest Batch job description
"""
while True:
with self.lock:
if job_id not in self.jobs:
# register new job to be described ASAP
heapq.heappush(self.job_queue, (0.0, job_id))
self.jobs[job_id] = None
# update as many job descriptions as possible
self._update(aws_batch, period)
# return the desired job description if we have it
desc = self.jobs[job_id]
if desc:
return desc
# otherwise wait (outside the lock) and try again
time.sleep(period / 4)
def unsubscribe(self, job_id):
"""
Unsubscribe from a job_id once we'll no longer be interested in it
"""
with self.lock:
if job_id in self.jobs:
del self.jobs[job_id]
def _update(self, aws_batch, period):
# if enough time has passed since our last DescribeJobs request
if time.time() - self.last_request_time >= period:
# take the N least-recently described jobs
job_ids = set()
assert self.job_queue
while self.job_queue and len(job_ids) < self.JOBS_PER_REQUEST:
job_id = heapq.heappop(self.job_queue)[1]
assert job_id not in job_ids
if job_id in self.jobs:
job_ids.add(job_id)
if not job_ids:
return
# describe them
try:
job_descs = aws_batch.describe_jobs(jobs=list(job_ids))
finally:
# always: bump last_request_time and re-enqueue these jobs
self.last_request_time = time.time()
for job_id in job_ids:
heapq.heappush(self.job_queue, (self.last_request_time, job_id))
# update self.jobs with the new descriptions
for job_desc in job_descs["jobs"]:
job_ids.remove(job_desc["jobId"])
self.jobs[job_desc["jobId"]] = job_desc
assert not job_ids, "AWS Batch DescribeJobs didn't return all expected results"
class AWSError(WDL.Error.RuntimeError):
"""
Repackage botocore.exceptions.ClientError to surface it more-informatively in miniwdl task log
"""
def __init__(self, client_error: botocore.exceptions.ClientError):
assert isinstance(client_error, botocore.exceptions.ClientError)
msg = (
f"{client_error.response['Error']['Code']}, {client_error.response['Error']['Message']}"
)
super().__init__(
msg, more_info={"ResponseMetadata": client_error.response["ResponseMetadata"]}
)
| 41.842373
| 171
| 0.573338
|
4a1c84ce6d6703eced8b5304bb29ef22d382124e
| 2,954
|
py
|
Python
|
src/train.py
|
yuvaphalle/datascience
|
315e37572110350aff6eab8361d73928a032eb0e
|
[
"MIT"
] | null | null | null |
src/train.py
|
yuvaphalle/datascience
|
315e37572110350aff6eab8361d73928a032eb0e
|
[
"MIT"
] | null | null | null |
src/train.py
|
yuvaphalle/datascience
|
315e37572110350aff6eab8361d73928a032eb0e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# ### Installing
# In[1]:
# !pip install scikit-learn==0.24.0
# !curl https://raw.githubusercontent.com/automl/auto-sklearn/master/requirements.txt | xargs -n 1 -L 1 pip install
# In[2]:
from google.colab import drive
drive.mount('/content/drive')
# In[3]:
# !pip install auto-sklearn
# In[4]:
# pip install dask distributed
# ### Importing
# In[5]:
from google.colab import drive
import pandas as pd
import numpy as np
from sklearn import set_config
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
import logging
import datetime
from joblib import dump
# ### Google Drive connection
# In[6]:
mount_path = '/content/drive'
drive.mount(mount_path, force_remount=True)
# In[7]:
data_path = "/content/drive/MyDrive/ml data/"
model_path = "/content/drive/My Drive/Introduction2DataScience/w2d2/models/"
# In[8]:
timesstr = str(datetime.datetime.now()).replace(' ', '_')
logging.basicConfig(filename=f"{model_path}explog_{timesstr}.log", level=logging.INFO)
# In[9]:
#set_config(display='diagram')
# In[ ]:
wine = pd.read_csv(f'{data_path}winequality-red.csv', sep=';')
# In[ ]:
test_size = 0.2
random_state = 0
train, test = train_test_split(wine, test_size=test_size, random_state=random_state)
train.to_csv(f'{data_path}winequality-red-train.csv', index=False, sep=';')
train = train.copy()
test.to_csv(f'{data_path}winequality-red-test.csv', index=False, sep=';')
test = test.copy()
# In[ ]:
logging.info(f'train test split with test_size={test_size} and random state={random_state}')
# <a id='P2' name="P2"></a>
# ## [Modelling](#P0)
# In[ ]:
X_train, y_train = train.iloc[:,:-1], train.iloc[:,-1]
# ### Model Definition (a plain LinearRegression; no preprocessing pipeline is used)
# In[ ]:
from sklearn.linear_model import LinearRegression
total_time = 600
per_run_time_limit = 30
automl = LinearRegression()
# ### Model Training
# In[ ]:
automl.fit(X_train, y_train)
# In[ ]:
logging.info(f'Trained a LinearRegression regressor (auto-sklearn disabled; total_time={total_time}s and per_run_time_limit={per_run_time_limit}s settings are unused)')
# In[ ]:
dump(automl, f'{model_path}model{timesstr}.pkl')
# In[ ]:
logging.info(f'Saved regressor model at {model_path}model{timesstr}.pkl ')
# In[ ]:
# logging.info(f'autosklearn model statistics:')
# logging.info(automl.sprint_statistics())
# ### Model Evaluation
# In[ ]:
X_test, y_test = test.iloc[:,:-1], test.iloc[:,-1]
y_pred = automl.predict(X_test)
y_pred
# _Mean Squared Error:_
# In[ ]:
mse = mean_squared_error(y_test, y_pred)
mse
# _R^2 score:_
# In[ ]:
R_squared = automl.score(X_test, y_test)
R_squared
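# Since mse is computed above and numpy is imported as np, the root-mean-squared error in
# the original target units would simply be:
#     rmse = np.sqrt(mse)
# which is often easier to interpret than MSE on the 0-10 wine quality scale.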
# In[ ]:
logging.info(f"Mean Squared Error is {mse}, \n R2 score is {R_squared}")
| 15.305699
| 145
| 0.706838
|