| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
|
{
"content_hash": "da19c3ff75a588f5005181a3d0e429ac",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 45,
"alnum_prop": 0.8444444444444444,
"repo_name": "MobileWebApps/backend-python-rest-gae",
"id": "5a974d8c007d716bae45c6f87f863acfe7e0a95e",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/app_scaffolding/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1860"
},
{
"name": "JavaScript",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "3245769"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
import demistomock as demisto
from CommonServerPython import *
import os
import subprocess
import uuid
USERNAME = demisto.params()['credentials']['identifier'].replace("'", "''")
PASSWORD = demisto.params()['credentials']['password'].replace("'", "''")
EXCHANGE_FQDN = demisto.params()['exchangeFQDN'].replace("'", "''")
UNSECURE = demisto.params()['insecure']
STARTCS = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$query,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
$searchName = [guid]::NewGuid().ToString() -replace '[-]'
$searchName = "DemistoSearch" + $searchName
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
if (!$session)
{
"Failed to create remote PS session"
return
}
Import-PSSession $session -CommandName *Compliance* -AllowClobber -DisableNameChecking -Verbose:$false | Out-Null
$compliance = New-ComplianceSearch -Name $searchName -ExchangeLocation All -ContentMatchQuery $query -Confirm:$false
Start-ComplianceSearch -Identity $searchName
$complianceSearchName = "Action status: " + $searchName
$complianceSearchName | ConvertTo-Json
Remove-PSSession $session
'''
GETCS = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$searchName,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
if (!$session)
{
"Failed to create remote PS session"
return
}
Import-PSSession $session -CommandName Get-ComplianceSearch -AllowClobber `
-DisableNameChecking -Verbose:$false | Out-Null
$searchStatus = Get-ComplianceSearch $searchName
$searchStatus.Status
if ($searchStatus.Status -eq "Completed")
{
$searchStatus.SuccessResults | ConvertTo-Json
}
Remove-PSSession $session
'''
REMOVECS = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$searchName,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
if (!$session)
{
"Failed to create remote PS session"
return
}
Import-PSSession $session -CommandName *Compliance* -AllowClobber -DisableNameChecking -Verbose:$false | Out-Null
Remove-ComplianceSearch $searchName -Confirm:$false
Remove-PSSession $session
'''
STARTPURGE = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$searchName,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
if (!$session)
{
"Failed to create remote PS session"
return
}
Import-PSSession $session -CommandName *Compliance* -AllowClobber -DisableNameChecking -Verbose:$false | Out-Null
$newActionResult = New-ComplianceSearchAction -SearchName $searchName -Purge -PurgeType SoftDelete -Confirm:$false
if (!$newActionResult)
{
"No action was created"
}
Remove-PSSession $session
return
'''
CHECKPURGE = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$searchName,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
if (!$session)
{
"Failed to create remote PS session"
return
}
Import-PSSession $session -CommandName *Compliance* -AllowClobber -DisableNameChecking -Verbose:$false | Out-Null
$actionName = $searchName + "_Purge"
$actionStatus = Get-ComplianceSearchAction $actionName
""
$actionStatus.Status
Remove-PSSession $session
'''
TESTCON = '''
[CmdletBinding()]
Param(
[Parameter(Mandatory=$True)]
[string]$username,
[Parameter(Mandatory=$True)]
[string]$server,
[Parameter(Mandatory=$True)]
[bool]$unsecure
)
$errorActionPreference = 'Stop'
$WarningPreference = "silentlyContinue"
$password = Read-Host
$secpasswd = ConvertTo-SecureString $password -AsPlainText -Force
$UserCredential = New-Object System.Management.Automation.PSCredential ($username, $secpasswd)
try{
if($unsecure){
$url = "http://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Kerberos
}else{
$url = "https://" + $server + "/PowerShell"
$session = New-PSSession -ConfigurationName Microsoft.Exchange -ConnectionUri $url `
-Credential $UserCredential -Authentication Basic -AllowRedirection
}
echo "successful connection"
} catch {
$e = $_.Exception
echo $e.Message
} finally {
Remove-PSSession $session
}
'''
def prepare_args(d):
return dict((k.replace("-", "_"), v) for k, v in d.items())
def str_to_unicode(obj):
if isinstance(obj, dict):
obj = {k: str_to_unicode(v) for k, v in obj.iteritems()}
elif isinstance(obj, list):
obj = map(str_to_unicode, obj)
elif isinstance(obj, str):
obj = unicode(obj, "utf-8")
return obj
def encode_and_submit_results(obj):
demisto.results(str_to_unicode(obj))
def get_cs_status(search_name, status):
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': 'Search {} status: {}'.format(search_name, status),
'EntryContext': {
'EWS.ComplianceSearch(val.Name === obj.Name)': {'Name': search_name, 'Status': status}
}
}
def create_ps_file(ps_name, ps_content):
temp_path = os.getenv('TEMP')
if not temp_path:
return_error("Check that the integration is using single engine without docker."
" If so, add TEMP variable to the enviroment varibes.")
ps_path = temp_path + '\\' + ps_name # type: ignore
with open(ps_path, 'w+') as file:
file.write(ps_content)
return ps_path
def delete_ps_file(ps_path):
if os.path.exists(ps_path):
os.remove(ps_path)
def start_compliance_search(query):
try:
ps_path = create_ps_file('startcs_' + str(uuid.uuid4()).replace('-', '') + '.ps1', STARTCS)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + str(query).replace("'", "''") + "'", "'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = output.communicate(input=PASSWORD.encode())
finally:
delete_ps_file(ps_path)
if stderr:
return_error(stderr)
prefix = '"Action status: '
pref_ind = stdout.find(prefix)
sub_start = pref_ind + len(prefix)
sub_end = sub_start + 45
search_name = stdout[sub_start:sub_end]
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': 'Search started: {}'.format(search_name),
'EntryContext': {
'EWS.ComplianceSearch': {'Name': search_name, 'Status': 'Starting'}
}
}
def get_compliance_search(search_name):
try:
ps_path = create_ps_file('getcs_' + search_name + '.ps1', GETCS)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + search_name + "'", "'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = output.communicate(input=PASSWORD.encode())
finally:
delete_ps_file(ps_path)
stdout = stdout[len(PASSWORD):]
if stderr:
return_error(stderr)
stdsplit = stdout.split('\n', 1)
status = stdsplit[0].strip()
results = [get_cs_status(search_name, status)]
if status == 'Completed' and len(stdsplit[1].strip()) > 4:
res = list(r[:-1].split(', ') if r[-1] == ',' else r.split(', ') for r in stdsplit[1][2:-4].split(r'\r\n'))
res = map(lambda x: {k: v for k, v in (s.split(': ') for s in x)}, res)
results.append(
{
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': stdout,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Exchange 2016 Compliance search results',
res, ['Location', 'Item count', 'Total size'])
}
)
return results
def remove_compliance_search(search_name):
try:
ps_path = create_ps_file('removecs_' + search_name + '.ps1', REMOVECS)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + search_name + "'", "'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = output.communicate(input=PASSWORD.encode())
finally:
delete_ps_file(ps_path)
return return_error(stderr) if stderr else get_cs_status(search_name, 'Removed')
def purge_compliance_search(search_name):
try:
ps_path = create_ps_file('startpurge_' + search_name + '.ps1', STARTPURGE)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + search_name + "'", "'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = output.communicate(input=PASSWORD.encode())
finally:
delete_ps_file(ps_path)
return return_error(stderr) if stderr else get_cs_status(search_name, 'Purging')
def check_purge_compliance_search(search_name):
try:
ps_path = create_ps_file('checkpurge_' + search_name + '.ps1', CHECKPURGE)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + search_name + "'", "'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = output.communicate(input=PASSWORD.encode())
finally:
delete_ps_file(ps_path)
return return_error(stderr) if stderr else get_cs_status(search_name,
'Purged' if stdout.strip() == 'Completed' else 'Purging')
def test_module():
try:
ps_path = create_ps_file('testcon_' + str(uuid.uuid4()).replace('-', '') + '.ps1', TESTCON)
output = subprocess.Popen(["powershell.exe", ps_path, "'" + USERNAME + "'",
"'" + EXCHANGE_FQDN + "'", "$" + str(UNSECURE)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.communicate(input=PASSWORD.encode())[0].strip()
finally:
delete_ps_file(ps_path)
stdout = stdout[len(PASSWORD):]
if stdout == "successful connection":
demisto.results('ok')
else:
return_error(stdout)
args = prepare_args(demisto.args())
try:
if demisto.command() == 'exchange2016-start-compliance-search':
encode_and_submit_results(start_compliance_search(**args))
elif demisto.command() == 'exchange2016-get-compliance-search':
encode_and_submit_results(get_compliance_search(**args))
elif demisto.command() == 'exchange2016-remove-compliance-search':
encode_and_submit_results(remove_compliance_search(**args))
elif demisto.command() == 'exchange2016-purge-compliance-search-results':
encode_and_submit_results(purge_compliance_search(**args))
elif demisto.command() == 'exchange2016-get-compliance-search-purge-status':
encode_and_submit_results(check_purge_compliance_search(**args))
elif demisto.command() == 'test-module':
test_module()
except Exception as e:
if isinstance(e, WindowsError): # pylint: disable=undefined-variable
return_error("Could not open powershell on the target engine.")
else:
return_error(e)
|
{
"content_hash": "d96f2008cbff9934c2925a845dba9d90",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 126,
"avg_line_length": 36.187203791469194,
"alnum_prop": 0.6558182175365072,
"repo_name": "VirusTotal/content",
"id": "cac2798d330a35b4cb295b774b73ce9affeb8156",
"size": "15271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Exchange2016_Compliance/Integrations/Exchange2016_Compliance/Exchange2016_Compliance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import numpy
import re
import time
import glob
from subprocess import PIPE, Popen
import yap_tools
import yap_preprocess
import yap_aligner
import yap_file_io
import yap_log
import yap_workflow_dict as wd
'''
This script executes preprocess commands and prepares data for alignment,
for both chunk-based and file-based workflows.
'''
def execute_chunk(
input_file_list_local,
inp1,
inp2,
chunk_number,
myrank,
workflow_prov,
eqp_dict):
'''
Executes preprocess commands for chunked data and passes the result to the alignment stage.
Takes chunked input data, a filename list, the chunk number, the rank of the processor,
and a provenance list to which log data is appended.
'''
# variable declaration
input_filename_local = input_file_list_local[0]
input_filename_local_2 = input_file_list_local[1]
file_name = input_file_list_local[2]
err_chunk_file = wd.err_log_path + "/" + file_name + \
"_log_temp/" + file_name + "_" + str(chunk_number).zfill(6)
stat_chunk_file = wd.stat_log_path + "/" + file_name + \
"_log_temp/" + file_name + "_" + str(chunk_number).zfill(6)
myhost = os.getenv('HOSTNAME')
yap_file_io.write_data("HOSTNAME: " + str(myhost) + "\n", err_chunk_file)
yap_file_io.write_data("HOSTNAME: " + str(myhost) + "\n", stat_chunk_file)
yap_file_io.write_data("CHUNK NUMBER: " + str(chunk_number) + "\n", err_chunk_file)
yap_file_io.write_data("CHUNK NUMBER: " + str(chunk_number) + "\n", stat_chunk_file)
seqs_arr1 = []
seqs_arr2 = []
read_length = wd.max_read_length
barcode_seqstruct_dict1 = {}
barcode_seqstruct_dict2 = {}
barcode_output_dict = {}
aligner_out_str = ''
sort_order = ''
barcode_flag = 'False'
sort_order = wd.alignment_sort_order
# convert the input data based on format given in workflow configuration
if wd.input_file_format == "qseq" or wd.input_file_format != wd.preprocess_output_file_format:
inp1 = yap_tools.convert_format(inp1)
if wd.paired_end_data == 'yes':
inp2 = yap_tools.convert_format(inp2)
if wd.run_preprocess_analysis == 'yes':
str_out = "-"*20 + "PREPROCESS STARTED" +"\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "-"*20 + "\n"
yap_file_io.write_data(str_out,err_chunk_file)
yap_file_io.write_data(str_out,stat_chunk_file)
# Run barcode splitter as first preprocess step
for jj in range(0, len(wd.preprocess_cmd_arr)):
preprocess_cmd_name = wd.preprocess_cmd_arr[jj][2][0][0]
preprocess_cmd = wd.preprocess_cmd_arr[jj][2][0][1]
if re.search('fastx_barcode_splitter', preprocess_cmd_name) is not None:
barcode_flag = 'True'
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
str_out= "YAP_COMMAND: " + preprocess_cmd + "\n" + "INPUT FILE: " + input_filename_local
yap_file_io.write_data(str_out,err_chunk_file)
yap_file_io.write_data(str_out,stat_chunk_file)
barcode_seqstruct_dict1, workflow_prov = yap_preprocess.fastx_barcode_splitter(
inp1, wd.preprocess_output_file_format, preprocess_cmd, workflow_prov, err_chunk_file, stat_chunk_file)
yap_file_io.write_data("_" * 30 + "\n", err_chunk_file)
yap_file_io.write_data("_" * 30 + "\n", stat_chunk_file)
barcode_seqstruct_dict1["no_barcode_specified"] = ''
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
if wd.paired_end_data == 'yes':
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local_2, " chunk number=", chunk_number, "\n"
str_out= "YAP_COMMAND: " + preprocess_cmd + "\n" + "INPUT FILE: " + input_filename_local_2
yap_file_io.write_data(str_out,err_chunk_file)
yap_file_io.write_data(str_out,stat_chunk_file)
barcode_seqstruct_dict2, workflow_prov = yap_preprocess.fastx_barcode_splitter(
inp2,wd.preprocess_output_file_format , preprocess_cmd, workflow_prov, err_chunk_file, stat_chunk_file)
yap_file_io.write_data("_" * 30 + "\n", err_chunk_file)
yap_file_io.write_data("_" * 30 + "\n", stat_chunk_file)
barcode_seqstruct_dict2["no_barcode_specified"] = ''
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
break
if barcode_flag == 'False':
#if there is no barcode splitter command, create a dictionary with one barcode tag
barcode_seqstruct_dict1["no_barcode_specified"] = inp1
barcode_seqstruct_dict2["no_barcode_specified"] = inp2
else:
#if no preprocess stage was specified, create a dictionary with one barcode tag
barcode_seqstruct_dict1["no_barcode_specified"] = inp1
barcode_seqstruct_dict2["no_barcode_specified"] = inp2
#iterate over the barcode dictionary
for barcode, inp1 in barcode_seqstruct_dict1.iteritems():
run_unique_reads = 'False'
barcode_value = yap_tools.rename_barcode(barcode)
if wd.paired_end_data == "yes":
inp2 = barcode_seqstruct_dict2[barcode]
preprocessed_data_dict = {}
#initialize matrix for basecount analysis
aligner_output_str_local = ''
basecount_matrix_local1 = numpy.zeros(
(int(read_length), 5), dtype=numpy.int)
basecount_matrix_local2 = numpy.zeros(
(int(read_length), 5), dtype=numpy.int)
barcode_output_dict.setdefault(barcode, [basecount_matrix_local1, basecount_matrix_local2])
#set output file paths
barcode_dir_path = wd.workflow_output_path + "/" + file_name + "/" + barcode
preprocess_dir_path = barcode_dir_path + "/" + "preprocess_output"
if wd.data_distribution_method != "file_based":
if barcode_value != '':
preprocess_out_filename1 = preprocess_dir_path + "/" + barcode_value + "_" + file_name + \
"_" + str(chunk_number).zfill(6) + "_" + \
str(myrank) + "_preprocessed_data_1.txt"
preprocess_out_filename2 = preprocess_dir_path + "/" + barcode_value + "_" + file_name + \
"_" + str(chunk_number).zfill(6) + "_" + \
str(myrank) + "_preprocessed_data_2.txt"
else:
preprocess_out_filename1 = preprocess_dir_path + "/" + file_name + "_" + \
str(chunk_number).zfill(6) + "_" + \
str(myrank) + "_preprocessed_data_1.txt"
preprocess_out_filename2 = preprocess_dir_path + "/" + file_name + "_" + \
str(chunk_number).zfill(6) + "_" + \
str(myrank) + "_preprocessed_data_2.txt"
else:
if barcode_value != '':
preprocess_out_filename1 = preprocess_dir_path + "/" + \
"preprocess_data" + "_" + file_name + \
"_" + barcode_value + "_1.txt"
preprocess_out_filename2 = preprocess_dir_path + "/" + \
"preprocess_data" + "_" + file_name + \
"_" + barcode_value + "_2.txt"
else:
preprocess_out_filename1 = preprocess_dir_path + "/" + \
"preprocess_data" + "_" + file_name + "_1.txt"
preprocess_out_filename2 = preprocess_dir_path + "/" + \
"preprocess_data" + "_" + file_name + "_2.txt"
aligner_dir_path = barcode_dir_path + "/" + "aligner_output"
if barcode_value != '':
aligner_output_filename = aligner_dir_path + "/" + "aligner_" + \
file_name + "_" + barcode_value + \
"_" + str(chunk_number).zfill(6)
else:
aligner_output_filename = aligner_dir_path + "/" + \
"aligner_" + file_name + "_" + str(chunk_number).zfill(6)
for jj in range(0, len(wd.preprocess_cmd_arr)):
preprocess_cmd_name = wd.preprocess_cmd_arr[jj][2][0][0]
preprocess_cmd = wd.preprocess_cmd_arr[jj][2][0][1]
# skip fastqc and fastq screen and barcode splitter as they are
# already executed
if (re.search('fastqc', preprocess_cmd_name) is not None) or (re.search('fastq_screen', preprocess_cmd_name) is not None) or (re.search('fastx_barcode_splitter',
preprocess_cmd_name) is not None):
pass
else:
if re.search('calculate_basecount_metrics', preprocess_cmd_name) is not None:
#execute basecount calculation
basecount_matrix_local1, workflow_prov = yap_tools.qc_basecount(
inp1, workflow_prov)
basecount_matrix_local2, workflow_prov = yap_tools.qc_basecount(
inp2, workflow_prov)
elif re.search('fastx_clipper', preprocess_cmd_name) is not None:
"""
Check for fastx clipper as special case and execute.
This is because fastx clipper execution has been optimized by providing contaminants for every file,
instead of just applying contaminants universally.
"""
run_unique_reads = 'True'
if input_filename_local in wd.contaminant_dict.keys():
contaminants_arr1 = wd.contaminant_dict[
input_filename_local]
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
index = 0
for index in range(0, len(contaminants_arr1)):
#iterate over all the contaminants for this file
fastx_clipper_cmd = preprocess_cmd
contaminant1 = contaminants_arr1[index].strip("\n")
if inp1 != '':
cont_replace = " -a " + contaminant1
fastx_clipper_cmd = fastx_clipper_cmd.replace(
'pipe1', " - ") + " -a " + contaminant1
inp1 = yap_tools.multiproc_function(
fastx_clipper_cmd, inp1, int(
wd.format_specific_lines), '', err_chunk_file, stat_chunk_file)
yap_log.merge_multiproc_files(
fastx_clipper_cmd,
input_filename_local,
barcode,
err_chunk_file,
stat_chunk_file)
if inp1 == '':
break
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
if wd.paired_end_data == 'yes':
if input_filename_local_2 in wd.contaminant_dict.keys():
#repeat fastx clipper for the paired end
contaminants_arr2 = wd.contaminant_dict[
input_filename_local_2]
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local_2, " chunk number=", chunk_number, "\n"
index = 0
for index in range(0, len(contaminants_arr2)):
fastx_clipper_cmd = preprocess_cmd
contaminant2 = contaminants_arr2[
index].strip("\n")
if inp2 != '':
cont_replace = " -a " + contaminant2
fastx_clipper_cmd = fastx_clipper_cmd.replace(
'pipe1',
" - ") + " -a " + contaminant2
inp2 = yap_tools.multiproc_function(
fastx_clipper_cmd, inp2, int(
wd.format_specific_lines), '', err_chunk_file, stat_chunk_file)
yap_log.merge_multiproc_files(
fastx_clipper_cmd,
input_filename_local_2,
barcode,
err_chunk_file,
stat_chunk_file)
if inp2 == '':
break
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local_2, " chunk number=", chunk_number, "\n"
elif re.search('eqp_rename_reads',preprocess_cmd_name) != None:
# this section renames reads according to a specific format; it applies to in-house use and can be ignored otherwise
inp1_arr = inp1.splitlines(1)
inp1=''
inp2_arr = inp2.splitlines(1)
inp2=''
read_count=1
if wd.data_distribution_method == "file_based":
if eqp_dict.has_key("eqp_read_counter"):
if len(eqp_dict["eqp_read_counter"]) > 0:
file_name, read_count = eqp_dict["eqp_read_counter"]
if file_name != input_filename_local:
read_count = 1
format_lines = int(wd.format_specific_lines)
for i in range(0,len(inp1_arr),format_lines):
if wd.paired_end_data == 'yes':
if (len(inp1_arr[i+1].strip("\n").replace('A','')) >= 5) and (len(inp2_arr[i+1].strip("\n").replace('A','')) >= 5) and (len(inp1_arr[i+1].strip("\n").replace('T','')) >= 5) and (len(inp2_arr[i+1].strip("\n").replace('T','')) >= 5) :
inp1 += '@F'+str(read_count).zfill(9)+'/1'+'\n'
inp2 += '@F'+str(read_count).zfill(9)+'/2'+'\n'
for jj in range (1,format_lines):
inp1 += inp1_arr[i+jj]
inp2 += inp2_arr[i+jj]
read_count += 1
else:
if (len(inp1_arr[i+1].strip("\n").replace('A','')) >= 5) and (len(inp1_arr[i+1].strip("\n").replace('T','')) >= 5):
inp1_arr[i] = '@F'+str(read_count).zfill(9)+'/1'+'\n'
for jj in range (1,format_lines):
inp1 += inp1_arr[i+jj]
read_count += 1
eqp_dict["eqp_read_counter"] = [ input_filename_local, read_count]
inp1_arr = []
inp2_arr = []
else:
#set the flag to remove unmatched pairs after preprocessing
run_unique_reads = 'True'
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
#for all other preprocess commands execute this section
if inp1 != '':
preprocess_cmd = preprocess_cmd.replace('pipe1', ' - ')
inp1 = yap_tools.multiproc_function(
preprocess_cmd, inp1, int(
wd.format_specific_lines), '', err_chunk_file, stat_chunk_file)
yap_log.merge_multiproc_files(
preprocess_cmd,
input_filename_local,
barcode,
err_chunk_file,
stat_chunk_file)
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local, " chunk number=", chunk_number, "\n"
if wd.paired_end_data == 'yes':
preprocess_cmd = preprocess_cmd.replace('pipe1', ' - ')
print "Entering " + preprocess_cmd_name + " : Filename=", input_filename_local_2, " chunk number=", chunk_number, "\n"
if inp2 != '':
inp2 = yap_tools.multiproc_function(
preprocess_cmd, inp2, int(
wd.format_specific_lines), '', err_chunk_file, stat_chunk_file)
yap_log.merge_multiproc_files(
preprocess_cmd,
input_filename_local_2,
barcode,
err_chunk_file,
stat_chunk_file)
print "Exiting " + preprocess_cmd_name + " : Filename=", input_filename_local_2, " chunk number=", chunk_number, "\n"
if wd.paired_end_data == 'yes':
if run_unique_reads == 'True':
#remove all the unmatched pairs from two chunks belonging to the same sample
#this is because each chunk goes through the command separately, not as a pair.
if inp1 != '' and inp2 != '':
inp1, inp2 = yap_tools.find_unique_set(
inp1.splitlines(1), inp2.splitlines(1))
if wd.run_preprocess_analysis == 'yes':
#write log data
str_out="-"*20 + "PREPROCESS FINISHED" + "\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "-"*20 + "\n"
yap_file_io.write_data(str_out, err_chunk_file)
yap_file_io.write_data(str_out, stat_chunk_file)
if wd.data_distribution_method != "file_based":
#if the workflow is not file-based, pass the chunks to the alignment stage.
if wd.run_reference_alignment == 'yes':
str_out="-"*20 + "ALIGNMENT STARTED" + "\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "-"*20 + "\n"
yap_file_io.write_data(str_out, err_chunk_file)
yap_file_io.write_data(str_out, stat_chunk_file)
if (wd.paired_end_data == 'yes' and inp1 != '' and inp2 != '') or (wd.paired_end_data != 'yes' and inp1 != ''):
print "Entering Alignment: Filename=", input_filename_local, "barcode=", barcode, " chunk number=", chunk_number, "\n"
if wd.paired_end_data == 'yes':
workflow_prov.append(
"INPUT: " +
input_filename_local +
" and " +
input_filename_local_2 +
" chunk number= " +
str(chunk_number))
aligner_out_str, workflow_prov = yap_aligner.run_aligner(
inp1, inp2,aligner_output_filename, chunk_number, myrank,workflow_prov, err_chunk_file, stat_chunk_file)
else:
workflow_prov.append(
"INPUT: " +
input_filename_local +
" chunk number= " +
str(chunk_number))
aligner_out_str, workflow_prov = yap_aligner.run_aligner(
inp1, '', aligner_output_filename, chunk_number,myrank,workflow_prov, err_chunk_file, stat_chunk_file)
rm_cmd = "rm " + aligner_output_filename + "*.sai"
if len(glob.glob(aligner_output_filename + "*.sai")) > 0:
prm = Popen(rm_cmd, shell='False').wait()
if len(glob.glob(aligner_output_filename + "*.head")) > 0:
prm = Popen("rm " + aligner_output_filename + "*.head", shell='False').wait()
else:
print "Exiting Alignment: Filename=", input_filename_local, "barcode=", barcode, " chunk number=", chunk_number, "\n"
str_out="-"*20 + "ALIGNMENT FINISHED" + "\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "-"*20+ "\n"
yap_file_io.write_data(str_out, err_chunk_file)
yap_file_io.write_data(str_out, stat_chunk_file)
if wd.run_preprocess_analysis == 'yes':
if wd.write_preprocessed_data == 'yes':
#write preprocessed data to file
yap_file_io.write_data(inp1, preprocess_out_filename1)
if wd.paired_end_data == "yes":
yap_file_io.write_data(inp2, preprocess_out_filename2)
else:
#else empty input data chunk
inp1 = ''
inp2 = ''
else:
#else empty input data chunk
inp1 = ''
inp2 = ''
else:
#if the workflow is file-based, write preprocessed data to file
if wd.run_preprocess_analysis == "yes":
if wd.write_preprocessed_data == 'yes' or wd.run_reference_alignment == "yes":
yap_file_io.write_data(inp1, preprocess_out_filename1)
if wd.paired_end_data == "yes":
yap_file_io.write_data(inp2, preprocess_out_filename2)
barcode_output_dict[barcode][0] = basecount_matrix_local1
barcode_output_dict[barcode][1] = basecount_matrix_local2
return barcode_output_dict, workflow_prov
def execute_file(input_filename_local,input_filename_local_2,file_name,chunk_number,myrank,ii,file_basecount_dict):
workflow_prov = []
err_chunk_file = wd.err_log_path + "/" + file_name + \
"_log_temp/" + file_name + "_" + str(ii).zfill(6)
stat_chunk_file = wd.stat_log_path + "/" + file_name + \
"_log_temp/" + file_name + "_" + str(ii).zfill(6)
str_out="*" * 50 + "ALIGNMENT STARTED" + "\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "*" * 50 + "\n"
yap_file_io.write_data(str_out,err_chunk_file)
yap_file_io.write_data(str_out,stat_chunk_file)
for filename_key in file_basecount_dict.iterkeys():
if filename_key == file_name:
for barcode in wd.barcode_dict.iterkeys():
barcode_value = yap_tools.rename_barcode(barcode)
barcode_dir_path = wd.workflow_output_path + "/" + file_name + "/" + barcode
aligner_dir_path = barcode_dir_path + "/" + "aligner_output"
if wd.alignment_sort_order != 'unsorted':
if barcode_value != '':
aligner_output_filename = aligner_dir_path + "/" + \
"aligner_" + file_name + \
"_" + barcode_value
else:
aligner_output_filename = aligner_dir_path + \
"/" + "aligner_" + file_name
else:
if barcode_value != '':
aligner_output_filename = aligner_dir_path + \
"/" + file_name + \
"_" + barcode_value
else:
aligner_output_filename = aligner_dir_path + \
"/" + file_name
if wd.run_preprocess_analysis == 'yes':
preprocessed_file_inp1 = ['pipe1']
preprocessed_file_inp2 = ['pipe2']
preprocess_dir_path = barcode_dir_path + \
"/" + "preprocess_output"
preprocessed_inp1 = preprocess_dir_path + \
"/" + "*preprocess_data*_1.txt"
preprocessed_inp2 = preprocess_dir_path + \
"/" + "*preprocess_data_*2.txt"
preprocessed_file_inp1 = glob.glob(
preprocessed_inp1)
if wd.paired_end_data == "yes":
preprocessed_file_inp2 = glob.glob(
preprocessed_inp2)
if (wd.paired_end_data== "yes" and preprocessed_file_inp1 and preprocessed_file_inp2) or (wd.paired_end_data != "yes" and preprocessed_file_inp1):
print "Entering Alignment section: Filename=", input_filename_local, "barcode=", barcode, "\n"
if wd.paired_end_data == 'yes':
workflow_prov.append(
"INPUT: " +
preprocessed_file_inp1[0] +
" and " +
preprocessed_file_inp2[0])
aligner_out_str, workflow_prov = yap_aligner.run_aligner(preprocessed_file_inp1[0], preprocessed_file_inp2[
0],aligner_output_filename, chunk_number,myrank,workflow_prov, err_chunk_file, stat_chunk_file)
else:
workflow_prov.append(
"INPUT: " +
preprocessed_file_inp1[0])
aligner_out_str, workflow_prov = yap_aligner.run_aligner(preprocessed_file_inp1[
0], '', aligner_output_filename, chunk_number,myrank, workflow_prov, err_chunk_file, stat_chunk_file)
if wd.write_preprocessed_data != 'yes':
prm1 = Popen(
"rm " +
preprocess_dir_path +
"/" +
"*preprocess_data*_1.txt",
shell='False').wait()
if wd.paired_end_data == "yes":
if preprocessed_file_inp2:
prm2 = Popen(
"rm " +
preprocess_dir_path +
"/" +
"*preprocess_data*_2.txt",
shell='False').wait()
else:
print "Skipping Alignment for : Filename=", input_filename_local, "barcode=", barcode, "........", "No preprocessed data found"
else:
if wd.paired_end_data == 'yes':
workflow_prov.append(
"INPUT: " +
input_filename_local +
" and " +
input_filename_local_2)
aligner_out_str, workflow_prov = yap_aligner.run_aligner(
input_filename_local, input_filename_local_2, aligner_output_filename, 0, myrank, workflow_prov, err_chunk_file, stat_chunk_file)
else:
workflow_prov.append("INPUT: " + input_filename_local)
aligner_out_str, workflow_prov = yap_aligner.run_aligner(
input_filename_local, '', aligner_output_filename, 0, myrank, workflow_prov, err_chunk_file, stat_chunk_file)
#remove temporary files created by aligners
rm_cmd = "rm " + \
aligner_output_filename + "*.sai"
if len(glob.glob(aligner_output_filename + "*.sai")) > 0:
prm = Popen(
rm_cmd, shell='False').wait()
if barcode in file_basecount_dict[filename_key]:
pass
else:
file_basecount_dict[
filename_key][barcode] = []
#write to log
str_out="*" * 50 + "ALIGNMENT FINISHED" + "\t" + str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())) + "*" * 50 + "\n"
yap_file_io.write_data(str_out,err_chunk_file)
yap_file_io.write_data(str_out,stat_chunk_file)
return workflow_prov, file_basecount_dict
|
{
"content_hash": "af65728eea231765f1bd9414f10688db",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 272,
"avg_line_length": 60.86326530612245,
"alnum_prop": 0.48087046910102943,
"repo_name": "Novartis/yap",
"id": "23331f71f894aeb7aa22feadbfdf8518ee75da57",
"size": "29845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/yap_exe.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Perl",
"bytes": "29326"
},
{
"name": "Python",
"bytes": "484111"
},
{
"name": "Shell",
"bytes": "2932"
}
],
"symlink_target": ""
}
|
import decimal
from keystoneauth1 import session as ks_sess
import mock
from oslo_config import fixture as config_fixture
from oslotest import base
import testscenarios
from cloudkitty import collector
from cloudkitty import db
from cloudkitty.db import api as ck_db_api
from cloudkitty import rating
class FakeCollectorModule(collector.BaseCollector):
collector_name = 'test_fake'
dependencies = tuple()
def __init__(self):
super(FakeCollectorModule, self).__init__([], period=3600)
class FakeRatingModule(rating.RatingProcessorBase):
module_name = 'fake'
description = 'fake rating module'
def __init__(self, tenant_id=None):
super(FakeRatingModule, self).__init__()
def quote(self, data):
self.process(data)
def process(self, data):
for cur_data in data:
cur_usage = cur_data['usage']
for service in cur_usage:
for entry in cur_usage[service]:
if 'rating' not in entry:
entry['rating'] = {'price': decimal.Decimal(0)}
return data
def reload_config(self):
pass
def notify_reload(self):
pass
class TestCase(testscenarios.TestWithScenarios, base.BaseTestCase):
scenarios = [
('sqlite', dict(db_url='sqlite:///'))
]
def setUp(self):
super(TestCase, self).setUp()
self._conf_fixture = self.useFixture(config_fixture.Config())
self.conf = self._conf_fixture.conf
self.conf.set_override('connection', self.db_url, 'database')
self.conn = ck_db_api.get_instance()
migration = self.conn.get_migration()
migration.upgrade('head')
auth = mock.patch(
'keystoneauth1.loading.load_auth_from_conf_options',
return_value=dict())
auth.start()
self.auth = auth
session = mock.patch(
'keystoneauth1.loading.load_session_from_conf_options',
return_value=ks_sess.Session())
session.start()
self.session = session
def tearDown(self):
db.get_engine().dispose()
self.auth.stop()
self.session.stop()
super(TestCase, self).tearDown()
|
{
"content_hash": "e191d90b50b8b062109b417337a5e1dd",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 71,
"avg_line_length": 28.766233766233768,
"alnum_prop": 0.6221218961625282,
"repo_name": "stackforge/cloudkitty",
"id": "ed84dae966cdd49edd53ad4db0a8fe1dff343231",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1648"
},
{
"name": "Python",
"bytes": "452298"
},
{
"name": "Shell",
"bytes": "12116"
}
],
"symlink_target": ""
}
|
import sys
import os
import cmd
import threading
from gppylib.commands.base import WorkerPool, REMOTE, ExecutionError
from gppylib.commands.unix import Hostname, Echo
sys.path.append(sys.path[0] + '/lib')
import pxssh
import pexpect
import socket
class HostNameError(Exception):
def __init__(self, msg, lineno = 0):
if lineno: self.msg = ('%s at line %d' % (msg, lineno))
else: self.msg = msg
def __str__(self):
return self.msg
class SSHError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
# Utility Functions
def ssh_prefix(host):
ssh = 'ssh -o "BatchMode yes" -o "StrictHostKeyChecking no" ' + host
return ssh
def get_hosts(hostsfile):
hostlist = HostList()
hostlist.parseFile(hostsfile)
return hostlist.get()
class HostList():
def __init__(self):
self.list = []
def get(self):
return self.list
def addHostNameAlternatives(self):
''' Add the fully qualified domain name for each host to the host list '''
for host in self.list:
try:
fqdn = socket.getfqdn(host)
''' Add fully qualified domain names '''
if fqdn not in self.list:
self.list.append(fqdn)
except socket.error, e:
print "Error while including hostname alternatives"
def add(self, host, lineno=0):
'''Add a host to the hostlist.'''
# we don't allow the user@ syntax here
if host.find('@') >= 0:
raise HostNameError(host, lineno)
# MPP-13617 - check for ipv6
if host.find(':') >= 0:
try:
socket.inet_pton(socket.AF_INET6, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
# MPP-13617 - check for ipv4
if host.find('.') >= 0:
octs = host.split('.')
if len(octs) == 4 and False not in [o.isdigit() for o in octs]:
try:
socket.inet_pton(socket.AF_INET, host)
except socket.error, e:
raise HostNameError(str(e), lineno)
self.list.append(host)
return self.list
def parseFile(self, path):
'''Add lines in a file to the hostlist.'''
with open(path) as fp:
for i, line in enumerate(fp):
line = line.strip()
if not line or line[0] == '#':
continue
self.add(line, i+1)
return self.list
def checkSSH(self):
'''Check that ssh to hostlist is okay.'''
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
return True
def filterMultiHomedHosts(self):
'''For multiple hosts that resolve to the same node, keep only one in the hostlist.'''
unique = {}
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
for finished_cmd in pool.getCompletedItems():
hostname = finished_cmd.get_hostname()
if (not hostname):
unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
elif not unique.get(hostname):
unique[hostname] = finished_cmd.remoteHost
elif hostname == finished_cmd.remoteHost:
unique[hostname] = finished_cmd.remoteHost
self.list = unique.values()
return self.list
def removeBadHosts(self):
''' Update the host list to include only the hosts on which SSH was successful'''
pool = WorkerPool(min(len(self.list), 16))
for h in self.list:
cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
pool.addCommand(cmd)
pool.join()
pool.haltWork()
bad_hosts = []
working_hosts = []
for cmd in pool.getCompletedItems():
if not cmd.get_results().wasSuccessful():
bad_hosts.append(cmd.remoteHost)
else:
working_hosts.append(cmd.remoteHost)
self.list = working_hosts[:]
return bad_hosts
# Session is a command session, derived from a base class cmd.Cmd
class Session(cmd.Cmd):
'''Implements a list of open ssh sessions ready to execute commands'''
verbose=False
hostList=[]
userName=None
echoCommand=False
class SessionError(StandardError): pass
class SessionCmdExit(StandardError): pass
def __init__(self, hostList=None, userName=None):
cmd.Cmd.__init__(self)
self.pxssh = []
self.prompt = '=> '
self.peerStringFormatRaw = None
if hostList:
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName
def peerStringFormat(self):
if self.peerStringFormatRaw: return self.peerStringFormatRaw
cnt = 0
for p in self.pxssh:
if cnt < len(p.x_peer): cnt = len(p.x_peer)
self.peerStringFormatRaw = "[%%%ds]" % cnt
return self.peerStringFormatRaw
def login(self, hostList=None, userName=None):
'''This is the normal entry point used to add host names to the object and log in to each of them'''
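# Illustrative usage (hypothetical hosts and user name, not taken from the gpssh docs):
#   s = Session()
#   s.login(['mdw', 'sdw1'], 'gpadmin')
#   s.onecmd('uptime')   # unknown commands fall through to default() and run on every host
#   s.close()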
if self.verbose: print '\n[Reset ...]'
if not (self.hostList or hostList):
raise self.SessionError('No host list available to Login method')
if not (self.userName or userName):
raise self.SessionError('No user name available to Login method')
#Cleanup
self.clean()
if hostList: #We have a new hostlist to use, initialize it
self.hostList=[]
for host in hostList:
self.hostList.append(host)
if userName: self.userName=userName #We have a new userName to use
# MPP-6583. Save off term type and set to nothing before creating ssh process
origTERM = os.getenv('TERM', None)
os.putenv('TERM', '')
for host in self.hostList:
p = pxssh.pxssh()
p.loginAsync(host, self.userName)
p.x_peer = host
p.x_pid = p.pid
self.pxssh.append(p)
# Restore terminal type
if origTERM:
os.putenv('TERM', origTERM)
some_errors = False
good_list = []
for p in self.pxssh:
success_login = False
if self.verbose: print '[INFO] login %s' % p.x_peer
try:
success_login = p.loginWait(set_term_dumb=True)
except Exception as e:
pass
if success_login:
good_list.append(p)
else:
some_errors = True
print '[ERROR] unable to login to %s' % p.x_peer
if some_errors:
print 'hint: use gpssh-exkeys to setup public-key authentication between hosts'
self.pxssh = good_list
def close(self):
return self.clean()
def reset(self):
'''reads from all the ssh connections to make sure we don't have any pending cruft'''
for s in self.pxssh:
s.readlines()
def clean(self):
net_return_code = self.closePxsshList(self.pxssh)
self.pxssh = []
return net_return_code
def emptyline(self):
pass
def escapeLine(self,line):
'''Escape occurrences of \ and $ as needed and package the line as an "eval" shell command'''
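# Illustrative example (hypothetical input line, not taken from the gpssh docs): the line
#   echo "$HOME"
# is packaged as
#   eval "echo \"\$HOME\"" < /dev/null
# so quoting and variable expansion happen once, in the remote shell that runs the eval.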
line = line.strip()
if line == 'EOF' or line == 'exit' or line == 'quit':
raise self.SessionCmdExit()
line = line.split('\\')
line = '\\\\'.join(line)
line = line.split('"')
line = '\\"'.join(line)
line = line.split('$')
line = '\\$'.join(line)
line = 'eval "' + line + '" < /dev/null'
return line
def executeCommand(self,command):
commandoutput=[]
if self.echoCommand:
escapedCommand = command.replace('"', '\\"')
command = 'echo "%s"; %s' % (escapedCommand, command)
#Execute the command in all of the ssh sessions
for s in self.pxssh:
s.sendline(command)
s.flush()
#Wait for each command and retrieve the output
for s in self.pxssh:
#Wait for each command to finish
#!! TODO verify that this is a tight wait loop and find another way to do this
while not s.prompt(120) and s.isalive() and not s.eof(): pass
for s in self.pxssh:
#Split the output into an array of lines so that we can add text to the beginning of
# each line
output = s.before.split('\n')
output = output[1:-1]
commandoutput.append(output)
return commandoutput.__iter__()
# Interactive command line handler
# Override of base class, handles commands that aren't recognized as part of a predefined set
# The "command" argument is a command line to be executed on all available command sessions
# The output of the command execution is printed to the standard output, prepended with
# the hostname of each machine from which the output came
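# Illustrative example (hypothetical hosts, not actual gpssh output): typing `uptime` at the
# "=> " prompt runs it on every open session and prints one prefixed line per host, e.g.
#   [sdw1]  10:15:32 up 12 days, load average: 0.04, 0.03, 0.05
#   [sdw2]  10:15:32 up 12 days, load average: 0.02, 0.01, 0.00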
def default(self, command):
line = self.escapeLine(command)
if self.verbose: print command
#Execute the command on our ssh sessions
commandoutput=self.executeCommand(command)
self.writeCommandOutput(commandoutput)
def writeCommandOutput(self,commandoutput):
'''Takes a list of output lists as an iterator and writes them to standard output,
formatted with the hostname from which each output array was obtained'''
for s in self.pxssh:
output = commandoutput.next()
#Write the output
if len(output) == 0:
print (self.peerStringFormat() % s.x_peer)
else:
for line in output:
print (self.peerStringFormat() % s.x_peer), line
def closePxsshList(self,list):
lock = threading.Lock()
return_codes = [0]
def closePxsshOne(p, return_codes):
p.logout()
p.close()
with lock:
return_codes.append(p.exitstatus)
th = []
for p in list:
t = threading.Thread(target=closePxsshOne, args=(p, return_codes))
t.start()
th.append(t)
for t in th:
t.join()
return max(return_codes)
|
{
"content_hash": "f786a00bc2b7ee3095c75da3979b40d4",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 108,
"avg_line_length": 32.40462427745665,
"alnum_prop": 0.557260078487335,
"repo_name": "huor/incubator-hawq",
"id": "6240bedd64cb92cbf17c90bd0d9c9f65461e295f",
"size": "12094",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tools/bin/gppylib/util/ssh_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11532"
},
{
"name": "C",
"bytes": "31784372"
},
{
"name": "C++",
"bytes": "5577084"
},
{
"name": "CMake",
"bytes": "159651"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1154"
},
{
"name": "HTML",
"bytes": "69676"
},
{
"name": "Java",
"bytes": "2416176"
},
{
"name": "Lex",
"bytes": "196336"
},
{
"name": "M4",
"bytes": "82922"
},
{
"name": "Makefile",
"bytes": "439548"
},
{
"name": "Objective-C",
"bytes": "10709"
},
{
"name": "PLSQL",
"bytes": "198268"
},
{
"name": "PLpgSQL",
"bytes": "2572992"
},
{
"name": "Perl",
"bytes": "868948"
},
{
"name": "Protocol Buffer",
"bytes": "106101"
},
{
"name": "Python",
"bytes": "166388"
},
{
"name": "Roff",
"bytes": "30181"
},
{
"name": "SQLPL",
"bytes": "156704"
},
{
"name": "Shell",
"bytes": "230513"
},
{
"name": "Smarty",
"bytes": "244244"
},
{
"name": "Thrift",
"bytes": "9459"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "Yacc",
"bytes": "440492"
}
],
"symlink_target": ""
}
|
'''
Created on May 5, 2016
@author: xiao
'''
from argparse import ArgumentParser
from generate import *
from multiprocessing import Pool
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("seed", type = int, default = 103, nargs = '?',
help = "seed to make the generation deterministic")
parser.add_argument("-output", "-o", default = None,
help = "output directory, defaults to None and print to stdout")
parser.add_argument("-batch", type = int, default = None,
help = "Generates all htmls with seed from 0 to this number")
parser.add_argument("-parallel", "-p", type=int, default = 4,
help = "cores to use for parallel generation")
args = parser.parse_args()
seeds = [args.seed] if args.batch is None else xrange(args.batch)
if args.batch is not None: args.output = None
def run(seed):
generated_html = create(seed)
out_file = args.output if args.output else str(seed) + '.html'
# print generated_html.prettify()
with open(out_file, 'w') as outf:
outf.write(str(generated_html))
p = Pool(args.parallel)
p.map(run,seeds)
|
{
"content_hash": "999836cf5ee77a5a8b7f5fe3cd0db5d6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 39.74193548387097,
"alnum_prop": 0.6055194805194806,
"repo_name": "xiao-cheng/html-generator",
"id": "649fc3e9d90c58669678796843a157ba85c4dc49",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6981"
}
],
"symlink_target": ""
}
|
"""Test basic ceilometer-api app
"""
import os
import tempfile
import unittest
from oslo.config import cfg
from ceilometer.api.v1 import app
from ceilometer.api import acl
from ceilometer import service
class TestApp(unittest.TestCase):
def tearDown(self):
cfg.CONF.reset()
def test_keystone_middleware_conf(self):
cfg.CONF.set_override("auth_protocol", "foottp",
group=acl.OPT_GROUP_NAME)
cfg.CONF.set_override("auth_version", "v2.0", group=acl.OPT_GROUP_NAME)
api_app = app.make_app(cfg.CONF, attach_storage=False)
self.assertEqual(api_app.wsgi_app.auth_protocol, 'foottp')
def test_keystone_middleware_parse_conffile(self):
tmpfile = tempfile.mktemp()
with open(tmpfile, "w") as f:
f.write("[%s]\nauth_protocol = barttp" % acl.OPT_GROUP_NAME)
f.write("\nauth_version = v2.0")
service.prepare_service(['ceilometer-api',
'--config-file=%s' % tmpfile])
api_app = app.make_app(cfg.CONF, attach_storage=False)
self.assertEqual(api_app.wsgi_app.auth_protocol, 'barttp')
os.unlink(tmpfile)
|
{
"content_hash": "026f146d0f55d2dacf215ab067f72313",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 33.714285714285715,
"alnum_prop": 0.6322033898305085,
"repo_name": "dreamhost/ceilometer",
"id": "c38356ac17bd1b75603fd844da7fdf3357ba400f",
"size": "1836",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api/v1/test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "368517"
},
{
"name": "Python",
"bytes": "993129"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from itertools import chain
from math import log
from nltk.probability import (ConditionalProbDist, ConditionalFreqDist,
SimpleGoodTuringProbDist)
from nltk.util import ingrams
from nltk.model.api import ModelI
from nltk import compat
def _estimator(fdist, bins):
"""
Default estimator function using a SimpleGoodTuringProbDist.
"""
# this can't be an instance method of NgramModel because
# instance methods can't be pickled.
return SimpleGoodTuringProbDist(fdist)
@compat.python_2_unicode_compatible
class NgramModel(ModelI):
"""
A processing interface for assigning a probability to the next word.
"""
# add cutoff
def __init__(self, n, train, pad_left=True, pad_right=False,
estimator=None, *estimator_args, **estimator_kwargs):
"""
Create an ngram language model to capture patterns in n consecutive
words of training text. An estimator smooths the probabilities derived
from the text and may allow generation of ngrams not seen during
training.
>>> from nltk.corpus import brown
>>> from nltk.probability import LidstoneProbDist
>>> est = lambda fdist, bins: LidstoneProbDist(fdist, 0.2)
>>> lm = NgramModel(3, brown.words(categories='news'), estimator=est)
>>> lm
<NgramModel with 91603 3-grams>
>>> lm._backoff
<NgramModel with 62888 2-grams>
>>> lm.entropy(['The', 'Fulton', 'County', 'Grand', 'Jury', 'said',
... 'Friday', 'an', 'investigation', 'of', "Atlanta's", 'recent',
... 'primary', 'election', 'produced', '``', 'no', 'evidence',
... "''", 'that', 'any', 'irregularities', 'took', 'place', '.'])
... # doctest: +ELLIPSIS
0.5776...
:param n: the order of the language model (ngram size)
:type n: int
:param train: the training text
:type train: list(str) or list(list(str))
:param pad_left: whether to pad the left of each sentence with an (n-1)-gram of empty strings
:type pad_left: bool
:param pad_right: whether to pad the right of each sentence with an (n-1)-gram of empty strings
:type pad_right: bool
:param estimator: a function for generating a probability distribution
:type estimator: a function that takes a ConditionalFreqDist and
returns a ConditionalProbDist
:param estimator_args: Extra arguments for estimator.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
Note: For backward-compatibility, if no arguments are specified, the
number of bins in the underlying ConditionalFreqDist are passed to
the estimator as an argument.
:type estimator_args: (any)
:param estimator_kwargs: Extra keyword arguments for the estimator
:type estimator_kwargs: (any)
"""
# protection from cryptic behavior for calling programs
# that use the pre-2.0.2 interface
assert(isinstance(pad_left, bool))
assert(isinstance(pad_right, bool))
self._n = n
self._lpad = ('',) * (n - 1) if pad_left else ()
self._rpad = ('',) * (n - 1) if pad_right else ()
if estimator is None:
estimator = _estimator
cfd = ConditionalFreqDist()
self._ngrams = set()
# If given a list of strings instead of a list of lists, create enclosing list
if (train is not None) and isinstance(train[0], compat.string_types):
train = [train]
for sent in train:
for ngram in ingrams(chain(self._lpad, sent, self._rpad), n):
self._ngrams.add(ngram)
context = tuple(ngram[:-1])
token = ngram[-1]
cfd[context].inc(token)
if not estimator_args and not estimator_kwargs:
self._model = ConditionalProbDist(cfd, estimator, len(cfd))
else:
self._model = ConditionalProbDist(cfd, estimator, *estimator_args, **estimator_kwargs)
# recursively construct the lower-order models
if n > 1:
self._backoff = NgramModel(n-1, train, pad_left, pad_right,
estimator, *estimator_args, **estimator_kwargs)
def prob(self, word, context):
"""
Evaluate the probability of this word in this context using Katz Backoff.
:param word: the word to get the probability of
:type word: str
:param context: the context the word is in
:type context: list(str)
"""
context = tuple(context)
if (context + (word,) in self._ngrams) or (self._n == 1):
return self[context].prob(word)
else:
return self._alpha(context) * self._backoff.prob(word, context[1:])
def _alpha(self, tokens):
return self._beta(tokens) / self._backoff._beta(tokens[1:])
def _beta(self, tokens):
return (self[tokens].discount() if tokens in self else 1)
def logprob(self, word, context):
"""
Evaluate the (negative) log probability of this word in this context.
:param word: the word to get the probability of
:type word: str
:param context: the context the word is in
:type context: list(str)
"""
return -log(self.prob(word, context), 2)
def choose_random_word(self, context):
'''
Randomly select a word that is likely to appear in this context.
:param context: the context the word is in
:type context: list(str)
'''
return self.generate(1, context)[-1]
# NB, this will always start with same word if the model
# was trained on a single text
def generate(self, num_words, context=()):
'''
Generate random text based on the language model.
:param num_words: number of words to generate
:type num_words: int
:param context: initial words in generated string
:type context: list(str)
'''
text = list(context)
for i in range(num_words):
text.append(self._generate_one(text))
return text
def _generate_one(self, context):
context = (self._lpad + tuple(context))[-self._n+1:]
# print "Context (%d): <%s>" % (self._n, ','.join(context))
if context in self:
return self[context].generate()
elif self._n > 1:
return self._backoff._generate_one(context[1:])
else:
return '.'
def entropy(self, text):
"""
Calculate the approximate cross-entropy of the n-gram model for a
given evaluation text.
This is the average negative log probability of each word in the text.
:param text: words to use for evaluation
:type text: list(str)
"""
e = 0.0
text = list(self._lpad) + text + list(self._rpad)
for i in range(self._n-1, len(text)):
context = tuple(text[i-self._n+1:i])
token = text[i]
e += self.logprob(token, context)
return e / float(len(text) - (self._n-1))
def perplexity(self, text):
"""
Calculates the perplexity of the given text.
This is simply 2 ** cross-entropy for the text.
:param text: words to calculate perplexity of
:type text: list(str)
"""
return pow(2.0, self.entropy(text))
def __contains__(self, item):
return tuple(item) in self._model
def __getitem__(self, item):
return self._model[tuple(item)]
def __repr__(self):
return '<NgramModel with %d %d-grams>' % (len(self._ngrams), self._n)
def teardown_module(module):
from nltk.corpus import brown
brown._unload()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
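# A minimal usage sketch (illustrative only), assuming the NLTK Brown corpus
# is installed; it trains a trigram model with the default estimator described
# above and evaluates a held-out sentence with entropy()/perplexity():
#
#     from nltk.corpus import brown
#     train_sents = [list(s) for s in brown.sents(categories='news')[:100]]
#     lm = NgramModel(3, train_sents, pad_left=True, pad_right=True)
#     test_sent = list(brown.sents(categories='news')[100])
#     print(lm.entropy(test_sent))     # average negative log2 probability per word
#     print(lm.perplexity(test_sent))  # 2 ** entropy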
|
{
"content_hash": "ae3120169124c81aa3fad991390735a8",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 103,
"avg_line_length": 35.5764192139738,
"alnum_prop": 0.5943292009328587,
"repo_name": "syllog1sm/TextBlob",
"id": "c43f76334a7bbcc37302398db4bbd6b27121edc8",
"size": "8401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text/nltk/model/ngram.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1163"
},
{
"name": "Erlang",
"bytes": "1863"
},
{
"name": "JavaScript",
"bytes": "326"
},
{
"name": "Python",
"bytes": "3645100"
},
{
"name": "Shell",
"bytes": "6711"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="isosurface.colorbar.title", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "d8bd551eb4a47afa3e8b26b77fab4206",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 33.142857142857146,
"alnum_prop": 0.5926724137931034,
"repo_name": "plotly/python-api",
"id": "5595928c4077299b2043fb2dfc16614cdfbc65f1",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/colorbar/title/_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
server = {
'port': '8080',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'interceptor.api.controllers.root.RootController',
'modules': ['interceptor.api'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/templates',
'debug': False,
}
logging = {
'loggers': {
'root': {'level': 'INFO', 'handlers': ['console']},
'interceptor': {'level': 'DEBUG', 'handlers': ['console']}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
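# For example (illustrative), once Pecan has loaded this file the values
# defined above can be read anywhere in the application::
#
#     from pecan import conf
#     port = conf.server.port     # '8080'
#     debug = conf.app.debug      # False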
|
{
"content_hash": "ba0ee1f1103bc8978a596a4105cb9482",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 22.372093023255815,
"alnum_prop": 0.5145530145530145,
"repo_name": "uhobawuhot/interceptor",
"id": "7c92dd46d3dfbf6a091db083e3a08fe7d6d58221",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interceptor/api/config.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Perl",
"bytes": "877"
},
{
"name": "Python",
"bytes": "128711"
}
],
"symlink_target": ""
}
|
from django.db import models, connection
from . import denorms
from django.conf import settings
import django.db.models
def denormalized(DBField, *args, **kwargs):
"""
Turns a callable into a model field, analogous to Python's ``@property`` decorator.
The callable will be used to compute the value of the field every time the model
gets saved.
If the callable has dependency information attached to it the fields value will
also be recomputed if the dependencies require it.
**Arguments:**
DBField (required)
The type of field you want to use to save the data.
Note that you have to use the field class and not an instance
of it.
\*args, \*\*kwargs:
Those will be passed unaltered into the constructor of ``DBField``
once it gets actually created.
"""
class DenormDBField(DBField):
"""
Special subclass of the given DBField type, with a few extra additions.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.skip = kwargs.pop('skip', None)
kwargs['editable'] = False
DBField.__init__(self, *args, **kwargs)
def contribute_to_class(self, cls, name, *args, **kwargs):
if hasattr(settings, 'DENORM_BULK_UNSAFE_TRIGGERS') and settings.DENORM_BULK_UNSAFE_TRIGGERS:
self.denorm = denorms.BaseCallbackDenorm(skip=self.skip)
else:
self.denorm = denorms.CallbackDenorm(skip=self.skip)
self.denorm.func = self.func
self.denorm.depend = [dcls(*dargs, **dkwargs) for (dcls, dargs, dkwargs) in getattr(self.func, 'depend', [])]
self.denorm.model = cls
self.denorm.fieldname = name
self.field_args = (args, kwargs)
models.signals.class_prepared.connect(self.denorm.setup, sender=cls)
# Add The many to many signal for this class
models.signals.pre_save.connect(denorms.many_to_many_pre_save, sender=cls)
models.signals.post_save.connect(denorms.many_to_many_post_save, sender=cls)
DBField.contribute_to_class(self, cls, name, *args, **kwargs)
def pre_save(self, model_instance, add):
"""
Updates the value of the denormalized field before it gets saved.
"""
value = self.denorm.func(model_instance)
if hasattr(self, "remote_field") and self.remote_field: # Django>=1.10
related_field_model = self.remote_field.model
elif hasattr(self, 'related_field'): # Django>1.5
related_field_model = self.related_field.model
elif hasattr(self, "related"):
try:
related_field_model = self.related.parent_model
except AttributeError:
related_field_model = self.related.model
else:
related_field_model = None
if related_field_model and isinstance(value, related_field_model):
setattr(model_instance, self.attname, None)
setattr(model_instance, self.name, value)
return getattr(model_instance, self.attname)
else:
setattr(model_instance, self.attname, value)
return value
def south_field_triple(self):
"""
Because this field will be defined as a decorator, give
South hints on how to recreate it for database use.
"""
from south.modelsinspector import introspector
field_class = DBField.__module__ + "." + DBField.__name__
args, kwargs = introspector(self)
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(DenormDBField, self).deconstruct()
super_name, super_path, super_args, super_kwargs = DBField(*args, **kwargs).deconstruct()
return name, super_path, args, kwargs
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
This has been copied from the original definition in
django.db.fields.__init__ so that the DBField is copied
instead, as otherwise the `makemigrations` command
does not work in Django 1.9.
"""
name, path, args, kwargs = self.deconstruct()
return DBField(*args, **kwargs)
def deco(func):
dbfield = DenormDBField(func, *args, **kwargs)
return dbfield
return deco
class AggregateField(models.PositiveIntegerField):
def get_denorm(self, *args, **kwargs):
"""
Returns denorm instance
"""
raise NotImplementedError('You need to override this method')
def __init__(self, manager_name=None, **kwargs):
"""
**Arguments:**
manager_name:
The name of the related manager to be counted.
filter:
Filter, which is applied to manager. For example:
>>> active_item_count = CountField('item_set', filter={'active__exact':True})
>>> adult_user_count = CountField('user_set', filter={'age__gt':18})
exclude:
Exclude filter, which removes matching objects from the aggregation
Any additional arguments are passed on to the constructor of
PositiveIntegerField.
"""
skip = kwargs.pop('skip', None)
qs_filter = kwargs.pop('filter', {})
if qs_filter and connection.vendor == "sqlite":
raise NotImplementedError('filters for aggregate fields are currently not supported for sqlite')
qs_exclude = kwargs.pop('exclude', {})
self.denorm = self.get_denorm(skip)
self.denorm.manager_name = manager_name
self.denorm.filter = qs_filter
self.denorm.exclude = qs_exclude
self.kwargs = kwargs
kwargs['default'] = 0
kwargs['editable'] = False
super(AggregateField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, *args, **kwargs):
self.denorm.model = cls
self.denorm.fieldname = name
models.signals.class_prepared.connect(self.denorm.setup)
super(AggregateField, self).contribute_to_class(cls, name, *args, **kwargs)
def south_field_triple(self):
return (
'.'.join(('django', 'db', 'models', models.PositiveIntegerField.__name__)),
[],
{
'default': '0',
},
)
def pre_save(self, model_instance, add):
"""
Makes sure we never overwrite the count with an outdated value.
This is necessary because if the count was changed by
a trigger after this model instance was created, the value
we would write has not been updated.
"""
if add:
# if this is a new instance there can't be any related objects yet
value = 0
else:
# if we're updating, get the most recent value from the DB
value = self.denorm.model.objects.filter(
pk=model_instance.pk,
).values_list(
self.attname, flat=True,
)[0]
setattr(model_instance, self.attname, value)
return value
def deconstruct(self):
name, path, args, kwargs = super(AggregateField, self).deconstruct()
del kwargs['editable']
args = [self.denorm.manager_name] + args
return name, path, args, kwargs
class CountField(AggregateField):
"""
A ``PositiveIntegerField`` that stores the number of rows
related to this model instance through the specified manager.
The value will be incrementally updated when related objects
are added and removed.
"""
def __init__(self, manager_name=None, **kwargs):
"""
**Arguments:**
manager_name:
The name of the related manager to be counted.
filter:
Filter, which is applied to manager. For example:
>>> active_item_count = CountField('item_set', filter={'active__exact':True})
>>> adult_user_count = CountField('user_set', filter={'age__gt':18})
Any additional arguments are passed on to the constructor of
PositiveIntegerField.
"""
kwargs['editable'] = False
super(CountField, self).__init__(manager_name, **kwargs)
def get_denorm(self, skip):
return denorms.CountDenorm(skip)
class SumField(AggregateField):
"""
A ``PositiveIntegerField`` that stores the sum of related field values
for objects related to this model instance through the specified manager.
The value will be incrementally updated when related objects
are added and removed.
"""
def __init__(self, manager_name=None, field=None, **kwargs):
self.field = field
kwargs['editable'] = False
super(SumField, self).__init__(manager_name, **kwargs)
def get_denorm(self, skip):
return denorms.SumDenorm(skip, self.field)
class CopyField(AggregateField):
"""
Field which keeps two fields identical. Any change in the related field will change this field
"""
# TODO: JFDI
class CacheKeyField(models.BigIntegerField):
"""
A ``BigIntegerField`` that gets set to a random value anytime
the model is saved or a dependency is triggered.
The field gets updated immediately and does not require *denorm.flush()*.
It currently cannot detect a direct (bulk) update to the model
it is declared in.
"""
def __init__(self, **kwargs):
"""
All arguments are passed on to the constructor of
BigIntegerField.
"""
self.dependencies = []
kwargs['default'] = 0
kwargs['editable'] = False
self.kwargs = kwargs
super(CacheKeyField, self).__init__(**kwargs)
def depend_on_related(self, *args, **kwargs):
"""
Add dependency information to the CacheKeyField.
Accepts the same arguments like the *denorm.depend_on_related* decorator
"""
from .dependencies import CacheKeyDependOnRelated
self.dependencies.append(CacheKeyDependOnRelated(*args, **kwargs))
def contribute_to_class(self, cls, name, *args, **kwargs):
for depend in self.dependencies:
depend.fieldname = name
self.denorm = denorms.BaseCacheKeyDenorm(depend_on_related=self.dependencies)
self.denorm.model = cls
self.denorm.fieldname = name
models.signals.class_prepared.connect(self.denorm.setup)
super(CacheKeyField, self).contribute_to_class(cls, name, *args, **kwargs)
def pre_save(self, model_instance, add):
if add:
value = self.denorm.func(model_instance)
else:
value = self.denorm.model.objects.filter(
pk=model_instance.pk,
).values_list(
self.attname, flat=True,
)[0]
setattr(model_instance, self.attname, value)
return value
def south_field_triple(self):
return (
'.'.join(('django', 'db', 'models', models.BigIntegerField.__name__)),
[],
{
'default': '0',
},
)
class CacheWrapper(object):
def __init__(self, field):
self.field = field
def __set__(self, obj, value):
key = 'CachedField_%s' % value
cached = self.field.cache.get(key)
if not cached:
cached = self.field.func(obj)
self.field.cache.set(key, cached, 60 * 60 * 24 * 30)
obj.__dict__[self.field.name] = cached
class CachedField(CacheKeyField):
def __init__(self, func=None, cache=None, *args, **kwargs):
self.func = func
self.cache = cache
super(CachedField, self).__init__(*args, **kwargs)
if func and cache:
for c, a, kw in self.func.depend:
self.depend_on_related(*a, **kw)
def contribute_to_class(self, cls, name, *args, **kwargs):
super(CachedField, self).contribute_to_class(cls, name, *args, **kwargs)
setattr(cls, self.name, CacheWrapper(self))
def cached(cache, *args, **kwargs):
def deco(func):
dbfield = CachedField(func, cache, *args, **kwargs)
return dbfield
return deco
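# A minimal usage sketch (illustrative; the Category/Item models and field
# names below are hypothetical) showing the decorator and CountField defined
# in this module:
#
#     from django.db import models
#     from denorm import denormalized, CountField
#
#     class Category(models.Model):
#         name = models.CharField(max_length=100)
#         # kept up to date incrementally as related Items are added/removed
#         item_count = CountField('item_set')
#
#     class Item(models.Model):
#         category = models.ForeignKey(Category, on_delete=models.CASCADE)
#         name = models.CharField(max_length=100)
#
#         # recomputed every time the Item instance is saved
#         @denormalized(models.CharField, max_length=100)
#         def display_name(self):
#             return self.name.upper()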
|
{
"content_hash": "cd3fa222339e285addfb5709545bbba6",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 121,
"avg_line_length": 35.956647398843934,
"alnum_prop": 0.5975403906438389,
"repo_name": "alex-mcleod/django-denorm",
"id": "f8d2fc3d90792d45fbc2731e1e49fd8b8199be00",
"size": "12465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "denorm/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "156029"
}
],
"symlink_target": ""
}
|
import argparse
import os
import stat
import sys
voice_parse_error = "Error in line %d: voice lines must look like 'David: zarvox' (without quotes)"
dialog_name_error = "Error in line %d: name '%s' was not assigned a voice"
dialog_parse_error = "Error in line %d: dialogue lines must look like 'David: hello there' (without quotes)"
say_line = "say -v %s \"%s\"\n"
def errorQuit(msg, args):
print msg % args
print "Script not generated."
sys.exit()
def main(filename):
print "converting..."
script = open(filename)
outfilename = filename + '.out'
fout = open(outfilename, 'w')
fout.write("#!/bin/bash \n\n")
voiceLines = True
voices = dict()
lineNumber = 0
for line in script:
lineNumber += 1
if line == "\n":
voiceLines = False
continue
if voiceLines: # are we still in the voices section
splits = line.strip().split(": ")
if len(splits) == 2:
name = splits[0]
voice = splits[1]
voices[name] = voice
else:
errorQuit(voice_parse_error, (lineNumber))
else:
splits = line.strip().split(": ", 1)
if len(splits) == 2:
name = splits[0]
dialogue = splits[1].replace("\"", "")
voice = voices.get(name)
if not voice:
errorQuit(dialog_name_error, (lineNumber, name))
else:
fout.write(say_line % (voice, dialogue))
else:
errorQuit(dialog_parse_error, (lineNumber))
script.close()
fout.close()
st = os.stat(outfilename)
os.chmod(outfilename, st.st_mode | stat.S_IEXEC)
print "Done! run './%s' to play the script!" % outfilename
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Turn your script into a play')
parser.add_argument('filename', type=str, help='the name of the file containing the script')
args = parser.parse_args()
filename = args.filename
main(filename)
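# A minimal example of the expected input file (illustrative; names, voices
# and the file name are hypothetical). Voice assignments come first and a
# blank line switches to the dialogue section:
#
#     David: zarvox
#     Alice: vicki
#
#     David: hello there
#     Alice: hi, how are you?
#
# Running `python SayScript.py play.txt` would then write an executable
# `play.txt.out` containing one `say -v <voice> "<line>"` command per
# dialogue line.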
|
{
"content_hash": "ca04a4e5e83455d47f2cbeb0a2ee7249",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 108,
"avg_line_length": 29.246575342465754,
"alnum_prop": 0.5587822014051522,
"repo_name": "david-crespo/say_script",
"id": "0f307b0f95ecaf4848777d7d978002b50bec4592",
"size": "2135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SayScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2135"
}
],
"symlink_target": ""
}
|
"""
Test how lldb reacts to wrong commands
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class UnknownCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_ambiguous_command(self):
command_interpreter = self.dbg.GetCommandInterpreter()
self.assertTrue(command_interpreter, VALID_COMMAND_INTERPRETER)
result = lldb.SBCommandReturnObject()
command_interpreter.HandleCommand("g", result)
self.assertFalse(result.Succeeded())
self.assertRegexpMatches(result.GetError(), "Ambiguous command 'g'. Possible matches:")
self.assertRegexpMatches(result.GetError(), "gui")
self.assertRegexpMatches(result.GetError(), "gdb-remote")
self.assertEquals(1, result.GetError().count("gdb-remote"))
@no_debug_info_test
def test_unknown_command(self):
command_interpreter = self.dbg.GetCommandInterpreter()
self.assertTrue(command_interpreter, VALID_COMMAND_INTERPRETER)
result = lldb.SBCommandReturnObject()
command_interpreter.HandleCommand("qbert", result)
self.assertFalse(result.Succeeded())
self.assertEquals(result.GetError(), "error: 'qbert' is not a valid command.\n")
|
{
"content_hash": "0a61a719b33e475be7346dfd92e079d9",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 35.69230769230769,
"alnum_prop": 0.7133620689655172,
"repo_name": "youtube/cobalt_sandbox",
"id": "c25f9afbfa4eda9f9844c12114a553deb11fd20b",
"size": "1392",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/wrong_commands/TestWrongCommands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import argparse
import sys
import os
import tempfile
import shutil
import subprocess
import ntpath
"""
-h, --help show this help message and exit
-o PATH, --output-path PATH
Directory where output files will be written
-n NAME Analysis name
--output-format FORMAT
The FORMAT for the output file
-N NUMBER, --samplings NUMBER
Number of samplings to compute the FM bias pvalue
-e ESTIMATOR, --estimator ESTIMATOR
Test estimator for computation.
--gt THRESHOLD, --gene-threshold THRESHOLD
Minimum number of mutations per gene to compute the FM
bias
--pt THRESHOLD, --pathway-threshold THRESHOLD
Minimum number of mutations per pathway to compute the
FM bias
-s SLICES, --slices SLICES
Slices to process separated by commas
-m PATH, --mapping PATH
File with mappings between genes and pathways to be
analysed
--save-data The input data matrix will be saved
--save-analysis The analysis results will be saved
-j CORES, --cores CORES
Number of cores to use for calculations. Default is 0
that means all the available cores
-D KEY=VALUE Define external parameters to be saved in the results
-L LEVEL, --log-level LEVEL
Define log level: debug, info, warn, error, critical,
notset
"""
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def main(params):
parser = argparse.ArgumentParser()
##TAKEN directly from the source code
parser.add_argument("-N", "--samplings", dest="num_samplings", type=int, default=10000, metavar="NUMBER",
help="Number of samplings to compute the FM bias pvalue")
parser.add_argument("-e", "--estimator", dest="estimator", metavar="ESTIMATOR",
choices=["mean", "median"], default="mean",
help="Test estimator for computation.")
parser.add_argument("--gt", "--gene-threshold", dest="mut_gene_threshold", type=int, default=2, metavar="THRESHOLD",
help="Minimum number of mutations per gene to compute the FM bias")
parser.add_argument("--pt", "--pathway-threshold", dest="mut_pathway_threshold", type=int, default=10, metavar="THRESHOLD",
help="Minimum number of mutations per pathway to compute the FM bias")
parser.add_argument("-s", "--slices", dest="slices", metavar="SLICES",
help="Slices to process separated by commas")
parser.add_argument("-m", "--mapping", dest="mapping", metavar="PATH",
help="File with mappings between genes and pathways to be analysed")
parser.add_argument("-f", "--filter", dest="filter", metavar="PATH",
help="File containing the features to be filtered. By default labels are includes,"
" labels preceded with - are excludes.")
#parser.add_argument("-o", "--output_path", type=str, required=True, help="Directory where output files will be written")
parser.add_argument("-o1", "--output1", type=str, dest="output1", required=True)
parser.add_argument("-o2", "--output2", type=str, dest="output2", required=False)
parser.add_argument("-n", "--analysis_name", type=str, required=False, help="Analysis name")
#parser.add_argument("-e", "--estimator", type=str, required=False, choices=["mean-empirical","median-empirical","mean-zscore","median-zscore"], help="Test estimator for computation")
parser.add_argument("--output-format", dest="output_format", required=False,
metavar="FORMAT",
choices=["tsv", "tsv.gz", "tsv.bz2"],
default="tsv",
help="The FORMAT for the output file")
parser.add_argument("-j", "--cores", dest="num_cores", type=int,
metavar="CORES",
help="Number of cores to use for calculations.\
Default is 0 that means all the available cores")
parser.add_argument("-D", dest="defines", metavar="KEY=VALUE", action="append", help="Define external parameters to be saved in the results")
parser.add_argument("-L", "--log-level", dest="log_level", metavar="LEVEL", default=None,
choices=["debug", "info", "warn", "error", "critical", "notset"],
help="Define log level: debug, info, warn, error, critical, notset")
parser.add_argument("-i", "--input", dest="input_path", required=True, type=str, help="Path to input file")
args = vars(parser.parse_args(params))
try:
mapping_path = args["mapping"]
except KeyError:
mapping_path = "no_mapping_path"
#if mapping_path=="no_mapping_path":
#params.remove(mapping_path)
#params.remove("-m")
output_dir = tempfile.mkdtemp()
params.append("-o")
params.append(output_dir)
params.append(args["input_path"])
cmd = "oncodrivefm "
i=0
while i<len(params):
p=params[i]
if p=="-i" or p=="-o1" or p=="-o2":
i+=2
else:
i+=1
cmd += " "+p
cmd += " 2>&1 "
#tmp = tempfile.NamedTemporaryFile( dir=output_dir ).name
#tmp_stderr = open( tmp, 'wb' )
print cmd
proc = subprocess.Popen(args=cmd, shell=True)
returncode = proc.wait()
#tmp_stderr.close()
if args['analysis_name'] is not None:
prefix = args["analysis_name"]
else:
##refer: http://stackoverflow.com/a/8384788/756986
prefix = ntpath.basename(args["input_path"]).split(".")[0]
if args["mapping"] is not None:
pathway_file = prefix+"-pathways"
else:
pathway_file = None
output_format = args["output_format"]
genes_output_file_name = os.path.join(output_dir, prefix+"-genes."+output_format)
shutil.move(genes_output_file_name,args["output1"])
if pathway_file:
pathway_output_file_name = os.path.join(output_dir, pathway_file+"."+output_format)
shutil.move(pathway_output_file_name,args["output2"])
if os.path.exists( output_dir ):
shutil.rmtree( output_dir )
if __name__=="__main__":
main(sys.argv[1:])
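# A minimal invocation sketch (illustrative; all file names are hypothetical).
# The wrapper forwards most options to `oncodrivefm`, injects a temporary
# output directory, and then moves the generated "-genes" (and, when -m is
# given, "-pathways") result files to the Galaxy output paths -o1/-o2:
#
#     python oncodrivefm_tool.py \
#         -i mutations.tdm \
#         -n demo -e mean -N 1000 \
#         -m gene_pathway_mapping.tsv \
#         -o1 genes_output.tsv \
#         -o2 pathways_output.tsv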
|
{
"content_hash": "e3d0aeea9c6f582c5e0ac8d96b1bd912",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 187,
"avg_line_length": 51.12307692307692,
"alnum_prop": 0.5761360216671683,
"repo_name": "saketkc/galaxy_tools",
"id": "ec91eba1e3a25ee152177fd9fd8da97bdb6ea0da",
"size": "6664",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oncodrivefm_tool/oncodrivefm_tool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "276"
},
{
"name": "Erlang",
"bytes": "1082"
},
{
"name": "JavaScript",
"bytes": "240810"
},
{
"name": "Python",
"bytes": "223766"
},
{
"name": "TeX",
"bytes": "11885"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'NetworkGroup.slug'
db.delete_column(u'organisation_networkgroup', 'slug')
# Adding field 'NetworkGroup.region_slug'
db.add_column(u'organisation_networkgroup', 'region_slug',
self.gf('django.db.models.fields.SlugField')(default=None, max_length=50, null=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'NetworkGroup.slug'
db.add_column(u'organisation_networkgroup', 'slug',
self.gf('django.db.models.fields.SlugField')(default='', max_length=50),
keep_default=False)
# Deleting field 'NetworkGroup.region_slug'
db.delete_column(u'organisation_networkgroup', 'region_slug')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.BoardMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.featuredproject': {
'Meta': {'object_name': 'FeaturedProject', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Project']"})
},
u'organisation.networkgroup': {
'Meta': {'unique_together': "(('country', 'region'),)", 'object_name': 'NetworkGroup'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'country_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_information': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.NetworkGroupMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'region_slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.networkgroupmembership': {
'Meta': {'object_name': 'NetworkGroupMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'networkgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.NetworkGroup']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'sourcecode_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Theme']", 'symmetrical': 'False', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.ProjectType']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.projectlist': {
'Meta': {'object_name': 'ProjectList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.ProjectType']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'})
},
u'organisation.projecttype': {
'Meta': {'object_name': 'ProjectType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.theme': {
'Meta': {'object_name': 'Theme'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'ordering': "['-order', 'name']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.UnitMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.workinggroup': {
'Meta': {'object_name': 'WorkingGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incubation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['organisation']
|
{
"content_hash": "99e56e13f22cff1adaf88a3c107f1871",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 200,
"avg_line_length": 78.7741935483871,
"alnum_prop": 0.5566475566475566,
"repo_name": "MjAbuz/foundation",
"id": "04ca34ccda32363d8cd8336fd14d3e36e7dc79ae",
"size": "14676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foundation/organisation/migrations/0018_auto__del_field_networkgroup_slug__add_field_networkgroup_region_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11867"
},
{
"name": "HTML",
"bytes": "54905"
},
{
"name": "JavaScript",
"bytes": "345"
},
{
"name": "Python",
"bytes": "685955"
},
{
"name": "Shell",
"bytes": "4363"
}
],
"symlink_target": ""
}
|
"""TPU Feature Column Library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import enum
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2
from tensorflow.python.tpu.feature_column import _SUPPORTED_SEQUENCE_COLUMNS
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_ALLOWED_DEVICES = ['cpu', 'tpu_tensor_core', 'tpu_embedding_core']
_TENSOR_CORE_MASK_KEY_SUFFIX = '__TENSOR_CORE_MASK'
class EmbeddingDevice(enum.Enum):
CPU = 1
TPU_TENSOR_CORE = 2
TPU_EMBEDDING_CORE = 3
@tf_export(v1=['tpu.experimental.embedding_column'])
def embedding_column_v2(categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None,
embedding_lookup_device=None,
tensor_core_shape=None,
use_safe_embedding_lookup=True):
"""TPU version of `tf.compat.v1.feature_column.embedding_column`.
Note that the interface for `tf.tpu.experimental.embedding_column` is
different from that of `tf.compat.v1.feature_column.embedding_column`: The
following arguments are NOT supported: `ckpt_to_load_from`,
`tensor_name_in_ckpt`, `max_norm` and `trainable`.
Use this function in place of `tf.compat.v1.feature_column.embedding_column`
when you want to use the TPU to accelerate your embedding lookups via TPU
embeddings.
```
column = tf.feature_column.categorical_column_with_identity(...)
tpu_column = tf.tpu.experimental.embedding_column(column, 10)
...
def model_fn(features):
dense_feature = tf.keras.layers.DenseFeatures(tpu_column)
embedded_feature = dense_feature(features)
...
estimator = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
column=[tpu_column],
...))
```
Args:
categorical_column: A categorical column returned from
`categorical_column_with_identity`, `weighted_categorical_column`,
`categorical_column_with_vocabulary_file`,
`categorical_column_with_vocabulary_list`,
`sequence_categorical_column_with_identity`,
`sequence_categorical_column_with_vocabulary_file`,
`sequence_categorical_column_with_vocabulary_list`
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
max_sequence_length: A non-negative integer specifying the max sequence
length. Any sequence shorter than this will be padded with 0 embeddings
and any sequence longer will be truncated. This must be positive for
sequence features and 0 for non-sequence features.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table. If you intend to use the same learning rate
for multiple embedding tables, please ensure that you pass the exact same
Python function to all calls of embedding_column, otherwise performance
may suffer.
embedding_lookup_device: The device on which to run the embedding lookup.
Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core".
If specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
If not specified, the default behavior is embedding lookup on
"tpu_embedding_core" for training and "cpu" for inference.
Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
Valid options for serving : ["cpu", "tpu_tensor_core"]
For training, tpu_embedding_core is good for large embedding vocab (>1M),
otherwise, tpu_tensor_core is often sufficient.
For serving, doing embedding lookup on tpu_tensor_core during serving is
a way to reduce host cpu usage in cases where that is a bottleneck.
tensor_core_shape: If supplied, a list of integers which specifies
the intended dense shape to run embedding lookup for this feature on
TensorCore. The batch dimension can be left None or -1 to indicate
a dynamic shape. Only rank 2 shapes currently supported.
use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
there are no empty rows and all weights and ids are positive at the
expense of extra compute cost. This only applies to rank 2 (NxM) shaped
input tensors. Defaults to true, consider turning off if the above checks
are not needed. Note that having empty rows will not trigger any error
though the output result might be 0 or omitted.
Returns:
A `_TPUEmbeddingColumnV2`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
"""
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
'categorical_column for tpu '
' embedding_column must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if tensor_core_shape and len(tensor_core_shape) != 2:
raise ValueError(
'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
if (embedding_lookup_device and
embedding_lookup_device not in _ALLOWED_DEVICES):
raise ValueError('If set, embedding_lookup_device must be in ',
_ALLOWED_DEVICES)
if embedding_lookup_device == 'cpu':
embedding_lookup_device = EmbeddingDevice.CPU
elif embedding_lookup_device == 'tpu_tensor_core':
embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE
elif embedding_lookup_device == 'tpu_embedding_core':
embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE
if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
if not tensor_core_shape:
raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires '
'tensor_core_shape to be set.')
if isinstance(categorical_column, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError('embedding_lookup_device=tpu_tensor_core currently does '
'not support sequence columns.')
if not embedding_lookup_device:
return _TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn,
use_safe_embedding_lookup=use_safe_embedding_lookup)
else:
return _TPUDeviceSpecificEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn,
embedding_lookup_device=embedding_lookup_device,
tensor_core_shape=tensor_core_shape,
use_safe_embedding_lookup=use_safe_embedding_lookup)
@tf_export(v1=['tpu.experimental.shared_embedding_columns'])
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_lengths=None,
learning_rate_fn=None,
embedding_lookup_device=None,
tensor_core_shape=None,
use_safe_embedding_lookup=True):
"""TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`.
Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is
different from that of `tf.compat.v1.feature_column.shared_embedding_columns`:
The following arguments are NOT supported: `ckpt_to_load_from`,
`tensor_name_in_ckpt`, `max_norm` and `trainable`.
Use this function in place of
`tf.compat.v1.feature_column.shared_embedding_columns` when you want to use the
TPU to accelerate your embedding lookups via TPU embeddings.
```
column_a = tf.feature_column.categorical_column_with_identity(...)
column_b = tf.feature_column.categorical_column_with_identity(...)
tpu_columns = tf.tpu.experimental.shared_embedding_columns(
[column_a, column_b], 10)
...
def model_fn(features):
dense_feature = tf.keras.layers.DenseFeatures(tpu_columns)
embedded_feature = dense_feature(features)
...
estimator = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
...
embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
column=tpu_columns,
...))
```
Args:
categorical_columns: A list of categorical columns returned from
`categorical_column_with_identity`, `weighted_categorical_column`,
`categorical_column_with_vocabulary_file`,
`categorical_column_with_vocabulary_list`,
`sequence_categorical_column_with_identity`,
`sequence_categorical_column_with_vocabulary_file`,
`sequence_categorical_column_with_vocabulary_list`
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row for a non-sequence column. For more information, see
`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
max_sequence_lengths: A list of non-negative integers, either None or empty
or the same length as the argument categorical_columns. Entries
corresponding to non-sequence columns must be 0 and entries corresponding
to sequence columns specify the max sequence length for the column. Any
sequence shorter than this will be padded with 0 embeddings and any
sequence longer will be truncated.
learning_rate_fn: A function that takes global step and returns learning
rate for the embedding table. If you intend to use the same learning rate
for multiple embedding tables, please ensure that you pass the exact same
Python function to all calls of shared_embedding_columns, otherwise
performance may suffer.
embedding_lookup_device: The device on which to run the embedding lookup.
Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core". If
specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
Defaults to "cpu". If not specified, the default behavior is embedding
lookup on "tpu_embedding_core" for training and "cpu" for inference.
Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
Valid options for serving : ["cpu", "tpu_tensor_core"]
For training, tpu_embedding_core is good for large embedding vocab (>1M),
otherwise, tpu_tensor_core is often sufficient.
For serving, doing embedding lookup on tpu_tensor_core during serving is
a way to reduce host cpu usage in cases where that is a bottleneck.
tensor_core_shape: If supplied, a list of integers which specifies the
intended dense shape to run embedding lookup for this feature on
TensorCore. The batch dimension can be left None or -1 to indicate a
dynamic shape. Only rank 2 shapes currently supported.
use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
there are no empty rows and all weights and ids are positive at the
expense of extra compute cost. This only applies to rank 2 (NxM) shaped
input tensors. Defaults to true, consider turning off if the above checks
are not needed. Note that having empty rows will not trigger any error
though the output result might be 0 or omitted.
Returns:
A list of `_TPUSharedEmbeddingColumnV2`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if `initializer` is specified but not callable.
ValueError: if `max_sequence_lengths` is specified and not the same length
as `categorical_columns`.
ValueError: if `max_sequence_lengths` is positive for a non-sequence column
or 0 for a sequence column.
"""
for categorical_column in categorical_columns:
if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
raise TypeError(
'categorical_column for tpu '
' shared_embedding_columns must be type %s, got %s.' % (' or '.join([
cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
]), type(categorical_column)))
if not max_sequence_lengths:
max_sequence_lengths = [0] * len(categorical_columns)
if len(max_sequence_lengths) != len(categorical_columns):
raise ValueError('max_sequence_lengths and categorical_columns must be of '
'the same length. len(max_sequence_lengths)={} '
'len(categorical_columns)={}.'.format(
len(max_sequence_lengths), len(categorical_columns)))
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if tensor_core_shape and len(tensor_core_shape) != 2:
raise ValueError(
'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape))
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. ')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access
for c in sorted_columns[1:]:
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
tpu_columns = []
column_creator = fc_lib.SharedEmbeddingColumnCreator(
dimension=dimension, initializer=initializer, ckpt_to_load_from=None,
tensor_name_in_ckpt=None, num_buckets=num_buckets, trainable=None,
name=shared_embedding_collection_name)
if (embedding_lookup_device and
embedding_lookup_device not in _ALLOWED_DEVICES):
raise ValueError('If set, embedding_lookup_device must be in ',
_ALLOWED_DEVICES)
if embedding_lookup_device == 'cpu':
embedding_lookup_device = EmbeddingDevice.CPU
elif embedding_lookup_device == 'tpu_tensor_core':
embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE
elif embedding_lookup_device == 'tpu_embedding_core':
embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE
if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
if not tensor_core_shape:
raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires '
'tensor_core_shape to be set.')
for c in sorted_columns:
if isinstance(c, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError('embedding_lookup_device=tpu_tensor_core currently '
'does not support sequence columns.')
# Create the state (_SharedEmbeddingColumnLayer) here.
for categorical_column, max_sequence_length in zip(
categorical_columns, max_sequence_lengths):
if not embedding_lookup_device:
column = _TPUSharedEmbeddingColumnV2(
categorical_column=categorical_column,
shared_embedding_column_creator=column_creator,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn,
use_safe_embedding_lookup=use_safe_embedding_lookup)
else:
column = _TPUSharedDeviceSpecificEmbeddingColumnV2(
categorical_column=categorical_column,
shared_embedding_column_creator=column_creator,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn,
embedding_lookup_device=embedding_lookup_device,
tensor_core_shape=tensor_core_shape,
use_safe_embedding_lookup=use_safe_embedding_lookup)
tpu_columns.append(column)
return tpu_columns
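# A minimal sketch (illustrative; feature names, vocabulary size and the dense
# shape are hypothetical) of the TensorCore lookup path described in the
# docstring above:
#
#     column_a = tf.feature_column.categorical_column_with_identity(
#         'watched_a', num_buckets=10000)
#     column_b = tf.feature_column.categorical_column_with_identity(
#         'watched_b', num_buckets=10000)
#     tpu_columns = tf.tpu.experimental.shared_embedding_columns(
#         [column_a, column_b], dimension=16,
#         embedding_lookup_device='tpu_tensor_core',
#         tensor_core_shape=[None, 10])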
class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn):
"""Core Embedding Column."""
def __new__(cls,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=False):
del bypass_scope_validation
return fc_lib.EmbeddingColumn.__new__(
cls,
categorical_column,
dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True,
use_safe_embedding_lookup=use_safe_embedding_lookup)
def __getnewargs__(self):
return (self._tpu_categorical_column, self.dimension, self.combiner,
self.initializer, self._max_sequence_length, self._learning_rate_fn)
def __deepcopy__(self, memo):
return _TPUEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()))
def __init__(self,
categorical_column,
dimension,
combiner='mean',
initializer=None,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=False):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._key = None
# If true, scope validation is skipped to allow the same column to be used
# in multiple variable scopes. By default, this is False, and we expect a
# 1:1 mapping between feature columns and scopes.
self._bypass_scope_validation = bypass_scope_validation
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets, self.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self.categorical_column.name
def get_initializer(self):
return self.initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_dense_tensor(
self, inputs, weight_collections, trainable)
# TPU mode
# Get the embeddings from the LazyBuilder.
tensor = inputs.get(self.get_feature_key_name())
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
bypass_scope_validation=self._bypass_scope_validation)
return tensor
def create_state(self, state_manager):
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.create_state(
self, state_manager)
    # create_state is called for the EmbeddingColumn to create its embedding
    # variables under feature column V2. Since we are on TPU here, we only
    # record the variable scope instead of creating the variables.
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
bypass_scope_validation=self._bypass_scope_validation)
def get_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_dense_tensor(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
return tensor
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
self, inputs, weight_collections, trainable)
tensor = inputs.get(self.get_feature_key_name())
tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())
# inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
# We need to undo this to match the standard CPU sequence embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
# Add to collection for _create_tpu_embedding_variables_and_ops
_record_variable_scope_and_name(
self.get_embedding_var_name(),
'embedding_weights',
bypass_scope_validation=self._bypass_scope_validation)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
class _TPUSharedEmbeddingColumnV2(_TPUBaseEmbeddingColumn,
fc_lib.SharedEmbeddingColumn):
"""Core Shared Embedding Column."""
def __new__(cls,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True):
return fc_lib.SharedEmbeddingColumn.__new__(
cls,
categorical_column,
combiner=combiner,
shared_embedding_column_creator=shared_embedding_column_creator,
max_norm=None,
use_safe_embedding_lookup=use_safe_embedding_lookup)
def __getnewargs__(self):
return (self._tpu_categorical_column, self.shared_embedding_column_creator,
self.combiner, self._initializer,
self._shared_embedding_collection_name, self._max_sequence_length,
self._learning_rate_fn)
def __deepcopy__(self, memo):
return _TPUSharedEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()))
def __init__(self,
categorical_column,
shared_embedding_column_creator,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True):
_TPUBaseEmbeddingColumn.__init__(
self,
categorical_column,
max_sequence_length=max_sequence_length,
learning_rate_fn=learning_rate_fn)
self._initializer = initializer
self._shared_embedding_collection_name = shared_embedding_collection_name
def get_combiner(self):
return self.combiner
def get_embedding_table_size(self):
"""Returns num_ids and width."""
return (self.categorical_column._num_buckets,
self.shared_embedding_column_creator.dimension)
def get_feature_key_name(self):
"""get_feature_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.categorical_column.name
return self.categorical_column.name
def get_weight_key_name(self):
"""get_weight_key_name."""
if self.is_categorical_column_weighted():
return self.categorical_column.weight_feature_key
return None
def get_embedding_var_name(self):
"""get_embedding_var_name."""
return self._shared_embedding_collection_name
def get_initializer(self):
return self._initializer
def is_categorical_column_weighted(self):
"""Check if the categorical column of the embedding column is weighted."""
if isinstance(
self.categorical_column,
(
fc._WeightedCategoricalColumn, # pylint: disable=protected-access
fc_lib.WeightedCategoricalColumn)):
return True
return False
def _get_dense_tensor_internal(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
self, transformation_cache, state_manager)
# TPU mode
# Get the embeddings from the FeatureTransformationCache.
tensor = transformation_cache.get(self.get_feature_key_name(),
state_manager)
# Add to collection for _create_tpu_embedding_variables_and_ops
# Note that in Feature Column V2, shared embeddings have no scope.
_record_variable_scope_and_name(
self.get_embedding_var_name(),
self.shared_embedding_column_creator._name,
is_shared_embedding=True)
return tensor
def get_sequence_dense_tensor(
self, transformation_cache, state_manager):
if tpu.under_tpu_inference_context():
def host_computation():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
return tpu.outside_compilation(host_computation)
if _is_running_on_cpu():
return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
self, transformation_cache, state_manager)
tensor = self._get_dense_tensor_internal(
transformation_cache, state_manager)
tensor_lengths = transformation_cache.get(
self.get_sequence_length_feature_key_name(),
state_manager)
# FeatureTransformationCache expands rank 1 tensors (like sequence length)
# to rank 2. We need to undo this to match the standard CPU sequence
# embedding.
tensor_lengths = array_ops.squeeze(tensor_lengths, -1)
return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=tensor, sequence_length=tensor_lengths)
def split_sequence_columns_v2(feature_columns):
"""Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g.
def model_fn(features):
sequence_columns, feature_columns = (
tf.tpu.feature_column.split_sequence_columns(feature_columns))
input = tf.feature_column.input_layer(
features=features, feature_columns=feature_columns)
sequence_features, sequence_lengths = (
tf.contrib.feature_column.sequence_input_layer(
features=features, feature_columns=sequence_columns))
Args:
feature_columns: A list of _TPUEmbeddingColumns to split.
Returns:
Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
second is the non-sequence columns.
"""
sequence_columns = []
non_sequence_columns = []
for column in feature_columns:
if not isinstance(column, (_TPUEmbeddingColumnV2,
_TPUSharedEmbeddingColumnV2)):
raise TypeError(
'column must be a _TPUEmbeddingColumnV2 or '
'_TPUSharedEmbeddingColumnV2 but got %s instead.' % (type(column)))
if column.is_sequence_column():
sequence_columns.append(column)
else:
non_sequence_columns.append(column)
return sequence_columns, non_sequence_columns
def sparse_embedding_aggregate_slice(params,
values_and_values_mask,
combiner='mean',
name='sparse_embedding_aggregate_slice'):
"""Uses XLA's dynamic slice operations to perform embedding lookups.
From third_party/cloud_tpu/models/movielens/tpu_embedding.py
Args:
params: Tensor of embedding table. Rank 2 (table_size x embedding dim)
    values_and_values_mask: A two-tuple containing:
      values - Tensor of embedding indices. Rank 2 (batch x n_indices).
      values_mask - Tensor of mask / weights. Rank 2 (batch x n_indices).
combiner: The combiner to use for the embedding lookup. Currently supports
'sum' and 'mean'.
name: Optional name scope for created ops
Returns:
Rank 2 tensor of aggregated (per batch element) embedding vectors.
Raises:
ValueError: Combiner is not supported.
"""
values, values_mask = values_and_values_mask # unpack the two-tuple
with ops.name_scope(name):
_, embedding_dimension = params.get_shape().as_list()
n_batch, n_indices_padded = values.get_shape().as_list()
if not n_batch:
n_batch = -1
emb_lookup = array_ops.reshape(
embedding_ops.embedding_lookup(
params, array_ops.reshape(values, [n_batch, n_indices_padded])),
[n_batch, n_indices_padded, embedding_dimension])
values_mask_broadcast = array_ops.reshape(values_mask,
[n_batch, n_indices_padded, 1])
aggregate_emb = math_ops.reduce_sum(
emb_lookup * values_mask_broadcast, axis=1)
if combiner == 'sum':
return aggregate_emb
elif combiner == 'mean':
      # In the case we have an empty row, both aggregate_emb and
      # math_ops.reduce_sum(values_mask_broadcast, axis=1) will be 0. Thus,
      # we take the max of the divisor and a non-zero value to prevent NaNs.
      # Note that math_ops.reduce_sum(values_mask_broadcast, axis=1) only takes
      # whole-number values, so 1.0 is the smallest possible non-zero value.
return aggregate_emb / math_ops.maximum(
math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0)
else:
raise ValueError('Dense TPU Embedding does not support combiner '
'other than sum and mean.')
def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):
"""Creates statically-sized Tensors containing indices and weights.
From third_party/cloud_tpu/models/movielens/tpu_embedding.py
Also computes sparse_indices.values % embedding_table_size, for equivalent
functionality to sparse_column_with_integerized_feature. The returned
padded weight Tensor also doubles as a mask indicating which values in
the returned padded indices Tensor are indices versus padded zeros.
Args:
sparse_indices: SparseTensor of embedding lookup indices.
padded_size: Number of columns of the returned Tensors. Indices which fall
out of bounds will be truncated to the padded size.
Returns:
(sparse_indices.values padded to the specified size,
a mask the same size as the returned padded values in which 0s
indicate padded locations and 1s (or values from sparse_weights)
indicate actual values)
"""
batch_size = sparse_indices.dense_shape[0]
sparse_indices = sparse_ops.sparse_slice(sparse_indices, [0, 0],
[batch_size, padded_size])
indices, values = sparse_indices.indices, sparse_indices.values
padded_values = array_ops.scatter_nd(
indices,
math_ops.cast(values, dtypes.int32),
shape=(batch_size, padded_size))
weights = array_ops.ones_like(values, dtype=dtypes.float32)
padded_mask = array_ops.scatter_nd(
indices, weights, shape=(batch_size, padded_size))
return padded_values, padded_mask
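# A minimal worked example of the padding above (hypothetical input, not part
# of the library): for a SparseTensor with dense_shape [2, 5],
# indices [[0, 0], [0, 1], [1, 0]] and values [7, 3, 9], padding to
# padded_size=4 yields
#   padded_values = [[7, 3, 0, 0], [9, 0, 0, 0]]
#   padded_mask   = [[1., 1., 0., 0.], [1., 0., 0., 0.]]
# so the mask doubles as the per-slot weight consumed by
# sparse_embedding_aggregate_slice above.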
def _check_invalid_cases(embedding_lookup_device):
"""Checks for invalid embedding_lookup_device configurations."""
if (tpu.under_tpu_inference_context() and
embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE):
raise ValueError(
'Using embedding_lookup_device=tpu_embedding_core during inference '
'is not supported.')
if embedding_lookup_device == EmbeddingDevice.CPU:
if not tpu.under_tpu_inference_context():
raise ValueError(
'Using TPUEmbeddingColumn with embedding_lookup_device="cpu" '
'during training is not supported.')
class _TPUDeviceSpecificEmbeddingColumnV2(_TPUEmbeddingColumnV2):
"""TPUEmbeddingColumn which allows serving on TensorCore."""
def __new__(cls, *args, **kwargs):
    # For __new__, capture the inference dense shape and the lookup device,
    # then call the parent.
if 'tensor_core_shape' in kwargs:
cls._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
return _TPUEmbeddingColumnV2.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
    # For __init__, capture the inference dense shape and the lookup device,
    # then call the parent.
if 'tensor_core_shape' in kwargs:
self._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
self._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
_TPUEmbeddingColumnV2.__init__(self, *args, **kwargs)
def __deepcopy__(self, memo):
return _TPUDeviceSpecificEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
tensor_core_shape=self._tensor_core_shape,
embedding_lookup_device=self._embedding_lookup_device)
def create_state(self, state_manager):
_check_invalid_cases(self._embedding_lookup_device)
# CPU case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).create_state(state_manager)
    # TPU_TENSOR_CORE case.
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Private method that follows get_dense_tensor."""
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).get_dense_tensor(transformation_cache, state_manager)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).get_dense_tensor(transformation_cache, state_manager)
    # TPU_TENSOR_CORE case.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = transformation_cache.get(self.categorical_column.name,
state_manager)
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = transformation_cache.get(self.categorical_column.name,
state_manager)
mask = transformation_cache.get(
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
state_manager)
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor(inputs, weight_collections,
trainable)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor(inputs, weight_collections,
trainable)
    # TPU_TENSOR_CORE case.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = inputs.get(self.get_feature_key_name())
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = inputs.get(self.get_feature_key_name())
mask = inputs.get(self.get_feature_key_name() +
_TENSOR_CORE_MASK_KEY_SUFFIX)
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
class _TPUSharedDeviceSpecificEmbeddingColumnV2(_TPUSharedEmbeddingColumnV2):
"""TPUSharedEmbeddingColumnV2 which allows serving on TensorCore."""
def __new__(cls, *args, **kwargs):
    # For __new__, capture the inference dense shape and the lookup device,
    # then call the parent.
if 'tensor_core_shape' in kwargs:
cls._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
return _TPUSharedEmbeddingColumnV2.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
    # For __init__, capture the inference dense shape and the lookup device,
    # then call the parent.
if 'tensor_core_shape' in kwargs:
self._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
self._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
_TPUSharedEmbeddingColumnV2.__init__(self, *args, **kwargs)
def __deepcopy__(self, memo):
return _TPUSharedDeviceSpecificEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
tensor_core_shape=self._tensor_core_shape,
embedding_lookup_device=self._embedding_lookup_device)
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
"""Private method that follows _get_dense_tensor_internal."""
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor_internal(transformation_cache,
state_manager)
# TPU_EMBEDDING_CORE case.
if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor_internal(transformation_cache,
state_manager)
    # TPU_TENSOR_CORE case.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = transformation_cache.get(self.categorical_column.name,
state_manager)
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = transformation_cache.get(self.categorical_column.name,
state_manager)
mask = transformation_cache.get(
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
state_manager)
# Do a dense embedding lookup on TensorCore.
embedding_weights = self.shared_embedding_column_creator.embedding_weights
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
|
{
"content_hash": "992ecab301958d4aec33bfe5fd959822",
"timestamp": "",
"source": "github",
"line_count": 1078,
"max_line_length": 112,
"avg_line_length": 43.526901669758814,
"alnum_prop": 0.6774221047696176,
"repo_name": "petewarden/tensorflow",
"id": "32472053791d15df5a69049a1d59cb63b1f6f865",
"size": "47600",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/tpu/feature_column_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import random
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import lambda_stmt
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import update
from sqlalchemy.future import select
from sqlalchemy.orm import aliased
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from .inheritance import _poly_fixtures
from .test_query import QueryTest
class LambdaTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
# we want to test the lambda expiration logic so use backend
# to exercise that
__backend__ = True
run_setup_mappers = None
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses")
},
)
return User, Address
def test_user_cols_single_lambda(self, plain_fixture):
User, Address = plain_fixture
q = select(lambda: (User.id, User.name)).select_from(lambda: User)
self.assert_compile(q, "SELECT users.id, users.name FROM users")
def test_user_cols_single_lambda_query(self, plain_fixture):
User, Address = plain_fixture
s = fixture_session()
q = s.query(lambda: (User.id, User.name)).select_from(lambda: User)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name FROM users",
)
def test_multiple_entities_single_lambda(self, plain_fixture):
User, Address = plain_fixture
q = select(lambda: (User, Address)).join(lambda: User.addresses)
self.assert_compile(
q,
"SELECT users.id, users.name, addresses.id AS id_1, "
"addresses.user_id, addresses.email_address "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
def test_cols_round_trip(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
# note this does a traversal + _clone of the InstrumentedAttribute
# for the first time ever
def query(names):
stmt = lambda_stmt(
lambda: select(User.name, Address.email_address)
.where(User.name.in_(names))
.join(User.addresses)
) + (lambda s: s.order_by(User.id, Address.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.all(),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.all(),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
for i in range(5):
fn = random.choice([go1, go2])
fn()
@testing.combinations(
(True, True),
(True, False),
(False, False),
argnames="use_aliased,use_indirect_access",
)
def test_entity_round_trip(
self, plain_fixture, use_aliased, use_indirect_access
):
User, Address = plain_fixture
s = Session(testing.db, future=True)
if use_aliased:
if use_indirect_access:
def query(names):
class Foo(object):
def __init__(self):
self.u1 = aliased(User)
f1 = Foo()
stmt = lambda_stmt(
lambda: select(f1.u1)
.where(f1.u1.name.in_(names))
.options(selectinload(f1.u1.addresses)),
track_on=[f1.u1],
).add_criteria(
lambda s: s.order_by(f1.u1.id), track_on=[f1.u1]
)
return s.execute(stmt)
else:
def query(names):
u1 = aliased(User)
stmt = lambda_stmt(
lambda: select(u1)
.where(u1.name.in_(names))
.options(selectinload(u1.addresses))
) + (lambda s: s.order_by(u1.id))
return s.execute(stmt)
else:
def query(names):
stmt = lambda_stmt(
lambda: select(User)
.where(User.name.in_(names))
.options(selectinload(User.addresses))
) + (lambda s: s.order_by(User.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
self.assert_sql_count(testing.db, fn, 2)
def test_lambdas_rejected_in_options(self, plain_fixture):
User, Address = plain_fixture
assert_raises_message(
exc.ArgumentError,
"ExecutionOption Core or ORM object expected, got",
select(lambda: User).options,
lambda: subqueryload(User.addresses),
)
def test_subqueryload_internal_lambda(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
def query(names):
stmt = (
select(lambda: User)
.where(lambda: User.name.in_(names))
.options(subqueryload(User.addresses))
.order_by(lambda: User.id)
)
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
self.assert_sql_count(testing.db, fn, 2)
def test_subqueryload_external_lambda_caveats(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
def query(names):
stmt = lambda_stmt(
lambda: select(User)
.where(User.name.in_(names))
.options(subqueryload(User.addresses))
) + (lambda s: s.order_by(User.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
with testing.expect_warnings(
'subqueryloader for "User.addresses" must invoke lambda '
r"callable at .*LambdaElement\(<code object <lambda> "
r".*test_lambdas.py.* in order to produce a new query, "
r"decreasing the efficiency of caching"
):
self.assert_sql_count(testing.db, fn, 2)
@testing.combinations(
lambda s, User, Address: s.query(lambda: User).join(lambda: Address),
lambda s, User, Address: s.query(lambda: User).join(
lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
lambda: Address, lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
Address, lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
lambda: Address, User.addresses
),
lambda User, Address: select(lambda: User)
.join(lambda: Address)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: Address, lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(Address, lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: Address, User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
argnames="test_case",
)
def test_join_entity_arg(self, plain_fixture, test_case):
User, Address = plain_fixture
s = Session(testing.db, future=True)
stmt = testing.resolve_lambda(test_case, **locals())
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
class PolymorphicTest(_poly_fixtures._Polymorphic):
run_setup_mappers = "once"
__dialect__ = "default"
def test_join_second_prop_lambda(self):
Company = self.classes.Company
Manager = self.classes.Manager
s = Session(future=True)
q = s.query(Company).join(lambda: Manager, lambda: Company.employees)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id",
)
class UpdateDeleteTest(fixtures.MappedTest):
__backend__ = True
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(32)),
Column("age_int", Integer),
)
Table(
"addresses",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", ForeignKey("users.id")),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[
dict(id=1, name="john", age_int=25),
dict(id=2, name="jack", age_int=47),
dict(id=3, name="jill", age_int=29),
dict(id=4, name="jane", age_int=37),
],
)
@classmethod
def setup_mappers(cls):
User = cls.classes.User
users = cls.tables.users
Address = cls.classes.Address
addresses = cls.tables.addresses
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"age": users.c.age_int,
"addresses": relationship(Address),
},
)
cls.mapper_registry.map_imperatively(Address, addresses)
def test_update(self):
User, Address = self.classes("User", "Address")
s = Session(testing.db, future=True)
def go(ids, values):
stmt = lambda_stmt(lambda: update(User).where(User.id.in_(ids)))
s.execute(
stmt,
values,
                # note this currently just unrolls the lambda on the statement,
                # so lambda caching for updates is not actually that useful
                # unless synchronize_session is turned off.
                # "evaluate" is similar; it just doesn't work for IN yet.
execution_options={"synchronize_session": "fetch"},
)
go([1, 2], {"name": "jack2"})
eq_(
s.execute(select(User.id, User.name).order_by(User.id)).all(),
[(1, "jack2"), (2, "jack2"), (3, "jill"), (4, "jane")],
)
go([3], {"name": "jane2"})
eq_(
s.execute(select(User.id, User.name).order_by(User.id)).all(),
[(1, "jack2"), (2, "jack2"), (3, "jane2"), (4, "jane")],
)
|
{
"content_hash": "c02d6aa4f33223759937b4a1cd6acdca",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 79,
"avg_line_length": 31.481641468682504,
"alnum_prop": 0.514681668496158,
"repo_name": "j5int/sqlalchemy",
"id": "6de702ad4b0cd529cc05a2911e6daf583cec5c70",
"size": "14576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/test_lambdas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "63151"
},
{
"name": "Python",
"bytes": "15339979"
}
],
"symlink_target": ""
}
|
"""Simulates carbon-cache after loading the carbon plugin.
This file is required as there is no way to load a plugin from
carbon configuration, such as what
https://github.com/graphite-project/carbon/pull/139
implements.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
def _manipulate_paths_like_upstream(_executable, sys_path):
"""Replicate the sys.path magic from carbon-cache.
Upstream's carbon-cache adds the lib sister directory of its
parent bin directory to sys.path. This does the same.
"""
bin_dir = os.path.dirname(os.path.abspath(_executable))
root_dir = os.path.dirname(bin_dir)
lib_dir = os.path.join(root_dir, "lib")
sys_path.insert(0, lib_dir)
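# A minimal usage sketch (hypothetical install path, for illustration only):
# compute the lib directory the helper above would prepend, without touching
# the real sys.path.
def _example_lib_dir(executable="/opt/graphite/bin/bg-carbon-cache"):
    """Return the directory _manipulate_paths_like_upstream would prepend."""
    fake_sys_path = []
    _manipulate_paths_like_upstream(executable, fake_sys_path)
    return fake_sys_path[0]  # e.g. "/opt/graphite/lib"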
def main(_executable=sys.argv[0], _sys_path=sys.path):
"""The entry point of this module."""
_manipulate_paths_like_upstream(_executable, _sys_path)
from carbon import util as carbon_util
from carbon import exceptions as carbon_exceptions
# Importing the plugin registers it.
from biggraphite.plugins import carbon as unused_carbon # noqa
try:
# The carbon code tries to guess GRAPHITE_ROOT from the filename
# given to run_twistd_plugin() to set GRAPHITE_ROOT. This is then
# used to setup default paths. Try to make it somewhat compatible
# when carbon is installed in its default directory.
if os.path.dirname(carbon_util.__file__) == "/opt/graphite/lib/carbon":
if "GRAPHITE_ROOT" not in os.environ:
os.environ["GRAPHITE_ROOT"] = "/opt/graphite"
carbon_util.run_twistd_plugin("carbon-cache")
except carbon_exceptions.CarbonConfigException as exc:
# This is what carbon cache does, we preserve that behaviour.
raise SystemExit(str(exc))
if __name__ == "__main__":
main()
|
{
"content_hash": "e113b70dcf3098f3b8042f3f9f6d34d6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 37.89795918367347,
"alnum_prop": 0.691437802907916,
"repo_name": "natbraun/biggraphite",
"id": "bab009053164f4e5db5f24a1f308cf1591492609",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biggraphite/cli/bg_carbon_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "245802"
}
],
"symlink_target": ""
}
|
"""Support for wired binary sensors attached to a Konnected device."""
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_NAME,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN as KONNECTED_DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up binary sensors attached to a Konnected device from a config entry."""
data = hass.data[KONNECTED_DOMAIN]
device_id = config_entry.data["id"]
sensors = [
KonnectedBinarySensor(device_id, pin_num, pin_data)
for pin_num, pin_data in data[CONF_DEVICES][device_id][
CONF_BINARY_SENSORS
].items()
]
async_add_entities(sensors)
class KonnectedBinarySensor(BinarySensorEntity):
"""Representation of a Konnected binary sensor."""
def __init__(self, device_id, zone_num, data):
"""Initialize the Konnected binary sensor."""
self._data = data
self._device_id = device_id
self._zone_num = zone_num
self._state = self._data.get(ATTR_STATE)
self._device_class = self._data.get(CONF_TYPE)
self._unique_id = f"{device_id}-{zone_num}"
self._name = self._data.get(CONF_NAME)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={(KONNECTED_DOMAIN, self._device_id)},
)
async def async_added_to_hass(self):
"""Store entity_id and register state change callback."""
self._data[ATTR_ENTITY_ID] = self.entity_id
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"konnected.{self.entity_id}.update", self.async_set_state
)
)
@callback
def async_set_state(self, state):
"""Update the sensor's state."""
self._state = state
self.async_write_ha_state()
|
{
"content_hash": "0611dbcc110f777173339372e920a34f",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 85,
"avg_line_length": 30.325842696629213,
"alnum_prop": 0.62726935902186,
"repo_name": "home-assistant/home-assistant",
"id": "2647d43a44eaa6b00335797a365271ad3c4fe3c5",
"size": "2699",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/konnected/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
import contextlib
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from numpy.testing import assert_array_equal
from modin.utils import get_current_execution, to_pandas
from modin.test.test_utils import warns_that_defaulting_to_pandas
from pandas.testing import assert_frame_equal
from .utils import (
create_test_dfs,
test_data_values,
test_data_keys,
df_equals,
sort_index_for_equal_values,
eval_general,
)
@contextlib.contextmanager
def _nullcontext():
"""Replacement for contextlib.nullcontext missing in older Python."""
yield
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
with warns_that_defaulting_to_pandas() if how == "outer" else _nullcontext():
modin_result = pd.merge(modin_df, modin_df2, how=how)
pandas_result = pandas.merge(pandas_df, pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
with warns_that_defaulting_to_pandas():
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
with warns_that_defaulting_to_pandas():
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
if how == "outer":
warning_catcher = warns_that_defaulting_to_pandas()
else:
warning_catcher = _nullcontext()
with warning_catcher:
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
if how == "outer":
warning_catcher = warns_that_defaulting_to_pandas()
else:
warning_catcher = _nullcontext()
with warning_catcher:
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
s = pd.Series(frame_data.get("col1"))
with pytest.raises(ValueError):
pd.merge(s, modin_df2)
with pytest.raises(TypeError):
pd.merge("Non-valid type", modin_df2)
def test_merge_ordered():
data_a = {
"key": list("aceace"),
"lvalue": [1, 2, 3, 1, 2, 3],
"group": list("aaabbb"),
}
data_b = {"key": list("bcd"), "rvalue": [1, 2, 3]}
modin_df_a = pd.DataFrame(data_a)
modin_df_b = pd.DataFrame(data_b)
with warns_that_defaulting_to_pandas():
df = pd.merge_ordered(
modin_df_a, modin_df_b, fill_method="ffill", left_by="group"
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.merge_ordered(data_a, data_b, fill_method="ffill", left_by="group")
@pytest.mark.parametrize("right_index", [None, [0] * 5], ids=["default", "non_unique"])
def test_merge_asof(right_index):
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}, index=right_index
)
with warns_that_defaulting_to_pandas():
df = pd.merge_asof(left, right, on="a")
assert isinstance(df, pd.DataFrame)
with warns_that_defaulting_to_pandas():
df = pd.merge_asof(left, right, on="a", allow_exact_matches=False)
assert isinstance(df, pd.DataFrame)
with warns_that_defaulting_to_pandas():
df = pd.merge_asof(left, right, on="a", direction="forward")
assert isinstance(df, pd.DataFrame)
with warns_that_defaulting_to_pandas():
df = pd.merge_asof(left, right, on="a", direction="nearest")
assert isinstance(df, pd.DataFrame)
left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
with warns_that_defaulting_to_pandas():
df = pd.merge_asof(left, right, left_index=True, right_index=True)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.merge_asof(
{"left_val": ["a", "b", "c"]},
{"right_val": [1, 2, 3, 6, 7]},
left_index=True,
right_index=True,
)
def test_merge_asof_on_variations():
"""on=,left_on=,right_on=,right_index=,left_index= options match Pandas."""
left = {"a": [1, 5, 10], "left_val": ["a", "b", "c"]}
left_index = [6, 8, 12]
right = {"a": [1, 2, 3, 6, 7], "right_val": ["d", "e", "f", "g", "h"]}
right_index = [6, 7, 8, 9, 15]
pandas_left, pandas_right = (
pandas.DataFrame(left, index=left_index),
pandas.DataFrame(right, index=right_index),
)
modin_left, modin_right = (
pd.DataFrame(left, index=left_index),
pd.DataFrame(right, index=right_index),
)
for on_arguments in [
{"on": "a"},
{"left_on": "a", "right_on": "a"},
{"left_on": "a", "right_index": True},
{"left_index": True, "right_on": "a"},
{"left_index": True, "right_index": True},
]:
pandas_merged = pandas.merge_asof(pandas_left, pandas_right, **on_arguments)
with warns_that_defaulting_to_pandas():
modin_merged = pd.merge_asof(modin_left, modin_right, **on_arguments)
df_equals(pandas_merged, modin_merged)
def test_merge_asof_suffixes():
"""Suffix variations are handled the same as Pandas."""
left = {"a": [1, 5, 10]}
right = {"a": [2, 3, 6]}
pandas_left, pandas_right = (pandas.DataFrame(left), pandas.DataFrame(right))
modin_left, modin_right = pd.DataFrame(left), pd.DataFrame(right)
for suffixes in [("a", "b"), (False, "c"), ("d", False)]:
pandas_merged = pandas.merge_asof(
pandas_left,
pandas_right,
left_index=True,
right_index=True,
suffixes=suffixes,
)
with warns_that_defaulting_to_pandas():
modin_merged = pd.merge_asof(
modin_left,
modin_right,
left_index=True,
right_index=True,
suffixes=suffixes,
)
df_equals(pandas_merged, modin_merged)
with pytest.raises(ValueError):
pandas.merge_asof(
pandas_left,
pandas_right,
left_index=True,
right_index=True,
suffixes=(False, False),
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
modin_merged = pd.merge_asof(
modin_left,
modin_right,
left_index=True,
right_index=True,
suffixes=(False, False),
)
def test_merge_asof_bad_arguments():
left = {"a": [1, 5, 10], "b": [5, 7, 9]}
right = {"a": [2, 3, 6], "b": [6, 5, 20]}
pandas_left, pandas_right = (pandas.DataFrame(left), pandas.DataFrame(right))
modin_left, modin_right = pd.DataFrame(left), pd.DataFrame(right)
# Can't mix by with left_by/right_by
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pandas.merge_asof(
pandas_left, pandas_right, on="a", by="b", left_by="can't do with by"
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(
modin_left, modin_right, on="a", by="b", left_by="can't do with by"
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pandas.merge_asof(
pandas_left, pandas_right, by="b", on="a", right_by="can't do with by"
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(
modin_left, modin_right, by="b", on="a", right_by="can't do with by"
)
# Can't mix on with left_on/right_on
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pandas.merge_asof(pandas_left, pandas_right, on="a", left_on="can't do with by")
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, on="a", left_on="can't do with by")
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pandas.merge_asof(
pandas_left, pandas_right, on="a", right_on="can't do with by"
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, on="a", right_on="can't do with by")
# Can't mix left_index with left_on or on, similarly for right.
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, on="a", right_index=True)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(
modin_left, modin_right, left_on="a", right_on="a", right_index=True
)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, on="a", left_index=True)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(
modin_left, modin_right, left_on="a", right_on="a", left_index=True
)
# Need both left and right
with pytest.raises(Exception): # Pandas bug, didn't validate inputs sufficiently
pandas.merge_asof(pandas_left, pandas_right, left_on="a")
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, left_on="a")
with pytest.raises(Exception): # Pandas bug, didn't validate inputs sufficiently
pandas.merge_asof(pandas_left, pandas_right, right_on="a")
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right, right_on="a")
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pandas.merge_asof(pandas_left, pandas_right)
with pytest.raises(ValueError), warns_that_defaulting_to_pandas():
pd.merge_asof(modin_left, modin_right)
def test_merge_asof_merge_options():
modin_quotes = pd.DataFrame(
{
"time": [
pd.Timestamp("2016-05-25 13:30:00.023"),
pd.Timestamp("2016-05-25 13:30:00.023"),
pd.Timestamp("2016-05-25 13:30:00.030"),
pd.Timestamp("2016-05-25 13:30:00.041"),
pd.Timestamp("2016-05-25 13:30:00.048"),
pd.Timestamp("2016-05-25 13:30:00.049"),
pd.Timestamp("2016-05-25 13:30:00.072"),
pd.Timestamp("2016-05-25 13:30:00.075"),
],
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL", "GOOG", "MSFT"],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
}
)
modin_trades = pd.DataFrame(
{
"time": [
pd.Timestamp("2016-05-25 13:30:00.023"),
pd.Timestamp("2016-05-25 13:30:00.038"),
pd.Timestamp("2016-05-25 13:30:00.048"),
pd.Timestamp("2016-05-25 13:30:00.048"),
pd.Timestamp("2016-05-25 13:30:00.048"),
],
"ticker2": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.0],
"quantity": [75, 155, 100, 100, 100],
}
)
pandas_quotes, pandas_trades = to_pandas(modin_quotes), to_pandas(modin_trades)
# left_by + right_by
with warns_that_defaulting_to_pandas():
modin_result = pd.merge_asof(
modin_quotes,
modin_trades,
on="time",
left_by="ticker",
right_by="ticker2",
)
df_equals(
pandas.merge_asof(
pandas_quotes,
pandas_trades,
on="time",
left_by="ticker",
right_by="ticker2",
),
modin_result,
)
# Just by:
pandas_trades["ticker"] = pandas_trades["ticker2"]
modin_trades["ticker"] = modin_trades["ticker2"]
with warns_that_defaulting_to_pandas():
modin_result = pd.merge_asof(
modin_quotes,
modin_trades,
on="time",
by="ticker",
)
df_equals(
pandas.merge_asof(
pandas_quotes,
pandas_trades,
on="time",
by="ticker",
),
modin_result,
)
# Tolerance
with warns_that_defaulting_to_pandas():
modin_result = pd.merge_asof(
modin_quotes,
modin_trades,
on="time",
by="ticker",
tolerance=pd.Timedelta("2ms"),
)
df_equals(
pandas.merge_asof(
pandas_quotes,
pandas_trades,
on="time",
by="ticker",
tolerance=pd.Timedelta("2ms"),
),
modin_result,
)
# Direction
with warns_that_defaulting_to_pandas():
modin_result = pd.merge_asof(
modin_quotes,
modin_trades,
on="time",
by="ticker",
direction="forward",
)
df_equals(
pandas.merge_asof(
pandas_quotes,
pandas_trades,
on="time",
by="ticker",
direction="forward",
),
modin_result,
)
# Allow exact matches
with warns_that_defaulting_to_pandas():
modin_result = pd.merge_asof(
modin_quotes,
modin_trades,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
df_equals(
pandas.merge_asof(
pandas_quotes,
pandas_trades,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
),
modin_result,
)
def test_pivot():
test_df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
df = pd.pivot(test_df, index="foo", columns="bar", values="baz")
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot(test_df["bar"], index="foo", columns="bar", values="baz")
def test_pivot_table():
test_df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
df = pd.pivot_table(
test_df, values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot_table(
test_df["C"], values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
def test_unique():
modin_result = pd.unique([2, 1, 3, 3])
pandas_result = pandas.unique([2, 1, 3, 3])
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series([2] + [1] * 5))
pandas_result = pandas.unique(pandas.Series([2] + [1] * 5))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])
)
pandas_result = pandas.unique(
pandas.Series([pandas.Timestamp("20160101"), pandas.Timestamp("20160101")])
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Series(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Index(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Index(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series(pd.Categorical(list("baabc"))))
pandas_result = pandas.unique(pandas.Series(pandas.Categorical(list("baabc"))))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
@pytest.mark.parametrize("normalize, bins, dropna", [(True, 3, False)])
def test_value_counts(normalize, bins, dropna):
# We sort indices for Modin and pandas result because of issue #1650
values = np.array([3, 1, 2, 3, 4, np.nan])
with warns_that_defaulting_to_pandas():
modin_result = sort_index_for_equal_values(
pd.value_counts(values, normalize=normalize, ascending=False), False
)
pandas_result = sort_index_for_equal_values(
pandas.value_counts(values, normalize=normalize, ascending=False), False
)
df_equals(modin_result, pandas_result)
with warns_that_defaulting_to_pandas():
modin_result = sort_index_for_equal_values(
pd.value_counts(values, bins=bins, ascending=False), False
)
pandas_result = sort_index_for_equal_values(
pandas.value_counts(values, bins=bins, ascending=False), False
)
df_equals(modin_result, pandas_result)
with warns_that_defaulting_to_pandas():
modin_result = sort_index_for_equal_values(
pd.value_counts(values, dropna=dropna, ascending=True), True
)
pandas_result = sort_index_for_equal_values(
pandas.value_counts(values, dropna=dropna, ascending=True), True
)
df_equals(modin_result, pandas_result)
def test_to_datetime():
# DataFrame input for to_datetime
modin_df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
pandas_df = pandas.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df_equals(pd.to_datetime(modin_df), pandas.to_datetime(pandas_df))
# Series input for to_datetime
modin_s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
pandas_s = pandas.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
df_equals(pd.to_datetime(modin_s), pandas.to_datetime(pandas_s))
# Other inputs for to_datetime
value = 1490195805
assert pd.to_datetime(value, unit="s") == pandas.to_datetime(value, unit="s")
value = 1490195805433502912
assert pd.to_datetime(value, unit="ns") == pandas.to_datetime(value, unit="ns")
value = [1, 2, 3]
assert pd.to_datetime(value, unit="D", origin=pd.Timestamp("2000-01-01")).equals(
pandas.to_datetime(value, unit="D", origin=pandas.Timestamp("2000-01-01"))
)
def test_to_datetime_inplace_side_effect():
# See GH#3063
times = list(range(1617993360, 1618193360))
values = list(range(215441, 415441))
modin_df = pd.DataFrame({"time": times, "value": values})
pandas_df = pandas.DataFrame({"time": times, "value": values})
df_equals(
pd.to_datetime(modin_df["time"], unit="s"),
pandas.to_datetime(pandas_df["time"], unit="s"),
)
@pytest.mark.parametrize(
"data, errors, downcast",
[
(["1.0", "2", -3], "raise", None),
(["1.0", "2", -3], "raise", "float"),
(["1.0", "2", -3], "raise", "signed"),
(["apple", "1.0", "2", -3], "ignore", None),
(["apple", "1.0", "2", -3], "coerce", None),
],
)
def test_to_numeric(data, errors, downcast):
modin_series = pd.Series(data)
pandas_series = pandas.Series(data)
modin_result = pd.to_numeric(modin_series, errors=errors, downcast=downcast)
pandas_result = pandas.to_numeric(pandas_series, errors=errors, downcast=downcast)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize(
"data", [test_data_values[0], []], ids=["test_data_values[0]", "[]"]
)
def test_to_pandas_indices(data):
md_df = pd.DataFrame(data)
index = pandas.MultiIndex.from_tuples(
[(i, i * 2) for i in np.arange(len(md_df) + 1)], names=["A", "B"]
).drop(0)
columns = pandas.MultiIndex.from_tuples(
[(i, i * 2) for i in np.arange(len(md_df.columns) + 1)], names=["A", "B"]
).drop(0)
md_df.index = index
md_df.columns = columns
pd_df = md_df._to_pandas()
for axis in [0, 1]:
assert md_df.axes[axis].equals(
pd_df.axes[axis]
), f"Indices at axis {axis} are different!"
assert md_df.axes[axis].equal_levels(
pd_df.axes[axis]
), f"Levels of indices at axis {axis} are different!"
def test_create_categorical_dataframe_with_duplicate_column_name():
# This tests for https://github.com/modin-project/modin/issues/4312
pd_df = pandas.DataFrame(
{
"a": pandas.Categorical([1, 2]),
"b": [4, 5],
"c": pandas.Categorical([7, 8]),
}
)
pd_df.columns = ["a", "b", "a"]
md_df = pd.DataFrame(pd_df)
# Use assert_frame_equal instead of the common modin util df_equals because
# we should check dtypes of the new categorical with check_dtype=True.
# TODO(https://github.com/modin-project/modin/issues/3804): Make
# df_equals set check_dtype=True and use df_equals instead.
assert_frame_equal(
md_df._to_pandas(),
pd_df,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True,
check_categorical=True,
)
@pytest.mark.skipif(
get_current_execution() != "BaseOnPython",
reason="This test make sense only on BaseOnPython execution.",
)
@pytest.mark.parametrize(
"func, regex",
[
(lambda df: df.mean(level=0), r"DataFrame\.mean"),
(lambda df: df + df, r"DataFrame\.add"),
(lambda df: df.index, r"DataFrame\.get_axis\(0\)"),
(
lambda df: df.drop(columns="col1").squeeze().repeat(2),
r"Series\.repeat",
),
(lambda df: df.groupby("col1").prod(), r"GroupBy\.prod"),
(lambda df: df.rolling(1).count(), r"Rolling\.count"),
],
)
def test_default_to_pandas_warning_message(func, regex):
data = {"col1": [1, 2, 3], "col2": [4, 5, 6]}
df = pd.DataFrame(data)
with pytest.warns(UserWarning, match=regex):
func(df)
def test_empty_dataframe():
df = pd.DataFrame(columns=["a", "b"])
with warns_that_defaulting_to_pandas():
df[(df.a == 1) & (df.b == 2)]
def test_empty_series():
s = pd.Series([])
pd.to_numeric(s)
@pytest.mark.parametrize(
"arg",
[[1, 2], ["a"], 1, "a"],
ids=["list_of_ints", "list_of_invalid_strings", "scalar", "invalid_scalar"],
)
def test_to_timedelta(arg):
# This test case comes from
# https://github.com/modin-project/modin/issues/4966
eval_general(pd, pandas, lambda lib: lib.to_timedelta(arg))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_series_to_timedelta(data):
def make_frame(lib):
series = lib.Series(
next(iter(data.values())) if isinstance(data, dict) else data
)
return lib.to_timedelta(series).to_frame(name="timedelta")
eval_general(pd, pandas, make_frame)
@pytest.mark.parametrize(
"key",
[["col0"], "col0", "col1"],
ids=["valid_list_of_string", "valid_string", "invalid_string"],
)
def test_get(key):
modin_df, pandas_df = create_test_dfs({"col0": [0, 1]})
eval_general(modin_df, pandas_df, lambda df: df.get(key))
|
{
"content_hash": "3927534f74382cfc67f2377878a7d632",
"timestamp": "",
"source": "github",
"line_count": 824,
"max_line_length": 88,
"avg_line_length": 34.112864077669904,
"alnum_prop": 0.5708491942082607,
"repo_name": "modin-project/modin",
"id": "4b6f3a4acdf2ae6d12a3035d6b2d3f86cb880a68",
"size": "28892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modin/pandas/test/test_general.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2330"
},
{
"name": "Python",
"bytes": "3914783"
},
{
"name": "Shell",
"bytes": "2377"
}
],
"symlink_target": ""
}
|
"""
tests.test_component_group
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the group components.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
import logging
import homeassistant as ha
from homeassistant.const import STATE_ON, STATE_OFF, STATE_HOME, STATE_UNKNOWN
import homeassistant.components.group as group
def setUpModule(): # pylint: disable=invalid-name
""" Setup to ignore group errors. """
logging.disable(logging.CRITICAL)
class TestComponentsGroup(unittest.TestCase):
""" Tests homeassistant.components.group module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = ha.HomeAssistant()
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.group_entity_id = test_group.entity_id
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_group_with_mixed_groupable_states(self):
""" Try to setup a group with mixed groupable states """
self.hass.states.set('device_tracker.Paulus', STATE_HOME)
group.setup_group(
self.hass, 'person_and_light',
['light.Bowl', 'device_tracker.Paulus'])
self.assertEqual(
STATE_ON,
self.hass.states.get(
group.ENTITY_ID_FORMAT.format('person_and_light')).state)
def test_setup_group_with_a_non_existing_state(self):
""" Try to setup a group with a non existing state """
grp = group.setup_group(
self.hass, 'light_and_nothing',
['light.Bowl', 'non.existing'])
self.assertEqual(STATE_ON, grp.state.state)
def test_setup_group_with_non_groupable_states(self):
self.hass.states.set('cast.living_room', "Plex")
self.hass.states.set('cast.bedroom', "Netflix")
grp = group.setup_group(
self.hass, 'chromecasts',
['cast.living_room', 'cast.bedroom'])
self.assertEqual(STATE_UNKNOWN, grp.state.state)
def test_setup_empty_group(self):
""" Try to setup an empty group. """
grp = group.setup_group(self.hass, 'nothing', [])
self.assertEqual(STATE_UNKNOWN, grp.state.state)
def test_monitor_group(self):
""" Test if the group keeps track of states. """
# Test if group setup in our init mode is ok
self.assertIn(self.group_entity_id, self.hass.states.entity_ids())
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_ON, group_state.state)
self.assertTrue(group_state.attributes[group.ATTR_AUTO])
def test_group_turns_off_if_all_off(self):
"""
Test if the group turns off if the last device that was on turns off.
"""
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_group_turns_on_if_all_are_off_and_one_turns_on(self):
"""
Test if group turns on if all devices were turned off and one turns on.
"""
# Make sure all are off.
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
# Turn one on
self.hass.states.set('light.Ceiling', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_is_on(self):
""" Test is_on method. """
self.assertTrue(group.is_on(self.hass, self.group_entity_id))
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
self.assertFalse(group.is_on(self.hass, self.group_entity_id))
# Try on non existing state
self.assertFalse(group.is_on(self.hass, 'non.existing'))
def test_expand_entity_ids(self):
""" Test expand_entity_ids method. """
self.assertEqual(sorted(['light.ceiling', 'light.bowl']),
sorted(group.expand_entity_ids(
self.hass, [self.group_entity_id])))
def test_expand_entity_ids_does_not_return_duplicates(self):
""" Test that expand_entity_ids does not return duplicates. """
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, [self.group_entity_id, 'light.Ceiling'])))
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, ['light.bowl', self.group_entity_id])))
def test_expand_entity_ids_ignores_non_strings(self):
""" Test that non string elements in lists are ignored. """
self.assertEqual([], group.expand_entity_ids(self.hass, [5, True]))
def test_get_entity_ids(self):
""" Test get_entity_ids method. """
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.get_entity_ids(self.hass, self.group_entity_id)))
def test_get_entity_ids_with_domain_filter(self):
""" Test if get_entity_ids works with a domain_filter. """
self.hass.states.set('switch.AC', STATE_OFF)
mixed_group = group.Group(
self.hass, 'mixed_group', ['light.Bowl', 'switch.AC'], False)
self.assertEqual(
['switch.ac'],
group.get_entity_ids(
self.hass, mixed_group.entity_id, domain_filter="switch"))
def test_get_entity_ids_with_non_existing_group_name(self):
""" Tests get_entity_ids with a non existing group. """
self.assertEqual([], group.get_entity_ids(self.hass, 'non_existing'))
def test_get_entity_ids_with_non_group_state(self):
""" Tests get_entity_ids with a non group state. """
self.assertEqual([], group.get_entity_ids(self.hass, 'switch.AC'))
def test_group_being_init_before_first_tracked_state_is_set_to_on(self):
""" Test if the group turns on if no states existed and now a state it is
tracking is being added as ON. """
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_group_being_init_before_first_tracked_state_is_set_to_off(self):
""" Test if the group turns off if no states existed and now a state it is
tracking is being added as OFF. """
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_OFF)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_setup(self):
""" Test setup method. """
self.assertTrue(
group.setup(
self.hass,
{
group.DOMAIN: {
'second_group': ','.join((self.group_entity_id,
'light.Bowl'))
}
}))
group_state = self.hass.states.get(
group.ENTITY_ID_FORMAT.format('second_group'))
self.assertEqual(STATE_ON, group_state.state)
self.assertFalse(group_state.attributes[group.ATTR_AUTO])
def test_groups_get_unique_names(self):
""" Two groups with same name should both have a unique entity id. """
grp1 = group.Group(self.hass, 'Je suis Charlie')
grp2 = group.Group(self.hass, 'Je suis Charlie')
self.assertNotEqual(grp1.entity_id, grp2.entity_id)
|
{
"content_hash": "1f80110b88e19318a9de4ed2ef590f64",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 82,
"avg_line_length": 37.37155963302752,
"alnum_prop": 0.6047624892598502,
"repo_name": "Jaidan/jaidan-hab-home-assistant",
"id": "36ce2b80319b9a9a4e9b555e8b212ce6a152e49b",
"size": "8147",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_component_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "708583"
},
{
"name": "Python",
"bytes": "413732"
},
{
"name": "Shell",
"bytes": "3984"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Budget'
db.create_table(u'accounting_main_budget', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('status', self.gf('django.db.models.fields.CharField')(default='0_draft', max_length=255)),
('accounting_year', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting_core.AccountingYear'])),
('costcenter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting_core.CostCenter'])),
('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['units.Unit'])),
))
db.send_create_signal(u'accounting_main', ['Budget'])
# Adding model 'BudgetLine'
db.create_table(u'accounting_main_budgetline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('budget', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting_main.Budget'])),
('compte', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounting_core.Account'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=20, decimal_places=2)),
('description', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'accounting_main', ['BudgetLine'])
# Adding model 'BudgetLogging'
db.create_table(u'accounting_main_budgetlogging', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('when', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('extra_data', self.gf('django.db.models.fields.TextField')(blank=True)),
('who', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.TruffeUser'])),
('what', self.gf('django.db.models.fields.CharField')(max_length=64)),
('object', self.gf('django.db.models.fields.related.ForeignKey')(related_name='logs', to=orm['accounting_main.Budget'])),
))
db.send_create_signal(u'accounting_main', ['BudgetLogging'])
def backwards(self, orm):
# Deleting model 'Budget'
db.delete_table(u'accounting_main_budget')
# Deleting model 'BudgetLine'
db.delete_table(u'accounting_main_budgetline')
# Deleting model 'BudgetLogging'
db.delete_table(u'accounting_main_budgetlogging')
models = {
u'accounting_core.account': {
'Meta': {'unique_together': "(('name', 'accounting_year'), ('account_number', 'accounting_year'))", 'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountCategory']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visibility': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'accounting_core.accountcategory': {
'Meta': {'unique_together': "(('name', 'accounting_year'),)", 'object_name': 'AccountCategory'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountCategory']", 'null': 'True', 'blank': 'True'})
},
u'accounting_core.accountingyear': {
'Meta': {'object_name': 'AccountingYear'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_preparing'", 'max_length': '255'}),
'subvention_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting_core.costcenter': {
'Meta': {'unique_together': "(('name', 'accounting_year'), ('account_number', 'accounting_year'))", 'object_name': 'CostCenter'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_main.accountingerror': {
'Meta': {'object_name': 'AccountingError'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_remark': ('django.db.models.fields.TextField', [], {}),
'linked_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_main.AccountingLine']", 'null': 'True', 'blank': 'True'}),
'linked_line_cache': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_drafting'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_main.accountingerrorlogging': {
'Meta': {'object_name': 'AccountingErrorLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_main.AccountingError']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_main.accountingerrormessage': {
'Meta': {'object_name': 'AccountingErrorMessage'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"}),
'error': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_main.AccountingError']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'accounting_main.accountingline': {
'Meta': {'object_name': 'AccountingLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.Account']"}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'current_sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'document_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'output': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_imported'", 'max_length': '255'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'tva': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_main.accountinglinelogging': {
'Meta': {'object_name': 'AccountingLineLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_main.AccountingLine']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_main.budget': {
'Meta': {'object_name': 'Budget'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_main.budgetline': {
'Meta': {'object_name': 'BudgetLine'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'budget': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_main.Budget']"}),
'compte': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'accounting_main.budgetlogging': {
'Meta': {'object_name': 'BudgetLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_main.Budget']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['accounting_main']
|
{
"content_hash": "debf83bb671b660c4d404dc9e5d066b9",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 195,
"avg_line_length": 78.39574468085107,
"alnum_prop": 0.5699397492265104,
"repo_name": "ArcaniteSolutions/truffe2",
"id": "3fb99479e4d0002ac59f0acb0ea34ca3ccc6543a",
"size": "18447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "truffe2/accounting_main/migrations/0009_auto__add_budget__add_budgetline__add_budgetlogging.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "552855"
},
{
"name": "HTML",
"bytes": "742372"
},
{
"name": "JavaScript",
"bytes": "1859724"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3048852"
}
],
"symlink_target": ""
}
|
import os
import time
import threading
import logging
from cactus.utils.filesystem import fileList
from cactus.utils.network import retry
class PollingListener(object):
def __init__(self, path, f, delay = .5, ignore = None):
self.path = path
self.f = f
self.delay = delay
self.ignore = ignore
self._pause = False
self._checksums = {}
def checksums(self):
checksumMap = {}
for f in fileList(self.path):
if f.startswith('.'):
continue
if self.ignore and self.ignore(f):
continue
try:
checksumMap[f] = int(os.stat(f).st_mtime)
except OSError:
continue
return checksumMap
def run(self):
# self._loop()
t = threading.Thread(target=self._loop)
t.daemon = True
t.start()
def pause(self):
self._pause = True
def resume(self):
self._checksums = self.checksums()
self._pause = False
def _loop(self):
self._checksums = self.checksums()
while True:
self._run()
@retry((Exception,), tries = 5, delay = 0.5)
def _run(self):
if not self._pause:
oldChecksums = self._checksums
newChecksums = self.checksums()
result = {
'added': [],
'deleted': [],
'changed': [],
}
for k, v in oldChecksums.iteritems():
if k not in newChecksums:
result['deleted'].append(k)
elif v != newChecksums[k]:
result['changed'].append(k)
for k, v in newChecksums.iteritems():
if k not in oldChecksums:
result['added'].append(k)
result['any'] = result['added'] + result['deleted'] + result['changed']
if result['any']:
self._checksums = newChecksums
self.f(result)
time.sleep(self.delay)
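# Illustrative usage sketch, not part of the original module: it shows how the
# listener above could be driven from a small script. The callback name and the
# watched path are assumptions made only for this example.
if __name__ == '__main__':
    def _log_changes(result):
        # 'result' carries the 'added'/'deleted'/'changed'/'any' lists built in _run()
        logging.info('changed paths: %s', result['any'])
    listener = PollingListener('.', _log_changes, delay=1.0,
                               ignore=lambda p: p.endswith('.pyc'))
    listener.run()  # polling happens on a daemon thread; the caller keeps the process alive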
|
{
"content_hash": "516349e73e57837baf43bfb04c7502b5",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 83,
"avg_line_length": 24.879518072289155,
"alnum_prop": 0.49878934624697335,
"repo_name": "danielmorosan/Cactus",
"id": "ea67ab2a17dc38a16735345dd2a4727cc33b51a7",
"size": "2079",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "cactus/listener/polling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "HTML",
"bytes": "8133"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "448"
},
{
"name": "Python",
"bytes": "226039"
}
],
"symlink_target": ""
}
|
class BansheePackage (Package):
def __init__ (self):
Package.__init__ (self, 'banshee', 'master')
self.sources = [
'git://git.gnome.org/banshee'
]
self.git_branch = 'master'
self.configure = [ 'NOCONFIGURE=1 ./autogen.sh && ./profile-configure %{profile.name} --prefix=%{prefix}' ]
self.sources.extend([
])
def prep (self):
Package.prep (self)
#self.sh ('patch -p1 < %{sources[1]}')
BansheePackage ()
|
{
"content_hash": "a59f2a1c1020b44fea311433cee21bcb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 109,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.6238317757009346,
"repo_name": "bl8/bockbuild",
"id": "79f1f30c809fc1082e022e2f0044af11b16081b9",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/banshee-git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "35607"
},
{
"name": "Python",
"bytes": "64404"
},
{
"name": "Shell",
"bytes": "18662"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.conf import settings
from allauth.account.models import EmailAddress
def _path_to_avatar(instance, filename):
return '{user_id}/{dirname}/{filename}'.format(
user_id=instance.user.id,
dirname=settings.AVATAR_DIR_NAME,
filename=filename)
class UserProfile(models.Model):
"""
User profile that adds more information to the Django auth User.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True, related_name='profile')
#avatar = models.ImageField(upload_to=_path_to_avatar, blank=True, default='', max_length=257)
followers = models.ManyToManyField('self', through='FollowShip',
through_fields=('followee', 'follower'),
related_name='following', symmetrical=False)
# def delete(self, *args, **kwargs):
# self.user.delete()
# return super(self.__class__, self).delete(*args, **kwargs)
def account_verified(self):
verified = False
if self.user.is_authenticated:
verified = EmailAddress.objects.filter(
email=self.user.email).exists()
return verified
class FollowShip(models.Model):
follower = models.ForeignKey(UserProfile, related_name='follower')
followee = models.ForeignKey(UserProfile, related_name='followee')
time = models.DateTimeField(auto_now=True)
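# Illustrative sketch, not part of the original app: a minimal helper showing how
# a follow relationship could be recorded through the explicit FollowShip model
# above. The helper name is an assumption made only for this example.
def follow(follower_profile, followee_profile):
    """Record that follower_profile now follows followee_profile."""
    return FollowShip.objects.get_or_create(
        follower=follower_profile, followee=followee_profile)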
|
{
"content_hash": "8a5e25c694cfa88f558dd683137d000b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 99,
"avg_line_length": 39.39473684210526,
"alnum_prop": 0.6259185036740147,
"repo_name": "vuonghv/brs",
"id": "cb110924b0604685f889e834b65f2d85acef8ef0",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1062528"
},
{
"name": "HTML",
"bytes": "1877883"
},
{
"name": "JavaScript",
"bytes": "3157021"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "84367"
}
],
"symlink_target": ""
}
|
"""
open-dobot Accelerometers Calibration tool.
This tool continuously reports accelerometers and angles from those.
Use this tool to find offsets for your accelerometers.
Follow the procedure below to enable accelerometers reporting mode on FPGA.
No action required on RAMPS as GY-521 accelerometers can be read at any time there.
1. Turn off power on the arm and disconnect USB cable
2. Remove accelerometers from the arm and put them on a flat surface that has no inclination
3. Connect USB cable
4. Enable accelerometers reporting mode:
4.1. Press and hold the "Sensor Calibration" button on FPGA version or ground pin D23 on AUX-4 on RAMPS version
4.2. Press and release the "Reset" button
4.3. Start this tool (still holding "Sensor Calibration" button on FPGA version or keeping pin D23 grounded on RAMPS)
4.4. Wait for the accelerometer data to start flowing on your console/whatever_you_use_to_start_this_tool
4.5. Release the "Sensor Calibration" button
5. Gently push down the accelerometers so that they are evenly on the surface. Don't touch any contacts/leads.
You can push them one by one, not necessary to push both at the same time
6. Note the "Raw" data from accelerometers reported on the console. Those are your accelerometers' offsets
7. Turn off power on the arm, disconnect USB cable, mount accelerometers back onto the arm
Author: maxosprojects (March 18 2016)
Additional Authors: <put your name here>
Version: 1.2.2
License: MIT
"""
import math
from dobot import DobotDriver
from dobot import DobotKinematics
# driver = DobotDriver('COM4')
driver = DobotDriver('/dev/tty.usbmodem1421')
driver.Open()
# driver.Open(timeout=0.3)
kinematics = DobotKinematics()
# Offsets must be found using this tool for your Dobot once
# (rear arm, frontarm)
offsets = (1024, 1024)
def toEndEffectorHeight(rear, front):
ret = kinematics.coordinatesFromAngles(0, rear, front)
return ret[2]
while True:
ret = driver.GetAccelerometers()
if ret[0]:
if driver.isFpga():
print("Rear arm: {0:10f} | Front arm: {1:10f} | End effector height: {2:10f} | Raw rear arm: {3:4d} | Raw front arm: {4:4d}".format(\
driver.accelToAngle(ret[1], offsets[0]), driver.accelToAngle(ret[4], offsets[1]),\
toEndEffectorHeight(driver.accelToRadians(ret[1], offsets[0]), driver.accelToRadians(ret[4], offsets[1])),\
ret[1], ret[4]))
else:
print("Rear arm: {0:6.2f} | Front arm: {1:6.2f} | End effector height: {2:7.2f} | Raw rear arm: {3:6d} {4:6d} {5:6d} | Raw front arm: {6:6d} {7:6d} {8:6d}".format(\
driver.accel3DXToAngle(ret[1], ret[2], ret[3]), -driver.accel3DXToAngle(ret[4], ret[5], ret[6]),\
toEndEffectorHeight(driver.accel3DXToRadians(ret[1], ret[2], ret[3]), -driver.accel3DXToRadians(ret[4], ret[5], ret[6])),\
ret[1], ret[2], ret[3], ret[4], ret[5], ret[6]))
else:
print('Error occurred reading data')
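# Illustrative note, not part of the original tool: once the "Raw" values settle
# on a flat surface, copy them into the offsets tuple above. The numbers below
# are made up purely for the example.
#
# offsets = (1001, 1015)   # (raw rear arm, raw front arm)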
|
{
"content_hash": "87aa58dd2d8eb581f0d7b2f04831e234",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 167,
"avg_line_length": 44.12307692307692,
"alnum_prop": 0.7294281729428173,
"repo_name": "maxosprojects/open-dobot",
"id": "7f55458d3cbf42896acaf493192ce75d4f6eed3c",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/python/calibrate-accelerometers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "7275"
},
{
"name": "Batchfile",
"bytes": "2116"
},
{
"name": "C",
"bytes": "99852"
},
{
"name": "C++",
"bytes": "41168"
},
{
"name": "Processing",
"bytes": "31517"
},
{
"name": "Python",
"bytes": "72605"
},
{
"name": "Shell",
"bytes": "3033"
}
],
"symlink_target": ""
}
|
"""
The NumastatCollector collects numa statistics for all nodes.
#### Dependencies
* /sys/devices/system/node
#### Starting point
* https://github.com/disqus/diamond-collectors/blob/master/numastat/numastat.py
"""
import os
import diamond.collector
class NumastatCollector(diamond.collector.Collector):
NODE = '/sys/devices/system/node'
def get_default_config_help(self):
config_help = super(NumastatCollector,
self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NumastatCollector, self).get_default_config()
config.update({
'path': 'numastat'
})
return config
def get_data(self, path):
numastat = open(path, 'r')
data = dict([line.split() for line in numastat])
return data
def find_paths(self, path):
paths = []
for d in os.listdir(path):
if d.startswith('node'):
p = os.path.join(path, d, 'numastat')
if os.access(p, os.R_OK):
paths.append(p)
return paths
def collect(self):
if not os.access(self.NODE, os.R_OK):
self.log.error('Unable to read: ' + self.NODE)
return None
data = {}
for path in self.find_paths(self.NODE):
node = path.split(os.path.sep)[5]
for k, v in self.get_data(path).items():
self.dimensions = {
'node' : node,
}
self.publish_cumulative_counter('numastat' + '.' + k, long(v))
return True
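# Illustrative sketch, not part of the original collector: each
# /sys/devices/system/node/node<N>/numastat file is plain "<counter> <value>"
# text, so get_data() yields a dict of strings. The sample text below is made up
# for the example.
if __name__ == '__main__':
    sample = "numa_hit 127698427\nnuma_miss 0\nlocal_node 127633504\n"
    print(dict(line.split() for line in sample.splitlines()))
    # -> {'numa_hit': '127698427', 'numa_miss': '0', 'local_node': '127633504'}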
|
{
"content_hash": "8a5c0476216c937c0e29004bc5ba3e69",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 24.47222222222222,
"alnum_prop": 0.5465380249716232,
"repo_name": "venkey-ariv/fullerite",
"id": "2df1b20734da23d3701a8a92d906495dac3caacb",
"size": "1778",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/diamond/collectors/numastat/numastat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "359040"
},
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4853"
},
{
"name": "Protocol Buffer",
"bytes": "1180"
},
{
"name": "Python",
"bytes": "1232620"
},
{
"name": "Roff",
"bytes": "17806"
},
{
"name": "Shell",
"bytes": "11159"
}
],
"symlink_target": ""
}
|
from django import forms
from .validators import IBANValidator, BICValidator, IBAN_COUNTRY_CODE_LENGTH
DEFAULT_DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DEFAULT_DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
IBAN_MIN_LENGTH = min(IBAN_COUNTRY_CODE_LENGTH.values())
class DateField(forms.DateField):
"""
A date input field which uses non-US date input formats by default.
"""
def __init__(self, input_formats=None, *args, **kwargs):
input_formats = input_formats or DEFAULT_DATE_INPUT_FORMATS
super(DateField, self).__init__(input_formats=input_formats, *args, **kwargs)
class DateTimeField(forms.DateTimeField):
"""
A date and time input field which uses non-US date and time input formats
by default.
"""
def __init__(self, input_formats=None, *args, **kwargs):
input_formats = input_formats or DEFAULT_DATETIME_INPUT_FORMATS
super(DateTimeField, self).__init__(input_formats=input_formats, *args, **kwargs)
class SplitDateTimeField(forms.SplitDateTimeField):
"""
Split date and time input fields which use non-US date and time input
formats by default.
"""
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
input_date_formats = input_date_formats or DEFAULT_DATE_INPUT_FORMATS
super(SplitDateTimeField, self).__init__(input_date_formats=input_date_formats,
input_time_formats=input_time_formats, *args, **kwargs)
class IBANFormField(forms.CharField):
"""
An IBAN consists of up to 34 alphanumeric characters.
To limit validation to specific countries, set the 'include_countries' argument with a tuple or list of ISO 3166-1
alpha-2 codes. For example, `include_countries=('NL', 'BE', 'LU')`.
A list of countries that use IBANs as part of SEPA is included for convenience. To use this feature, set
`include_countries=IBAN_SEPA_COUNTRIES` as an argument to the field.
Example:
.. code-block:: python
from django import forms
from localflavor.generic.forms import IBANFormField
from localflavor.generic.countries.sepa import IBAN_SEPA_COUNTRIES
class MyForm(forms.Form):
iban = IBANFormField(include_countries=IBAN_SEPA_COUNTRIES)
In addition to validating official IBANs, this field can optionally validate unofficial IBANs that have been
catalogued by Nordea by setting the `use_nordea_extensions` argument to True.
https://en.wikipedia.org/wiki/International_Bank_Account_Number
.. versionadded:: 1.1
"""
def __init__(self, use_nordea_extensions=False, include_countries=None, *args, **kwargs):
kwargs.setdefault('min_length', IBAN_MIN_LENGTH)
kwargs.setdefault('max_length', 34)
self.default_validators = [IBANValidator(use_nordea_extensions, include_countries)]
super(IBANFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
value = super(IBANFormField, self).to_python(value)
if value is not None:
return value.upper().replace(' ', '').replace('-', '')
return value
def prepare_value(self, value):
""" The display format for IBAN has a space every 4 characters. """
if value is None:
return value
grouping = 4
value = value.upper().replace(' ', '').replace('-', '')
return ' '.join(value[i:i + grouping] for i in range(0, len(value), grouping))
class BICFormField(forms.CharField):
"""
A BIC consists of 8 (BIC8) or 11 (BIC11) alphanumeric characters.
BICs are also known as SWIFT-BIC, BIC code, SWIFT ID, SWIFT code or ISO 9362.
https://en.wikipedia.org/wiki/ISO_9362
.. versionadded:: 1.1
"""
default_validators = [BICValidator()]
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 11)
super(BICFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
# BIC is always written in upper case.
# https://www2.swift.com/uhbonline/books/public/en_uk/bic_policy/bic_policy.pdf
value = super(BICFormField, self).to_python(value)
return value.upper()
def prepare_value(self, value):
# BIC is always written in upper case.
value = super(BICFormField, self).prepare_value(value)
if value is not None:
return value.upper()
return value
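# Illustrative usage sketch, not part of the original module: a minimal form that
# wires the fields above together. The form and field names are assumptions made
# only for this example.
class ExamplePaymentForm(forms.Form):
    execution_date = DateField()        # accepts '25/10/2006' as well as '2006-10-25'
    iban = IBANFormField()              # normalised to upper case, spaces and dashes removed
    bic = BICFormField(required=False)  # 8 or 11 character ISO 9362 code, upper-cased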
|
{
"content_hash": "9588a5e3ac06fd3b8e7c3e5e04f684c7",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 118,
"avg_line_length": 38.91111111111111,
"alnum_prop": 0.6105082809822958,
"repo_name": "patrickcurl/ztruck",
"id": "0051a2dc3dbe39e7d48d364e34065682763e00b8",
"size": "5253",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dj/lib/python2.7/site-packages/localflavor/generic/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "70584"
},
{
"name": "JavaScript",
"bytes": "104857"
},
{
"name": "Python",
"bytes": "5847138"
},
{
"name": "Shell",
"bytes": "6197"
}
],
"symlink_target": ""
}
|
import os
from wic import msger
from wic.utils import runner
from wic.utils.errors import ImageError
from wic.utils.fs_related import *
from wic.utils.oe.misc import *
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
# Size of a sector in bytes
SECTOR_SIZE = 512
class Image:
"""
Generic base object for an image.
An Image is a container for a set of DiskImages and associated
partitions.
"""
def __init__(self):
self.disks = {}
self.partitions = []
self.parted = find_binary_path("parted")
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self._partitions_layed_out = False
def __add_disk(self, disk_name):
""" Add a disk 'disk_name' to the internal list of disks. Note,
'disk_name' is the name of the disk in the target system
(e.g., sdb). """
if disk_name in self.disks:
# We already have this disk
return
assert not self._partitions_layed_out
self.disks[disk_name] = \
{ 'disk': None, # Disk object
'numpart': 0, # Number of allocate partitions
'partitions': [], # Indexes to self.partitions
'offset': 0, # Offset of next partition (in sectors)
# Minimum required disk size to fit all partitions (in bytes)
'min_size': 0,
'ptable_format': "msdos" } # Partition table format
def add_disk(self, disk_name, disk_obj):
""" Add a disk object which have to be partitioned. More than one disk
can be added. In case of multiple disks, disk partitions have to be
added for each disk separately with 'add_partition()". """
self.__add_disk(disk_name)
self.disks[disk_name]['disk'] = disk_obj
def __add_partition(self, part):
""" This is a helper function for 'add_partition()' which adds a
partition to the internal list of partitions. """
assert not self._partitions_layed_out
self.partitions.append(part)
self.__add_disk(part['disk_name'])
def add_partition(self, size, disk_name, mountpoint, source_file = None, fstype = None,
label=None, fsopts = None, boot = False, align = None,
part_type = None):
""" Add the next partition. Prtitions have to be added in the
first-to-last order. """
ks_pnum = len(self.partitions)
# Converting MB to sectors for parted
size = size * 1024 * 1024 / self.sector_size
# We still need partition for "/" or non-subvolume
if mountpoint == "/" or not fsopts:
part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file
'size': size, # In sectors
'mountpoint': mountpoint, # Mount relative to chroot
'source_file': source_file, # partition contents
'fstype': fstype, # Filesystem type
'fsopts': fsopts, # Filesystem mount options
'label': label, # Partition label
'disk_name': disk_name, # physical disk name holding partition
'device': None, # kpartx device node for partition
'num': None, # Partition number
'boot': boot, # Bootable flag
'align': align, # Partition alignment
'part_type' : part_type } # Partition type
self.__add_partition(part)
def layout_partitions(self, ptable_format = "msdos"):
""" Layout the partitions, meaning calculate the position of every
partition on the disk. The 'ptable_format' parameter defines the
partition table format and may be "msdos". """
msger.debug("Assigning %s partitions to disks" % ptable_format)
if ptable_format not in ('msdos',):
raise ImageError("Unknown partition table format '%s', supported " \
"formats are: 'msdos'" % ptable_format)
if self._partitions_layed_out:
return
self._partitions_layed_out = True
# Go through partitions in the order they are added in .ks file
for n in range(len(self.partitions)):
p = self.partitions[n]
if not self.disks.has_key(p['disk_name']):
raise ImageError("No disk %s for partition %s" \
% (p['disk_name'], p['mountpoint']))
if p['part_type']:
# The --part-type can also be implemented for MBR partitions,
# in which case it would map to the 1-byte "partition type"
# field at offset 3 of the partition entry.
raise ImageError("setting custom partition type is not " \
"implemented for msdos partitions")
# Get the disk where the partition is located
d = self.disks[p['disk_name']]
d['numpart'] += 1
d['ptable_format'] = ptable_format
if d['numpart'] == 1:
if ptable_format == "msdos":
overhead = MBR_OVERHEAD
# Skip one sector required for the partitioning scheme overhead
d['offset'] += overhead
if p['align']:
# If not first partition and we do have alignment set we need
# to align the partition.
# FIXME: This leaves empty space on the disk. To fill the
# gaps we could enlarge the previous partition?
# Calc how much the alignment is off.
align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size)
if align_sectors:
# If partition is not aligned as required, we need
# to move forward to the next alignment point
align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors
msger.debug("Realignment for %s%s with %s sectors, original"
" offset %s, target alignment is %sK." %
(p['disk_name'], d['numpart'], align_sectors,
d['offset'], p['align']))
# Increase the offset so we actually start the partition at the right alignment
d['offset'] += align_sectors
p['start'] = d['offset']
d['offset'] += p['size']
p['type'] = 'primary'
p['num'] = d['numpart']
if d['ptable_format'] == "msdos":
if d['numpart'] > 2:
# Every logical partition requires an additional sector for
# the EBR, so steal the last sector from the end of each
# partition starting from the 3rd one for the EBR. This
# will make sure the logical partitions are aligned
# correctly.
p['size'] -= 1
if d['numpart'] > 3:
p['type'] = 'logical'
p['num'] = d['numpart'] + 1
d['partitions'].append(n)
msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
"sectors (%d bytes)." \
% (p['mountpoint'], p['disk_name'], p['num'],
p['start'], p['start'] + p['size'] - 1,
p['size'], p['size'] * self.sector_size))
# Once all the partitions have been laid out, we can calculate the
# minimum disk sizes.
for disk_name, d in self.disks.items():
d['min_size'] = d['offset']
d['min_size'] *= self.sector_size
def __run_parted(self, args):
""" Run parted with arguments specified in the 'args' list. """
args.insert(0, self.parted)
msger.debug(args)
rc, out = runner.runtool(args, catch = 3)
out = out.strip()
if out:
msger.debug('"parted" output: %s' % out)
if rc != 0:
# We don't throw exception when return code is not 0, because
# parted always fails to reload part table with loop devices. This
# prevents us from distinguishing real errors based on return
# code.
msger.error("WARNING: parted returned '%s' instead of 0 (use --debug for details)" % rc)
def __create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
# Start is included in the size so we need to subtract one from the end.
end = start + size - 1
msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
(parttype, start, end, size))
args = ["-s", device, "unit", "s", "mkpart", parttype]
if fstype:
args.extend([fstype])
args.extend(["%d" % start, "%d" % end])
return self.__run_parted(args)
def __format_disks(self):
self.layout_partitions()
for dev in self.disks.keys():
d = self.disks[dev]
msger.debug("Initializing partition table for %s" % \
(d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "mklabel",
d['ptable_format']])
msger.debug("Creating partitions")
for p in self.partitions:
d = self.disks[p['disk_name']]
if d['ptable_format'] == "msdos" and p['num'] == 5:
# The last sector of the 3rd partition was reserved for the EBR
# of the first _logical_ partition. This is why the extended
# partition should start one sector before the first logical
# partition.
self.__create_partition(d['disk'].device, "extended",
None, p['start'] - 1,
d['offset'] - p['start'])
if p['fstype'] == "swap":
parted_fs_type = "linux-swap"
elif p['fstype'] == "vfat":
parted_fs_type = "fat32"
elif p['fstype'] == "msdos":
parted_fs_type = "fat16"
else:
# Type for ext2/ext3/ext4/btrfs
parted_fs_type = "ext2"
# The boot ROM of OMAP boards requires the vfat boot partition to have an
# even number of sectors.
if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \
and p['size'] % 2:
msger.debug("Substracting one sector from '%s' partition to " \
"get even number of sectors for the partition" % \
p['mountpoint'])
p['size'] -= 1
self.__create_partition(d['disk'].device, p['type'],
parted_fs_type, p['start'], p['size'])
if p['boot']:
flag_name = "boot"
msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
(flag_name, p['num'], d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "set",
"%d" % p['num'], flag_name, "on"])
# Parted defaults to enabling the lba flag for fat16 partitions,
# which causes compatibility issues with some firmware (and really
# isn't necessary).
if parted_fs_type == "fat16":
if d['ptable_format'] == 'msdos':
msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
(p['num'], d['disk'].device))
self.__run_parted(["-s", d['disk'].device, "set",
"%d" % p['num'], "lba", "off"])
def cleanup(self):
if self.disks:
for dev in self.disks.keys():
d = self.disks[dev]
try:
d['disk'].cleanup()
except:
pass
def __write_partition(self, num, source_file, start, size):
"""
Install source_file contents into a partition.
"""
if not source_file: # nothing to write
return
# Start is included in the size so we need to subtract one from the end.
end = start + size - 1
msger.debug("Installed %s in partition %d, sectors %d-%d, size %d sectors" % (source_file, num, start, end, size))
dd_cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
(source_file, self.image_file, self.sector_size, start, size)
exec_cmd(dd_cmd)
def assemble(self, image_file):
msger.debug("Installing partitions")
self.image_file = image_file
for p in self.partitions:
d = self.disks[p['disk_name']]
if d['ptable_format'] == "msdos" and p['num'] == 5:
# The last sector of the 3rd partition was reserved for the EBR
# of the first _logical_ partition. This is why the extended
# partition should start one sector before the first logical
# partition.
self.__write_partition(p['num'], p['source_file'],
p['start'] - 1,
d['offset'] - p['start'])
self.__write_partition(p['num'], p['source_file'],
p['start'], p['size'])
def create(self):
for dev in self.disks.keys():
d = self.disks[dev]
d['disk'].create()
self.__format_disks()
return
|
{
"content_hash": "c09e69b0e936aa98bc03fbc19038ec3b",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 122,
"avg_line_length": 40.911764705882355,
"alnum_prop": 0.5098490294751977,
"repo_name": "wwright2/dcim3-angstrom1",
"id": "fb95cc790e0016a2ecf307841b9d9f48a1f04f13",
"size": "14754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/openembedded-core/scripts/lib/wic/utils/partitionedfs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "73541"
},
{
"name": "Awk",
"bytes": "286"
},
{
"name": "Batchfile",
"bytes": "19960"
},
{
"name": "BitBake",
"bytes": "2875212"
},
{
"name": "BlitzBasic",
"bytes": "6367"
},
{
"name": "C",
"bytes": "1598095"
},
{
"name": "C++",
"bytes": "2198121"
},
{
"name": "CMake",
"bytes": "7277"
},
{
"name": "CSS",
"bytes": "28636"
},
{
"name": "Groff",
"bytes": "502999"
},
{
"name": "HTML",
"bytes": "210823"
},
{
"name": "JavaScript",
"bytes": "23100"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32539"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "PHP",
"bytes": "829048"
},
{
"name": "Pascal",
"bytes": "17352"
},
{
"name": "Perl",
"bytes": "66339"
},
{
"name": "Python",
"bytes": "3672452"
},
{
"name": "QMake",
"bytes": "165"
},
{
"name": "Ruby",
"bytes": "10695"
},
{
"name": "Shell",
"bytes": "820076"
},
{
"name": "SourcePawn",
"bytes": "259600"
},
{
"name": "Tcl",
"bytes": "4897"
},
{
"name": "VimL",
"bytes": "8483"
},
{
"name": "XSLT",
"bytes": "9089"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from copy import copy
from collections import (Iterable, Mapping, defaultdict)
import functools
import itertools
import six
def is_nonstring_iterable(value):
"""
:param value: the object to check
:return: whether the given value is a non-string iterable object
"""
return isinstance(value, Iterable) and not isinstance(value, six.string_types)
def concat(*iterables):
"""
:param iterables: the iterables to concatenate
:return: the concatenated list
:rtype: list
"""
return list(itertools.chain(*iterables))
def tuplize(iterable):
"""
Recursively creates nested tuples from the given iterable object.
:param iterable: the iterable to convert
:return: the comparable tuple
"""
return tuple(tuplize(elt) if is_nonstring_iterable(elt) else elt
for elt in iterable)
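# A small illustrative sketch (values chosen purely for demonstration): nested
# iterables become nested tuples, while strings are left intact rather than
# being split into characters.
#
#   tuplize([1, [2, 3], 'ab'])   # -> (1, (2, 3), 'ab')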
def to_series(items, conjunction='and'):
"""
Formats the given items as a series string.
Example:
>>> to_series([1, 2, 3])
'1, 2 and 3'
:param items: the items to format in a series
:param conjunction: the series conjunction
:return: the items series
:rtype: str
"""
if not items:
return ''
prefix = ', '.join([str(i) for i in items[:-1]])
suffix = str(items[-1])
if not prefix:
return suffix
else:
return (' ' + conjunction + ' ').join([prefix, suffix])
def nested_defaultdict(factory, levels=0):
"""
Makes a defaultdict for the given factory and number of levels, e.g.::
>> from qiutil.collections import nested_defaultdict as dd
>> dd(list, 0)[1]
[]
>> dd(dict, 2)[1][2][3]
{}
Note that the default levels parameter value 0 is synonymous with the
standard Python collections defaultdict, i.e.::
dd(list)
is the same as::
dd(list, 0)
or::
from collections import defaultdict
defaultdict(list)
Thus, this ``nested_defaultdict`` function can serve as a drop-in
replacement for ``defaultdict``.
:param factory: the 0th level defaultdict factory.
:param levels: the number of levels
"""
# The recursive nested dictionary generator, where f is the factory
# and n is the number of levels.
dd = lambda f, n: defaultdict((lambda: dd(f, n - 1)) if n else f)
return dd(factory, levels)
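# Illustrative sketch (demonstration keys only): with one extra level, two keys
# can be addressed before the factory default kicks in.
#
#   counts = nested_defaultdict(int, levels=1)
#   counts['subject1']['visit1'] += 1
#   # counts['subject1']['visit1'] is now 1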
def update(target, *sources, **opts):
"""
Updates the given target object from the given source objects.
The target object can be a dictionary, list or set. The target
and sources are validated for compatibility as follows:
* If the target object is a Mapping, then the sources must
be Mappings.
* Otherwise, if the target object is a list or set, then the
sources must be non-string iterables.
The target is updated from the sources in order as follows:
* If the target object is a Mapping and the *recursive* flag is
falsey, then the standard Python dictionary update is applied.
* If the target object is a Mapping and the *recursive* flag is
truthy, then the update is applied recursively to nested
dictionaries, e.g.:
>> from qiutil.collections import update
>> target = dict(a=dict(aa=1))
>> update(target, dict(a=dict(aa=2, ab=3)))
>> target
{'a': {'aa': 2, 'ab': 3}}
* If the target object is a list or set, then the source items
which are not yet in the target are added to the target, e.g.:
>> from qiutil.collections import update
>> target = [1, 2, 2, 5]
>> update(target, [4, 2, 6, 6])
>> target
[1, 2, 2, 5, 4, 6, 6]
This function adapts the solution offered in a
`StackOverflow post <http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth>`
to support lists, sets and multiple sources.
:param target: the dictionary to update
:param sources: the update source dictionaries
:param opts: the following keyword options:
:keyword recursive: if True, then apply the update recursively to
nested dictionaries
"""
# Validate the sources.
_validate_update_compatibility(target, *sources)
# Make the update helper function. This idiom refactors the source
# iteration block into a callable function with a sole source argument.
# This pattern is a little obscure to those not well-versed in functional
# programming, but it is cleaner than the alternative of embedding the
# _updater logic into the source iteration.
updater = _create_updater(target, **opts)
# Apply the successive source updates.
for source in sources:
updater(source)
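# Illustrative sketch of the set-target form, which the docstring above does
# not show (values are for demonstration only):
#
#   seen = {1, 2}
#   update(seen, [2, 3], (4,))
#   # seen == {1, 2, 3, 4}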
def _create_updater(target, **opts):
"""
:param target: the update target
:param opts: the following keyword options:
:keyword recursive: if True, then apply the update recursively to
nested dictionaries
:return: the function to apply to a *source* argument
"""
if isinstance(target, Mapping):
if opts.get('recursive'):
return functools.partial(_update_dict_recursive, target)
else:
# Apply the standard Python dictionary update.
return lambda src: target.update(src)
else:
return functools.partial(_update_collection, target)
def _update_dict_recursive(target, source):
for key, srcval in source.iteritems():
if key in target:
tgtval = target[key]
# If the target value can be merged from the source
# value, then replace the target value with a shallow
# copy and update it recursively.
if isinstance(tgtval, Mapping) and isinstance(srcval, Mapping):
target[key] = tgtval = copy(tgtval)
_update_dict_recursive(tgtval, srcval)
continue
# Set the target item.
target[key] = copy(srcval)
def _validate_update_compatibility(target, *sources):
if isinstance(target, Mapping):
for source in sources:
if not isinstance(source, Mapping):
raise TypeError("Update source is incompatible with the"
" dictionary target: %s" % source)
elif isinstance(target, list) or isinstance(target, set):
for source in sources:
if not is_nonstring_iterable(source):
raise TypeError("Update source is incompatible with the"
" collection target: %s" % source)
else:
raise TypeError("Update target is type is not supported: %s" % target)
def _update_collection(target, source):
"""
Adds to the target those source items which are not
yet in the target, as described in :meth:`update`.
:param target: the list or set to update
:param source: the input non-string iterable
:raise TypeError: if the target is neither a list or set
"""
if isinstance(target, set):
target.update(source)
elif isinstance(target, list):
exclude = set(target)
diff = (item for item in source if item not in exclude)
target.extend(diff)
else:
raise TypeError("Update target type not supported")
class ImmutableDict(dict):
"""
ImmutableDict is a dictionary that cannot be changed after creation.
An ImmutableDict is *not* hashable and therefore cannot be used as a
dictionary key or set member. See http://www.python.org/dev/peps/pep-0351
for the rationale.
"""
def __init__(self, *args, **kwargs):
super(ImmutableDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
"""
:raise NotImplementedError: always
"""
raise NotImplementedError("The dictionary is immutable: %s" % self)
EMPTY_DICT = ImmutableDict()
"""
An immutable empty dictionary.
This constant serves as an efficient method return default value.
"""
|
{
"content_hash": "db3f87b23c3d51f1911d90b63f08a368",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 122,
"avg_line_length": 32.77732793522267,
"alnum_prop": 0.6364871541501976,
"repo_name": "ohsu-qin/qiutil",
"id": "d191c7e29bfec9492f9c23802dbf3b0efac6202d",
"size": "8275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiutil/collections.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "66256"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "a09f29d3f342bb4c721e7dea9c7a2d6e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 82,
"avg_line_length": 35.2,
"alnum_prop": 0.5984848484848485,
"repo_name": "plotly/python-api",
"id": "cf9dd26c84aff54cf6d7a2dda10826f92a148df5",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sankey/hoverlabel/_bordercolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""Tests for the Bond fan device."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import call
from bond_api import Action, DeviceType, Direction
import pytest
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.bond.const import (
DOMAIN as BOND_DOMAIN,
SERVICE_SET_FAN_SPEED_TRACKED_STATE,
)
from homeassistant.components.bond.fan import PRESET_MODE_BREEZE
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
SERVICE_SET_PERCENTAGE,
SERVICE_SET_PRESET_MODE,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_action_returns_clientresponseerror,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
def ceiling_fan_with_breeze(name: str):
"""Create a ceiling fan with given name with breeze support."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection", "BreezeOn"],
}
async def turn_fan_on(
hass: core.HomeAssistant,
fan_id: str,
percentage: int | None = None,
preset_mode: str | None = None,
) -> None:
"""Turn the fan on at the specified speed."""
service_data = {ATTR_ENTITY_ID: fan_id}
if preset_mode:
service_data[fan.ATTR_PRESET_MODE] = preset_mode
if percentage is not None:
service_data[fan.ATTR_PERCENTAGE] = percentage
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_ON,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = er.async_get(hass)
entity = registry.entities["fan.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
device_registry = dr.async_get(hass)
device = device_registry.async_get(entity.device_id)
assert device.configuration_url == "http://some host"
async def test_non_standard_speed_list(hass: core.HomeAssistant):
"""Tests that the device is registered with custom speed list if number of supported speeds differs form 3."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", percentage=100 / 6 * 2)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(2)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", percentage=100 / 6 * 4)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(4)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", percentage=100)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_fan_speed_with_no_max_speed(hass: core.HomeAssistant):
"""Tests that fans without max speed (increase/decrease controls) map speed to HA standard."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"no": "max_speed"},
state={"power": 1, "speed": 14},
)
assert hass.states.get("fan.name_1").attributes["percentage"] == 100
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=1)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def test_turn_on_fan_with_percentage_3_speeds(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=10)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
mock_set_speed.reset_mock()
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=50)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(2))
mock_set_speed.reset_mock()
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=100)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(3))
async def test_turn_on_fan_with_percentage_6_speeds(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=10)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
mock_set_speed.reset_mock()
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=50)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(3))
mock_set_speed.reset_mock()
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=100)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(6))
async def test_turn_on_fan_preset_mode(hass: core.HomeAssistant):
"""Tests that turn on command delegates to breeze on API."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan_with_breeze("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
assert hass.states.get("fan.name_1").attributes[ATTR_PRESET_MODES] == [
PRESET_MODE_BREEZE
]
with patch_bond_action() as mock_set_preset_mode, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", preset_mode=PRESET_MODE_BREEZE)
mock_set_preset_mode.assert_called_with("test-device-id", Action(Action.BREEZE_ON))
with patch_bond_action() as mock_set_preset_mode, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_PRESET_MODE,
service_data={
ATTR_PRESET_MODE: PRESET_MODE_BREEZE,
ATTR_ENTITY_ID: "fan.name_1",
},
blocking=True,
)
mock_set_preset_mode.assert_called_with("test-device-id", Action(Action.BREEZE_ON))
async def test_turn_on_fan_preset_mode_not_supported(hass: core.HomeAssistant):
"""Tests calling breeze mode on a fan that does not support it raises."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
with patch_bond_action(), patch_bond_device_state(), pytest.raises(ValueError):
await turn_fan_on(hass, "fan.name_1", preset_mode=PRESET_MODE_BREEZE)
with patch_bond_action(), patch_bond_device_state(), pytest.raises(ValueError):
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_PRESET_MODE,
service_data={
ATTR_PRESET_MODE: PRESET_MODE_BREEZE,
ATTR_ENTITY_ID: "fan.name_1",
},
blocking=True,
)
async def test_turn_on_fan_with_off_with_breeze(hass: core.HomeAssistant):
"""Tests that turn off command delegates to turn off API."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan_with_breeze("name-1"),
bond_device_id="test-device-id",
state={"breeze": [1, 0, 0]},
)
assert (
hass.states.get("fan.name_1").attributes[ATTR_PRESET_MODE] == PRESET_MODE_BREEZE
)
with patch_bond_action() as mock_actions, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=0)
assert mock_actions.mock_calls == [
call("test-device-id", Action(Action.BREEZE_OFF)),
call("test-device-id", Action.turn_off()),
]
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_on_fan_with_off_percentage(hass: core.HomeAssistant):
"""Tests that turn off command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", percentage=0)
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_set_speed_off(hass: core.HomeAssistant):
"""Tests that set_speed(off) command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_PERCENTAGE,
service_data={ATTR_ENTITY_ID: "fan.name_1", ATTR_PERCENTAGE: 0},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_turn_off_fan(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "fan.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_set_speed_belief_speed_zero(hass: core.HomeAssistant):
"""Tests that set power belief service delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_action, patch_bond_device_state():
await hass.services.async_call(
BOND_DOMAIN,
SERVICE_SET_FAN_SPEED_TRACKED_STATE,
{ATTR_ENTITY_ID: "fan.name_1", "speed": 0},
blocking=True,
)
await hass.async_block_till_done()
mock_action.assert_called_once_with(
"test-device-id", Action.set_power_state_belief(False)
)
async def test_set_speed_belief_speed_api_error(hass: core.HomeAssistant):
"""Tests that set power belief service delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with pytest.raises(
HomeAssistantError
), patch_bond_action_returns_clientresponseerror(), patch_bond_device_state():
await hass.services.async_call(
BOND_DOMAIN,
SERVICE_SET_FAN_SPEED_TRACKED_STATE,
{ATTR_ENTITY_ID: "fan.name_1", "speed": 100},
blocking=True,
)
await hass.async_block_till_done()
async def test_set_speed_belief_speed_100(hass: core.HomeAssistant):
"""Tests that set power belief service delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_action, patch_bond_device_state():
await hass.services.async_call(
BOND_DOMAIN,
SERVICE_SET_FAN_SPEED_TRACKED_STATE,
{ATTR_ENTITY_ID: "fan.name_1", "speed": 100},
blocking=True,
)
await hass.async_block_till_done()
mock_action.assert_any_call("test-device-id", Action.set_power_state_belief(True))
mock_action.assert_called_with("test-device-id", Action.set_speed_belief(3))
async def test_update_reports_fan_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is on."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is off."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is forward."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
"""Tests that set direction command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_direction, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD},
blocking=True,
)
await hass.async_block_till_done()
mock_set_direction.assert_called_once_with(
"test-device-id", Action.set_direction(Direction.FORWARD)
)
async def test_fan_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, FAN_DOMAIN, ceiling_fan("name-1"), "fan.name_1"
)
|
{
"content_hash": "d002eadc53f259c014c3c70c8714871c",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 114,
"avg_line_length": 35.39622641509434,
"alnum_prop": 0.650497512437811,
"repo_name": "GenericStudent/home-assistant",
"id": "061e94595bf41de71efe8f89efb5b6c780cde7c5",
"size": "16884",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/bond/test_fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
import copy
import tkinter as tk
from pprint import pprint
import numpy as np
from img_utils.tk_scrolled_frame import ScrolledFrame
from img_utils.scrolled_canvas import ScrolledCanvas
class EntryTable(tk.Frame):
"""
Tkinter table sth like Excel matrix.
raw_kernel = [][] Table with tk.StringVar()
np_kernel = [][] np.array
"""
def __init__(self, parent, size):
super(EntryTable, self).__init__(parent)
self.master = parent
self.table = ScrolledFrame(parent)
self.table.pack(fill=tk.BOTH, expand=True)
self.excel = self.table.interior
self.raw_kernel = []
self.size = size
def draw(self, values=None):
"""
Searching order is by columns from left to right.
:param values: Table[][] with valid values
:return:
"""
bucket = list(self.excel.children.values())
try:
for child in bucket:
child.destroy()
except Exception as e:
pprint(e)
self.raw_kernel = []
for x in range(self.size[0]):
matrix_row = []
for y in range(self.size[1]):
value = values[x][y] if values is not None else 1
buk = Bucket(table=self, x=x, y=y, value=value)
buk.bucket.grid(column=x, row=y, padx=2, pady=2)
matrix_row.append(buk.value)
self.raw_kernel.append(copy.copy(matrix_row))
def get_values(self):
"""
Get Values from table
:return: np.array
"""
_x, _y = self.size
np_kernel = np.zeros((_x, _y))
for x in range(_x):
for y in range(_y):
np_kernel[x][y] = self.raw_kernel[x][y].get()
return np_kernel
class Bucket:
def __init__(self, table: EntryTable, x, y, value):
self.x = x
self.y = y
self.value = tk.StringVar()
self.value.set(value)
self.bucket = tk.Entry(table.excel, textvariable=self.value, width=3)
vcmd = self.bucket.register(self.check_entry)
self.bucket.configure(validate='key', validatecommand=(vcmd, '%d', '%S'))
@staticmethod
def check_entry(why, what):
# '%d' action code: 1 = insert, 0 = delete, -1 = forced/focus validation.
# Only validate the typed text on insert/delete; allow everything else.
if int(why) >= 0:
return what in '0123456789-.'
return True
if __name__ == '__main__':
pass
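# Hypothetical usage sketch (assumes a running Tk root window; the names below
# are illustrative, not part of this module):
#
#   root = tk.Tk()
#   table = EntryTable(root, size=(3, 3))
#   table.draw()                 # builds a 3x3 grid of entries, default value 1
#   kernel = table.get_values()  # -> 3x3 numpy array of the current cell values
#   root.mainloop()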
|
{
"content_hash": "8a5fb3fc9cb2850b58fa301a8c7f0f5e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 81,
"avg_line_length": 28.36046511627907,
"alnum_prop": 0.5432554325543255,
"repo_name": "hawwestin/MSR.APO",
"id": "4b3de929c5db8053d4da6764f8bec1db9b022650",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "img_utils/EntryTable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "134680"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/chemistry/shared_medpack_wound_strength_e.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "c0694169ca73c4e3b724b62e24d4308d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.7012578616352201,
"repo_name": "anhstudios/swganh",
"id": "39d20dd3498ec3725cedf9843f9a0b7f35e5686c",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_wound_strength_e.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from sentry.utils.locking.lock import Lock
class LockManager(object):
def __init__(self, backend):
self.backend = backend
def get(self, key, duration, routing_key=None):
"""
Retrieve a ``Lock`` instance.
"""
return Lock(self.backend, key, duration, routing_key)
|
{
"content_hash": "d7827a7826762ddb7ad5ffd55e720442",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 26.083333333333332,
"alnum_prop": 0.6134185303514377,
"repo_name": "mitsuhiko/sentry",
"id": "e479dc5e1be8daa0c7b65e9ab5776874e2fb4087",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/utils/locking/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
}
|
import sys
# [START storage_release_event_based_hold]
from google.cloud import storage
def release_event_based_hold(bucket_name, blob_name):
"""Releases the event based hold on a given blob"""
# bucket_name = "my-bucket"
# blob_name = "my-blob"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.event_based_hold = False
blob.patch()
print(f"Event based hold was released for {blob_name}")
# [END storage_release_event_based_hold]
if __name__ == "__main__":
release_event_based_hold(bucket_name=sys.argv[1], blob_name=sys.argv[2])
|
{
"content_hash": "f8ddd4a8697b5b9e2944d874238eb881",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 23.85185185185185,
"alnum_prop": 0.6739130434782609,
"repo_name": "googleapis/python-storage",
"id": "1db637cd9e7f40203698a425983ef5c0e10e55b7",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/storage_release_event_based_hold.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1392987"
},
{
"name": "Shell",
"bytes": "32171"
}
],
"symlink_target": ""
}
|
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def fit_loop(
model,
inputs,
targets,
epochs=100,
verbose=1,
callbacks=None,
val_inputs=None,
val_targets=None,
callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""fit function when using DistributionStrategy for training.
Arguments:
model: Keras Model instance.
inputs: List of input arrays.
targets: List of target arrays.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_inputs: List of input arrays.
val_targets: List of target arrays.
callback_metrics: List of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
Returns:
`History` object.
Raises:
ValueError: in case of invalid arguments.
"""
current_strategy = model._distribution_strategy
def _per_device_train_function(model):
model._make_train_function()
return (model.train_function.inputs,
model.train_function.outputs,
model.train_function.updates_op,
model.train_function.session_kwargs)
with current_strategy.scope():
# Create train ops on each of the devices when we call
# `_per_device_train_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_train_function, model._grouped_model)
# Unwrap all the per device values returned from `call_for_each_tower`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs,
grouped_updates, grouped_session_args, with_loss_tensor=True)
# Dataset inputs and targets are also per devices values that need to be
# unwrapped.
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
dataset_targets = distributed_training_utils.flatten_perdevice_values(
current_strategy, targets)
# Create a train function that is composed of all the parameters above.
distributed_train_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_train_function',
**all_session_args)
# We need to set sample_weights to None since there are sample weight
# placeholders that are created with default values.
sample_weights = [None for _ in range(len(model.outputs) *
current_strategy.num_towers)]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + dataset_targets + sample_weights + [1]
else:
ins = dataset_inputs + dataset_targets
do_validation = False
if validation_steps:
do_validation = True
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` '
'when doing step-wise '
'training, i.e. `steps_per_epoch` '
'must be set.')
out_labels = model.metrics_names
if do_validation:
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels
]
else:
callback_metrics = copy.copy(out_labels)
model.history = cbks.History()
all_callbacks = [cbks.BaseLogger(
stateful_metrics=model.stateful_metric_names)]
if verbose:
# We assume that `steps_per_epoch` is always set since we have to use
# Datasets.
count_mode = 'steps'
all_callbacks.append(
cbks.ProgbarLogger(
count_mode, stateful_metrics=model.stateful_metric_names))
all_callbacks += (callbacks or []) + [model.history]
callbacks = cbks.CallbackList(all_callbacks)
out_labels = out_labels or []
# We set the callback model to an instance of the `DistributedModel` that we
# create in the `compile` call. The `DistributedModel` is initialized with
# the first replicated model. We need to set the callback model to a
# DistributedModel to allow us to override saving and loading weights when
# we checkpoint the model during training.
callback_model = model._replicated_model
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'samples': None,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
out_labels = out_labels or []
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
if steps_per_epoch is not None:
epoch_logs = {}
for step_index in range(steps_per_epoch):
batch_logs = {}
batch_logs['batch'] = step_index
batch_logs['size'] = 1
callbacks.on_batch_begin(step_index, batch_logs)
try:
outs = distributed_train_function(ins)
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
'batches (in this case, %d batches).' %
(steps_per_epoch * epochs))
break
if not isinstance(outs, list):
outs = [outs]
# TODO(anjalisridhar): Temporary workaround for aggregating metrics
# across towers. Replace with the new metrics module eventually.
merged_output = []
# The first output is the total loss.
merged_output.append(outs[0])
current_index = 1
num_devices = len(current_strategy._devices)
# Each label in `out_labels` corresponds to one set of metrics. The
# number of metric values corresponds to the number of devices. We
# currently take the mean of the values.
for _ in out_labels[1:]:
m = np.mean(outs[current_index:current_index + num_devices])
merged_output.append(m)
current_index += num_devices
# Log the tower-averaged values computed above, not the raw per-tower outs.
for l, o in zip(out_labels, merged_output):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callback_model.stop_training:
break
if do_validation:
val_outs = test_loop(
model,
val_inputs,
val_targets,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
# Copy the weights back from the replicated model to the original model.
with current_strategy.scope():
updated_weights = current_strategy.unwrap(
model._grouped_model)[0].get_weights()
model.set_weights(updated_weights)
return model.history
def test_loop(model, inputs, targets, verbose=0, steps=None):
"""evaluate method to validate a model that uses DistributionStrategy.
Arguments:
model: Keras Model instance.
inputs: List of input arrays.
targets: List of target arrays.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
current_strategy = model._distribution_strategy
def _per_device_test_function(model):
model._make_test_function()
return (model.test_function.inputs,
model.test_function.outputs,
model.test_function.updates_op,
model.test_function.session_kwargs)
with current_strategy.scope():
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_test_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args, with_loss_tensor=True)
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
dataset_targets = distributed_training_utils.flatten_perdevice_values(
current_strategy, targets)
distributed_test_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_test_function',
**all_session_args)
# We need to set sample_weights to None since there are sample weight
# placeholders that are created with default values.
sample_weights = [None for _ in range(len(model.outputs) *
current_strategy.num_towers)]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + dataset_targets + sample_weights + [0]
else:
ins = dataset_inputs + dataset_targets
if hasattr(model, 'metrics'):
for m in model.stateful_metric_functions:
m.reset_states()
stateful_metric_indices = [
i for i, name in enumerate(model.metrics_names)
if str(name) in model.stateful_metric_names
]
else:
stateful_metric_indices = []
outs = []
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
if steps is not None:
for step in range(steps):
batch_outs = distributed_test_function(ins)
if isinstance(batch_outs, list):
if step == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
if i in stateful_metric_indices:
outs[i] = batch_out
else:
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
if i not in stateful_metric_indices:
outs[i] /= steps
if len(outs) == 1:
return outs[0]
return outs
def predict_loop(model, inputs, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
model: Keras Model instance.
inputs: list of tensors to be fed to `f`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
current_strategy = model._distribution_strategy
def _per_device_predict_function(model):
model._make_predict_function()
return (model.predict_function.inputs,
model.predict_function.outputs,
model.predict_function.updates_op,
model.predict_function.session_kwargs)
with current_strategy.scope():
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_tower(
_per_device_predict_function, model._grouped_model)
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
dataset_inputs = distributed_training_utils.flatten_perdevice_values(
current_strategy, inputs)
distributed_predict_function = K.Function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_predict_function',
**all_session_args)
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = dataset_inputs + [0]
else:
ins = dataset_inputs
if verbose == 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
if steps is not None:
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = distributed_predict_function(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for _ in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
if verbose == 1:
progbar.update(step + 1)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
def clone_and_build_model(model):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=None)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
cloned_model.compile(
optimizer,
model.loss,
metrics=model.metrics,
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=model.weighted_metrics)
return cloned_model
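# Illustrative sketch (assumes `model` is an already-compiled tf.keras Model):
#
#   replica = clone_and_build_model(model)
#   # `replica` has freshly cloned layers and a rebuilt optimizer of the same
#   # class and config, compiled with the original loss, metrics, loss_weights
#   # and sample_weight_mode.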
|
{
"content_hash": "5e65437a63aca1d8616e7069cc12e554",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 83,
"avg_line_length": 37.36507936507937,
"alnum_prop": 0.667738803252822,
"repo_name": "aselle/tensorflow",
"id": "75e466d593899ee3c94d257981afc62ce014f07a",
"size": "17167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/training_distributed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "321697"
},
{
"name": "C#",
"bytes": "7259"
},
{
"name": "C++",
"bytes": "46003590"
},
{
"name": "CMake",
"bytes": "207738"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "829230"
},
{
"name": "Jupyter Notebook",
"bytes": "2578736"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52243"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39898642"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447009"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListInventorySupplyByDateRange(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListInventorySupplyByDateRange Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListInventorySupplyByDateRange, self).__init__(temboo_session, '/Library/Amazon/Marketplace/Inventory/ListInventorySupplyByDateRange')
def new_input_set(self):
return ListInventorySupplyByDateRangeInputSet()
def _make_result_set(self, result, path):
return ListInventorySupplyByDateRangeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListInventorySupplyByDateRangeChoreographyExecution(session, exec_id, path)
class ListInventorySupplyByDateRangeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListInventorySupplyByDateRange
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSMarketplaceId(self, value):
"""
Set the value of the AWSMarketplaceId input for this Choreo. ((required, string) The Marketplace ID provided by Amazon Web Services.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('AWSMarketplaceId', value)
def set_AWSMerchantId(self, value):
"""
Set the value of the AWSMerchantId input for this Choreo. ((required, string) The Merchant ID provided by Amazon Web Services.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('AWSMerchantId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('AWSSecretKeyId', value)
def set_Endpoint(self, value):
"""
Set the value of the Endpoint input for this Choreo. ((conditional, string) The base URL for the MWS endpoint. Defaults to mws.amazonservices.co.uk.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('Endpoint', value)
def set_PageToken(self, value):
"""
Set the value of the PageToken input for this Choreo. ((conditional, string) The value returned in the NextPageToken output of this Choreo when there are multiple pages of inventory items to retrieve. Required unless providing QueryStartDateTime.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('PageToken', value)
def set_QueryStartDateTime(self, value):
"""
Set the value of the QueryStartDateTime input for this Choreo. ((conditional, date) A date used for selecting items that have had changes in inventory availability after (or at) a specified time, in ISO 8601 date format (i.e. 2012-01-01). Required unless providing PageToken.)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('QueryStartDateTime', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('ResponseFormat', value)
def set_ResponseGroup(self, value):
"""
Set the value of the ResponseGroup input for this Choreo. ((optional, string) Indicates whether or not to return the SupplyDetail element in the response. Valid values are: "Basic" (does not include SupplyDetail), and "Detailed" (includes SupplyDetail). Defaults to "Basic".)
"""
super(ListInventorySupplyByDateRangeInputSet, self)._set_input('ResponseGroup', value)
class ListInventorySupplyByDateRangeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListInventorySupplyByDateRange Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (Stores the response from Amazon.)
"""
return self._output.get('Response', None)
def get_NextPageToken(self):
"""
Retrieve the value for the "NextPageToken" output from this Choreo execution. ((string) A token used to retrieve the next page of results. If a token is not returned, there are no more results to retrieve. This token can be passed to the PageToken input of this Choreo.)
"""
return self._output.get('NextPageToken', None)
class ListInventorySupplyByDateRangeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListInventorySupplyByDateRangeResultSet(response, path)
|
{
"content_hash": "49907e9c1c6b82966e1343daac0a6ed9",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 284,
"avg_line_length": 55.21782178217822,
"alnum_prop": 0.7202797202797203,
"repo_name": "jordanemedlock/psychtruths",
"id": "61e3c76d51e500036cd9c948163dd5f7504e9786",
"size": "6505",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Amazon/Marketplace/Inventory/ListInventorySupplyByDateRange.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
import re
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask import url_for
from flask.ext.login import UserMixin
from . import db, login_manager
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True)
email = db.Column(db.String(64), unique=True)
password_hash = db.Column(db.String(128))
member_since = db.Column(db.DateTime, default=datetime.utcnow)
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
is_admin = db.Column(db.Boolean, default=False)
todolists = db.relationship('TodoList', backref='user', lazy='dynamic')
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
def __repr__(self):
if self.is_admin:
return '<Admin {0}>'.format(self.username)
return '<User {0}>'.format(self.username)
@staticmethod
def is_valid_username(username):
return len(username) <= 64 and re.match(r'^\S+$', username)
@staticmethod
def is_valid_email(email):
return len(email) <= 64 and re.match(r'^\S+@\S+\.\S+$', email)
@staticmethod
def is_valid_password(passwd):
return len(generate_password_hash(passwd)) <= 128 and passwd
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def seen(self):
self.last_seen = datetime.utcnow()
return self.save()
def to_json(self):
json_user = {
'username': self.username,
'user_url': url_for('api.get_user', username=self.username,
_external=True),
'member_since': self.member_since,
'last_seen': self.last_seen,
'todolists': url_for('api.get_user_todolists',
username=self.username, _external=True),
'todolist_count': self.todolists.count()
}
return json_user
def promote_to_admin(self):
self.is_admin = True
return self.save()
def delete(self):
db.session.delete(self)
db.session.commit()
def save(self):
from sqlalchemy.exc import IntegrityError
db.session.add(self)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
return
return self
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class TodoList(db.Model):
__tablename__ = 'todolist'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(128))
created_at = db.Column(db.DateTime, default=datetime.utcnow)
creator = db.Column(db.String(64), db.ForeignKey('user.username'))
todos = db.relationship('Todo', backref='todolist', lazy='dynamic')
def __init__(self, title='untitled', creator=None, created_at=None):
self.title = title
self.creator = creator
# Avoid a datetime.utcnow() default argument: it would be evaluated once
# at import time and every list would share that stale timestamp.
self.created_at = created_at or datetime.utcnow()
def __repr__(self):
return '<Todolist: {0}>'.format(self.title)
@staticmethod
def is_valid_title(list_title):
return len(list_title) <= 128 and list_title
def change_title(self, new_title):
self.title = new_title
self.save()
def to_json(self):
if self.creator:
todos_url = url_for('api.get_user_todolist_todos',
todolist_id=self.id, username=self.creator, _external=True)
else:
todos_url = url_for('api.get_todolist_todos', todolist_id=self.id,
_external=True)
json_todolist = {
'title': self.title,
'creator': self.creator,
'created_at': self.created_at,
'total_todo_count': self.count_todos(),
'open_todo_count': self.count_open(),
'finished_todo_count': self.count_finished(),
'todos': todos_url
}
return json_todolist
def count_todos(self):
return self.todos.order_by(None).count()
def count_finished(self):
return self.todos.filter_by(is_finished=True).count()
def count_open(self):
return self.todos.filter_by(is_finished=False).count()
def delete(self):
db.session.delete(self)
db.session.commit()
def save(self):
db.session.add(self)
db.session.commit()
return self
class Todo(db.Model):
__tablename__ = 'todo'
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String(128))
created_at = db.Column(db.DateTime, index=True, default=datetime.utcnow)
finished_at = db.Column(db.DateTime, index=True, default=None)
is_finished = db.Column(db.Boolean, default=False)
creator = db.Column(db.String(64), db.ForeignKey('user.username'))
todolist_id = db.Column(db.Integer, db.ForeignKey('todolist.id'))
def __init__(self, description, todolist_id, creator=None, created_at=None):
self.description = description
self.todolist_id = todolist_id
self.creator = creator
# Same as TodoList: compute the timestamp at call time, not import time.
self.created_at = created_at or datetime.utcnow()
def __repr__(self):
if self.creator is None:
return '<Todo: {0}>'.format(self.description)
status = 'finished' if self.is_finished else 'open'
return '<{0} todo: {1} by {2}>'.format(
status, self.description, self.creator)
def finished(self):
self.is_finished = True
self.finished_at = datetime.utcnow()
self.save()
def reopen(self):
self.is_finished = False
self.finished_at = None
self.save()
def to_json(self):
json_todo = {
'description': self.description,
'creator': self.creator,
'created_at': self.created_at,
'status' : 'finished' if self.is_finished else 'open'
}
return json_todo
def delete(self):
db.session.delete(self)
db.session.commit()
def save(self):
db.session.add(self)
db.session.commit()
return self
|
{
"content_hash": "c2ee5dcd966b31db1f68c8c10115b522",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 78,
"avg_line_length": 31.048076923076923,
"alnum_prop": 0.5977082688138743,
"repo_name": "poulp/flask-todolist",
"id": "26334a230f6a93d6ff4b9ec2753be93cc7f98467",
"size": "6483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17242"
},
{
"name": "HTML",
"bytes": "11399"
},
{
"name": "JavaScript",
"bytes": "2556"
},
{
"name": "Python",
"bytes": "68303"
}
],
"symlink_target": ""
}
|
import collections
import contextlib
import json
import os
import posixpath
import socket
import sys
import threading
import types
from . import streamname, varint
_StreamParamsBase = collections.namedtuple('_StreamParamsBase',
('name', 'type', 'content_type', 'tags', 'tee', 'binary_file_extension'))
# Magic number at the beginning of a Butler stream
#
# See "ProtocolFrameHeaderMagic" in:
# <luci-go>/logdog/client/butlerlib/streamproto
BUTLER_MAGIC = 'BTLR1\x1e'
class StreamParams(_StreamParamsBase):
"""Defines the set of parameters to apply to a new stream."""
# A text content stream.
TEXT = 'text'
# A binary content stream.
BINARY = 'binary'
# A datagram content stream.
DATAGRAM = 'datagram'
# Tee parameter to tee this stream through the Butler's STDOUT.
TEE_STDOUT = 'stdout'
# Tee parameter to tee this stream through the Butler's STDERR.
TEE_STDERR = 'stderr'
@classmethod
def make(cls, **kwargs):
"""Returns (StreamParams): A new StreamParams instance with supplied values.
Any parameter that isn't supplied will be set to None.
Args:
kwargs (dict): Named parameters to apply.
"""
return cls(**{f: kwargs.get(f) for f in cls._fields})
def validate(self):
"""Raises (ValueError): if the parameters are not valid."""
streamname.validate_stream_name(self.name)
if self.type not in (self.TEXT, self.BINARY, self.DATAGRAM):
raise ValueError('Invalid type (%s)' % (self.type,))
if self.tags is not None:
if not isinstance(self.tags, collections.Mapping):
raise ValueError('Invalid tags type (%s)' % (self.tags,))
for k, v in self.tags.items():
streamname.validate_tag(k, v)
if self.tee not in (None, self.TEE_STDOUT, self.TEE_STDERR):
raise ValueError('Invalid tee type (%s)' % (self.tee,))
if not isinstance(self.binary_file_extension,
(types.NoneType, types.StringTypes)):
raise ValueError('Invalid binary file extension type (%s)' % (
self.binary_file_extension,))
def to_json(self):
"""Returns (str): The JSON representation of the StreamParams.
Converts stream parameters to JSON for Butler consumption.
Raises:
ValueError: if these parameters are not valid.
"""
self.validate()
obj = {
'name': self.name,
'type': self.type,
}
def maybe_add(key, value):
if value is not None:
obj[key] = value
maybe_add('contentType', self.content_type)
maybe_add('tags', self.tags)
maybe_add('tee', self.tee)
maybe_add('binaryFileExtension', self.binary_file_extension)
# Note that "dumps' will dump UTF-8 by default, which is what Butler wants.
return json.dumps(obj, sort_keys=True, ensure_ascii=True, indent=None)
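# Illustrative sketch: with the fields above, a populated StreamParams
# serializes to a compact JSON header whose optional keys appear only when
# set (keys are sorted), e.g.:
#
#   params = StreamParams.make(name='my/stream', type=StreamParams.TEXT,
#                              content_type='text/plain')
#   params.to_json()
#   # => '{"contentType": "text/plain", "name": "my/stream", "type": "text"}'
#
# The stream name and content type above are example values only.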
class StreamProtocolRegistry(object):
"""Registry of streamserver URI protocols and their client classes.
"""
def __init__(self):
self._registry = {}
def register_protocol(self, protocol, client_cls):
assert issubclass(client_cls, StreamClient)
if self._registry.get(protocol) is not None:
raise KeyError('Duplicate protocol registered.')
self._registry[protocol] = client_cls
def create(self, uri, **kwargs):
"""Returns (StreamClient): A stream client for the specified URI.
This uses the default StreamProtocolRegistry to instantiate a StreamClient
for the specified URI.
Args:
uri (str): The streamserver URI.
kwargs: keyword arguments to forward to the stream. See
StreamClient.__init__.
Raises:
ValueError: if the supplied URI references an invalid or improperly
configured streamserver.
"""
uri = uri.split(':', 1)
if len(uri) != 2:
raise ValueError('Invalid stream server URI [%s]' % (uri,))
protocol, value = uri
client_cls = self._registry.get(protocol)
if not client_cls:
raise ValueError('Unknown stream client protocol (%s)' % (protocol,))
return client_cls._create(value, **kwargs)
# Default (global) registry.
_default_registry = StreamProtocolRegistry()
create = _default_registry.create
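# Illustrative sketch (the paths and names below are hypothetical): "create"
# expects a "protocol:value" URI whose protocol matches one of the clients
# registered later in this module, e.g.:
#
#   client = create('unix:/var/run/butler.sock', prefix='some/prefix')
#   client = create('net.pipe:butler_pipe', prefix='some/prefix')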
class StreamClient(object):
"""Abstract base class for a streamserver client.
"""
class _StreamBase(object):
"""ABC for StreamClient streams."""
def __init__(self, stream_client, params):
self._stream_client = stream_client
self._params = params
@property
def params(self):
"""Returns (StreamParams): The stream parameters."""
return self._params
@property
def path(self):
"""Returns (streamname.StreamPath): The stream path.
Raises:
ValueError: if the stream path is invalid, or if the stream prefix is
not defined in the client.
"""
return self._stream_client.get_stream_path(self._params.name)
def get_viewer_url(self):
"""Returns (str): The viewer URL for this stream.
Raises:
KeyError: if information needed to construct the URL is missing.
ValueError: if the stream prefix or name do not form a valid stream
path.
"""
return self._stream_client.get_viewer_url(self._params.name)
class _BasicStream(_StreamBase):
"""Wraps a basic file descriptor, offering "write" and "close"."""
def __init__(self, stream_client, params, fd):
super(StreamClient._BasicStream, self).__init__(stream_client, params)
self._fd = fd
@property
def fd(self):
return self._fd
def fileno(self):
return self._fd.fileno()
def write(self, data):
return self._fd.write(data)
def close(self):
return self._fd.close()
class _DatagramStream(_StreamBase):
"""Wraps a stream object to write length-prefixed datagrams."""
def __init__(self, stream_client, params, fd):
super(StreamClient._DatagramStream, self).__init__(stream_client, params)
self._fd = fd
def send(self, data):
varint.write_uvarint(self._fd, len(data))
self._fd.write(data)
def close(self):
return self._fd.close()
def __init__(self, project=None, prefix=None, coordinator_host=None,
namespace=''):
"""Constructs a new base StreamClient instance.
Args:
project (str or None): If not None, the name of the log stream project.
prefix (str or None): If not None, the log stream session prefix.
coordinator_host (str or None): If not None, the name of the Coordinator
host that this stream client is bound to. This will be used to
construct viewer URLs for generated streams.
namespace (str): The prefix to apply to all streams opened by this client.
"""
self._project = project
self._prefix = prefix
self._coordinator_host = coordinator_host
self._namespace = namespace
self._name_lock = threading.Lock()
self._names = set()
@property
def project(self):
"""Returns (str or None): The stream project, or None if not configured."""
return self._project
@property
def prefix(self):
"""Returns (str or None): The stream prefix, or None if not configured."""
return self._prefix
@property
def coordinator_host(self):
"""Returns (str or None): The coordinator host, or None if not configured.
"""
return self._coordinator_host
def get_stream_path(self, name):
"""Returns (streamname.StreamPath): The stream path.
Args:
name (str): The name of the stream.
Raises:
KeyError: if information needed to construct the path is missing.
ValueError: if the stream path is invalid, or if the stream prefix is
not defined in the client.
"""
if not self._prefix:
raise KeyError('Stream prefix is not configured')
return streamname.StreamPath.make(self._prefix, name)
def get_viewer_url(self, name):
"""Returns (str): The LogDog viewer URL for the named stream.
Args:
name (str): The name of the stream. This can also be a query glob.
Raises:
KeyError: if information needed to construct the URL is missing.
ValueError: if the stream prefix or name do not form a valid stream
path.
"""
if not self._coordinator_host:
raise KeyError('Coordinator host is not configured')
if not self._project:
raise KeyError('Stream project is not configured')
return streamname.get_logdog_viewer_url(
self._coordinator_host,
self._project,
self.get_stream_path(name))
def _register_new_stream(self, name):
"""Registers a new stream name.
The Butler will internally reject any duplicate stream names. However, there
isn't really feedback when this happens except a closed stream client. This
is a client-side check to provide a more user-friendly experience in the
event that a user attempts to register a duplicate stream name.
Note that this is imperfect, as something else could register stream names
with the same Butler instance and this library has no means of tracking.
This is a best-effort experience, not a reliable check.
Args:
name (str): The name of the stream.
Raises:
ValueError if the stream name has already been registered.
"""
with self._name_lock:
if name in self._names:
raise ValueError("Duplicate stream name [%s]" % (name,))
self._names.add(name)
@classmethod
def _create(cls, value, **kwargs):
"""Returns (StreamClient): A new stream client instance.
Validates the streamserver parameters and creates a new StreamClient
instance that connects to them.
Implementing classes must override this.
"""
raise NotImplementedError()
def _connect_raw(self):
"""Returns (file): A new file-like stream.
Creates a new raw connection to the streamserver. This connection MUST not
have any data written to it past initialization (if needed) when it has been
returned.
The file-like object must implement `write` and `close`.
Implementing classes must override this.
"""
raise NotImplementedError()
def new_connection(self, params):
"""Returns (file): A new configured stream.
The returned object implements (minimally) `write` and `close`.
Creates a new LogDog stream with the specified parameters.
Args:
params (StreamParams): The parameters to use with the new connection.
Raises:
ValueError if the stream name has already been used, or if the parameters
are not valid.
"""
self._register_new_stream(params.name)
params_json = params.to_json()
fd = self._connect_raw()
fd.write(BUTLER_MAGIC)
varint.write_uvarint(fd, len(params_json))
fd.write(params_json)
return fd
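  # Illustrative sketch of the header written above: a new stream starts with
  # the Butler magic number, then the uvarint-encoded length of the JSON
  # parameter block, then the JSON itself; stream data follows.
  #
  #   BTLR1\x1e <uvarint len(params_json)> <params_json> <stream data ...>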
@contextlib.contextmanager
def text(self, name, **kwargs):
"""Context manager to create, use, and teardown a TEXT stream.
This context manager creates a new butler TEXT stream with the specified
parameters, yields it, and closes it on teardown.
Args:
name (str): the LogDog name of the stream.
kwargs (dict): Log stream parameters. These may be any keyword arguments
accepted by `open_text`.
Returns (file): A file-like object to a Butler UTF-8 text stream supporting
`write`.
"""
fd = None
try:
fd = self.open_text(name, **kwargs)
yield fd
finally:
if fd is not None:
fd.close()
def open_text(self, name, content_type=None, tags=None, tee=None,
binary_file_extension=None):
"""Returns (file): A file-like object for a single text stream.
This creates a new butler TEXT stream with the specified parameters.
Args:
name (str): the LogDog name of the stream.
content_type (str): The optional content type of the stream. If None, a
default content type will be chosen by the Butler.
tags (dict): An optional key/value dictionary pair of LogDog stream tags.
tee (str): Describes how stream data should be tee'd through the Butler.
One of StreamParams' TEE arguments.
binary_file_extension (str): A custom binary file extension. If not
provided, a default extension may be chosen or the binary stream may
not be emitted.
Returns (file): A file-like object to a Butler text stream. This object can
have UTF-8 text content written to it with its `write` method, and must
be closed when finished using its `close` method.
"""
params = StreamParams.make(
name=posixpath.join(self._namespace, name),
type=StreamParams.TEXT,
content_type=content_type,
tags=tags,
tee=tee,
binary_file_extension=binary_file_extension)
return self._BasicStream(self, params, self.new_connection(params))
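  # Illustrative usage sketch (the stream name and text are hypothetical):
  #
  #   with client.text('steps/compile/stdout') as s:
  #     s.write('hello from the compile step\n')
  #
  # The context manager above closes the underlying connection on exit.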
@contextlib.contextmanager
def binary(self, name, **kwargs):
"""Context manager to create, use, and teardown a BINARY stream.
This context manager creates a new butler BINARY stream with the specified
parameters, yields it, and closes it on teardown.
Args:
name (str): the LogDog name of the stream.
kwargs (dict): Log stream parameters. These may be any keyword arguments
accepted by `open_binary`.
Returns (file): A file-like object to a Butler binary stream supporting
`write`.
"""
fd = None
try:
fd = self.open_binary(name, **kwargs)
yield fd
finally:
if fd is not None:
fd.close()
def open_binary(self, name, content_type=None, tags=None, tee=None,
binary_file_extension=None):
"""Returns (file): A file-like object for a single binary stream.
This creates a new butler BINARY stream with the specified parameters.
Args:
name (str): the LogDog name of the stream.
content_type (str): The optional content type of the stream. If None, a
default content type will be chosen by the Butler.
tags (dict): An optional key/value dictionary pair of LogDog stream tags.
tee (str): Describes how stream data should be tee'd through the Butler.
One of StreamParams' TEE arguments.
binary_file_extension (str): A custom binary file extension. If not
provided, a default extension may be chosen or the binary stream may
not be emitted.
Returns (file): A file-like object to a Butler binary stream. This object
can have UTF-8 content written to it with its `write` method, and must
be closed when finished using its `close` method.
"""
params = StreamParams.make(
name=posixpath.join(self._namespace, name),
type=StreamParams.BINARY,
content_type=content_type,
tags=tags,
tee=tee,
binary_file_extension=binary_file_extension)
return self._BasicStream(self, params, self.new_connection(params))
@contextlib.contextmanager
def datagram(self, name, **kwargs):
"""Context manager to create, use, and teardown a DATAGRAM stream.
    This context manager creates a new butler DATAGRAM stream with the
specified parameters, yields it, and closes it on teardown.
Args:
name (str): the LogDog name of the stream.
kwargs (dict): Log stream parameters. These may be any keyword arguments
accepted by `open_datagram`.
Returns (_DatagramStream): A datagram stream object. Datagrams can be
written to it using its `send` method.
"""
fd = None
try:
fd = self.open_datagram(name, **kwargs)
yield fd
finally:
if fd is not None:
fd.close()
def open_datagram(self, name, content_type=None, tags=None, tee=None,
binary_file_extension=None):
"""Creates a new butler DATAGRAM stream with the specified parameters.
Args:
name (str): the LogDog name of the stream.
content_type (str): The optional content type of the stream. If None, a
default content type will be chosen by the Butler.
tags (dict): An optional key/value dictionary pair of LogDog stream tags.
tee (str): Describes how stream data should be tee'd through the Butler.
One of StreamParams' TEE arguments.
binary_file_extension (str): A custom binary file extension. If not
provided, a default extension may be chosen or the binary stream may
not be emitted.
Returns (_DatagramStream): A datagram stream object. Datagrams can be
written to it using its `send` method. This object must be closed when
finished by using its `close` method.
"""
params = StreamParams.make(
name=posixpath.join(self._namespace, name),
type=StreamParams.DATAGRAM,
content_type=content_type,
tags=tags,
tee=tee,
binary_file_extension=binary_file_extension)
return self._DatagramStream(self, params, self.new_connection(params))
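# Illustrative usage sketch (names are hypothetical): datagram streams are
# written with "send" rather than "write", one datagram per call:
#
#   with client.datagram('events') as dg:
#     dg.send(serialized_event_bytes)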
class _NamedPipeStreamClient(StreamClient):
"""A StreamClient implementation that connects to a Windows named pipe.
"""
def __init__(self, name, **kwargs):
r"""Initializes a new Windows named pipe stream client.
Args:
name (str): The name of the Windows named pipe to use (e.g., "\\.\name")
"""
super(_NamedPipeStreamClient, self).__init__(**kwargs)
self._name = name
@classmethod
def _create(cls, value, **kwargs):
return cls(value, **kwargs)
def _connect_raw(self):
return open(self._name, 'wb')
_default_registry.register_protocol('net.pipe', _NamedPipeStreamClient)
class _UnixDomainSocketStreamClient(StreamClient):
"""A StreamClient implementation that uses a UNIX domain socket.
"""
class SocketFile(object):
"""A write-only file-like object that writes to a UNIX socket."""
def __init__(self, fd):
self._fd = fd
def fileno(self):
return self._fd
def write(self, data):
self._fd.send(data)
def close(self):
self._fd.close()
def __init__(self, path, **kwargs):
"""Initializes a new UNIX domain socket stream client.
Args:
path (str): The path to the named UNIX domain socket.
"""
super(_UnixDomainSocketStreamClient, self).__init__(**kwargs)
self._path = path
@classmethod
def _create(cls, value, **kwargs):
if not os.path.exists(value):
raise ValueError('UNIX domain socket [%s] does not exist.' % (value,))
return cls(value, **kwargs)
def _connect_raw(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self._path)
return self.SocketFile(sock)
_default_registry.register_protocol('unix', _UnixDomainSocketStreamClient)
|
{
"content_hash": "dd21d39038faf531565c4b60e09b45d2",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 80,
"avg_line_length": 32.15371329879102,
"alnum_prop": 0.6653596175538486,
"repo_name": "endlessm/chromium-browser",
"id": "89bbe77f09ab427fba5d028f4546bfe1a909fbf2",
"size": "18791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/swarming_client/libs/logdog/stream.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list, Experiment
from ray.tune.analysis import ExperimentAnalysis
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, DEBUG_PRINT_INTERVAL
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.syncer import wait_for_sync
from ray.tune.trial_runner import TrialRunner
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
FIFOScheduler, MedianStoppingRule)
from ray.tune.web_server import TuneServer
logger = logging.getLogger(__name__)
_SCHEDULERS = {
"FIFO": FIFOScheduler,
"MedianStopping": MedianStoppingRule,
"HyperBand": HyperBandScheduler,
"AsyncHyperBand": AsyncHyperBandScheduler,
}
def _make_scheduler(args):
if args.scheduler in _SCHEDULERS:
return _SCHEDULERS[args.scheduler](**args.scheduler_config)
else:
raise TuneError("Unknown scheduler: {}, should be one of {}".format(
args.scheduler, _SCHEDULERS.keys()))
def run(run_or_experiment,
name=None,
stop=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_to_cloud=None,
sync_to_driver=None,
checkpoint_freq=0,
checkpoint_at_end=False,
keep_checkpoints_num=None,
checkpoint_score_attr=None,
global_checkpoint_period=10,
export_formats=None,
max_failures=3,
restore=None,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True,
return_trials=False,
ray_auto_init=True,
sync_function=None):
"""Executes training.
Args:
run_or_experiment (function|class|str|Experiment): If
function|class|str, this is the algorithm or model to train.
            This may refer to the name of a built-in algorithm
(e.g. RLLib's DQN or PPO), a user-defined trainable
function or class, or the string identifier of a
trainable function or class registered in the tune registry.
If Experiment, then Tune will execute training based on
Experiment.spec.
name (str): Name of experiment.
stop (dict|func): The stopping criteria. If dict, the keys may be
any field in the return result of 'train()', whichever is
reached first. If function, it must take (trial_id, result) as
arguments and return a boolean (True if trial should be stopped,
False otherwise).
config (dict): Algorithm-specific configuration for Tune variant
generation (e.g. env, hyperparams). Defaults to empty dict.
Custom search algorithms may ignore this.
resources_per_trial (dict): Machine resources to allocate per trial,
e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
assigned unless you specify them here. Defaults to 1 CPU and 0
GPUs in ``Trainable.default_resource_request()``.
num_samples (int): Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
            `num_samples` times.
local_dir (str): Local dir to save training results to.
Defaults to ``~/ray_results``.
upload_dir (str): Optional URI to sync training results
to (e.g. ``s3://bucket``).
trial_name_creator (func): Optional function for generating
the trial string representation.
loggers (list): List of logger creators to be used with
each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
See `ray/tune/logger.py`.
sync_to_cloud (func|str): Function for syncing the local_dir to and
from upload_dir. If string, then it must be a string template
that includes `{source}` and `{target}` for the syncer to run.
If not provided, the sync command defaults to standard
            S3 or gsutil sync commands.
sync_to_driver (func|str): Function for syncing trial logdir from
remote node to local. If string, then it must be a string template
that includes `{source}` and `{target}` for the syncer to run.
If not provided, defaults to using rsync.
checkpoint_freq (int): How many training iterations between
checkpoints. A value of 0 (default) disables checkpointing.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
keep_checkpoints_num (int): Number of checkpoints to keep. A value of
`None` keeps all checkpoints. Defaults to `None`. If set, need
to provide `checkpoint_score_attr`.
checkpoint_score_attr (str): Specifies by which attribute to rank the
best checkpoint. Default is increasing order. If attribute starts
with `min-` it will rank attribute in decreasing order, i.e.
`min-validation_loss`.
global_checkpoint_period (int): Seconds between global checkpointing.
This does not affect `checkpoint_freq`, which specifies frequency
for individual trials.
        export_formats (list): List of formats to export at the end of
the experiment. Default is None.
max_failures (int): Try to recover a trial from its last
checkpoint at least this many times. Only applies if
checkpointing is enabled. Setting to -1 will lead to infinite
recovery retries. Defaults to 3.
restore (str): Path to checkpoint. Only makes sense to set if
running 1 trial. Defaults to None.
search_alg (SearchAlgorithm): Search Algorithm. Defaults to
BasicVariantGenerator.
scheduler (TrialScheduler): Scheduler for executing
the experiment. Choose among FIFO (default), MedianStopping,
AsyncHyperBand, and HyperBand.
with_server (bool): Starts a background Tune server. Needed for
using the Client API.
server_port (int): Port number for launching TuneServer.
verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
1 = only status updates, 2 = status and trial results.
resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", or bool.
LOCAL/True restores the checkpoint from the local_checkpoint_dir.
REMOTE restores the checkpoint from remote_checkpoint_dir.
PROMPT provides CLI feedback. False forces a new
experiment. If resume is set but checkpoint does not exist,
ValueError will be thrown.
queue_trials (bool): Whether to queue trials when the cluster does
not currently have enough resources to launch one. This should
be set to True when running on an autoscaling cluster to enable
automatic scale-up.
reuse_actors (bool): Whether to reuse actors between different trials
when possible. This can drastically speed up experiments that start
and stop actors often (e.g., PBT in time-multiplexing mode). This
requires trials to have the same resource requirements.
trial_executor (TrialExecutor): Manage the execution of trials.
raise_on_failed_trial (bool): Raise TuneError if there exists failed
trial (of ERROR state) when the experiments complete.
ray_auto_init (bool): Automatically starts a local Ray cluster
if using a RayTrialExecutor (which is the default) and
if Ray is not initialized. Defaults to True.
sync_function: Deprecated. See `sync_to_cloud` and
`sync_to_driver`.
Returns:
List of Trial objects.
Raises:
TuneError if any trials failed and `raise_on_failed_trial` is True.
Examples:
>>> tune.run(mytrainable, scheduler=PopulationBasedTraining())
>>> tune.run(mytrainable, num_samples=5, reuse_actors=True)
>>> tune.run(
"PG",
num_samples=5,
config={
"env": "CartPole-v0",
"lr": tune.sample_from(lambda _: np.random.rand())
}
)
"""
trial_executor = trial_executor or RayTrialExecutor(
queue_trials=queue_trials,
reuse_actors=reuse_actors,
ray_auto_init=ray_auto_init)
experiment = run_or_experiment
if not isinstance(run_or_experiment, Experiment):
run_identifier = Experiment._register_if_needed(run_or_experiment)
experiment = Experiment(
name=name,
run=run_identifier,
stop=stop,
config=config,
resources_per_trial=resources_per_trial,
num_samples=num_samples,
local_dir=local_dir,
upload_dir=upload_dir,
sync_to_driver=sync_to_driver,
trial_name_creator=trial_name_creator,
loggers=loggers,
checkpoint_freq=checkpoint_freq,
checkpoint_at_end=checkpoint_at_end,
keep_checkpoints_num=keep_checkpoints_num,
checkpoint_score_attr=checkpoint_score_attr,
export_formats=export_formats,
max_failures=max_failures,
restore=restore,
sync_function=sync_function)
else:
logger.debug("Ignoring some parameters passed into tune.run.")
if sync_to_cloud:
assert experiment.remote_checkpoint_dir, (
"Need `upload_dir` if `sync_to_cloud` given.")
runner = TrialRunner(
search_alg=search_alg or BasicVariantGenerator(),
scheduler=scheduler or FIFOScheduler(),
local_checkpoint_dir=experiment.checkpoint_dir,
remote_checkpoint_dir=experiment.remote_checkpoint_dir,
sync_to_cloud=sync_to_cloud,
checkpoint_period=global_checkpoint_period,
resume=resume,
launch_web_server=with_server,
server_port=server_port,
verbose=bool(verbose > 1),
trial_executor=trial_executor)
runner.add_experiment(experiment)
if verbose:
print(runner.debug_string(max_debug=99999))
last_debug = 0
while not runner.is_finished():
runner.step()
if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
if verbose:
print(runner.debug_string())
last_debug = time.time()
try:
runner.checkpoint(force=True)
except Exception:
logger.exception("Trial Runner checkpointing failed.")
if verbose:
print(runner.debug_string(max_debug=99999))
wait_for_sync()
errored_trials = []
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
errored_trials += [trial]
if errored_trials:
if raise_on_failed_trial:
raise TuneError("Trials did not complete", errored_trials)
else:
logger.error("Trials did not complete: %s", errored_trials)
trials = runner.get_trials()
if return_trials:
return trials
logger.info("Returning an analysis object by default. You can call "
"`analysis.trials` to retrieve a list of trials. "
"This message will be removed in future versions of Tune.")
return ExperimentAnalysis(runner.checkpoint_file, trials=trials)
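# Illustrative sketch (the trainable and values are hypothetical): "stop" may
# be a dict of result fields or a callable taking (trial_id, result):
#
#   tune.run(my_trainable,
#            stop={"training_iteration": 100},
#            resources_per_trial={"cpu": 2, "gpu": 0},
#            num_samples=10)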
def run_experiments(experiments,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True):
"""Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
>>> run_experiments(
>>> experiments=experiment_spec,
>>> scheduler=MedianStoppingRule(...))
>>> run_experiments(
>>> experiments=experiment_spec,
>>> search_alg=SearchAlgorithm(),
>>> scheduler=MedianStoppingRule(...))
Returns:
List of Trial objects, holding data for each executed trial.
"""
    # It is important to do this here because it schematizes the experiments
    # and conducts the implicit registration.
experiments = convert_to_experiment_list(experiments)
trials = []
for exp in experiments:
trials += run(
exp,
search_alg=search_alg,
scheduler=scheduler,
with_server=with_server,
server_port=server_port,
verbose=verbose,
resume=resume,
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
return_trials=True)
return trials
|
{
"content_hash": "72f1e3c06ee9469fd42b6639bf066ae5",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 79,
"avg_line_length": 41.298507462686565,
"alnum_prop": 0.6252981568485725,
"repo_name": "ujvl/ray-ng",
"id": "73747c449da79a665dbca7bf1595350f3dbc0a08",
"size": "13835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tune/tune.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class MyblogConfig(AppConfig):
name = 'myblog'
|
{
"content_hash": "33031554fb7df867314e5223ddc0157e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.4,
"alnum_prop": 0.7471264367816092,
"repo_name": "robalford/reConstruct",
"id": "5e29c8d94efb8d09fa361bbfd37b8672a7e3a7e4",
"size": "87",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "myblog/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7751"
},
{
"name": "CSS",
"bytes": "24416"
},
{
"name": "HTML",
"bytes": "27813"
},
{
"name": "JavaScript",
"bytes": "88031"
},
{
"name": "Makefile",
"bytes": "8093"
},
{
"name": "Python",
"bytes": "162582"
}
],
"symlink_target": ""
}
|
"""Unit test for cgroups module.
"""
import builtins
import io
import os
import shutil
import tempfile
import unittest
import mock
import treadmill
from treadmill import cgroups
from treadmill import cgutils
PROCCGROUPS = """#subsys_name hierarchy num_cgroups enabled
cpuset 4 1 0
ns 10 3 0
cpu 2 3 1
cpuacct 3 3 1
memory 7 3 1
devices 5 1 0
freezer 6 1 0
net_cls 8 1 0
blkio 1 1 0
perf_event 11 1 0
net_prio 9 1 0"""
class CGroupsTest(unittest.TestCase):
"""Tests for teadmill.cgroups."""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.cgroups.get_data',
mock.Mock(side_effect=['2', '1\n2', '-1', '']))
def test_get_value(self):
"""Test cgroup value fetching"""
value = cgroups.get_value('memory', 'foo', 'memory,usage_in_bytes')
self.assertEqual(value, 2)
value = cgroups.get_value('memory', 'foo', 'memory,usage_in_bytes')
self.assertEqual(value, 1)
value = cgroups.get_value('memory', 'foo', 'memory,usage_in_bytes')
self.assertEqual(value, 0)
value = cgroups.get_value('memory', 'foo', 'memory,usage_in_bytes')
self.assertEqual(value, 0)
@mock.patch('treadmill.cgroups.get_mountpoint',
mock.Mock(return_value='/cgroups'))
@mock.patch('os.makedirs', mock.Mock())
def test_create(self):
"""Tests cgroup creation."""
group = os.path.join('treadmill', 'apps', 'test1')
cgroups.create('cpu', group)
cgroups.create('memory', group)
cgroups.create('cpuacct', group)
os.makedirs.assert_has_calls(
[mock.call('/cgroups/treadmill/apps/test1'),
mock.call('/cgroups/treadmill/apps/test1'),
mock.call('/cgroups/treadmill/apps/test1')])
@mock.patch('treadmill.cgroups.get_mountpoint', mock.Mock())
def test_extractpath(self):
""" test cgroup name from a cgroup path"""
treadmill.cgroups.get_mountpoint.return_value = '/fs/cgroup/memory'
cgrp = cgroups.extractpath('/fs/cgroup/memory/treadmill/core',
'memory')
self.assertEqual(cgrp, 'treadmill/core')
cgrp = cgroups.extractpath('/fs/cgroup/memory/treadmill/core/foo',
'memory', 'foo')
self.assertEqual(cgrp, 'treadmill/core')
with self.assertRaises(ValueError):
cgroups.extractpath('/cgroup/memory/treadmill/core', 'memory')
with self.assertRaises(ValueError):
cgroups.extractpath('/fs/cgroup/memory/treadmill/core/foo',
'cpu', 'bar')
@mock.patch('treadmill.cgroups.get_mountpoint', mock.Mock())
@mock.patch('os.rmdir', mock.Mock())
def test_delete(self):
"""Tests cgroup deletion."""
cgroups_dir = os.path.join(self.root, 'cgroups')
treadmill.cgroups.get_mountpoint.return_value = cgroups_dir
group = os.path.join('treadmill', 'apps', 'test1')
# Create a directory for the cgroup
os.makedirs(os.path.join(cgroups_dir, group))
cgroups.delete('cpu', group)
os.rmdir.assert_called_once_with(
os.path.join(cgroups_dir, group)
)
@mock.patch('treadmill.cgroups.get_mountpoint',
mock.Mock(return_value='/cgroups'))
@mock.patch('builtins.open', mock.mock_open())
def test_join(self):
"""Tests joining the cgroup."""
group = os.path.join('treadmill', 'apps', 'test1')
cgroups.join('cpu', group, '1234')
builtins.open.assert_called_once_with(
'/cgroups/treadmill/apps/test1/tasks', 'w+')
builtins.open().write.assert_called_once_with('1234')
@mock.patch('treadmill.cgroups.mounted_subsystems',
mock.Mock(return_value={'cpu': '/cgroup/cpu'}))
@mock.patch('treadmill.cgroups.mount', mock.Mock())
def test_ensure_mounted_missing(self):
"""Checks that missing subsystem is mounted."""
cgroups.ensure_mounted(['cpu', 'memory'])
treadmill.cgroups.mount.assert_called_with('memory')
@mock.patch('builtins.open',
mock.Mock(return_value=io.StringIO(PROCCGROUPS)))
def test_available_subsystems(self):
"""Test functions """
subsystems = cgroups.available_subsystems()
self.assertEqual(['cpu', 'cpuacct', 'memory'], subsystems)
@mock.patch('treadmill.cgroups.create', mock.Mock())
@mock.patch('treadmill.cgroups.set_value', mock.Mock())
@mock.patch('treadmill.cgroups.get_data',
mock.Mock(side_effect=['0', '0', '', '1024', '512']))
@mock.patch('treadmill.sysinfo.cpu_count',
mock.Mock(return_value=4))
def test_create_treadmill_cgroups(self):
"""Test the creation of core treadmill cgroups"""
system_cpu_shares = 50
treadmill_cpu_shares = 50
treadmill_core_cpu_shares = 10
treadmill_apps_cpu_shares = 90
treadmill_cpu_cores = 0
treadmill_mem = 1024
treadmill_core_mem = 512
treadmill_apps_mem = treadmill_mem - treadmill_core_mem
cgutils.create_treadmill_cgroups(system_cpu_shares,
treadmill_cpu_shares,
treadmill_core_cpu_shares,
treadmill_apps_cpu_shares,
treadmill_cpu_cores,
treadmill_mem,
treadmill_core_mem)
calls = [mock.call('cpu', 'system'),
mock.call('cpu', 'treadmill'),
mock.call('cpu', 'treadmill/core'),
mock.call('cpu', 'treadmill/apps'),
mock.call('cpuacct', 'system'),
mock.call('cpuacct', 'treadmill'),
mock.call('cpuacct', 'treadmill/core'),
mock.call('cpuacct', 'treadmill/apps'),
mock.call('cpuset', 'system'),
mock.call('cpuset', 'treadmill'),
mock.call('memory', 'system'),
mock.call('memory', 'treadmill'),
mock.call('memory', 'treadmill/core'),
mock.call('memory', 'treadmill/apps')]
treadmill.cgroups.create.assert_has_calls(calls)
calls = [mock.call('cpu', 'treadmill',
'cpu.shares', treadmill_cpu_shares),
mock.call('cpu', 'system',
'cpu.shares', system_cpu_shares),
mock.call('cpu', 'treadmill/core',
'cpu.shares', treadmill_core_cpu_shares),
mock.call('cpu', 'treadmill/apps',
'cpu.shares', treadmill_apps_cpu_shares),
mock.call('cpuset', 'system',
'cpuset.mems', 0),
mock.call('cpuset', 'treadmill',
'cpuset.mems', 0),
mock.call('cpuset', 'treadmill',
'cpuset.cpus', '0-3'),
mock.call('cpuset', 'system',
'cpuset.cpus', '0-3'),
mock.call('memory', 'system',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill',
'memory.use_hierarchy', '1'),
mock.call('memory', 'treadmill',
'memory.limit_in_bytes', treadmill_mem),
mock.call('memory', 'treadmill',
'memory.memsw.limit_in_bytes', treadmill_mem),
mock.call('memory', 'treadmill',
'memory.oom_control', '0'),
mock.call('memory', 'treadmill/core',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill/apps',
'memory.move_charge_at_immigrate', 1),
mock.call('memory', 'treadmill/core',
'memory.limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/core',
'memory.memsw.limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/core',
'memory.soft_limit_in_bytes', treadmill_core_mem),
mock.call('memory', 'treadmill/apps',
'memory.limit_in_bytes', treadmill_apps_mem),
mock.call('memory', 'treadmill/apps',
'memory.memsw.limit_in_bytes', treadmill_apps_mem)]
treadmill.cgroups.set_value.assert_has_calls(calls)
# @mock.patch('os.kill', mock.Mock())
# def test_kill_apps_in_cgroup(self):
# """Make sure we kill all the stale apps."""
# os.mkdir(os.path.join(self.root, 'a/b/c'))
# os.mkdir(os.path.join(self.root, 'a/b/c/XXX'))
# with open(os.path.join(self.root, 'a/b/c/tasks'), 'w+') as f:
# f.write('123\n231\n')
#
# cgutils.kill_apps_in_cgroup(self.root, 'a/b/c', delete_cgrp=True)
# os.kill.assert_has_calls([mock.call(123, signal.SIGKILL),
# mock.call(321, signal.SIGKILL)])
# self.assertFalse(os.path.exists(os.path.join(self.root, 'a/b/c')))
@mock.patch('treadmill.cgroups.set_value',
mock.Mock())
@mock.patch('treadmill.cgroups.get_value',
mock.Mock(return_value=512))
@mock.patch('treadmill.cgroups.makepath',
mock.Mock(return_value='/cgroup/memory/treadmill/apps'))
@mock.patch('treadmill.cgutils.total_soft_memory_limits',
mock.Mock(return_value=1024))
@mock.patch('os.listdir',
mock.Mock(return_value=['a', 'b']))
@mock.patch('os.path.isdir',
mock.Mock(return_value=True))
def test_reset_mem_limit_in_bytes(self):
"""Make sure we are setting hardlimits right"""
cgutils.reset_memory_limit_in_bytes()
mock_calls = [mock.call('memory',
'treadmill/apps',
'memory.limit_in_bytes'),
mock.call('memory',
'treadmill/apps/a',
'memory.soft_limit_in_bytes'),
mock.call('memory',
'treadmill/apps/b',
'memory.soft_limit_in_bytes')]
cgroups.get_value.assert_has_calls(mock_calls)
mock_calls = [mock.call('memory',
'treadmill/apps/a',
'memory.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/a',
'memory.memsw.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/b',
'memory.limit_in_bytes',
512),
mock.call('memory',
'treadmill/apps/b',
'memory.memsw.limit_in_bytes',
512)]
cgroups.set_value.assert_has_calls(mock_calls)
@mock.patch('treadmill.cgutils.set_memory_hardlimit', mock.Mock())
@mock.patch('treadmill.cgroups.get_value',
mock.Mock(return_value=512))
@mock.patch('treadmill.cgroups.makepath',
mock.Mock(return_value='/cgroup/memory/treadmill/apps'))
@mock.patch('treadmill.cgutils.total_soft_memory_limits',
mock.Mock(return_value=1024))
@mock.patch('os.listdir',
mock.Mock(return_value=['a']))
@mock.patch('os.path.isdir',
mock.Mock(return_value=True))
def test_reset_mem_limit_kill(self):
"""Make sure we kill groups when we cannot lower their hardlimits."""
treadmill.cgutils.set_memory_hardlimit.side_effect = \
cgutils.TreadmillCgroupError('test')
res = cgutils.reset_memory_limit_in_bytes()
self.assertEqual(res, ['a'])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "410caa0f1474e95932f2f3f570564226",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 78,
"avg_line_length": 42.851851851851855,
"alnum_prop": 0.5202325764123517,
"repo_name": "keithhendry/treadmill",
"id": "5d0901fd8cbadbee8dab3595767d74d039e46bde",
"size": "12727",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/cgroups_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "2205483"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "48410"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016, John Deutscher
Description: Sample Python script for Azure Media Indexer V1
License: MIT (see LICENSE.txt file for details)
Documentation : https://azure.microsoft.com/en-us/documentation/articles/media-services-index-content/
"""
import os
import json
import azurerm
import time
import sys
#import pytz
import urllib
import logging
import datetime
from azure import *
from azure.storage.blob import BlockBlobService
from azure.storage.blob import ContentSettings
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTORY (INCLUDING THIS FILE) IS EXAMPLE CODE THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('../../config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print_phase_message("ERROR: Expecting config.json in examples folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
sto_account_name = configData['sto_accountName']
sto_accountKey = configData['sto_accountKey']
log_name = configData['logName']
log_level = configData['logLevel']
purge_log = configData['purgeLog']
#Initialization...
print ("\n-----------------------= AMS Py =----------------------")
print ("Azure Media Analytics - Indexer v1 Sample")
print ("-------------------------------------------------------\n")
#Remove old log file if requested (default behavior)...
if (os.path.isdir('./log') != True):
os.mkdir('log')
if (purge_log.lower() == "yes"):
if (os.path.isfile(log_name)):
os.remove(log_name)
#Basic Logging...
logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=log_level, filename=log_name)
# Get the access token...
response = azurerm.get_ams_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Some global vars...
NAME = "movie"
COUNTER = 0;
ENCRYPTION = "1" # 0=None, StorageEncrypted=1, CommonEncryptionProtected=2, EnvelopeEncryptionProtected=4
ENCRYPTION_SCHEME = "StorageEncryption" # StorageEncryption or CommonEncryption.
VIDEO_NAME = "movie.mp4"
VIDEO_PATH = "../assets/movie.mp4"
ASSET_FINAL_NAME = "Python Sample-Indexer"
PROCESSOR_NAME = "Azure Media Indexer"
INDEXER_V1_XML_PRESET = "indexerv1.xml"
# Just a simple wrapper function to print the title of each of our phases to the console...
def print_phase_header(message):
global COUNTER;
print ("\n[" + str("%02d" % int(COUNTER)) + "] >>> " + message)
COUNTER += 1;
# This wrapper function prints our messages to the console with a timestamp...
def print_phase_message(message):
time_stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print (str(time_stamp) + ": " + message)
### get ams redirected url
response = azurerm.get_url(access_token)
if (response.status_code == 200):
ams_redirected_rest_endpoint = str(response.url)
else:
print_phase_message("GET Status: " + str(response.status_code) + " - Getting Redirected URL ERROR." + str(response.content))
exit(1)
######################### PHASE 1: UPLOAD #########################
### create an asset
print_phase_header("Creating a Media Asset")
response = azurerm.create_media_asset(access_token, NAME)
if (response.status_code == 201):
resjson = response.json()
asset_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Asset Name........................: " + NAME)
print_phase_message("Media Asset Id..........................: " + asset_id)
else:
print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Asset: '" + NAME + "' Creation ERROR." + str(response.content))
### create an assetfile
print_phase_header("Creating a Media Assetfile (for the video file)")
response = azurerm.create_media_assetfile(access_token, asset_id, VIDEO_NAME, "false", "false")
if (response.status_code == 201):
resjson = response.json()
video_assetfile_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Assetfile Name....................: " + str(resjson['d']['Name']))
print_phase_message("Media Assetfile Id......................: " + video_assetfile_id)
print_phase_message("Media Assetfile IsPrimary...............: " + str(resjson['d']['IsPrimary']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Media Assetfile: '" + VIDEO_NAME + "' Creation ERROR." + str(response.content))
### create an asset write access policy for uploading
print_phase_header("Creating an Asset Write Access Policy")
duration = "440"
response = azurerm.create_asset_accesspolicy(access_token, "NewUploadPolicy", duration, "2")
if (response.status_code == 201):
resjson = response.json()
write_accesspolicy_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Asset Access Policy Id..................: " + write_accesspolicy_id)
print_phase_message("Asset Access Policy Duration/min........: " + str(resjson['d']['DurationInMinutes']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Asset Write Access Policy Creation ERROR." + str(response.content))
### create a sas locator
print_phase_header("Creating a write SAS Locator")
## INFO: If you need to upload your files immediately, you should set your StartTime value to five minutes before the current time.
#This is because there may be clock skew between your client machine and Media Services.
#Also, your StartTime value must be in the following DateTime format: YYYY-MM-DDTHH:mm:ssZ (for example, "2014-05-23T17:53:50Z").
# EDITED: Not providing starttime is the best approach to be able to upload a file immediatly...
#starttime = datetime.datetime.now(pytz.timezone(time_zone)).strftime("%Y-%m-%dT%H:%M:%SZ")
#response = azurerm.create_sas_locator(access_token, asset_id, write_accesspolicy_id, starttime)
response = azurerm.create_sas_locator(access_token, asset_id, write_accesspolicy_id)
if (response.status_code == 201):
resjson = response.json()
saslocator_id = str(resjson['d']['Id'])
saslocator_baseuri = str(resjson['d']['BaseUri'])
sto_asset_name = os.path.basename(os.path.normpath(saslocator_baseuri))
saslocator_cac = str(resjson['d']['ContentAccessComponent'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("SAS URL Locator StartTime...............: " + str(resjson['d']['StartTime']))
print_phase_message("SAS URL Locator Id......................: " + saslocator_id)
print_phase_message("SAS URL Locator Base URI................: " + saslocator_baseuri)
print_phase_message("SAS URL Locator Content Access Component: " + saslocator_cac)
else:
print_phase_message("POST Status: " + str(response.status_code) + " - SAS URL Locator Creation ERROR." + str(response.content))
### Use the Block Blob Service from the Azure Storage SDK.
block_blob_service = BlockBlobService(account_name=sto_account_name, sas_token=saslocator_cac[1:])
### Define a callback method to show progress of large uploads
def uploadCallback(current, total):
if (current != None):
        print_phase_message('{0:,.0f}/{1:,.0f} MB'.format(current/1024/1024, total/1024/1024))
### Start uploading the video file
print_phase_header("Uploading the Video File")
with open(VIDEO_PATH, mode='rb') as file:
video_content = file.read()
video_content_length = len(video_content)
response = block_blob_service.create_blob_from_path(
sto_asset_name,
VIDEO_NAME,
VIDEO_PATH,
max_connections=5,
content_settings=ContentSettings(content_type='video/mp4'),
progress_callback=uploadCallback,
)
if (response == None):
print_phase_message("PUT Status..............................: 201")
print_phase_message("Video File Uploaded.....................: OK")
### update the assetfile metadata after uploading
print_phase_header("Updating the Video Assetfile")
response = azurerm.update_media_assetfile(access_token, asset_id, video_assetfile_id, video_content_length, VIDEO_NAME)
if (response.status_code == 204):
print_phase_message("MERGE Status............................: " + str(response.status_code))
print_phase_message("Assetfile Content Length Updated........: " + str(video_content_length))
else:
print_phase_message("MERGE Status............................: " + str(response.status_code) + " - Assetfile: '" + VIDEO_NAME + "' Update ERROR." + str(response.content))
### delete the locator, so that it can't be used again
print_phase_header("Deleting the Locator")
response = azurerm.delete_sas_locator(access_token, saslocator_id)
if (response.status_code == 204):
print_phase_message("DELETE Status...........................: " + str(response.status_code))
print_phase_message("SAS URL Locator Deleted.................: " + saslocator_id)
else:
print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - SAS URL Locator: '" + saslocator_id + "' Delete ERROR." + str(response.content))
### delete the asset access policy
print_phase_header("Deleting the Acess Policy")
response = azurerm.delete_asset_accesspolicy(access_token, write_accesspolicy_id)
if (response.status_code == 204):
print_phase_message("DELETE Status...........................: " + str(response.status_code))
print_phase_message("Asset Access Policy Deleted.............: " + write_accesspolicy_id)
else:
print_phase_message("DELETE Status...........................: " + str(response.status_code) + " - Asset Access Policy: '" + write_accesspolicy_id + "' Delete ERROR." + str(response.content))
### get the media processor for Indexer v1
print_phase_header("Getting the Media Processor for Indexer")
response = azurerm.list_media_processor(access_token)
if (response.status_code == 200):
resjson = response.json()
print_phase_message("GET Status..............................: " + str(response.status_code))
for mp in resjson['d']['results']:
if(str(mp['Name']) == PROCESSOR_NAME):
processor_id = str(mp['Id'])
print_phase_message("MEDIA Processor Id......................: " + processor_id)
print_phase_message("MEDIA Processor Name....................: " + PROCESSOR_NAME)
else:
print_phase_message("GET Status: " + str(response.status_code) + " - Media Processors Listing ERROR." + str(response.content))
## create an Indexer v1 job
print_phase_header("Creating a Media Job to index the content")
with open(INDEXER_V1_XML_PRESET, mode='r') as file:
indexer_preset = file.read()
response = azurerm.encode_mezzanine_asset(access_token, processor_id, asset_id, ASSET_FINAL_NAME, indexer_preset)
if (response.status_code == 201):
resjson = response.json()
job_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Job Id............................: " + job_id)
else:
print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Job Creation ERROR." + str(response.content))
### list a media job
print_phase_header("Getting the Media Job Status")
flag = 1
while (flag):
response = azurerm.list_media_job(access_token, job_id)
if (response.status_code == 200):
resjson = response.json()
job_state = str(resjson['d']['State'])
if (resjson['d']['EndTime'] != None):
joboutputassets_uri = resjson['d']['OutputMediaAssets']['__deferred']['uri']
flag = 0
print_phase_message("GET Status..............................: " + str(response.status_code))
print_phase_message("Media Job Status........................: " + azurerm.translate_job_state(job_state))
else:
print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job: '" + asset_id + "' Listing ERROR." + str(response.content))
time.sleep(5)
## getting the indexed asset id
print_phase_header("Getting the Indexed Media Asset Id")
response = azurerm.get_url(access_token, joboutputassets_uri, False)
if (response.status_code == 200):
resjson = response.json()
output_asset_id = resjson['d']['results'][0]['Id']
print_phase_message("GET Status..............................: " + str(response.status_code))
print_phase_message("Indexed output Media Asset Id..................: " + output_asset_id)
else:
print_phase_message("GET Status..............................: " + str(response.status_code) + " - Media Job Output Asset: '" + job_id + "' Getting ERROR." + str(response.content))
# Get Asset by using the list_media_asset method and the Asset ID
response = azurerm.list_media_asset(access_token,output_asset_id)
if (response.status_code == 200):
resjson = response.json()
# Get the container name from the Uri
outputAssetContainer = resjson['d']['Uri'].split('/')[3]
print(outputAssetContainer)
else:
print("Not a 200: " + str(response.status_code))
exit(-1)
### Use the Block Blob Service from the Azure Storage SDK to download just the output WebVTT file
block_blob_service = BlockBlobService(account_name=sto_account_name,account_key=sto_accountKey)
generator = block_blob_service.list_blobs(outputAssetContainer)
for blob in generator:
print_phase_message("Output File Name........................: " + blob.name)
if(blob.name.endswith(".vtt")):
blobText = block_blob_service.get_blob_to_text(outputAssetContainer, blob.name)
print_phase_message("\n\n##### WEB VTT ######")
print(blobText.content.encode('utf-8'))
block_blob_service.get_blob_to_path(outputAssetContainer, blob.name, "output/" + blob.name)
|
{
"content_hash": "e22203b475f4ae298b8c2a4d27134911",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 192,
"avg_line_length": 50.430034129692835,
"alnum_prop": 0.6389415268002165,
"repo_name": "gbowerman/azurerm",
"id": "6d7ad92fd630ed5c3c569f0bf4712c8697e57838",
"size": "14776",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/media_services/analytics/indexer_v1/indexer_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "Python",
"bytes": "235742"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
ephemeral_storage_encryption_group = cfg.OptGroup(
name='ephemeral_storage_encryption',
title='Ephemeral storage encryption options')
ephemeral_storage_encryption_opts = [
cfg.BoolOpt('enabled',
default=False,
help="""
Enables/disables LVM ephemeral storage encryption.
"""),
cfg.StrOpt('cipher',
default='aes-xts-plain64',
help="""
Cipher-mode string to be used
The cipher and mode to be used to encrypt ephemeral
storage. The set of cipher-mode combinations available
depends on kernel support.
Possible values:
* aes-xts-plain64 (Default), see /proc/crypto for available options.
"""),
cfg.IntOpt('key_size',
default=512,
min=1,
help="""
Encryption key length in bits
The bit length of the encryption key to be used to
encrypt ephemeral storage (in XTS mode only half of
the bits are used for encryption key).
"""),
]
def register_opts(conf):
conf.register_group(ephemeral_storage_encryption_group)
conf.register_opts(ephemeral_storage_encryption_opts,
group='ephemeral_storage_encryption')
def list_opts():
return {'ephemeral_storage_encryption': ephemeral_storage_encryption_opts}
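# Illustrative sketch (values shown are the defaults defined above): the
# options register under a nova.conf section of the form:
#
#   [ephemeral_storage_encryption]
#   enabled = False
#   cipher = aes-xts-plain64
#   key_size = 512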
|
{
"content_hash": "b91de3ee32f0e6d08a41d3b9dbd42b01",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 26.387755102040817,
"alnum_prop": 0.6705336426914154,
"repo_name": "xuweiliang/Codelibrary",
"id": "8747cfc27287bf982c9b45c37c6ad42872c5495c",
"size": "2147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/conf/ephemeral_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
}
|
class GiraffeException(Exception):
pass
class EdgeCreatesLoop(GiraffeException):
pass
class NoSuchVertex(GiraffeException):
pass
class NoSuchEdge(GiraffeException):
pass
|
{
"content_hash": "480ae9d18c320b44b52f1971b0c1266c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 17.181818181818183,
"alnum_prop": 0.7724867724867724,
"repo_name": "Julian/giraffe",
"id": "965f28f66ffe60471fca40f6d0cc3a907060502f",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "giraffe/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "348190"
},
{
"name": "Python",
"bytes": "9713"
}
],
"symlink_target": ""
}
|
import re
import uuid
from pylons import tmpl_context as c
from pylons.controllers.util import abort
from r2.lib.validator import (
Validator,
VBoolean,
VLength,
VMarkdownLength,
VPermissions,
)
from r2.lib.db import tdb_cassandra
from r2.lib.errors import errors
from r2.lib.utils import UrlParser
from reddit_liveupdate import models
from reddit_liveupdate.permissions import ContributorPermissionSet
class VLiveUpdateEvent(Validator):
splitter = re.compile('[ ,]+')
def __init__(self, param, multiple=False, **kw):
self.multiple = multiple
Validator.__init__(self, param, kw)
def param_docs(self):
if self.multiple:
return {
self.param: ("A comma-separated list of ids"),
}
else:
return {
self.param: ("A live update event id"),
}
def run(self, id):
if not id:
return None
try:
if self.multiple:
items = self.splitter.split(id)
else:
items = id
return models.LiveUpdateEvent._byID(items)
except tdb_cassandra.NotFound:
return None
class VLiveUpdateEventUrl(VLiveUpdateEvent):
def run(self, url):
if not url:
return None
u = UrlParser(url)
# TODO: We should probably set error messages in these cases.
if not u.is_reddit_url():
return None
event_id = re.match(r'/live/(\w+)/?', u.path)
if not event_id:
return None
return VLiveUpdateEvent.run(self, event_id.group(1))
class VLiveUpdateID(Validator):
def run(self, fullname):
if not fullname or not fullname.startswith("LiveUpdate_"):
return
id = fullname[len("LiveUpdate_"):]
try:
return uuid.UUID(id)
except (ValueError, TypeError):
return
def param_docs(self):
return {
self.param: "the ID of a single update. e.g. `LiveUpdate_ff87068e-a126-11e3-9f93-12313b0b3603`",
}
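# Editorial sketch of the fullname convention VLiveUpdateID handles above
# (example id taken from the param_docs string; illustration only):
#   fullname = "LiveUpdate_ff87068e-a126-11e3-9f93-12313b0b3603"
#   uuid.UUID(fullname[len("LiveUpdate_"):])
#   -> UUID('ff87068e-a126-11e3-9f93-12313b0b3603')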
class VLiveUpdate(VLiveUpdateID):
def run(self, fullname):
id = VLiveUpdateID.run(self, fullname)
if id:
try:
return models.LiveUpdateStream.get_update(
c.liveupdate_event, id)
except tdb_cassandra.NotFound:
pass
self.set_error(errors.NO_THING_ID)
class VLiveUpdateContributorWithPermission(Validator):
def __init__(self, permission):
self.permission = permission
Validator.__init__(self)
def run(self):
if not c.liveupdate_permissions.allow(self.permission):
abort(403, "Forbidden")
class VLiveUpdatePermissions(VPermissions):
types = {
"liveupdate_contributor": ContributorPermissionSet,
"liveupdate_contributor_invite": ContributorPermissionSet,
}
def param_docs(self):
return {
self.param[0]:
"one of (%s)" % ", ".join("`%s`" % s for s in self.types),
self.param[1]:
"permission description e.g. `+update,+edit,-manage`",
}
EVENT_CONFIGURATION_VALIDATORS = {
"title": VLength("title", max_length=120),
"description": VMarkdownLength("description", max_length=120, empty_error=None),
"resources": VMarkdownLength("resources", max_length=10000, empty_error=None),
"nsfw": VBoolean("nsfw"),
}
def is_event_configuration_valid(form):
if form.has_errors("title", errors.NO_TEXT,
errors.TOO_LONG):
return False
if form.has_errors("description", errors.TOO_LONG):
return False
if form.has_errors("resources", errors.TOO_LONG):
return False
return True
|
{
"content_hash": "13756ee13fc466dd751bf73db284a0a0",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 108,
"avg_line_length": 25.91156462585034,
"alnum_prop": 0.5914938304016802,
"repo_name": "madbook/reddit-plugin-liveupdate",
"id": "f2a556ecc68e622054b1cb025ca733587270c378",
"size": "3809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit_liveupdate/validators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17923"
},
{
"name": "HTML",
"bytes": "20872"
},
{
"name": "JavaScript",
"bytes": "340563"
},
{
"name": "Python",
"bytes": "107658"
}
],
"symlink_target": ""
}
|
"""test checking use of the logging module
"""
__revision__ = ''
import __builtin__
# Muck up the names in an effort to confuse...
import logging as renamed_logging
import os as logging
FORMAT_STR = '{0}, {1}'
# Statements that should be flagged:
renamed_logging.debug('{0}, {1}'.format(4, 5))
renamed_logging.log(renamed_logging.DEBUG, 'msg: {}'.format('Run!'))
renamed_logging.debug(FORMAT_STR.format(4, 5))
renamed_logging.log(renamed_logging.DEBUG, FORMAT_STR.format(4, 5))
# Statements that should not be flagged:
renamed_logging.debug(format(66, 'x'))
renamed_logging.debug(__builtin__.format(66, 'x'))
renamed_logging.log(renamed_logging.DEBUG, 'msg: Run!'.upper())
logging.debug('{0}, {1}'.format(4, 5))
logging.log(logging.DEBUG, 'msg: {}'.format('Run!'))
|
{
"content_hash": "284b8f3bd19d883393871facc5132fb2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 30.84,
"alnum_prop": 0.6964980544747081,
"repo_name": "willemneal/Docky",
"id": "efe6b1a80154df48a8f45b2883ad433c8b58a096",
"size": "815",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/pylint/test/input/func_w1202.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "5145"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3168"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "4528"
},
{
"name": "BlitzBasic",
"bytes": "1730"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "109073"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "26952"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "91743"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "5709"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "27879"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "15760"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Go",
"bytes": "172"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "81613"
},
{
"name": "JavaScript",
"bytes": "14143"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "306"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "76274"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "3385"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "318"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "17354"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "3611"
},
{
"name": "Perl6",
"bytes": "49676"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PowerShell",
"bytes": "6932"
},
{
"name": "Prolog",
"bytes": "738"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "6272729"
},
{
"name": "R",
"bytes": "4057"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "1887"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "47137"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "121510"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "16922"
},
{
"name": "Visual Basic",
"bytes": "17210"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "26388"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
"""
Module to do callbacks for Keras models.
"""
from __future__ import division
from keras.callbacks import Callback # ModelCheckpoint , EarlyStopping
import matplotlib.pyplot as plt
import h5py
import itertools
import logging
import numpy as np
import os
import subprocess
import shutil
import codecs
import sys
from time import gmtime, strftime
import math
import time
from copy import deepcopy
# Set up logger
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Dimensionality of image feature vector
IMG_FEATS = 4096
HSN_SIZE = 409
MULTEVAL_DIR = '../multeval-0.5.1' if "util" in os.getcwd() else "multeval-0.5.1"
class cd:
"""Context manager for changing the current working directory"""
"""http://stackoverflow.com/questions/431684/how-do-i-cd-in-python"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
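# Editorial usage sketch for the cd context manager above; MULTEVAL_DIR is the
# path this module already defines, any existing directory would do:
#   with cd(MULTEVAL_DIR):
#       ...  # the working directory is MULTEVAL_DIR inside the block
#   # the previous working directory is restored on exit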
class CompilationOfCallbacks(Callback):
""" Collection of compiled callbacks."""
def __init__(self, word2index, index2word, argsDict, dataset,
data_generator, use_sourcelang=False, use_image=True):
super(Callback, self).__init__()
self.verbose = True
self.filename = "weights.hdf5"
self.save_best_only = True
self.val_loss = []
self.best_val_loss = np.inf
self.val_metric = []
self.best_val_metric = np.NINF
self.word2index = word2index
self.index2word = index2word
self.args = argsDict
# used to control early stopping on the validation data
self.wait = 0
self.patience = self.args.patience
# needed by model.predict in generate_sentences
self.use_sourcelang = use_sourcelang
self.use_image = use_image
# controversial assignment but it makes it much easier to
# do early stopping based on metrics
self.data_generator = data_generator
# this results in two file handlers for dataset (here and
# data_generator)
if not dataset:
logger.warn("No dataset given, using flickr8k")
self.dataset = h5py.File("flickr8k/dataset.h5", "r")
else:
self.dataset = h5py.File("%s/dataset.h5" % dataset, "r")
if self.args.source_vectors is not None:
self.source_dataset = h5py.File("%s/dataset.h5" % self.args.source_vectors, "r")
def on_epoch_end(self, epoch, logs={}):
'''
At the end of each epoch we
1. create a directory to checkpoint data
2. save the arguments used to initialise the run
3. generate N sentences in the val data by sampling from the model
4. calculate metric score of the generated sentences
5. determine whether to stop training and sys.exit(0)
6. save the model parameters using BLEU
'''
savetime = strftime("%d%m%Y-%H%M%S", gmtime())
path = self.create_checkpoint_directory(savetime)
self.save_run_arguments(path)
# Generate training and val sentences to check for overfitting
self.generate_sentences(path)
meteor, bleu, ter = self.multeval_scores(path)
val_loss = logs.get('val_loss')
self.early_stop_decision(len(self.val_metric)+1, meteor, val_loss)
self.checkpoint_parameters(epoch, logs, path, meteor, val_loss)
self.log_performance()
def early_stop_decision(self, epoch, val_metric, val_loss):
'''
Stop training if validation loss has stopped decreasing and
validation BLEU score has not increased for --patience epochs.
WARNING: quits with sys.exit(0).
TODO: this doesn't yet support early stopping based on TER
'''
if val_loss < self.best_val_loss:
self.wait = 0
elif val_metric > self.best_val_metric or self.args.no_early_stopping:
self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
# we have exceeded patience
if val_loss > self.best_val_loss:
# and loss is no longer decreasing
logger.info("Epoch %d: early stopping", epoch)
handle = open("checkpoints/%s/summary"
% self.args.run_string, "a")
handle.write("Early stopping because patience exceeded\n")
best_bleu = np.nanargmax(self.val_metric)
best_loss = np.nanargmin(self.val_loss)
logger.info("Best Metric: %d | val loss %.5f score %.2f",
best_bleu+1, self.val_loss[best_bleu],
self.val_metric[best_bleu])
logger.info("Best loss: %d | val loss %.5f score %.2f",
best_loss+1, self.val_loss[best_loss],
self.val_metric[best_loss])
handle.close()
sys.exit(0)
def log_performance(self):
'''
Record model performance so far, based on validation loss.
'''
handle = open("checkpoints/%s/summary" % self.args.run_string, "w")
for epoch in range(len(self.val_loss)):
handle.write("Checkpoint %d | val loss: %.5f bleu %.2f\n"
% (epoch+1, self.val_loss[epoch],
self.val_metric[epoch]))
logger.info("---") # break up the presentation for clarity
# BLEU is the quickest indicator of performance for our task
# but loss is our objective function
best_bleu = np.nanargmax(self.val_metric)
best_loss = np.nanargmin(self.val_loss)
logger.info("Best Metric: %d | val loss %.5f score %.2f",
best_bleu+1, self.val_loss[best_bleu],
self.val_metric[best_bleu])
handle.write("Best Metric: %d | val loss %.5f score %.2f\n"
% (best_bleu+1, self.val_loss[best_bleu],
self.val_metric[best_bleu]))
logger.info("Best loss: %d | val loss %.5f score %.2f",
best_loss+1, self.val_loss[best_loss],
self.val_metric[best_loss])
handle.write("Best loss: %d | val loss %.5f score %.2f\n"
% (best_loss+1, self.val_loss[best_loss],
self.val_metric[best_loss]))
logger.info("Early stopping marker: wait/patience: %d/%d\n",
self.wait, self.patience)
handle.write("Early stopping marker: wait/patience: %d/%d\n" %
(self.wait, self.patience))
handle.close()
def extract_references(self, directory, split):
"""
Get reference descriptions for val or test data.
"""
references = self.data_generator.get_refs_by_split_as_list(split)
for refid in xrange(len(references[0])):
codecs.open('%s/%s_reference.ref%d' % (directory, split, refid),
'w', 'utf-8').write('\n'.join([x[refid] for x in references]))
#'w', 'utf-8').write('\n'.join(['\n'.join(x) for x in references]))
return references
def __bleu_score__(self, directory, val=True):
'''
Loss is only weakly correlated with improvements in BLEU,
and thus improvements in human judgements. Let's also track
BLEU score of a subset of generated sentences in the val split
to decide on early stopping, etc.
'''
prefix = "val" if val else "test"
self.extract_references(directory, split=prefix)
subprocess.check_call(
['perl multi-bleu.perl %s/%s_reference.ref < %s/%sGenerated > %s/%sBLEU'
% (directory, prefix, directory, prefix, directory, prefix)],
shell=True)
bleudata = open("%s/%sBLEU" % (directory, prefix)).readline()
data = bleudata.split(",")[0]
bleuscore = data.split("=")[1]
bleu = float(bleuscore.lstrip())
return bleu
def multeval_scores(self, directory, val=True):
'''
Maybe you want to do early stopping using Meteor, TER, or BLEU?
'''
prefix = "val" if val else "test"
self.extract_references(directory, prefix)
        # First we need to re-compound the split German words
if self.args.meteor_lang == 'de':
subprocess.check_call(
["cp %s/%sGenerated %s/%sGenerated.orig" % (directory, prefix,
directory, prefix)], shell=True)
subprocess.check_call(
["sed -i -r 's/ @(.*?)@ //g' %s/%sGenerated" % (directory, prefix)], shell=True)
subprocess.check_call(
["sed -i -r 's/ @(.*?)@ //g' %s/%s_reference.*" % (directory, prefix)], shell=True)
with cd(MULTEVAL_DIR):
subprocess.check_call(
['./multeval.sh eval --refs ../%s/%s_reference.* \
--hyps-baseline ../%s/%sGenerated \
--meteor.language %s \
--threads 1 \
2> %s-multevaloutput 1> %s-multevaloutput'
% (directory, prefix, directory, prefix,
self.args.meteor_lang, self.args.run_string,
self.args.run_string)], shell=True)
handle = open("%s-multevaloutput" % self.args.run_string)
multdata = handle.readlines()
handle.close()
for line in multdata:
if line.startswith("RESULT: baseline: BLEU: AVG:"):
mbleu = line.split(":")[4]
mbleu = mbleu.replace("\n","")
mbleu = mbleu.strip()
lr = mbleu.split(".")
mbleu = float(lr[0]+"."+lr[1][0:2])
if line.startswith("RESULT: baseline: METEOR: AVG:"):
mmeteor = line.split(":")[4]
mmeteor = mmeteor.replace("\n","")
mmeteor = mmeteor.strip()
lr = mmeteor.split(".")
mmeteor = float(lr[0]+"."+lr[1][0:2])
if line.startswith("RESULT: baseline: TER: AVG:"):
mter = line.split(":")[4]
mter = mter.replace("\n","")
mter = mter.strip()
lr = mter.split(".")
mter = float(lr[0]+"."+lr[1][0:2])
logger.info("Meteor = %.2f | BLEU = %.2f | TER = %.2f",
mmeteor, mbleu, mter)
return mmeteor, mbleu, mter
def create_checkpoint_directory(self, savetime):
'''
We will create one directory to store all of the epochs data inside.
The name is based on the run_string (if provided) or the current time.
'''
prefix = self.args.run_string if self.args.run_string != "" else ""
number = "%03d" % (len(self.val_metric) + 1)
filepath = "checkpoints/%s/%s-%s" % ((prefix, number, savetime))
try:
os.mkdir("checkpoints/%s/" % (prefix))
shutil.copyfile("train.py", "checkpoints/%s/train.py" % prefix)
shutil.copyfile("models.py", "checkpoints/%s/models.py" % prefix)
except OSError:
pass # directory already exists
try:
os.mkdir(filepath)
except OSError:
pass # directory already exists
logger.info("\nIn %s ...",filepath)
return filepath
def save_run_arguments(self, filepath):
'''
Save the command-line arguments, along with the method defaults,
used to parameterise this run.
'''
handle = open("%s/argparse.args" % filepath, "w")
for arg, value in self.args.__dict__.iteritems():
handle.write("%s: %s\n" % (arg, str(value)))
handle.close()
def checkpoint_parameters(self, epoch, logs, filepath, cur_val_metric,
cur_val_loss=0.):
'''
We checkpoint the model parameters based on either PPLX reduction or
metric score increase in the validation data. This is driven by the
user-specified argument self.args.stopping_loss.
TODO: this doesn't yet support early stopping based on TER
'''
weights_path = "%s/weights.hdf5" % filepath
self.val_loss.append(cur_val_loss)
if cur_val_loss < self.best_val_loss:
self.best_val_loss = cur_val_loss
# save the weights anyway for debug purposes
self.model.save_weights(weights_path, overwrite=True)
# update the best values, if applicable
self.val_metric.append(cur_val_metric)
if cur_val_metric > self.best_val_metric:
self.best_val_metric = cur_val_metric
optimiser_params = open("%s/optimiser_params" % filepath, "w")
for key, value in self.model.optimizer.get_config().items():
optimiser_params.write("%s: %s\n" % (key, value))
optimiser_params.close()
def reset_text_arrays(self, text_arrays, fixed_words=1):
""" Reset the values in the text data structure to zero so we cannot
accidentally pass them into the model """
reset_arrays = deepcopy(text_arrays)
reset_arrays[:,fixed_words:, :] = 0
return reset_arrays
def generate_sentences(self, filepath, val=True):
"""
Generates descriptions of images for --generation_timesteps
iterations through the LSTM. Each input description is clipped to
        the first <BOS> token, or, if --generate_from_N_words is set, to the
        first N words after it (N + 1 tokens including <BOS>).
This process can be additionally conditioned
on source language hidden representations, if provided by the
--source_vectors parameter.
The output is clipped to the first EOS generated, if it exists.
TODO: duplicated method with generate.py
"""
prefix = "val" if val else "test"
logger.info("Generating %s descriptions", prefix)
start_gen = self.args.generate_from_N_words + 1 # include BOS
handle = codecs.open("%s/%sGenerated" % (filepath, prefix),
"w", 'utf-8')
val_generator = self.data_generator.generation_generator(prefix,
in_callbacks=True)
seen = 0
for data in val_generator:
inputs = data[0]
text = deepcopy(inputs['text'])
# Append the first start_gen words to the complete_sentences list
# for each instance in the batch.
complete_sentences = [[] for _ in range(text.shape[0])]
for t in range(start_gen): # minimum 1
for i in range(text.shape[0]):
w = np.argmax(text[i, t])
complete_sentences[i].append(self.index2word[w])
del inputs['text']
text = self.reset_text_arrays(text, start_gen)
Y_target = data[1]
inputs['text'] = text
for t in range(start_gen, self.args.generation_timesteps):
logger.debug("Input token: %s" % self.index2word[np.argmax(inputs['text'][0,t-1])])
preds = self.model.predict(inputs, verbose=0)
# Look at the last indices for the words.
#next_word_indices = np.argmax(preds['output'][:, t-1], axis=1)
next_word_indices = np.argmax(preds[:, t-1], axis=1)
logger.debug("Predicted token: %s" % self.index2word[next_word_indices[0]])
# update array[0]/sentence-so-far with generated words.
for i in range(len(next_word_indices)):
inputs['text'][i, t, next_word_indices[i]] = 1.
next_words = [self.index2word[x] for x in next_word_indices]
for i in range(len(next_words)):
complete_sentences[i].append(next_words[i])
sys.stdout.flush()
# print/extract each sentence until it hits the first end-of-string token
for s in complete_sentences:
decoded_str = ' '.join([x for x
in itertools.takewhile(
lambda n: n != "<E>", s[1:])])
handle.write(decoded_str + "\n")
seen += text.shape[0]
if seen >= self.data_generator.split_sizes['val']:
# Hacky way to break out of the generator
break
handle.close()
|
{
"content_hash": "61cbe0a68630e4dbc8f652806788a389",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 99,
"avg_line_length": 41.38861386138614,
"alnum_prop": 0.562346749596316,
"repo_name": "elliottd/GroundedTranslation",
"id": "5766868b9e5ffbb1a2b492901d1b7734582e4ae7",
"size": "16721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Callbacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "355"
},
{
"name": "Matlab",
"bytes": "3415"
},
{
"name": "Perl",
"bytes": "9498"
},
{
"name": "Python",
"bytes": "202526"
}
],
"symlink_target": ""
}
|
from decimal import Decimal as D
import zlib
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from oscar.apps.basket.managers import OpenBasketManager, SavedBasketManager
from oscar.apps.offer import results
from oscar.core.utils import get_default_currency
from oscar.core.compat import AUTH_USER_MODEL
from oscar.templatetags.currency_filters import currency
@python_2_unicode_compatible
class AbstractBasket(models.Model):
"""
Basket object
"""
# Baskets can be anonymously owned - hence this field is nullable. When a
# anon user signs in, their two baskets are merged.
owner = models.ForeignKey(
AUTH_USER_MODEL, related_name='baskets', null=True,
verbose_name=_("Owner"))
# Basket statuses
# - Frozen is for when a basket is in the process of being submitted
# and we need to prevent any changes to it.
OPEN, MERGED, SAVED, FROZEN, SUBMITTED = (
"Open", "Merged", "Saved", "Frozen", "Submitted")
STATUS_CHOICES = (
(OPEN, _("Open - currently active")),
(MERGED, _("Merged - superceded by another basket")),
(SAVED, _("Saved - for items to be purchased later")),
(FROZEN, _("Frozen - the basket cannot be modified")),
(SUBMITTED, _("Submitted - has been ordered at the checkout")),
)
status = models.CharField(
_("Status"), max_length=128, default=OPEN, choices=STATUS_CHOICES)
# A basket can have many vouchers attached to it. However, it is common
# for sites to only allow one voucher per basket - this will need to be
# enforced in the project's codebase.
vouchers = models.ManyToManyField(
'voucher.Voucher', null=True, verbose_name=_("Vouchers"), blank=True)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_merged = models.DateTimeField(_("Date merged"), null=True, blank=True)
date_submitted = models.DateTimeField(_("Date submitted"), null=True,
blank=True)
# Only if a basket is in one of these statuses can it be edited
editable_statuses = (OPEN, SAVED)
class Meta:
abstract = True
app_label = 'basket'
verbose_name = _('Basket')
verbose_name_plural = _('Baskets')
objects = models.Manager()
open = OpenBasketManager()
saved = SavedBasketManager()
def __init__(self, *args, **kwargs):
super(AbstractBasket, self).__init__(*args, **kwargs)
# We keep a cached copy of the basket lines as we refer to them often
# within the same request cycle. Also, applying offers will append
# discount data to the basket lines which isn't persisted to the DB and
# so we want to avoid reloading them as this would drop the discount
# information.
self._lines = None
self.offer_applications = results.OfferApplications()
def __str__(self):
return _(
u"%(status)s basket (owner: %(owner)s, lines: %(num_lines)d)") \
% {'status': self.status,
'owner': self.owner,
'num_lines': self.num_lines}
# ========
# Strategy
# ========
@property
def has_strategy(self):
return hasattr(self, '_strategy')
def _get_strategy(self):
if not self.has_strategy:
raise RuntimeError(
"No strategy class has been assigned to this basket. "
"This is normally assigned to the incoming request in "
"oscar.apps.basket.middleware.BasketMiddleware. "
"Since it is missing, you must be doing something different. "
"Ensure that a strategy instance is assigned to the basket!"
)
return self._strategy
def _set_strategy(self, strategy):
self._strategy = strategy
strategy = property(_get_strategy, _set_strategy)
def all_lines(self):
"""
Return a cached set of basket lines.
This is important for offers as they alter the line models and you
don't want to reload them from the DB as that information would be
lost.
"""
if self.id is None:
return self.lines.none()
if self._lines is None:
self._lines = (
self.lines
.select_related('product', 'stockrecord')
.prefetch_related(
'attributes', 'product__images'))
return self._lines
def is_quantity_allowed(self, qty):
"""
Test whether the passed quantity of items can be added to the basket
"""
        # We enforce a max threshold to prevent a DOS attack via the offers
# system.
basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
if basket_threshold:
total_basket_quantity = self.num_items
max_allowed = basket_threshold - total_basket_quantity
if qty > max_allowed:
return False, _(
"Due to technical limitations we are not able "
"to ship more than %(threshold)d items in one order.") \
% {'threshold': basket_threshold}
return True, None
# ============
# Manipulation
# ============
def flush(self):
"""
Remove all lines from basket.
"""
if self.status == self.FROZEN:
raise PermissionDenied("A frozen basket cannot be flushed")
self.lines.all().delete()
self._lines = None
def add_product(self, product, quantity=1, options=None):
"""
Add a product to the basket
'stock_info' is the price and availability data returned from
a partner strategy class.
        The 'options' list should contain dicts with keys 'option' and 'value'
which link the relevant product.Option model and string value
respectively.
Returns (line, created).
line: the matching basket line
created: whether the line was created or updated
"""
if options is None:
options = []
if not self.id:
self.save()
# Ensure that all lines are the same currency
price_currency = self.currency
stock_info = self.strategy.fetch_for_product(product)
if price_currency and stock_info.price.currency != price_currency:
raise ValueError((
"Basket lines must all have the same currency. Proposed "
"line has currency %s, while basket has currency %s")
% (stock_info.price.currency, price_currency))
if stock_info.stockrecord is None:
raise ValueError((
"Basket lines must all have stock records. Strategy hasn't "
"found any stock record for product %s") % product)
# Line reference is used to distinguish between variations of the same
# product (eg T-shirts with different personalisations)
line_ref = self._create_line_reference(
product, stock_info.stockrecord, options)
# Determine price to store (if one exists). It is only stored for
# audit and sometimes caching.
defaults = {
'quantity': quantity,
'price_excl_tax': stock_info.price.excl_tax,
'price_currency': stock_info.price.currency,
}
if stock_info.price.is_tax_known:
defaults['price_incl_tax'] = stock_info.price.incl_tax
line, created = self.lines.get_or_create(
line_reference=line_ref,
product=product,
stockrecord=stock_info.stockrecord,
defaults=defaults)
if created:
for option_dict in options:
line.attributes.create(option=option_dict['option'],
value=option_dict['value'])
else:
line.quantity += quantity
line.save()
self.reset_offer_applications()
# Returning the line is useful when overriding this method.
return line, created
add_product.alters_data = True
add = add_product
def applied_offers(self):
"""
Return a dict of offers successfully applied to the basket.
This is used to compare offers before and after a basket change to see
if there is a difference.
"""
return self.offer_applications.offers
def reset_offer_applications(self):
"""
Remove any discounts so they get recalculated
"""
self.offer_applications = results.OfferApplications()
self._lines = None
def merge_line(self, line, add_quantities=True):
"""
For transferring a line from another basket to this one.
This is used with the "Saved" basket functionality.
"""
try:
existing_line = self.lines.get(line_reference=line.line_reference)
except ObjectDoesNotExist:
# Line does not already exist - reassign its basket
line.basket = self
line.save()
else:
# Line already exists - assume the max quantity is correct and
# delete the old
if add_quantities:
existing_line.quantity += line.quantity
else:
existing_line.quantity = max(existing_line.quantity,
line.quantity)
existing_line.save()
line.delete()
finally:
self._lines = None
merge_line.alters_data = True
def merge(self, basket, add_quantities=True):
"""
Merges another basket with this one.
:basket: The basket to merge into this one.
:add_quantities: Whether to add line quantities when they are merged.
"""
# Use basket.lines.all instead of all_lines as this function is called
# before a strategy has been assigned.
for line_to_merge in basket.lines.all():
self.merge_line(line_to_merge, add_quantities)
basket.status = self.MERGED
basket.date_merged = now()
basket._lines = None
basket.save()
# Ensure all vouchers are moved to the new basket
for voucher in basket.vouchers.all():
basket.vouchers.remove(voucher)
self.vouchers.add(voucher)
merge.alters_data = True
def freeze(self):
"""
Freezes the basket so it cannot be modified.
"""
self.status = self.FROZEN
self.save()
freeze.alters_data = True
def thaw(self):
"""
Unfreezes a basket so it can be modified again
"""
self.status = self.OPEN
self.save()
thaw.alters_data = True
def submit(self):
"""
Mark this basket as submitted
"""
self.status = self.SUBMITTED
self.date_submitted = now()
self.save()
submit.alters_data = True
# Kept for backwards compatibility
set_as_submitted = submit
def is_shipping_required(self):
"""
Test whether the basket contains physical products that require
shipping.
"""
for line in self.all_lines():
if line.product.is_shipping_required:
return True
return False
# =======
# Helpers
# =======
def _create_line_reference(self, product, stockrecord, options):
"""
Returns a reference string for a line based on the item
and its options.
"""
base = '%s_%s' % (product.id, stockrecord.id)
if not options:
return base
return "%s_%s" % (base, zlib.crc32(repr(options).encode('utf8')))
def _get_total(self, property):
"""
For executing a named method on each line of the basket
and returning the total.
"""
total = D('0.00')
for line in self.all_lines():
try:
total += getattr(line, property)
except ObjectDoesNotExist:
# Handle situation where the product may have been deleted
pass
return total
# ==========
# Properties
# ==========
@property
def is_empty(self):
"""
Test if this basket is empty
"""
return self.id is None or self.num_lines == 0
@property
def is_tax_known(self):
"""
Test if tax values are known for this basket
"""
return all([line.is_tax_known for line in self.all_lines()])
@property
def total_excl_tax(self):
"""
Return total line price excluding tax
"""
return self._get_total('line_price_excl_tax_incl_discounts')
@property
def total_tax(self):
"""Return total tax for a line"""
return self._get_total('line_tax')
@property
def total_incl_tax(self):
"""
Return total price inclusive of tax and discounts
"""
return self._get_total('line_price_incl_tax_incl_discounts')
@property
def total_incl_tax_excl_discounts(self):
"""
        Return total price inclusive of tax but exclusive of discounts
"""
return self._get_total('line_price_incl_tax')
@property
def total_discount(self):
return self._get_total('discount_value')
@property
def offer_discounts(self):
"""
Return basket discounts from non-voucher sources. Does not include
shipping discounts.
"""
return self.offer_applications.offer_discounts
@property
def voucher_discounts(self):
"""
Return discounts from vouchers
"""
return self.offer_applications.voucher_discounts
@property
def has_shipping_discounts(self):
return len(self.shipping_discounts) > 0
@property
def shipping_discounts(self):
"""
        Return discounts on shipping
"""
return self.offer_applications.shipping_discounts
@property
def post_order_actions(self):
"""
        Return post-order actions triggered by offers
"""
return self.offer_applications.post_order_actions
@property
def grouped_voucher_discounts(self):
"""
Return discounts from vouchers but grouped so that a voucher which
links to multiple offers is aggregated into one object.
"""
return self.offer_applications.grouped_voucher_discounts
@property
def total_excl_tax_excl_discounts(self):
"""
Return total price excluding tax and discounts
"""
return self._get_total('line_price_excl_tax')
@property
def num_lines(self):
"""Return number of lines"""
return self.all_lines().count()
@property
def num_items(self):
"""Return number of items"""
return sum(line.quantity for line in self.lines.all())
@property
def num_items_without_discount(self):
num = 0
for line in self.all_lines():
num += line.quantity_without_discount
return num
@property
def num_items_with_discount(self):
num = 0
for line in self.all_lines():
num += line.quantity_with_discount
return num
@property
def time_before_submit(self):
if not self.date_submitted:
return None
return self.date_submitted - self.date_created
@property
def time_since_creation(self, test_datetime=None):
if not test_datetime:
test_datetime = now()
return test_datetime - self.date_created
@property
def contains_a_voucher(self):
if not self.id:
return False
return self.vouchers.exists()
@property
def is_submitted(self):
return self.status == self.SUBMITTED
@property
def can_be_edited(self):
"""
Test if a basket can be edited
"""
return self.status in self.editable_statuses
@property
def currency(self):
# Since all lines should have the same currency, return the currency of
# the first one found.
for line in self.all_lines():
return line.price_currency
# =============
# Query methods
# =============
def contains_voucher(self, code):
"""
Test whether the basket contains a voucher with a given code
"""
if self.id is None:
return False
try:
self.vouchers.get(code=code)
except ObjectDoesNotExist:
return False
else:
return True
def product_quantity(self, product):
"""
Return the quantity of a product in the basket
The basket can contain multiple lines with the same product, but
different options and stockrecords. Those quantities are summed up.
"""
matching_lines = self.lines.filter(product=product)
quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']
return quantity or 0
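    # Editorial example for product_quantity above (hypothetical figures):
    # two basket lines for the same product with quantities 2 and 3 sum to 5.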
def line_quantity(self, product, stockrecord, options=None):
"""
Return the current quantity of a specific product and options
"""
ref = self._create_line_reference(product, stockrecord, options)
try:
return self.lines.get(line_reference=ref).quantity
except ObjectDoesNotExist:
return 0
@python_2_unicode_compatible
class AbstractLine(models.Model):
"""
A line of a basket (product and a quantity)
"""
basket = models.ForeignKey('basket.Basket', related_name='lines',
verbose_name=_("Basket"))
# This is to determine which products belong to the same line
# We can't just use product.id as you can have customised products
# which should be treated as separate lines. Set as a
# SlugField as it is included in the path for certain views.
line_reference = models.SlugField(
_("Line Reference"), max_length=128, db_index=True)
product = models.ForeignKey(
'catalogue.Product', related_name='basket_lines',
verbose_name=_("Product"))
# We store the stockrecord that should be used to fulfil this line.
stockrecord = models.ForeignKey(
'partner.StockRecord', related_name='basket_lines')
quantity = models.PositiveIntegerField(_('Quantity'), default=1)
# We store the unit price incl tax of the product when it is first added to
# the basket. This allows us to tell if a product has changed price since
# a person first added it to their basket.
price_currency = models.CharField(
_("Currency"), max_length=12, default=get_default_currency)
price_excl_tax = models.DecimalField(
_('Price excl. Tax'), decimal_places=2, max_digits=12,
null=True)
price_incl_tax = models.DecimalField(
_('Price incl. Tax'), decimal_places=2, max_digits=12, null=True)
# Track date of first addition
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
def __init__(self, *args, **kwargs):
super(AbstractLine, self).__init__(*args, **kwargs)
# Instance variables used to persist discount information
self._discount_excl_tax = D('0.00')
self._discount_incl_tax = D('0.00')
self._affected_quantity = 0
class Meta:
abstract = True
app_label = 'basket'
unique_together = ("basket", "line_reference")
verbose_name = _('Basket line')
verbose_name_plural = _('Basket lines')
def __str__(self):
return _(
u"Basket #%(basket_id)d, Product #%(product_id)d, quantity"
u" %(quantity)d") % {'basket_id': self.basket.pk,
'product_id': self.product.pk,
'quantity': self.quantity}
def save(self, *args, **kwargs):
if not self.basket.can_be_edited:
raise PermissionDenied(
_("You cannot modify a %s basket") % (
self.basket.status.lower(),))
return super(AbstractLine, self).save(*args, **kwargs)
# =============
# Offer methods
# =============
def clear_discount(self):
"""
Remove any discounts from this line.
"""
self._discount_excl_tax = D('0.00')
self._discount_incl_tax = D('0.00')
self._affected_quantity = 0
def discount(self, discount_value, affected_quantity, incl_tax=True):
"""
Apply a discount to this line
"""
if incl_tax:
if self._discount_excl_tax > 0:
raise RuntimeError(
"Attempting to discount the tax-inclusive price of a line "
"when tax-exclusive discounts are already applied")
self._discount_incl_tax += discount_value
else:
if self._discount_incl_tax > 0:
raise RuntimeError(
"Attempting to discount the tax-exclusive price of a line "
"when tax-inclusive discounts are already applied")
self._discount_excl_tax += discount_value
self._affected_quantity += int(affected_quantity)
def consume(self, quantity):
"""
Mark all or part of the line as 'consumed'
Consumed items are no longer available to be used in offers.
"""
if quantity > self.quantity - self._affected_quantity:
inc = self.quantity - self._affected_quantity
else:
inc = quantity
self._affected_quantity += int(inc)
def get_price_breakdown(self):
"""
Return a breakdown of line prices after discounts have been applied.
Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity)
tuples.
"""
if not self.is_tax_known:
raise RuntimeError("A price breakdown can only be determined "
"when taxes are known")
prices = []
if not self.discount_value:
prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,
self.quantity))
else:
# Need to split the discount among the affected quantity
# of products.
item_incl_tax_discount = (
self.discount_value / int(self._affected_quantity))
item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio
item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01'))
prices.append((self.unit_price_incl_tax - item_incl_tax_discount,
self.unit_price_excl_tax - item_excl_tax_discount,
self._affected_quantity))
if self.quantity_without_discount:
prices.append((self.unit_price_incl_tax,
self.unit_price_excl_tax,
self.quantity_without_discount))
return prices
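    # Editorial worked example for get_price_breakdown above (hypothetical
    # figures): quantity=3, unit_price_incl_tax=10.00, unit_price_excl_tax=8.00,
    # an incl-tax discount of 2.00 spread over 2 affected items gives
    # item_incl_tax_discount=1.00, item_excl_tax_discount=0.80, so the
    # breakdown is [(9.00, 7.20, 2), (10.00, 8.00, 1)].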
# =======
# Helpers
# =======
@property
def _tax_ratio(self):
if not self.unit_price_incl_tax:
return 0
return self.unit_price_excl_tax / self.unit_price_incl_tax
# ==========
# Properties
# ==========
@property
def has_discount(self):
return self.quantity > self.quantity_without_discount
@property
def quantity_with_discount(self):
return self._affected_quantity
@property
def quantity_without_discount(self):
return int(self.quantity - self._affected_quantity)
@property
def is_available_for_discount(self):
return self.quantity_without_discount > 0
@property
def discount_value(self):
# Only one of the incl- and excl- discounts should be non-zero
return max(self._discount_incl_tax, self._discount_excl_tax)
@property
def purchase_info(self):
"""
Return the stock/price info
"""
if not hasattr(self, '_info'):
# Cache the PurchaseInfo instance.
self._info = self.basket.strategy.fetch_for_product(
self.product, self.stockrecord)
return self._info
@property
def is_tax_known(self):
return self.purchase_info.price.is_tax_known
@property
def unit_effective_price(self):
"""
The price to use for offer calculations
"""
return self.purchase_info.price.effective_price
@property
def unit_price_excl_tax(self):
return self.purchase_info.price.excl_tax
@property
def unit_price_incl_tax(self):
return self.purchase_info.price.incl_tax
@property
def unit_tax(self):
return self.purchase_info.price.tax
@property
def line_price_excl_tax(self):
return self.quantity * self.unit_price_excl_tax
@property
def line_price_excl_tax_incl_discounts(self):
if self._discount_excl_tax:
return self.line_price_excl_tax - self._discount_excl_tax
if self._discount_incl_tax:
# This is a tricky situation. We know the discount as calculated
# against tax inclusive prices but we need to guess how much of the
# discount applies to tax-exclusive prices. We do this by
# assuming a linear tax and scaling down the original discount.
return self.line_price_excl_tax \
- self._tax_ratio * self._discount_incl_tax
return self.line_price_excl_tax
@property
def line_price_incl_tax_incl_discounts(self):
        # We use whichever discount value is set. If the discount value was
        # calculated against the tax-exclusive prices, then subtracting it
        # from the tax-inclusive line price is only an approximation.
return self.line_price_incl_tax - self.discount_value
@property
def line_tax(self):
return self.quantity * self.unit_tax
@property
def line_price_incl_tax(self):
return self.quantity * self.unit_price_incl_tax
@property
def description(self):
d = str(self.product)
ops = []
for attribute in self.attributes.all():
ops.append("%s = '%s'" % (attribute.option.name, attribute.value))
if ops:
d = "%s (%s)" % (d.decode('utf-8'), ", ".join(ops))
return d
def get_warning(self):
"""
Return a warning message about this basket line if one is applicable
This could be things like the price has changed
"""
if not self.stockrecord:
msg = u"'%(product)s' is no longer available"
return _(msg) % {'product': self.product.get_title()}
if not self.price_incl_tax:
return
if not self.purchase_info.price.is_tax_known:
return
# Compare current price to price when added to basket
current_price_incl_tax = self.purchase_info.price.incl_tax
if current_price_incl_tax != self.price_incl_tax:
product_prices = {
'product': self.product.get_title(),
'old_price': currency(self.price_incl_tax),
'new_price': currency(current_price_incl_tax)
}
if current_price_incl_tax > self.price_incl_tax:
warning = _("The price of '%(product)s' has increased from"
" %(old_price)s to %(new_price)s since you added"
" it to your basket")
return warning % product_prices
else:
warning = _("The price of '%(product)s' has decreased from"
" %(old_price)s to %(new_price)s since you added"
" it to your basket")
return warning % product_prices
class AbstractLineAttribute(models.Model):
"""
An attribute of a basket line
"""
line = models.ForeignKey('basket.Line', related_name='attributes',
verbose_name=_("Line"))
option = models.ForeignKey('catalogue.Option', verbose_name=_("Option"))
value = models.CharField(_("Value"), max_length=255)
class Meta:
abstract = True
app_label = 'basket'
verbose_name = _('Line attribute')
verbose_name_plural = _('Line attributes')
|
{
"content_hash": "3ac67394ea5396c912dcf10074c74c1d",
"timestamp": "",
"source": "github",
"line_count": 850,
"max_line_length": 79,
"avg_line_length": 33.758823529411764,
"alnum_prop": 0.5871406168322008,
"repo_name": "kapt/django-oscar",
"id": "da4cfc6f3e8a49876e5a08acfa66cf8de63b93cd",
"size": "28695",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "oscar/apps/basket/abstract_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1013938"
},
{
"name": "JavaScript",
"bytes": "926045"
},
{
"name": "Python",
"bytes": "5840384"
},
{
"name": "Shell",
"bytes": "6015"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
import os
import shutil
import time
from mklib import Task
from mklib.common import relpath
class foo(Task):
default = True
results = ["foo.txt"]
deps = ["bar.txt"]
def make(self):
src = self.deps[0].path
dst = self.results[0].path
self.log.info("cp %s %s", relpath(src), relpath(dst))
shutil.copy(src, dst)
class bar(Task):
def make(self):
f = open("bar.txt", 'w')
f.write(str(time.time()))
f.close()
class clean(Task):
def make(self):
for p in ("foo.txt", "bar.txt"):
if os.path.exists(p):
os.remove(p)
|
{
"content_hash": "5b67ef5c4266b68223ce534c2b38cd46",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 22.321428571428573,
"alnum_prop": 0.552,
"repo_name": "ActiveState/mk",
"id": "f730d5a0e8a7dfd1b3a7e4a2eaba92d308f2bc8f",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/data/outofdate/Makefile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "242701"
}
],
"symlink_target": ""
}
|
import pytest
from actstream.actions import follow
from grandchallenge.notifications.models import Notification
from tests.factories import UserFactory
from tests.notifications_tests.factories import NotificationFactory
@pytest.mark.django_db
def test_notification_preference_created():
u = UserFactory()
prefs = u.user_profile
assert prefs
assert prefs.user == u
assert prefs.receive_notification_emails is True
assert prefs.notification_email_last_sent_at is None
assert prefs.has_unread_notifications is False
@pytest.mark.django_db
def test_notifications_filtered():
u1 = UserFactory()
u2 = UserFactory()
follow(u1, u2)
n = NotificationFactory(
user=u1, type=Notification.Type.GENERIC, actor=u1, message="says hi"
)
assert u2.user_profile.has_unread_notifications is False
assert u1.user_profile.has_unread_notifications is True
n.read = True
n.save()
assert u1.user_profile.has_unread_notifications is False
|
{
"content_hash": "176e68198db54166d40969afff57ca8b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7422577422577422,
"repo_name": "comic/comic-django",
"id": "713e58a2168a13477cee67e68fca66a30e66c868",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/profiles_tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
''' Used by :class:`hystrix.command.Command` to record metrics.'''
from __future__ import absolute_import
import logging
import six
from hystrix.rolling_number import RollingNumber
log = logging.getLogger(__name__)
class Metrics(object):
""" Base class for metrics
Args:
counter (:class:`hystrix.rolling_number.RollingNumber`): Used to
increment or set values over time.
"""
def __init__(self, counter):
self.counter = counter
def cumulative_count(self, event):
""" Cumulative count
Get the **cumulative** count since the start of the application for the
given :class:`RollingNumberEvent`.
Args:
event (:class:`RollingNumberEvent`): The Event to retrieve a
**sum** for.
Returns:
long: Returns the long cumulative count.
"""
return self.counter.cumulative_sum(event)
def rolling_count(self, event):
""" **Rolling** count
        Get the rolling count for the given :class:`RollingNumberEvent`.
Args:
event (:class:`RollingNumberEvent`): The Event to retrieve a
**sum** for.
Returns:
long: Returns the long cumulative count.
"""
return self.counter.rolling_sum(event)
class CommandMetricsMetaclass(type):
""" Metaclass for :class:`CommandMetrics`
Return a cached or create the :class:`CommandMetrics` instance for a given
:class:`hystrix.command.Command` name.
This ensures only 1 :class:`CommandMetrics` instance per
:class:`hystrix.command.Command` name.
"""
__instances__ = dict()
__blacklist__ = ('CommandMetrics', 'CommandMetricsMetaclass')
def __new__(cls, name, bases, attrs):
# Do not use cache for black listed classes.
if name in cls.__blacklist__:
return super(CommandMetricsMetaclass, cls).__new__(cls, name,
bases, attrs)
# User defined class name or create a default.
class_name = attrs.get('__command_metrics_name__',
'{}CommandMetrics'.format(name))
# Check for CommandMetrics class instance
if class_name not in cls.__instances__:
new_class = super(CommandMetricsMetaclass, cls).__new__(cls,
class_name,
bases,
attrs)
setattr(new_class, 'command_metrics_name', class_name)
cls.__instances__[class_name] = new_class
return cls.__instances__[class_name]
class CommandMetrics(six.with_metaclass(CommandMetricsMetaclass, Metrics)):
""" Command metrics
"""
__command_metrics_name__ = None
# def __init__(self, command_key, command_group, properties,
# event_notifier):
def __init__(self):
# self.properties = properties
# counter = RollingNumber(self.properties.milliseconds,
# self.properties.bucket_numbers)
super(CommandMetrics, self).__init__(None) # use counter here
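# Editorial sketch of the caching behaviour described in the metaclass
# docstring above (hypothetical subclass name):
#   class FooMetrics(CommandMetrics):
#       __command_metrics_name__ = 'FooMetrics'
#   # A second definition with the same name returns the cached class object,
#   # so there is a single CommandMetrics class per Command name.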
class ExecutorMetricsMetaclass(type):
__instances__ = dict()
__blacklist = ('ExecutorMetrics', 'ExecutorMetricsMetaclass')
def __new__(cls, name, bases, attrs):
if name in cls.__blacklist:
return super(ExecutorMetricsMetaclass, cls).__new__(cls, name,
bases, attrs)
class_name = attrs.get('__executor_metrics_name__',
'{}ExecutorMetrics'.format(name))
new_class = super(ExecutorMetricsMetaclass, cls).__new__(cls,
class_name,
bases, attrs)
setattr(new_class, 'executor_metrics_name', class_name)
if class_name not in cls.__instances__:
cls.__instances__[class_name] = new_class
return cls.__instances__[class_name]
class ExecutorMetrics(six.with_metaclass(ExecutorMetricsMetaclass, object)):
__executor_metrics_name__ = None
class HealthCounts(object):
""" Number of requests during rolling window.
    Number that failed (failure + timeout + thread pool rejected +
    short circuited + semaphore rejected).
    Error percentage.
"""
def __init__(self, total, error, error_percentage):
self._total_count = total
self._error_count = error
self._error_percentage = error_percentage
def total_requests(self):
""" Total reqeust
Returns:
int: Returns total request count.
"""
return self._total_count
def error_count(self):
""" Error count
Returns:
int: Returns error count.
"""
return self._error_count
def error_percentage(self):
""" Error percentage
Returns:
int: Returns error percentage.
"""
return self._error_percentage
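# Editorial usage sketch: HealthCounts is a plain value object, e.g.
#   counts = HealthCounts(total=100, error=7, error_percentage=7)
#   counts.total_requests()    # -> 100
#   counts.error_percentage()  # -> 7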
|
{
"content_hash": "43748aadd633d3fa2bd089a70264da8a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 31.221556886227546,
"alnum_prop": 0.5573456079785194,
"repo_name": "yonglehou/hystrix-py",
"id": "dc9438fa29d8c590147f50e874b7eb2f1d76813f",
"size": "5214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hystrix/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "269"
},
{
"name": "Python",
"bytes": "161351"
},
{
"name": "Shell",
"bytes": "457"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.address import SyntheticAddress, parse_spec
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_graph import BuildGraph
from pants.base.target import Target
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) There are many untested methods in BuildGraph left to be tested.
class BuildGraphTest(BaseTest):
def inject_graph(self, root_spec, graph_dict):
"""Given a root spec, injects relevant targets from the graph represented by graph_dict.
    graph_dict should contain address specs, keyed by source spec, with a list of destination specs as each value.
Each created target will be a simple `target` alias.
Returns the parsed Address for the root_spec.
"""
for src, targets in graph_dict.items():
src_path, src_name = parse_spec(src)
if not src_path:
# The target is located in the root.
src_path = '.'
self.add_to_build_file(
'{}/BUILD'.format(src_path),
'''target(name='{}', dependencies=[{}])\n'''.format(
src_name,
"'{}'".format("','".join(targets)) if targets else ''
)
)
root_address = SyntheticAddress.parse(root_spec)
self.build_graph.inject_address_closure(root_address)
return root_address
def test_target_invalid(self):
self.add_to_build_file('a/BUILD', 'target(name="a")')
with self.assertRaises(AddressLookupError):
self.build_graph.inject_address_closure(SyntheticAddress.parse('a:nope'))
self.add_to_build_file('b/BUILD', 'target(name="a")')
with self.assertRaises(AddressLookupError):
self.build_graph.inject_address_closure(SyntheticAddress.parse('b'))
with self.assertRaises(AddressLookupError):
self.build_graph.inject_address_closure(SyntheticAddress.parse('b:b'))
def test_transitive_closure_address(self):
root_address = self.inject_graph('//:foo', {
"//:foo": ['a'],
"a": ['a/b:bat'],
"a/b:bat": [],
})
self.assertEqual(len(self.build_graph.transitive_subgraph_of_addresses([root_address])), 3)
def test_no_targets(self):
self.add_to_build_file('empty/BUILD', 'pass')
with self.assertRaises(AddressLookupError):
self.build_graph.inject_address_closure(SyntheticAddress.parse('empty'))
with self.assertRaises(AddressLookupError):
self.build_graph.inject_address_closure(SyntheticAddress.parse('empty:foo'))
def test_contains_address(self):
a = SyntheticAddress.parse('a')
self.assertFalse(self.build_graph.contains_address(a))
target = Target(name='a',
address=a,
build_graph=self.build_graph)
self.build_graph.inject_target(target)
self.assertTrue(self.build_graph.contains_address(a))
def test_get_target_from_spec(self):
a = self.make_target('foo:a')
result = self.build_graph.get_target_from_spec('foo:a')
self.assertEquals(a, result)
b = self.make_target('foo:b')
result = self.build_graph.get_target_from_spec(':b', relative_to='foo')
self.assertEquals(b, result)
def test_walk_graph(self):
# Make sure that BuildGraph.walk_transitive_dependency_graph() and
# BuildGraph.walk_transitive_dependee_graph() return DFS preorder (or postorder) traversal.
def assertDependencyWalk(target, results, postorder=False):
targets = []
self.build_graph.walk_transitive_dependency_graph([target.address],
lambda x: targets.append(x),
postorder=postorder)
self.assertEquals(results, targets)
def assertDependeeWalk(target, results, postorder=False):
targets = []
self.build_graph.walk_transitive_dependee_graph([target.address],
lambda x: targets.append(x),
postorder=postorder)
self.assertEquals(results, targets)
a = self.make_target('a')
b = self.make_target('b', dependencies=[a])
c = self.make_target('c', dependencies=[b])
d = self.make_target('d', dependencies=[c, a])
e = self.make_target('e', dependencies=[d])
assertDependencyWalk(a, [a])
assertDependencyWalk(b, [b, a])
assertDependencyWalk(c, [c, b, a])
assertDependencyWalk(d, [d, c, b, a])
assertDependencyWalk(e, [e, d, c, b, a])
assertDependeeWalk(a, [a, b, c, d, e])
assertDependeeWalk(b, [b, c, d, e])
assertDependeeWalk(c, [c, d, e])
assertDependeeWalk(d, [d, e])
assertDependeeWalk(e, [e])
assertDependencyWalk(a, [a], postorder=True)
assertDependencyWalk(b, [a, b], postorder=True)
assertDependencyWalk(c, [a, b, c], postorder=True)
assertDependencyWalk(d, [a, b, c, d], postorder=True)
assertDependencyWalk(e, [a, b, c, d, e], postorder=True)
assertDependeeWalk(a, [e, d, c, b, a], postorder=True)
assertDependeeWalk(b, [e, d, c, b], postorder=True)
assertDependeeWalk(c, [e, d, c], postorder=True)
assertDependeeWalk(d, [e, d], postorder=True)
assertDependeeWalk(e, [e], postorder=True)
# Try a case where postorder traversal is not identical to reversed preorder traversal
c = self.make_target('c1', dependencies=[])
d = self.make_target('d1', dependencies=[c])
b = self.make_target('b1', dependencies=[c, d])
e = self.make_target('e1', dependencies=[b])
a = self.make_target('a1', dependencies=[b, e])
assertDependencyWalk(a, [a, b, c, d, e])
assertDependencyWalk(a, [c, d, b, e, a], postorder=True)
def test_target_closure(self):
a = self.make_target('a')
self.assertEquals([a], a.closure())
b = self.make_target('b', dependencies=[a])
self.assertEquals([b, a], b.closure())
c = self.make_target('c', dependencies=[b])
self.assertEquals([c, b, a], c.closure())
d = self.make_target('d', dependencies=[a, c])
self.assertEquals([d, a, c, b], d.closure())
def test_transitive_subgraph_of_addresses_bfs(self):
root = self.inject_graph('a', {
'a': ['b', 'c'],
'b': ['d', 'e'],
'c': ['f', 'g'],
'd': ['h', 'i'],
'e': ['j', 'k'],
'f': ['l', 'm'],
'g': ['n', 'o'],
'h': [], 'i': [], 'j': [], 'k': [], 'l': [], 'm': [], 'n': [], 'o': [],
})
self.assertEquals(
[t.address.target_name for t in self.build_graph.transitive_subgraph_of_addresses_bfs([root])],
[str(unichr(x)) for x in xrange(ord('a'), ord('o') + 1)],
)
def test_transitive_subgraph_of_addresses_bfs_predicate(self):
root = self.inject_graph('a', {
'a': ['b', 'c'],
'b': ['d', 'e'],
'c': [], 'd': [], 'e': [],
})
predicate = lambda t: t.address.target_name != 'b'
filtered = self.build_graph.transitive_subgraph_of_addresses_bfs([root], predicate=predicate)
self.assertEquals([t.address.target_name for t in filtered], ['a', 'c'])
def test_target_walk(self):
def assertWalk(expected, target):
results = []
target.walk(lambda x: results.append(x))
self.assertEquals(expected, results)
a = self.make_target('a')
assertWalk([a], a)
b = self.make_target('b', dependencies=[a])
assertWalk([b, a], b)
c = self.make_target('c', dependencies=[b])
assertWalk([c, b, a], c)
d = self.make_target('d', dependencies=[a, c])
assertWalk([d, a, c, b], d)
def test_lookup_exception(self):
# There is code that depends on the fact that TransitiveLookupError is a subclass of
# AddressLookupError
self.assertIsInstance(BuildGraph.TransitiveLookupError(), AddressLookupError)
def inject_address_closure(self, spec):
self.build_graph.inject_address_closure(SyntheticAddress.parse(spec))
def test_invalid_address(self):
with self.assertRaisesRegexp(AddressLookupError,
'^BUILD file does not exist at:.*/BUILD'):
self.inject_address_closure('//:a')
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=["non-existent-path:b"],'
')')
with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
'^BUILD file does not exist at:.*/non-existent-path/BUILD'
'\s+when translating spec non-existent-path:b'
'\s+referenced from :a$'):
self.inject_address_closure('//:a')
def test_invalid_address_two_hops(self):
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=["goodpath:b"],'
')')
self.add_to_build_file('goodpath/BUILD',
'target(name="b", '
' dependencies=["non-existent-path:c"],'
')')
with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
'^BUILD file does not exist at: .*/non-existent-path/BUILD'
'\s+when translating spec non-existent-path:c'
'\s+referenced from goodpath:b'
'\s+referenced from :a$'):
self.inject_address_closure('//:a')
def test_invalid_address_two_hops_same_file(self):
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=["goodpath:b"],'
')')
self.add_to_build_file('goodpath/BUILD',
'target(name="b", '
' dependencies=[":c"],'
')\n'
'target(name="c", '
' dependencies=["non-existent-path:d"],'
')')
with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
'^BUILD file does not exist at:.*/non-existent-path/BUILD'
'\s+when translating spec non-existent-path:d'
'\s+referenced from goodpath:c'
'\s+referenced from goodpath:b'
'\s+referenced from :a$'):
self.inject_address_closure('//:a')
def test_raise_on_duplicate_dependencies(self):
self.add_to_build_file('BUILD',
'target(name="a", '
' dependencies=['
' "other:b",'
' "//other:b",' # we should perform the test on normalized addresses
'])')
self.add_to_build_file('other/BUILD',
'target(name="b")')
with self.assertRaisesRegexp(
BuildGraph.TransitiveLookupError,
'^Addresses in dependencies must be unique. \'other:b\' is referenced more than once.'
'\s+referenced from :a$'):
self.inject_address_closure('//:a')
|
{
"content_hash": "563e7c63423059146255d7a145489a74",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 103,
"avg_line_length": 41.80898876404494,
"alnum_prop": 0.5739496551106333,
"repo_name": "TansyArron/pants",
"id": "63afac7ce997ff0dc3cf1670a9bcb5489cef6efc",
"size": "11310",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/base/test_build_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "Go",
"bytes": "1596"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "316044"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3362372"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "48118"
},
{
"name": "Thrift",
"bytes": "3485"
}
],
"symlink_target": ""
}
|
"""
Freezes final grades for a course
"""
import csv
import argparse
from collections import namedtuple
from django.core.management import BaseCommand, CommandError
from grades.models import ProctoredExamGrade
class ParsingError(CommandError):
"""Custom class for parsing exceptions"""
pass
class GradeRowParser:
"""Parser for rows of grade adjustment information in a CSV"""
RowProps = namedtuple('RowProps', ['exam_grade_id', 'score'])
default_col_names = dict(
exam_grade_id='proctoredexam_id',
score='score',
)
def __init__(self, col_names=None):
"""
Args:
col_names (dict): Mapping of RowProps property name to the name of the column in the CSV
"""
col_names = col_names or {}
self.col_names = self.RowProps(**{**self.default_col_names, **col_names})
def parse_and_validate_row(self, row):
"""Parses a row of grade adjustment info and makes sure it doesn't contain bad data"""
try:
parsed_row = self.RowProps(
exam_grade_id=int(row[self.col_names.exam_grade_id]),
score=float(row[self.col_names.score]),
)
except KeyError as e:
raise ParsingError('Row is missing a required column: {}'.format(str(e)))
except ValueError as e:
raise ParsingError('Row has an invalid value: {}'.format(str(e)))
if parsed_row.score < 0.0 or parsed_row.score > 100.0:
row_identifier = '{}: {}'.format(self.col_names.exam_grade_id, parsed_row.exam_grade_id)
raise ParsingError('[{}] "score" value must be between 0 and 100'.format(row_identifier))
return parsed_row
def parse_exam_grade_adjustments(self, csv_reader):
"""
Parses all rows of grade adjustment info from a CSV and yields each ProctoredExamGrade object
with its associated grade adjustment row from the CSV
Args:
csv_reader (csv.DictReader): A DictReader instance
Returns:
tuple(ProctoredExamGrade, RowProps):
A tuple containing a ProctoredExamGrade and its associated parsed CSV row
"""
parsed_row_dict = {}
for row in csv_reader:
parsed_row = self.parse_and_validate_row(row)
parsed_row_dict[parsed_row.exam_grade_id] = parsed_row
exam_grade_query = ProctoredExamGrade.objects.filter(id__in=parsed_row_dict.keys())
if exam_grade_query.count() < len(parsed_row_dict):
bad_exam_grade_ids = set(parsed_row_dict.keys()) - set(exam_grade_query.values_list('id', flat=True))
raise ParsingError(
'Some exam grade IDs do not match any ProctoredExamGrade records: {}'.format(bad_exam_grade_ids)
)
for exam_grade in exam_grade_query.all():
yield exam_grade, parsed_row_dict[exam_grade.id]
class Command(BaseCommand):
"""Parses a csv with exam grade adjustment information and changes the appropriate grades"""
help = "Parses a csv with exam grade adjustment information and changes the appropriate grades"
def add_arguments(self, parser):
        parser.add_argument('csvfile', type=argparse.FileType('r'), help='CSV file with exam grade adjustments')
parser.add_argument(
'--grade-id-col-name',
default=GradeRowParser.default_col_names['exam_grade_id'],
help='Name of the column that contains the proctored exam grade id')
parser.add_argument(
'--score-col-name',
default=GradeRowParser.default_col_names['score'],
help='Name of the column that contains the score value'
)
def handle(self, *args, **kwargs): # pylint: disable=unused-argument,too-many-locals
col_names = dict(
exam_grade_id=kwargs.get('grade_id_col_name'),
score=kwargs.get('score_col_name'),
)
csvfile = kwargs.get('csvfile')
reader = csv.DictReader(csvfile.read().splitlines())
grade_row_parser = GradeRowParser(col_names=col_names)
total_rows = 0
grades_changed = 0
grades_unchanged = 0
for exam_grade, parsed_adjustment_row in grade_row_parser.parse_exam_grade_adjustments(reader):
if exam_grade.score != parsed_adjustment_row.score:
exam_grade.set_score(parsed_adjustment_row.score)
exam_grade.save_and_log(None)
grades_changed = grades_changed + 1
else:
grades_unchanged = grades_unchanged + 1
total_rows = total_rows + 1
result_messages = ['Total rows: {}'.format(total_rows)]
if grades_changed:
result_messages.append('Grades changed: {}'.format(grades_changed))
if grades_unchanged:
result_messages.append('Grades found with no change in score: {}'.format(grades_unchanged))
self.stdout.write(self.style.SUCCESS('\n'.join(result_messages)))
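# Illustrative example (not from the original source): with the default column
# names, a CSV row such as {'proctoredexam_id': '12', 'score': '87.5'} parses via
# GradeRowParser().parse_and_validate_row(...) into RowProps(exam_grade_id=12,
# score=87.5), while a score outside the 0-100 range raises ParsingError instead.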
|
{
"content_hash": "01360eb1614b6d95f456767cac91d90f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 113,
"avg_line_length": 41.79831932773109,
"alnum_prop": 0.6252513067953357,
"repo_name": "mitodl/micromasters",
"id": "b59f947632eb19e13dc0384944eada9ee7051e2d",
"size": "4974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grades/management/commands/adjust_exam_grades_from_csv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.views.GuardDutyEventMapPointsList
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Pritam D. Gautam <pritam.gautam@nuagedm.com> @nuagedm
"""
import datetime
from flask import jsonify, request
from security_monkey import db, rbac
from security_monkey.views import AuthenticatedService
from security_monkey.datastore import (
GuardDutyEvent,
Item,
ItemAudit,
Account,
AccountType,
Technology,
AuditorSettings,
Datastore,
ItemRevision)
# Severity Levels for GuardDuty Findings
# https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity
def sev_name(val):
if 0.1 <= val <= 3.9:
return 'Low'
if 4.0 <= val <= 6.9:
return 'Medium'
if 7.0 <= val <= 8.9:
return 'High'
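# For example, sev_name(5.3) returns 'Medium'; values outside the documented
# 0.1-8.9 range fall through all three checks and the function returns None.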
# Returns a list of Map Circle Marker Points List
class GuardDutyEventMapPointsList(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/worldmapguarddutydata
Get a list of World Map Data points matching the given criteria.
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "admin@example.org"
},
"items": [
{
"cityName": "Mar del Plata",
"count": 1,
"countryName": "Argentina",
"lat": -38.0,
"localPort": 22.0,
"localPortName": "SSH",
"lon": -57.55,
"remoteIpV4": "186.62.51.117",
"remoteOrg": "Telefonica de Argentina",
"remoteOrgASN": 22927.0,
"remoteOrgASNOrg": "Telefonica de Argentina",
"remoteOrgISP": "Telefonica de Argentina"
}
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# inner join itemrevision g ON i.id = g.item_id
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
        # Adding the following additional output data fields for the details modal popup on the map
# g.config -> 'Description' as "description",
# g.config -> 'Severity' as "severity",
# g.config -> 'Region' as "region",
# g.config -> 'Service' -> 'Count' as "count",
# g.config -> 'AccountId' as "accountid"
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id,
ItemRevision.config[('Service', 'Action', 'PortProbeAction','PortProbeDetails')].label('portprobedetails'),
ItemRevision.config[('Description')].label('description'),
ItemRevision.config[('Severity')].label('severity'),
ItemRevision.config[('Region')].label('region'),
ItemRevision.config[('Service')].label('service'),
ItemRevision.config[('Resource')].label('resource'),
ItemRevision.config[('AccountId')].label('accountid'),
) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
        def flatten_structure(rec):
result = dict(rec.__dict__)
if result.has_key('service'):
result.pop('service')
if result.has_key('resource'):
result.pop('resource')
if result.has_key('portprobedetails'):
result.pop('portprobedetails')
result.update(flatten_json(rec.portprobedetails[0]))
result['probe_count'] = rec.service['Count']
result['first_seen'] = rec.service['EventFirstSeen']
result['last_seen'] = rec.service['EventLastSeen']
result['resource_type'] = rec.resource['ResourceType']
result['instance_id'] = rec.resource['InstanceDetails']['InstanceId']
instance_tag_name = [k['Value'] for k in rec.resource['InstanceDetails']['Tags'] if k['Key']=='Name' ]
if instance_tag_name:
result['instance_name'] = instance_tag_name[0]
else:
result['instance_name'] = 'NA'
if result.has_key('_labels'):
result.pop('_labels')
# Convert Severity from float to Text
result['severity'] = sev_name(result['severity'])
return result
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_structure(record) for record in records)
fulldata_dataFrame = pd.DataFrame(flatten_records).rename(
columns={'RemoteIpDetails_GeoLocation_Lat': 'lat',
'RemoteIpDetails_GeoLocation_Lon': 'lon',
'LocalPortDetails_Port': 'localPort',
'LocalPortDetails_portName': 'localPortName',
'RemoteIpDetails_City_CityName': 'cityName',
'RemoteIpDetails_Country_CountryName': 'countryName',
'RemoteIpDetails_IpAddressV4': 'remoteIpV4',
'RemoteIpDetails_Organization_Asn': 'remoteOrgASN',
'RemoteIpDetails_Organization_AsnOrg': 'remoteOrgASNOrg',
'RemoteIpDetails_Organization_Isp': 'remoteOrgISP',
'RemoteIpDetails_Organization_Org': 'remoteOrg',
'counts': 'count'})
            # Not dropping duplicates, since each probe will likely carry different info to be displayed in the popup
mapdata_dataframe = fulldata_dataFrame.groupby(['lat', 'lon']).size().reset_index(name='count').merge(
fulldata_dataFrame, on=['lat', 'lon'], how='left')
items = mapdata_dataframe.to_dict('records')
marshaled_dict = {
'page': 1,
'total': len(items),
'auth': self.auth_dict,
'items': items
}
return marshaled_dict, 200
# Returns a list of Top 10 Countries by number of probe events received to display in Bar Chart
class GuardDutyEventTop10Countries(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/top10countryguarddutydata
Get a list of Top 10 Countries by number of probe events received to display in Bar Chart
**Example Request**:
.. sourcecode:: http
GET /api/1/worldmapguarddutydata HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"auth": {
"authenticated": true,
"roles": [
{
"name": "Admin"
},
{
"name": "Justify"
},
{
"name": "Comment"
},
{
"name": "View"
}
],
"user": "admin@example.org"
},
"items": [
{
"count": 1527,
"countryName": "China"
},
{
"count": 456,
"countryName": "United States"
},
{
"count": 116,
"countryName": "Russia"
},
],
"page": 1,
"total": 197
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
args = self.reqparse.parse_args()
for k, v in args.items():
if not v:
del args[k]
# Reference query as provided by Rick
# select
# g.item_id,
# g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join guarddutyevent g ON i.id = g.item_id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'detail' -> 'service' -> 'action' -> 'portProbeAction' -> 'portProbeDetails' is not NULL;
# """
# @pritam: 25 July, 2018
# With implementation of GuardDuty Data Injection using Custom Watcher, changing the source of GuardDutyEvent
# data for this query to ItemRevision Table
# select g.item_id,
# g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' as "guarddutyjson"
# from item i
# inner join itemaudit ia on i.id = ia.item_id
# inner join itemrevision g ON i.latest_revision_id = g.id
# where coalesce(justified, FALSE) = FALSE
# and coalesce(fixed, FALSE) = FALSE
# and g.config -> 'Service' -> 'Action' -> 'PortProbeAction' -> 'PortProbeDetails' is not NULL;
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
from sqlalchemy.sql.functions import coalesce
query = ItemRevision.query.with_entities(
ItemRevision.item_id, ItemRevision.config[('Service', 'Action', 'PortProbeAction',
'PortProbeDetails')]) \
.join((Item, Item.latest_revision_id == ItemRevision.id), (ItemAudit, Item.id == ItemAudit.item_id)) \
.filter((coalesce(ItemAudit.justified, False) == False), (coalesce(ItemAudit.fixed, False) == False),
(ItemRevision.config[
('Service', 'Action', 'PortProbeAction', 'PortProbeDetails')] != None))
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.join((Account, Account.id == Item.account_id))
query = query.filter(Account.name.in_(accounts))
records = query.all()
items = []
if len(records) > 0:
import pandas as pd
from ..flatten import flatten_json
flatten_records = (flatten_json(record[1][0]) for record in records)
fulldata_dataFrame = pd.DataFrame(flatten_records).rename(
columns={'RemoteIpDetails_GeoLocation_Lat': 'lat',
'RemoteIpDetails_GeoLocation_Lon': 'lon',
'LocalPortDetails_Port': 'localPort',
'LocalPortDetails_portName': 'localPortName',
'RemoteIpDetails_City_CityName': 'cityName',
'RemoteIpDetails_Country_CountryName': 'countryName',
'RemoteIpDetails_IpAddressV4': 'remoteIpV4',
'RemoteIpDetails_Organization_Asn': 'remoteOrgASN',
'RemoteIpDetails_Organization_AsnOrg': 'remoteOrgASNOrg',
'RemoteIpDetails_Organization_Isp': 'remoteOrgISP',
'RemoteIpDetails_Organization_Org': 'remoteOrg',
'counts': 'count'})
# Sorting and Limiting the resultset to 10
items = fulldata_dataFrame.groupby(['countryName']).size().reset_index(
name='count').sort_values(['count'], ascending=False).head(10).to_dict('records')
marshaled_dict = {
'page': 1,
'total': len(items),
'auth': self.auth_dict,
'items': items
}
return marshaled_dict, 200
class GuardDutyEventService(AuthenticatedService):
decorators = [
rbac.allow(["Admin"], ["POST"])
]
def post(self):
datastore = Datastore()
config = request.get_json(force=True)
#action_type = config['detail']['service']['action']['actionType']
action_type = 'guardduty'
gd_tech = Technology.query.filter(Technology.name == action_type).first()
if not gd_tech:
gd_tech = Technology(name=action_type)
db.session.add(gd_tech)
db.session.commit()
db.session.refresh(gd_tech)
identifier = config['detail']['accountId']
account = Account.query.filter(Account.identifier == identifier).first()
if not account:
raise Exception(
"Account with identifier [{}] not found.".format(identifier)
)
item = datastore.store(
gd_tech.name,
config['region'],
account.name,
config['detail']['type'],
True,
config
)
auditor_settings = AuditorSettings.query.filter(
AuditorSettings.auditor_class=='GuardDuty',
AuditorSettings.tech_id==gd_tech.id,
AuditorSettings.account_id==account.id
).first()
if not auditor_settings:
auditor_settings = AuditorSettings(
disabled=False,
issue_text='Guard Duty',
auditor_class='GuardDuty',
tech_id=gd_tech.id,
account_id=account.id
)
db.session.add(auditor_settings)
db.session.commit()
db.session.refresh(auditor_settings)
issue = ItemAudit(
score=int(config['detail']['severity']),
issue=config['detail']['title'],
notes=config['detail']['description'],
item_id=item.id,
auditor_setting_id=auditor_settings.id,
)
db.session.add(issue)
db.session.commit()
db.session.refresh(issue)
gd_event = GuardDutyEvent(
item_id=item.id,
config=config,
date_created=datetime.datetime.utcnow()
)
db.session.add(gd_event)
db.session.commit()
db.session.refresh(gd_event)
return {
'id': gd_event.id,
'config': gd_event.config,
}, 201
|
{
"content_hash": "c2306316d4a72c752cef44c42eb0334e",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 119,
"avg_line_length": 39.87117903930131,
"alnum_prop": 0.4967964514539182,
"repo_name": "stackArmor/security_monkey",
"id": "0f0bc0167acec2cd27c33fd12e97f1f0414f9e7f",
"size": "18262",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "security_monkey/views/guard_duty_event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33462"
},
{
"name": "Dart",
"bytes": "137774"
},
{
"name": "Dockerfile",
"bytes": "3798"
},
{
"name": "HTML",
"bytes": "165572"
},
{
"name": "JavaScript",
"bytes": "984069"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1682110"
},
{
"name": "Shell",
"bytes": "29978"
}
],
"symlink_target": ""
}
|
"""Policy package for prngmgr."""
|
{
"content_hash": "a8849ba1bd2a0cb6a29a394b72c21635",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.6764705882352942,
"repo_name": "wolcomm/prngmgr",
"id": "54797a7390af4037d259ae135d97ad4809e508d6",
"size": "686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prngmgr/policy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "HTML",
"bytes": "13475"
},
{
"name": "JavaScript",
"bytes": "2894"
},
{
"name": "Python",
"bytes": "105489"
}
],
"symlink_target": ""
}
|
from thumbor.loaders import http_loader
from tornado.concurrent import return_future
from urllib import unquote
def _normalize_url(url):
url = http_loader.quote_url(unquote(url))
return url if url.startswith('http') else 'https://%s' % url
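# Illustrative behaviour: a bare 'example.com/img.jpg' becomes
# 'https://example.com/img.jpg', while URLs that already carry an http/https
# scheme are only re-quoted and otherwise returned unchanged.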
def validate(context, url):
return http_loader.validate(context, url, normalize_url_func=_normalize_url)
def return_contents(response, url, callback, context):
return http_loader.return_contents(response, url, callback, context)
@return_future
def load(context, url, callback):
return http_loader.load_sync(context, url, callback, normalize_url_func=_normalize_url)
def encode(string):
return http_loader.encode(string)
|
{
"content_hash": "1f32bf39a46d61c8ad12792f86f4f4ca",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 27.76,
"alnum_prop": 0.7463976945244957,
"repo_name": "jdunaravich/thumbor",
"id": "cee70e9564c7a78b136def10eeb1dc7c23e56210",
"size": "945",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thumbor/loaders/https_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58039"
},
{
"name": "HTML",
"bytes": "1737"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "2194"
},
{
"name": "Python",
"bytes": "535471"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import SimpleHTTPServer
import SocketServer
import unittest
from contextlib import contextmanager
from threading import Thread
from pants.base.build_invalidator import CacheKey
from pants.cache.artifact_cache import UnreadableArtifact, call_insert, call_use_cached_files
from pants.cache.local_artifact_cache import LocalArtifactCache, TempLocalArtifactCache
from pants.cache.restful_artifact_cache import InvalidRESTfulCacheProtoError, RESTfulArtifactCache
from pants.util.contextutil import pushd, temporary_dir, temporary_file
from pants.util.dirutil import safe_mkdir
from pants_test.base.context_utils import create_context
# A very trivial server that serves files under the cwd.
class SimpleRESTHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, server):
# The base class implements GET and HEAD.
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def do_HEAD(self):
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_HEAD(self)
def do_PUT(self):
path = self.translate_path(self.path)
content_length = int(self.headers.getheader('content-length'))
content = self.rfile.read(content_length)
safe_mkdir(os.path.dirname(path))
with open(path, 'wb') as outfile:
outfile.write(content)
self.send_response(200)
self.end_headers()
def do_DELETE(self):
path = self.translate_path(self.path)
if os.path.exists(path):
os.unlink(path)
self.send_response(200)
else:
self.send_error(404, 'File not found')
self.end_headers()
class FailRESTHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Reject all requests"""
def __init__(self, request, client_address, server):
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def _return_failed(self):
self.send_response(401, 'Forced test failure')
self.end_headers()
def do_HEAD(self):
return self._return_failed()
def do_GET(self):
return self._return_failed()
def do_PUT(self):
return self._return_failed()
def do_DELETE(self):
return self._return_failed()
TEST_CONTENT1 = 'muppet'
TEST_CONTENT2 = 'kermit'
class TestArtifactCache(unittest.TestCase):
@contextmanager
def setup_local_cache(self):
with temporary_dir() as artifact_root:
with temporary_dir() as cache_root:
yield LocalArtifactCache(artifact_root, cache_root, compression=0)
@contextmanager
def setup_server(self, return_failed=False):
httpd = None
httpd_thread = None
try:
with temporary_dir() as cache_root:
with pushd(cache_root): # SimpleRESTHandler serves from the cwd.
if return_failed:
handler = FailRESTHandler
else:
handler = SimpleRESTHandler
httpd = SocketServer.TCPServer(('localhost', 0), handler)
port = httpd.server_address[1]
httpd_thread = Thread(target=httpd.serve_forever)
httpd_thread.start()
yield 'http://localhost:{0}'.format(port)
finally:
if httpd:
httpd.shutdown()
if httpd_thread:
httpd_thread.join()
@contextmanager
def setup_rest_cache(self, local=None, return_failed=False):
with temporary_dir() as artifact_root:
local = local or TempLocalArtifactCache(artifact_root, 0)
with self.setup_server(return_failed=return_failed) as base_url:
yield RESTfulArtifactCache(artifact_root, base_url, local)
@contextmanager
def setup_test_file(self, parent):
with temporary_file(parent) as f:
# Write the file.
f.write(TEST_CONTENT1)
path = f.name
f.close()
yield path
def test_local_cache(self):
with self.setup_local_cache() as artifact_cache:
self.do_test_artifact_cache(artifact_cache)
def test_restful_cache(self):
with self.assertRaises(InvalidRESTfulCacheProtoError):
RESTfulArtifactCache('foo', 'ftp://localhost/bar', 'foo')
with self.setup_rest_cache() as artifact_cache:
self.do_test_artifact_cache(artifact_cache)
def do_test_artifact_cache(self, artifact_cache):
key = CacheKey('muppet_key', 'fake_hash', 42)
with self.setup_test_file(artifact_cache.artifact_root) as path:
# Cache it.
self.assertFalse(artifact_cache.has(key))
self.assertFalse(bool(artifact_cache.use_cached_files(key)))
artifact_cache.insert(key, [path])
self.assertTrue(artifact_cache.has(key))
# Stomp it.
with open(path, 'w') as outfile:
outfile.write(TEST_CONTENT2)
# Recover it from the cache.
self.assertTrue(bool(artifact_cache.use_cached_files(key)))
# Check that it was recovered correctly.
with open(path, 'r') as infile:
content = infile.read()
self.assertEquals(content, TEST_CONTENT1)
# Delete it.
artifact_cache.delete(key)
self.assertFalse(artifact_cache.has(key))
def test_local_backed_remote_cache(self):
"""make sure that the combined cache finds what it should and that it backfills"""
with self.setup_server() as url:
with self.setup_local_cache() as local:
tmp = TempLocalArtifactCache(local.artifact_root, 0)
remote = RESTfulArtifactCache(local.artifact_root, url, tmp)
combined = RESTfulArtifactCache(local.artifact_root, url, local)
key = CacheKey('muppet_key', 'fake_hash', 42)
with self.setup_test_file(local.artifact_root) as path:
# No cache has key.
self.assertFalse(local.has(key))
self.assertFalse(remote.has(key))
self.assertFalse(combined.has(key))
# No cache returns key.
self.assertFalse(bool(local.use_cached_files(key)))
self.assertFalse(bool(remote.use_cached_files(key)))
self.assertFalse(bool(combined.use_cached_files(key)))
# Attempting to use key that no cache had should not change anything.
self.assertFalse(local.has(key))
self.assertFalse(remote.has(key))
self.assertFalse(combined.has(key))
# Add to only remote cache.
remote.insert(key, [path])
# After insertion to remote, remote and only remote should have key
self.assertFalse(local.has(key))
self.assertTrue(remote.has(key))
self.assertTrue(combined.has(key))
# Successfully using via remote should NOT change local.
self.assertTrue(bool(remote.use_cached_files(key)))
self.assertFalse(local.has(key))
# Successfully using via combined SHOULD backfill local.
self.assertTrue(bool(combined.use_cached_files(key)))
self.assertTrue(local.has(key))
self.assertTrue(bool(local.use_cached_files(key)))
def test_multiproc(self):
context = create_context()
key = CacheKey('muppet_key', 'fake_hash', 42)
with self.setup_local_cache() as cache:
self.assertEquals(context.subproc_map(call_use_cached_files, [(cache, key)]), [False])
with self.setup_test_file(cache.artifact_root) as path:
context.subproc_map(call_insert, [(cache, key, [path], False)])
self.assertEquals(context.subproc_map(call_use_cached_files, [(cache, key)]), [True])
with self.setup_rest_cache() as cache:
self.assertEquals(context.subproc_map(call_use_cached_files, [(cache, key)]), [False])
with self.setup_test_file(cache.artifact_root) as path:
context.subproc_map(call_insert, [(cache, key, [path], False)])
self.assertEquals(context.subproc_map(call_use_cached_files, [(cache, key)]), [True])
def test_failed_multiproc(self):
context = create_context()
key = CacheKey('muppet_key', 'fake_hash', 55)
# Failed requests should return failure status, but not raise exceptions
with self.setup_rest_cache(return_failed=True) as cache:
self.assertFalse(context.subproc_map(call_use_cached_files, [(cache, key)])[0])
with self.setup_test_file(cache.artifact_root) as path:
context.subproc_map(call_insert, [(cache, key, [path], False)])
self.assertFalse(context.subproc_map(call_use_cached_files, [(cache, key)])[0])
|
{
"content_hash": "74b4b3fa2d8c40cb3c4990ba23d9d120",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 98,
"avg_line_length": 37.14666666666667,
"alnum_prop": 0.6834170854271356,
"repo_name": "scode/pants",
"id": "845a7723daa582db60f83b79bd27a6fc06728838",
"size": "8505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/cache/test_artifact_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "69479"
},
{
"name": "Java",
"bytes": "302900"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3788845"
},
{
"name": "Scala",
"bytes": "76623"
},
{
"name": "Shell",
"bytes": "49094"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
}
|
"""Generate notebooks and documentation from python scripts."""
from __future__ import print_function
import os
import os.path
from glob import glob
import re
import pickle
from timeit import default_timer as timer
import warnings
import py2jn
import pypandoc
import nbformat
from sphinx.ext import intersphinx
from nbconvert import RSTExporter
from nbconvert.preprocessors import ExecutePreprocessor
def mkdir(pth):
"""Make a directory if it doesn't exist."""
if not os.path.exists(pth):
os.mkdir(pth)
def pathsplit(pth, dropext=True):
"""Split a path into a tuple of all of its components."""
if dropext:
pth = os.path.splitext(pth)[0]
parts = os.path.split(pth)
if parts[0] == '':
return parts[1:]
elif len(parts[0]) == 1:
return parts
else:
return pathsplit(parts[0], dropext=False) + parts[1:]
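# Illustrative behaviour (example paths, not from the original source):
#   pathsplit('examples/scripts/demo.py')                 -> ('examples', 'scripts', 'demo')
#   pathsplit('examples/scripts/demo.py', dropext=False)  -> ('examples', 'scripts', 'demo.py')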
def update_required(srcpth, dstpth):
"""
If the file at `dstpth` is generated from the file at `srcpth`,
determine whether an update is required. Returns True if `dstpth`
does not exist, or if `srcpth` has been more recently modified
than `dstpth`.
"""
return not os.path.exists(dstpth) or \
os.stat(srcpth).st_mtime > os.stat(dstpth).st_mtime
def fetch_intersphinx_inventory(uri):
"""
Fetch and read an intersphinx inventory file at a specified uri,
which can either be a url (e.g. http://...) or a local file system
filename.
"""
# See https://stackoverflow.com/a/30981554
class MockConfig(object):
intersphinx_timeout = None
tls_verify = False
class MockApp(object):
srcdir = ''
config = MockConfig()
def warn(self, msg):
warnings.warn(msg)
return intersphinx.fetch_inventory(MockApp(), '', uri)
def read_sphinx_environment(pth):
"""Read the sphinx environment.pickle file at path `pth`."""
with open(pth, 'rb') as fo:
env = pickle.load(fo)
return env
def parse_rst_index(rstpth):
"""
Parse the top-level RST index file, at `rstpth`, for the example
python scripts. Returns a list of subdirectories in order of
appearance in the index file, and a dict mapping subdirectory name
to a description.
"""
pthidx = {}
pthlst = []
with open(rstpth) as fd:
lines = fd.readlines()
for i, l in enumerate(lines):
if i > 0:
if re.match(r'^ \w+', l) is not None and \
re.match(r'^\w+', lines[i - 1]) is not None:
# List of subdirectories in order of appearance in index.rst
pthlst.append(lines[i - 1][:-1])
# Dict mapping subdirectory name to description
pthidx[lines[i - 1][:-1]] = l[2:-1]
return pthlst, pthidx
def preprocess_script_string(str):
"""
Process python script represented as string `str` in preparation
for conversion to a notebook. This processing includes removal of
the header comment, modification of the plotting configuration,
and replacement of certain sphinx cross-references with
appropriate links to online docs.
"""
# Remove header comment
str = re.sub(r'^(#[^#\n]+\n){5}\n*', r'', str)
# Insert notebook plotting configuration function
str = re.sub(r'from sporco import plot', r'from sporco import plot'
'\nplot.config_notebook_plotting()',
str, flags=re.MULTILINE)
# Remove final input statement and preceding comment
str = re.sub(r'\n*# Wait for enter on keyboard.*\ninput().*\n*',
r'', str, flags=re.MULTILINE)
return str
def script_string_to_notebook_object(str):
"""
Convert a python script represented as string `str` to a notebook
object.
"""
return py2jn.py_string_to_notebook(str, nbver=4)
def script_string_to_notebook(str, pth):
"""
Convert a python script represented as string `str` to a notebook
with filename `pth`.
"""
nb = py2jn.py_string_to_notebook(str)
py2jn.write_notebook(nb, pth)
def script_to_notebook(spth, npth, cr):
"""
Convert the script at `spth` to a notebook at `npth`. Parameter `cr`
is a CrossReferenceLookup object.
"""
# Read entire text of example script
with open(spth) as f:
stxt = f.read()
# Process script text
stxt = preprocess_script_string(stxt)
# If the notebook file exists and has been executed, try to
# update markdown cells without deleting output cells
if os.path.exists(npth) and notebook_executed(npth):
# Read current notebook file
nbold = nbformat.read(npth, as_version=4)
# Construct updated notebook
nbnew = script_string_to_notebook_object(stxt)
if cr is not None:
notebook_substitute_ref_with_url(nbnew, cr)
# If the code cells of the two notebooks match, try to
# update markdown cells without deleting output cells
if same_notebook_code(nbnew, nbold):
try:
replace_markdown_cells(nbnew, nbold)
except Exception:
script_string_to_notebook_with_links(stxt, npth, cr)
else:
with open(npth, 'wt') as f:
nbformat.write(nbold, f)
else:
# Write changed text to output notebook file
script_string_to_notebook_with_links(stxt, npth, cr)
else:
# Write changed text to output notebook file
script_string_to_notebook_with_links(stxt, npth, cr)
def script_string_to_notebook_with_links(str, pth, cr=None):
"""
Convert a python script represented as string `str` to a notebook
with filename `pth` and replace sphinx cross-references with links
to online docs. Parameter `cr` is a CrossReferenceLookup object.
"""
if cr is None:
script_string_to_notebook(str, pth)
else:
ntbk = script_string_to_notebook_object(str)
notebook_substitute_ref_with_url(ntbk, cr)
with open(pth, 'wt') as f:
nbformat.write(ntbk, f)
def rst_to_notebook(infile, outfile, diridx=False):
"""Convert an rst file to a notebook file."""
# Read infile into a string
with open(infile, 'r') as fin:
rststr = fin.read()
# Convert string from rst to markdown
mdfmt = 'markdown_github+tex_math_dollars+fenced_code_attributes'
mdstr = pypandoc.convert_text(rststr, mdfmt, format='rst',
extra_args=['--atx-headers'])
# In links, replace .py extensions with .ipynb
mdstr = re.sub(r'\(([^\)]+).py\)', r'(\1.ipynb)', mdstr)
# Links to subdirectories require explicit index file inclusion
if diridx:
mdstr = re.sub(r']\(([^\)/]+)\)', r'](\1/index.ipynb)', mdstr)
# Enclose the markdown within triple quotes and convert from
# python to notebook
mdstr = '"""' + mdstr + '"""'
nb = py2jn.py_string_to_notebook(mdstr)
py2jn.tools.write_notebook(nb, outfile, nbver=4)
def markdown_to_notebook(infile, outfile):
"""Convert a markdown file to a notebook file."""
# Read infile into a string
with open(infile, 'r') as fin:
str = fin.read()
# Enclose the markdown within triple quotes and convert from
# python to notebook
str = '"""' + str + '"""'
nb = py2jn.py_string_to_notebook(str)
py2jn.tools.write_notebook(nb, outfile, nbver=4)
def rst_to_docs_rst(infile, outfile):
"""Convert an rst file to a sphinx docs rst file."""
# Read infile into a list of lines
with open(infile, 'r') as fin:
rst = fin.readlines()
# Inspect outfile path components to determine whether outfile
# is in the root of the examples directory or in a subdirectory
    # thereof
ps = pathsplit(outfile)[-3:]
if ps[-2] == 'examples':
ps = ps[-2:]
idx = 'index'
else:
idx = ''
# Output string starts with a cross-reference anchor constructed from
# the file name and path
out = '.. _' + '_'.join(ps) + ':\n\n'
# Iterate over lines from infile
it = iter(rst)
for line in it:
if line[0:12] == '.. toc-start': # Line has start of toc marker
# Initialise current toc array and iterate over lines until
# end of toc marker encountered
toc = []
for line in it:
if line == '\n': # Drop newline lines
continue
elif line[0:10] == '.. toc-end': # End of toc marker
# Add toctree section to output string
out += '.. toctree::\n :maxdepth: 1\n\n'
for c in toc:
out += ' %s <%s>\n' % c
break
else: # Still within toc section
# Extract link text and target url and append to
# toc array
m = re.search(r'`(.*?)\s*<(.*?)(?:.py)?>`', line)
if m:
if idx == '':
toc.append((m.group(1), m.group(2)))
else:
toc.append((m.group(1),
os.path.join(m.group(2), idx)))
else: # Not within toc section
out += line
with open(outfile, 'w') as fout:
fout.write(out)
def parse_notebook_index(ntbkpth):
"""
Parse the top-level notebook index file at `ntbkpth`. Returns a
list of subdirectories in order of appearance in the index file,
and a dict mapping subdirectory name to a description.
"""
# Convert notebook to RST text in string
rex = RSTExporter()
rsttxt = rex.from_filename(ntbkpth)[0]
# Clean up trailing whitespace
    rsttxt = re.sub(r'\n  ', r'', rsttxt, flags=re.M | re.S)
pthidx = {}
pthlst = []
lines = rsttxt.split('\n')
for l in lines:
m = re.match(r'^-\s+`([^<]+)\s+<([^>]+).ipynb>`__', l)
if m:
# List of subdirectories in order of appearance in index.rst
pthlst.append(m.group(2))
# Dict mapping subdirectory name to description
pthidx[m.group(2)] = m.group(1)
return pthlst, pthidx
def construct_notebook_index(title, pthlst, pthidx):
"""
Construct a string containing a markdown format index for the list
of paths in `pthlst`. The title for the index is in `title`, and
`pthidx` is a dict giving label text for each path.
"""
# Insert title text
txt = '"""\n## %s\n"""\n\n"""' % title
# Insert entry for each item in pthlst
for pth in pthlst:
# If pth refers to a .py file, replace .py with .ipynb, otherwise
# assume it's a directory name and append '/index.ipynb'
if pth[-3:] == '.py':
link = os.path.splitext(pth)[0] + '.ipynb'
else:
link = os.path.join(pth, 'index.ipynb')
txt += '- [%s](%s)\n' % (pthidx[pth], link)
txt += '"""'
return txt
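# Illustrative example (hypothetical paths and labels): calling
#   construct_notebook_index('Usage Examples', ['demo.py', 'advanced'],
#                            {'demo.py': 'Demo script', 'advanced': 'Advanced usage'})
# returns a python-source string whose markdown cells render the index as
#   ## Usage Examples
#   - [Demo script](demo.ipynb)
#   - [Advanced usage](advanced/index.ipynb)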
def notebook_executed(pth):
"""Determine whether the notebook at `pth` has been executed."""
try:
nb = nbformat.read(pth, as_version=4)
except (AttributeError, nbformat.reader.NotJSONError):
raise RuntimeError('Error reading notebook file %s' % pth)
for n in range(len(nb['cells'])):
if nb['cells'][n].cell_type == 'code' and \
nb['cells'][n].execution_count is None:
return False
return True
def same_notebook_code(nb1, nb2):
"""
    Return True if the code cells of notebook objects `nb1` and `nb2`
are the same.
"""
    # Notebooks do not match if the number of cells differs
if len(nb1['cells']) != len(nb2['cells']):
return False
# Iterate over cells in nb1
for n in range(len(nb1['cells'])):
# Notebooks do not match if corresponding cells have different
# types
if nb1['cells'][n]['cell_type'] != nb2['cells'][n]['cell_type']:
return False
# Notebooks do not match if source of corresponding code cells
# differ
if nb1['cells'][n]['cell_type'] == 'code' and \
nb1['cells'][n]['source'] != nb2['cells'][n]['source']:
return False
return True
def execute_notebook(npth, dpth, timeout=1800, kernel='python3'):
"""
Execute the notebook at `npth` using `dpth` as the execution
directory. The execution timeout and kernel are `timeout` and
`kernel` respectively.
"""
ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel)
nb = nbformat.read(npth, as_version=4)
t0 = timer()
ep.preprocess(nb, {'metadata': {'path': dpth}})
t1 = timer()
with open(npth, 'wt') as f:
nbformat.write(nb, f)
return t1 - t0
def replace_markdown_cells(src, dst):
"""
Overwrite markdown cells in notebook object `dst` with corresponding
cells in notebook object `src`.
"""
# It is an error to attempt markdown replacement if src and dst
# have different numbers of cells
if len(src['cells']) != len(dst['cells']):
raise ValueError('notebooks do not have the same number of cells')
# Iterate over cells in src
for n in range(len(src['cells'])):
# It is an error to attempt markdown replacement if any
# corresponding pair of cells have different type
if src['cells'][n]['cell_type'] != dst['cells'][n]['cell_type']:
            raise ValueError('cell number %d of different type in src and dst' % n)
# If current src cell is a markdown cell, copy the src cell to
# the dst cell
if src['cells'][n]['cell_type'] == 'markdown':
dst['cells'][n]['source'] = src['cells'][n]['source']
def notebook_substitute_ref_with_url(ntbk, cr):
"""
In markdown cells of notebook object `ntbk`, replace sphinx
cross-references with links to online docs. Parameter `cr` is a
CrossReferenceLookup object.
"""
# Iterate over cells in notebook
for n in range(len(ntbk['cells'])):
# Only process cells of type 'markdown'
if ntbk['cells'][n]['cell_type'] == 'markdown':
# Get text of markdown cell
txt = ntbk['cells'][n]['source']
            # Replace sphinx cross-references with links to online docs
txt = cr.substitute_ref_with_url(txt)
# Replace current cell text with processed text
ntbk['cells'][n]['source'] = txt
def preprocess_notebook(ntbk, cr):
"""
Process notebook object `ntbk` in preparation for conversion to an
rst document. This processing replaces links to online docs with
corresponding sphinx cross-references within the local docs.
Parameter `cr` is a CrossReferenceLookup object.
"""
# Iterate over cells in notebook
for n in range(len(ntbk['cells'])):
# Only process cells of type 'markdown'
if ntbk['cells'][n]['cell_type'] == 'markdown':
# Get text of markdown cell
txt = ntbk['cells'][n]['source']
# Replace links to online docs with sphinx cross-references
txt = cr.substitute_url_with_ref(txt)
# Replace current cell text with processed text
ntbk['cells'][n]['source'] = txt
def write_notebook_rst(txt, res, fnm, pth):
"""
Write the converted notebook text `txt` and resources `res` to
filename `fnm` in directory `pth`.
"""
# Extended filename used for output images
extfnm = fnm + '_files'
# Directory into which output images are written
extpth = os.path.join(pth, extfnm)
# Make output image directory if it doesn't exist
mkdir(extpth)
# Iterate over output images in resources dict
for r in res['outputs'].keys():
# New name for current output image
rnew = re.sub('output', fnm, r)
# Partial path for current output image
rpth = os.path.join(extfnm, rnew)
# In RST text, replace old output name with the new one
        txt = re.sub(r'\.\. image:: ' + r, '.. image:: ' + rpth, txt, flags=re.M)
# Full path of the current output image
fullrpth = os.path.join(pth, rpth)
# Write the current output image to disk
with open(fullrpth, 'wb') as fo:
fo.write(res['outputs'][r])
# Remove trailing whitespace in RST text
txt = re.sub(r'[ \t]+$', '', txt, flags=re.M)
# Write RST text to disk
with open(os.path.join(pth, fnm + '.rst'), 'wt') as fo:
fo.write(txt)
def notebook_to_rst(npth, rpth, rdir, cr=None):
"""
Convert notebook at `npth` to rst document at `rpth`, in directory
`rdir`. Parameter `cr` is a CrossReferenceLookup object.
"""
# Read the notebook file
ntbk = nbformat.read(npth, nbformat.NO_CONVERT)
    # Convert notebook object to rst
    notebook_object_to_rst(ntbk, rpth, cr)
def notebook_object_to_rst(ntbk, rpth, cr=None):
"""
    Convert notebook object `ntbk` to an rst document at `rpth`; output
    files are written to the directory containing `rpth`. Parameter `cr`
    is a CrossReferenceLookup object.
"""
# Parent directory of file rpth
rdir = os.path.dirname(rpth)
# File basename
rb = os.path.basename(os.path.splitext(rpth)[0])
# Pre-process notebook prior to conversion to rst
if cr is not None:
preprocess_notebook(ntbk, cr)
# Convert notebook to rst
rex = RSTExporter()
rsttxt, rstres = rex.from_notebook_node(ntbk)
# Replace `` with ` in sphinx cross-references
rsttxt = re.sub(r':([^:]+):``(.*?)``', r':\1:`\2`', rsttxt)
# Insert a cross-reference target at top of file
reflbl = '.. _examples_' + os.path.basename(rdir) + '_' + \
rb.replace('-', '_') + ':\n\n'
rsttxt = reflbl + rsttxt
# Write the converted rst to disk
write_notebook_rst(rsttxt, rstres, rb, rdir)
def script_and_notebook_to_rst(spth, npth, rpth):
"""
Convert a script and the corresponding executed notebook to rst.
The script is converted to notebook format *without* replacement
of sphinx cross-references with links to online docs, and the
resulting markdown cells are inserted into the executed notebook,
which is then converted to rst.
"""
# Read entire text of script at spth
with open(spth) as f:
stxt = f.read()
# Process script text
stxt = preprocess_script_string(stxt)
# Convert script text to notebook object
nbs = script_string_to_notebook_object(stxt)
# Read notebook file npth
nbn = nbformat.read(npth, as_version=4)
# Overwrite markdown cells in nbn with those from nbs
try:
replace_markdown_cells(nbs, nbn)
except ValueError:
raise ValueError('mismatch between source script %s and notebook %s' %
(spth, npth))
# Convert notebook object to rst
notebook_object_to_rst(nbn, rpth)
class IntersphinxInventory(object):
"""
Class supporting look up of relevant information from an intersphinx
inventory dict.
"""
domainrole = {'py:module': 'mod', 'py:function': 'func',
'py:data': 'data', 'py:class': 'class',
'py:method': 'meth', 'py:attribute': 'attr',
'py:exception': 'exc'}
"""Dict providing lookup of sphinx role labels from domain labels"""
roledomain = {r: d for d, r in domainrole.items()}
"""Dict providing lookup of sphinx domain labels from role labels"""
def __init__(self, inv, baseurl, addbase=False):
"""
        Parameters are:
`inv` : an intersphinx inventory dict
`baseurl` : the base url for the objects in this inventory
`addbase` : flag indicating whether it is necessary to append
the base url onto the entries in the inventory
"""
self.inv = inv
self.baseurl = baseurl
self.addbase = addbase
# Initialise dicts used for reverse lookup and partial name lookup
self.revinv, self.rolnam = IntersphinxInventory.inventory_maps(inv)
def get_label_from_name(self, name):
"""
Convert a sphinx reference name (or partial name) into a link
label.
"""
if name[0] == '.':
return name[1:]
else:
return name
def get_full_name(self, role, name):
"""
If ``name`` is already the full name of an object, return
``name``. Otherwise, if ``name`` is a partial object name,
look up the full name and return it.
"""
# An initial '.' indicates a partial name
if name[0] == '.':
# Find matches for the partial name in the string
# containing all full names for this role
ptrn = r'(?<= )[^,]*' + name + r'(?=,)'
ml = re.findall(ptrn, self.rolnam[role])
# Handle cases depending on the number of returned matches,
# raising an error if exactly one match is not found
if len(ml) == 0:
raise KeyError('name matching %s not found' % name,
'name', len(ml))
elif len(ml) > 1:
raise KeyError('multiple names matching %s found' % name,
'name', len(ml))
else:
return ml[0]
else:
# The absence of an initial '.' indicates a full
# name. Return the name if it is present in the inventory,
# otherwise raise an error
try:
dom = IntersphinxInventory.roledomain[role]
except KeyError:
raise KeyError('role %s not found' % role, 'role', 0)
if name in self.inv[dom]:
return name
else:
raise KeyError('name %s not found' % name, 'name', 0)
def get_docs_url(self, role, name):
"""
Get a url for the online docs corresponding to a sphinx cross
reference :role:`name`.
"""
# Expand partial names to full names
name = self.get_full_name(role, name)
# Look up domain corresponding to role
dom = IntersphinxInventory.roledomain[role]
# Get the inventory entry tuple corresponding to the name
# of the referenced type
itpl = self.inv[dom][name]
# Get the required path postfix from the inventory entry
# tuple
path = itpl[2]
        # Construct link url, appending the base url or not
# depending on the addbase flag
return self.baseurl + path if self.addbase else path
def matching_base_url(self, url):
"""
Return True if the initial part of `url` matches the base url
passed to the initialiser of this object, and False otherwise.
"""
n = len(self.baseurl)
return url[0:n] == self.baseurl
def get_sphinx_ref(self, url, label=None):
"""
Get an internal sphinx cross reference corresponding to `url`
into the online docs, associated with a link with label `label`
(if not None).
"""
# Raise an exception if the initial part of url does not match
# the base url for this object
n = len(self.baseurl)
if url[0:n] != self.baseurl:
raise KeyError('base of url %s does not match base url %s' %
(url, self.baseurl))
# The reverse lookup key is either the full url or the postfix
# to the base url, depending on flag addbase
if self.addbase:
pstfx = url[n:]
else:
pstfx = url
# Look up the cross-reference role and referenced object
# name via the postfix to the base url
role, name = self.revinv[pstfx]
# If the label string is provided and is shorter than the name
        # string we have looked up, assume it is a partial name for
# the same object: append a '.' at the front and use it as the
# object name in the cross-reference
if label is not None and len(label) < len(name):
name = '.' + label
# Construct cross-reference
ref = ':%s:`%s`' % (role, name)
return ref
@staticmethod
def inventory_maps(inv):
"""
Construct dicts facilitating information lookup in an
inventory dict. A reversed dict allows lookup of a tuple
specifying the sphinx cross-reference role and the name of the
referenced type from the intersphinx inventory url postfix
string. A role-specific name lookup string allows the set of all
names corresponding to a specific role to be searched via regex.
"""
# Initialise dicts
revinv = {}
rolnam = {}
# Iterate over domain keys in inventory dict
for d in inv:
# Since keys seem to be duplicated, ignore those not
# starting with 'py:'
if d[0:3] == 'py:' and d in IntersphinxInventory.domainrole:
# Get role corresponding to current domain
r = IntersphinxInventory.domainrole[d]
# Initialise role-specific name lookup string
rolnam[r] = ''
# Iterate over all type names for current domain
for n in inv[d]:
# Get the url postfix string for the current
# domain and type name
p = inv[d][n][2]
# Allow lookup of role and object name tuple from
# url postfix
revinv[p] = (r, n)
# Append object name to a string for this role,
# allowing regex searching for partial names
rolnam[r] += ' ' + n + ','
return revinv, rolnam
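    # Rough shape of the two lookup structures built above (assumed example
    # entries, not taken from a real inventory):
    #   revinv = {'generated/numpy.ndarray.html#numpy.ndarray':
    #                 ('class', 'numpy.ndarray'), ...}
    #   rolnam = {'class': ' numpy.ndarray, numpy.dtype,', ...}
    # so a partial name can be resolved by a regex search over rolnam[role],
    # and a url postfix can be reverse-mapped to (role, name) via revinv.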
class CrossReferenceLookup(object):
"""
Class supporting cross reference lookup for citations and all
document sets recorded by intersphinx.
"""
def __init__(self, env, inv, baseurl):
"""
        Parameters are:
`env` : a sphinx environment object
`inv` : an intersphinx inventory dict
`baseurl` : the base url for the objects in this inventory
"""
self.baseurl = baseurl
# Construct a list of IntersphinxInventory objects. The first
# entry in the list is for the intersphinx inventory for the
# package for which we are building sphinx docs
self.invlst = [IntersphinxInventory(inv, baseurl, addbase=True),]
self.env = env
# Add additional entries to the list for each external package
# docs set included by intersphinx
for b in env.intersphinx_cache:
self.invlst.append(IntersphinxInventory(
env.intersphinx_cache[b][2], b))
# Recent versions of sphinx environment do not have a
# bibtex_cache attribute. In this case, extract citation data
# from env.domaindata
self.citenum = {}
self.citeid = {}
if not hasattr(env, 'bibtex_cache'):
for cite in env.domaindata['cite']['citations']:
self.citenum[cite.key] = cite.label
self.citeid[cite.key] = cite.citation_id
def get_docs_url(self, role, name):
"""
Get the online docs url for sphinx cross-reference :role:`name`.
"""
if role == 'cite':
# If the cross-reference is a citation, make sure that
# the cite key is in the sphinx environment bibtex cache.
# If it is, construct the url from the cite key, otherwise
# raise an exception
if hasattr(self.env, 'bibtex_cache'):
id = name
if name not in self.env.bibtex_cache.get_all_cited_keys():
raise KeyError('cite key %s not found' % name, 'cite', 0)
else:
id = self.citeid[name]
url = self.baseurl + 'zreferences.html#' + id
elif role == 'ref':
try:
reftpl = self.env.domaindata['std']['labels'][name]
except Exception:
raise KeyError('ref label %s not found' % name, 'ref', 0)
url = self.baseurl + reftpl[0] + '.html#' + reftpl[1]
else:
# If the cross-reference is not a citation, try to look it
# up in each of the IntersphinxInventory objects in our list
url = None
for ii in self.invlst:
try:
url = ii.get_docs_url(role, name)
except KeyError as ex:
# Re-raise the exception if multiple matches found,
# otherwise ignore it
if ex.args[1] == 'role' or ex.args[2] > 1:
raise ex
else:
# If an exception was not raised, the lookup must
# have succeeded: break from the loop to terminate
# further searching
break
if url is None:
raise KeyError('name %s not found' % name, 'name', 0)
return url
def get_docs_label(self, role, name):
"""Get an appropriate label to use in a link to the online docs."""
if role == 'cite':
# Get the string used as the citation label in the text
if hasattr(self.env, 'bibtex_cache'):
try:
cstr = self.env.bibtex_cache.get_label_from_key(name)
except Exception:
raise KeyError('cite key %s not found' % name, 'cite', 0)
else:
try:
cstr = self.citenum[name]
except KeyError:
raise KeyError('cite key %s not found' % name, 'cite', 0)
# The link label is the citation label (number) enclosed
# in square brackets
return '[%s]' % cstr
elif role == 'ref':
try:
reftpl = self.env.domaindata['std']['labels'][name]
except Exception:
raise KeyError('ref label %s not found' % name, 'ref', 0)
return reftpl[2]
else:
            # Use the object name as a label, omitting any initial '.'
if name[0] == '.':
return name[1:]
else:
return name
def get_sphinx_ref(self, url, label=None):
"""
Get an internal sphinx cross reference corresponding to `url`
into the online docs, associated with a link with label `label`
(if not None).
"""
# A url is assumed to correspond to a citation if it contains
# 'zreferences.html#'
if 'zreferences.html#' in url:
key = url.partition('zreferences.html#')[2]
ref = ':cite:`%s`' % key
else:
# If the url does not correspond to a citation, try to look it
# up in each of the IntersphinxInventory objects in our list
ref = None
# Iterate over IntersphinxInventory objects in our list
for ii in self.invlst:
# If the baseurl for the current IntersphinxInventory
# object matches the url, try to look up the reference
                # from the url and terminate the loop if the lookup
                # succeeds
if ii.matching_base_url(url):
ref = ii.get_sphinx_ref(url, label)
break
if ref is None:
raise KeyError('no match found for url %s' % url)
return ref
def substitute_ref_with_url(self, txt):
"""
In the string `txt`, replace sphinx references with
corresponding links to online docs.
"""
# Find sphinx cross-references
mi = re.finditer(r':([^:]+):`([^`]+)`', txt)
if mi:
# Iterate over match objects in iterator returned by re.finditer
for mo in mi:
# Initialize link label and url for substitution
lbl = None
url = None
# Get components of current match: full matching text, the
# role label in the reference, and the name of the
# referenced type
mtxt = mo.group(0)
role = mo.group(1)
name = mo.group(2)
# If role is 'ref', the name component is in the form
# label <name>
if role == 'ref':
ma = re.match(r'\s*([^\s<]+)\s*<([^>]+)+>', name)
if ma:
name = ma.group(2)
lbl = ma.group(1)
# Try to look up the current cross-reference. Issue a
# warning if the lookup fails, and do the substitution
# if it succeeds.
try:
url = self.get_docs_url(role, name)
if role != 'ref':
lbl = self.get_docs_label(role, name)
except KeyError as ex:
if len(ex.args) == 1 or ex.args[1] != 'role':
print('Warning: %s' % ex.args[0])
else:
# If the cross-reference lookup was successful, replace
# it with an appropriate link to the online docs
rtxt = '[%s](%s)' % (lbl, url)
txt = re.sub(mtxt, rtxt, txt, flags=re.M)
return txt
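    # Hedged example of the intended substitution (the names and url below are
    # hypothetical): a docstring fragment such as
    #   "Uses :func:`numpy.dot` internally"
    # would become
    #   "Uses [numpy.dot](https://docs.example.org/generated/numpy.dot.html) internally"
    # provided the cross-reference can be resolved in one of the inventories.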
def substitute_url_with_ref(self, txt):
"""
In the string `txt`, replace links to online docs with
corresponding sphinx cross-references.
"""
# Find links
mi = re.finditer(r'\[([^\]]+|\[[^\]]+\])\]\(([^\)]+)\)', txt)
if mi:
# Iterate over match objects in iterator returned by
# re.finditer
for mo in mi:
# Get components of current match: full matching text,
# the link label, and the postfix to the base url in the
# link url
mtxt = mo.group(0)
lbl = mo.group(1)
url = mo.group(2)
# Try to look up the current link url. Issue a warning if
# the lookup fails, and do the substitution if it succeeds.
try:
ref = self.get_sphinx_ref(url, lbl)
except KeyError as ex:
print('Warning: %s' % ex.args[0])
else:
txt = re.sub(re.escape(mtxt), ref, txt)
return txt
def make_example_scripts_docs(spth, npth, rpth):
"""
Generate rst docs from example scripts. Arguments `spth`, `npth`,
and `rpth` are the top-level scripts directory, the top-level
notebooks directory, and the top-level output directory within the
docs respectively.
"""
# Ensure that output directory exists
mkdir(rpth)
# Iterate over index files
for fp in glob(os.path.join(spth, '*.rst')) + \
glob(os.path.join(spth, '*', '*.rst')):
# Index basename
b = os.path.basename(fp)
# Index dirname
dn = os.path.dirname(fp)
# Name of subdirectory of examples directory containing current index
sd = os.path.split(dn)
# Set d to the name of the subdirectory of the root directory
if dn == spth: # fp is the root directory index file
d = ''
else: # fp is a subdirectory index file
d = sd[-1]
# Path to corresponding subdirectory in docs directory
fd = os.path.join(rpth, d)
        # Ensure docs subdirectory exists
mkdir(fd)
# Filename of index file to be constructed
fn = os.path.join(fd, b)
        # Process current index file if the corresponding docs index file
        # doesn't exist, or is older than the source index file
if update_required(fp, fn):
print('Converting %s ' % os.path.join(d, b),
end='\r')
# Convert script index to docs index
rst_to_docs_rst(fp, fn)
# Iterate over example scripts
for fp in sorted(glob(os.path.join(spth, '*', '*.py'))):
# Name of subdirectory of examples directory containing current script
d = os.path.split(os.path.dirname(fp))[1]
# Script basename
b = os.path.splitext(os.path.basename(fp))[0]
# Path to corresponding notebook
fn = os.path.join(npth, d, b + '.ipynb')
# Path to corresponding sphinx doc file
fr = os.path.join(rpth, d, b + '.rst')
# Only proceed if script and notebook exist
if os.path.exists(fp) and os.path.exists(fn):
# Convert notebook to rst if notebook is newer than rst
# file or if rst file doesn't exist
if update_required(fn, fr):
fnb = os.path.join(d, b + '.ipynb')
print('Processing %s ' % fnb, end='\r')
script_and_notebook_to_rst(fp, fn, fr)
else:
print('WARNING: script %s or notebook %s not found' %
(fp, fn))
|
{
"content_hash": "1a3021bc407e9249c890a33a2a2d8924",
"timestamp": "",
"source": "github",
"line_count": 1078,
"max_line_length": 79,
"avg_line_length": 34.57421150278293,
"alnum_prop": 0.5694239489147058,
"repo_name": "bwohlberg/sporco",
"id": "cce59696931ad81a988fd8ae58feb3853f1ae2d3",
"size": "37318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/docntbk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1317433"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
import argparse
import sys
from cardice.config import Configurator
from cardice.provision import Provisioner
USAGE = """cardice command [options...]"""
def make_parser():
"""Parse commandline arguments using a git-like subcommands scheme"""
common_parser = argparse.ArgumentParser(
add_help=False,
)
common_parser.add_argument(
"--cardice-folder",
default="~/.cardice",
help="Folder to store the cardice configuration and cluster info."
)
common_parser.add_argument(
"--log-level",
default="INFO",
help="Minimum log level for the file log (under NXDRIVE_HOME/logs)."
)
common_parser.add_argument(
"--cluster",
default=None,
help="Perform the command on a specific cluster. "
"Otherwise the default cluster is the last selected cluster."
)
parser = argparse.ArgumentParser(
parents=[common_parser],
usage=USAGE,
)
subparsers = parser.add_subparsers(
title='valid commands',
)
# init a cluster config
init_parser = subparsers.add_parser(
'init', help='Create a new cluster configuration.',
parents=[common_parser],
usage="cardice init name",
)
init_parser.set_defaults(command='init')
init_parser.add_argument(
"name", help="Name of a new cluster configuration.")
select_parser = subparsers.add_parser(
'select', help="Mark the requested cluster as the default cluster.",
parents=[common_parser],
)
select_parser.set_defaults(command='select')
select_parser.add_argument(
"name", help="Name of the cluster configuration to select"
" as default cluster.")
start_parser = subparsers.add_parser(
'start', help="Start the selected cluster configuration.",
parents=[common_parser],
)
start_parser.set_defaults(command='start')
start_parser.add_argument(
"profile",
help="Name of the profile to use to provision new nodes.")
start_parser.add_argument(
"--n-nodes", default=1, type=int,
help="Number of nodes to start.")
stop_parser = subparsers.add_parser(
'stop', help="Stop the selected cluster configuration.",
parents=[common_parser],
)
stop_parser.set_defaults(command='stop')
ssh_parser = subparsers.add_parser(
'ssh', help="ssh into a node of a running cluster.",
parents=[common_parser],
)
ssh_parser.add_argument(
"--node", default=None, type=int,
help="Name of the node to ssh into.")
ssh_parser.set_defaults(command='ssh')
terminate_parser = subparsers.add_parser(
'terminate', help="Stop the selected cluster configuration and free"
" all related cloud resources. WARNING: all unsaved"
" data will be lost.",
parents=[common_parser],
)
terminate_parser.set_defaults(command='terminate')
return parser
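# Minimal usage sketch (illustrative only) of the git-style subcommand parser:
#   parser = make_parser()
#   options = parser.parse_args(["init", "mycluster"])
#   options.command         -> "init"
#   options.name            -> "mycluster"
#   options.cardice_folder  -> "~/.cardice" (the default)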
class CommandHandler(object):
"""Dispatch the commandline instructions to the right component."""
def __init__(self, options):
self.options = options
self.config = Configurator(options)
def run(self):
"""Execute the specified command parameterized by CLI options"""
getattr(self, 'run_' + self.options.command)()
def interrupt(self):
"""Perform clean up operations on user interuptions (if any)"""
handler = getattr(self, 'interrupt_' + self.options.command, None)
if handler is not None:
return handler()
def run_init(self):
self.config.init_cluster(self.options.name)
def run_select(self):
self.config.set_default_cluster(self.options.name)
def run_start(self):
provisioner = Provisioner(self.config)
provisioner.start(self.options.profile,
n_nodes=self.options.n_nodes)
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = make_parser()
options = parser.parse_args(args)
handler = CommandHandler(options)
try:
handler.run()
except KeyboardInterrupt:
        handler.interrupt()
except Exception as e:
if options.log_level.upper() == 'DEBUG':
raise
else:
handler.config.log.error(str(e))
sys.exit(1)
|
{
"content_hash": "a19817ad1643bb89e5c34beb142866cb",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 30.618055555555557,
"alnum_prop": 0.6194148332955318,
"repo_name": "ogrisel/cardice",
"id": "b880a6cef9df89f105772a1d196900f6fc2d1f54",
"size": "4409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardice/commandline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17829"
}
],
"symlink_target": ""
}
|
"""cutarelease -- Cut a release of your project.
A script that will help cut a release for a git-based project that follows
a few conventions. It'll update your changelog (CHANGES.md), add a git
tag, push those changes, update your version to the next patch level release
and create a new changelog section for that new version.
Conventions:
- XXX
"""
__version_info__ = (1, 0, 7)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
from os.path import join, dirname, normpath, abspath, exists, basename, splitext
from glob import glob
from pprint import pprint
import re
import codecs
import logging
import optparse
import json
#---- globals and config
log = logging.getLogger("cutarelease")
class Error(Exception):
pass
#---- main functionality
def cutarelease(project_name, version_files, dry_run=False):
"""Cut a release.
@param project_name {str}
@param version_files {list} List of paths to files holding the version
info for this project.
If none are given it attempts to guess the version file:
package.json or VERSION.txt or VERSION or $project_name.py
or lib/$project_name.py or $project_name.js or lib/$project_name.js.
The version file can be in one of the following forms:
    - A .py file, in which case the file is expected to have a top-level
global called "__version_info__" as follows. [1]
__version_info__ = (0, 7, 6)
Note that I typically follow that with the following to get a
string version attribute on my modules:
__version__ = '.'.join(map(str, __version_info__))
- A .js file, in which case the file is expected to have a top-level
global called "VERSION" as follows:
        var VERSION = "1.2.3";
- A "package.json" file, typical of a node.js npm-using project.
The package.json file must have a "version" field.
- TODO: A simple version file whose only content is a "1.2.3"-style version
string.
[1]: This is a convention I tend to follow in my projects.
Granted it might not be your cup of tea. I should add support for
just `__version__ = "1.2.3"`. I'm open to other suggestions too.
"""
dry_run_str = dry_run and " (dry-run)" or ""
if not version_files:
log.info("guessing version file")
candidates = [
"package.json",
"VERSION.txt",
"VERSION",
"%s.py" % project_name,
"lib/%s.py" % project_name,
"%s.js" % project_name,
"lib/%s.js" % project_name,
]
for candidate in candidates:
if exists(candidate):
version_files = [candidate]
break
else:
raise Error("could not find a version file: specify its path or "
"add one of the following to your project: '%s'"
% "', '".join(candidates))
log.info("using '%s' as version file", version_files[0])
parsed_version_files = [_parse_version_file(f) for f in version_files]
version_file_type, version_info = parsed_version_files[0]
version = _version_from_version_info(version_info)
# Confirm
if not dry_run:
answer = query_yes_no("* * *\n"
"Are you sure you want cut a %s release?\n"
"This will involved commits and a push." % version,
default="no")
print "* * *"
if answer != "yes":
log.info("user abort")
return
log.info("cutting a %s release%s", version, dry_run_str)
# Checks: Ensure there is a section in changes for this version.
changes_path = "CHANGES.md"
changes_txt, changes, nyr = parse_changelog(changes_path)
#pprint(changes)
top_ver = changes[0]["version"]
if top_ver != version:
raise Error("changelog '%s' top section says "
"version %r, expected version %r: aborting"
% (changes_path, top_ver, version))
top_verline = changes[0]["verline"]
if not top_verline.endswith(nyr):
answer = query_yes_no("\n* * *\n"
"The changelog '%s' top section doesn't have the expected\n"
"'%s' marker. Has this been released already?"
% (changes_path, nyr), default="yes")
print "* * *"
if answer != "no":
log.info("abort")
return
top_body = changes[0]["body"]
if top_body.strip() == "(nothing yet)":
raise Error("top section body is `(nothing yet)': it looks like "
"nothing has been added to this release")
# Commits to prepare release.
changes_txt_before = changes_txt
changes_txt = changes_txt.replace(" (not yet released)", "", 1)
if not dry_run and changes_txt != changes_txt_before:
log.info("prepare `%s' for release", changes_path)
f = codecs.open(changes_path, 'w', 'utf-8')
f.write(changes_txt)
f.close()
run('git commit %s -m "prepare for %s release"'
% (changes_path, version))
# Tag version and push.
curr_tags = set(t for t in _capture_stdout(["git", "tag", "-l"]).split('\n') if t)
if not dry_run and version not in curr_tags:
log.info("tag the release")
run('git tag -a "%s" -m "version %s"' % (version, version))
run('git push --tags')
# Optionally release.
if exists("package.json"):
answer = query_yes_no("\n* * *\nPublish to npm?", default="yes")
print "* * *"
if answer == "yes":
if dry_run:
log.info("skipping npm publish (dry-run)")
else:
run('npm publish')
elif exists("setup.py"):
answer = query_yes_no("\n* * *\nPublish to pypi?", default="yes")
print "* * *"
if answer == "yes":
if dry_run:
log.info("skipping pypi publish (dry-run)")
else:
run("%spython setup.py sdist --formats zip upload"
% _setup_command_prefix())
# Commits to prepare for future dev and push.
# - update changelog file
next_version_info = _get_next_version_info(version_info)
next_version = _version_from_version_info(next_version_info)
log.info("prepare for future dev (version %s)", next_version)
marker = "## " + changes[0]["verline"]
if marker.endswith(nyr):
marker = marker[0:-len(nyr)]
if marker not in changes_txt:
raise Error("couldn't find `%s' marker in `%s' "
"content: can't prep for subsequent dev" % (marker, changes_path))
next_verline = "%s %s%s" % (marker.rsplit(None, 1)[0], next_version, nyr)
changes_txt = changes_txt.replace(marker + '\n',
"%s\n\n(nothing yet)\n\n\n%s\n" % (next_verline, marker))
if not dry_run:
f = codecs.open(changes_path, 'w', 'utf-8')
f.write(changes_txt)
f.close()
# - update version file
next_version_tuple = _tuple_from_version(next_version)
for i, ver_file in enumerate(version_files):
ver_content = codecs.open(ver_file, 'r', 'utf-8').read()
ver_file_type, ver_info = parsed_version_files[i]
if ver_file_type == "json":
marker = '"version": "%s"' % version
if marker not in ver_content:
raise Error("couldn't find `%s' version marker in `%s' "
"content: can't prep for subsequent dev" % (marker, ver_file))
ver_content = ver_content.replace(marker,
'"version": "%s"' % next_version)
elif ver_file_type == "javascript":
marker = 'var VERSION = "%s";' % version
if marker not in ver_content:
raise Error("couldn't find `%s' version marker in `%s' "
"content: can't prep for subsequent dev" % (marker, ver_file))
ver_content = ver_content.replace(marker,
'var VERSION = "%s";' % next_version)
elif ver_file_type == "python":
marker = "__version_info__ = %r" % (version_info,)
if marker not in ver_content:
raise Error("couldn't find `%s' version marker in `%s' "
"content: can't prep for subsequent dev" % (marker, ver_file))
ver_content = ver_content.replace(marker,
"__version_info__ = %r" % (next_version_tuple,))
elif ver_file_type == "version":
ver_content = next_version
else:
raise Error("unknown ver_file_type: %r" % ver_file_type)
if not dry_run:
log.info("update version to '%s' in '%s'", next_version, ver_file)
f = codecs.open(ver_file, 'w', 'utf-8')
f.write(ver_content)
f.close()
if not dry_run:
run('git commit %s %s -m "prep for future dev"' % (
changes_path, ' '.join(version_files)))
run('git push')
#---- internal support routines
def _indent(s, indent=' '):
return indent + indent.join(s.splitlines(True))
def _tuple_from_version(version):
def _intify(s):
try:
return int(s)
except ValueError:
return s
return tuple(_intify(b) for b in version.split('.'))
def _get_next_version_info(version_info):
next = list(version_info[:])
next[-1] += 1
return tuple(next)
def _version_from_version_info(version_info):
v = str(version_info[0])
state_dot_join = True
for i in version_info[1:]:
if state_dot_join:
try:
int(i)
except ValueError:
state_dot_join = False
else:
pass
if state_dot_join:
v += "." + str(i)
else:
v += str(i)
return v
_version_re = re.compile(r"^(\d+)\.(\d+)(?:\.(\d+)([abc](\d+)?)?)?$")
def _version_info_from_version(version):
m = _version_re.match(version)
if not m:
raise Error("could not convert '%s' version to version info" % version)
version_info = []
for g in m.groups():
if g is None:
break
try:
version_info.append(int(g))
except ValueError:
version_info.append(g)
return tuple(version_info)
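# Example round trip between the two helpers above (illustrative):
#   _version_info_from_version("1.0.7")   -> (1, 0, 7)
#   _version_from_version_info((1, 0, 7)) -> "1.0.7"
# A trailing pre-release marker such as "1.2.3a" is kept as a string in the
# tuple, (1, 2, 3, 'a'), and is concatenated without a dot when rebuilding.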
def _parse_version_file(version_file):
"""Get version info from the given file. It can be any of:
Supported version file types (i.e. types of files from which we know
how to parse the version string/number -- often by some convention):
- json: use the "version" key
- javascript: look for a `var VERSION = "1.2.3";`
- python: Python script/module with `__version_info__ = (1, 2, 3)`
- version: a VERSION.txt or VERSION file where the whole contents are
the version string
@param version_file {str} Can be a path or "type:path", where "type"
is one of the supported types.
"""
# Get version file *type*.
version_file_type = None
match = re.compile("^([a-z]+):(.*)$").search(version_file)
if match:
version_file = match.group(2)
version_file_type = match.group(1)
aliases = {
"js": "javascript"
}
if version_file_type in aliases:
version_file_type = aliases[version_file_type]
f = codecs.open(version_file, 'r', 'utf-8')
content = f.read()
f.close()
if not version_file_type:
# Guess the type.
base = basename(version_file)
ext = splitext(base)[1]
if ext == ".json":
version_file_type = "json"
elif ext == ".py":
version_file_type = "python"
elif ext == ".js":
version_file_type = "javascript"
elif content.startswith("#!"):
shebang = content.splitlines(False)[0]
shebang_bits = re.split(r'[/ \t]', shebang)
for name, typ in {"python": "python", "node": "javascript"}.items():
if name in shebang_bits:
version_file_type = typ
break
elif base in ("VERSION", "VERSION.txt"):
version_file_type = "version"
if not version_file_type:
raise RuntimeError("can't extract version from '%s': no idea "
"what type of file it it" % version_file)
if version_file_type == "json":
obj = json.loads(content)
version_info = _version_info_from_version(obj["version"])
elif version_file_type == "python":
m = re.search(r'^__version_info__ = (.*?)$', content, re.M)
version_info = eval(m.group(1))
elif version_file_type == "javascript":
m = re.search(r'^var VERSION = "(.*?)";$', content, re.M)
version_info = _version_info_from_version(m.group(1))
elif version_file_type == "version":
version_info = _version_info_from_version(content.strip())
else:
raise RuntimeError("unexpected version_file_type: %r"
% version_file_type)
return version_file_type, version_info
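# Illustrative calls (the paths and versions are hypothetical):
#   _parse_version_file("package.json")      -> ("json", (1, 2, 3))
#       assuming the file contains {"version": "1.2.3"}
#   _parse_version_file("python:lib/foo.py") -> ("python", (0, 7, 6))
#       where the "python:" prefix forces the file type instead of guessing.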
def parse_changelog(changes_path):
"""Parse the given changelog path and return `(content, parsed, nyr)`
where `nyr` is the ' (not yet released)' marker and `parsed` looks like:
[{'body': u'\n(nothing yet)\n\n',
'verline': u'restify 1.0.1 (not yet released)',
'version': u'1.0.1'}, # version is parsed out for top section only
{'body': u'...',
'verline': u'1.0.0'},
{'body': u'...',
'verline': u'1.0.0-rc2'},
{'body': u'...',
'verline': u'1.0.0-rc1'}]
A changelog (CHANGES.md) is expected to look like this:
# $project Changelog
## $next_version (not yet released)
...
## $version1
...
## $version2
... and so on
The version lines are enforced as follows:
- The top entry should have a " (not yet released)" suffix. "Should"
because recovery from half-cutarelease failures is supported.
- A version string must be extractable from there, but it tries to
      be loose (though strict "X.Y.Z" versioning is preferred). Allowed, for example:
## 1.0.0
## my project 1.0.1
## foo 1.2.3-rc2
Basically, (a) the " (not yet released)" is stripped, (b) the
last token is the version, and (c) that version must start with
a digit (sanity check).
"""
if not exists(changes_path):
raise Error("changelog file '%s' not found" % changes_path)
content = codecs.open(changes_path, 'r', 'utf-8').read()
parser = re.compile(
r'^##\s*(?P<verline>[^\n]*?)\s*$(?P<body>.*?)(?=^##|\Z)',
re.M | re.S)
sections = parser.findall(content)
# Sanity checks on changelog format.
if not sections:
template = "## 1.0.0 (not yet released)\n\n(nothing yet)\n"
raise Error("changelog '%s' must have at least one section, "
"suggestion:\n\n%s" % (changes_path, _indent(template)))
first_section_verline = sections[0][0]
nyr = ' (not yet released)'
#if not first_section_verline.endswith(nyr):
# eg = "## %s%s" % (first_section_verline, nyr)
# raise Error("changelog '%s' top section must end with %r, "
# "naive e.g.: '%s'" % (changes_path, nyr, eg))
items = []
for i, section in enumerate(sections):
item = {
"verline": section[0],
"body": section[1]
}
if i == 0:
# We only bother to pull out 'version' for the top section.
verline = section[0]
if verline.endswith(nyr):
verline = verline[0:-len(nyr)]
version = verline.split()[-1]
try:
int(version[0])
except ValueError:
msg = ''
if version.endswith(')'):
msg = " (cutarelease is picky about the trailing %r " \
"on the top version line. Perhaps you misspelled " \
"that?)" % nyr
raise Error("changelog '%s' top section version '%s' is "
"invalid: first char isn't a number%s"
% (changes_path, version, msg))
item["version"] = version
items.append(item)
return content, items, nyr
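# Sketch of the expected parse for an assumed CHANGES.md such as:
#   # foo Changelog
#   ## 1.0.1 (not yet released)
#   (nothing yet)
#   ## 1.0.0
#   - first release
# which yields nyr == " (not yet released)" and
#   items[0] == {"verline": "1.0.1 (not yet released)",
#                "body": "...", "version": "1.0.1"}
# with "version" only populated for the top section.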
## {{{ http://code.activestate.com/recipes/577058/ (r2)
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
## end of http://code.activestate.com/recipes/577058/ }}}
def _capture_stdout(argv):
import subprocess
p = subprocess.Popen(argv, stdout=subprocess.PIPE)
return p.communicate()[0]
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def run(cmd):
"""Run the given command.
    Raises OSError if the command returns a non-zero exit status.
"""
log.debug("running '%s'", cmd)
fixed_cmd = cmd
if sys.platform == "win32" and cmd.count('"') > 2:
fixed_cmd = '"' + cmd + '"'
retval = os.system(fixed_cmd)
if hasattr(os, "WEXITSTATUS"):
status = os.WEXITSTATUS(retval)
else:
status = retval
if status:
raise OSError(status, "error running '%s'" % cmd)
def _setup_command_prefix():
prefix = ""
if sys.platform == "darwin":
# http://forums.macosxhints.com/archive/index.php/t-43243.html
# This is an Apple customization to `tar` to avoid creating
# '._foo' files for extended-attributes for archived files.
prefix = "COPY_EXTENDED_ATTRIBUTES_DISABLE=1 "
return prefix
#---- mainline
def main(argv):
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
log.setLevel(logging.INFO)
# Parse options.
parser = optparse.OptionParser(prog="cutarelease", usage='',
version="%prog " + __version__, description=__doc__,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("-q", "--quiet", dest="log_level",
action="store_const", const=logging.WARNING,
help="quieter output (just warnings and errors)")
parser.set_default("log_level", logging.INFO)
parser.add_option("--test", action="store_true",
help="run self-test and exit (use 'eol.py -v --test' for verbose test output)")
parser.add_option("-p", "--project-name", metavar="NAME",
help='the name of this project (default is the base dir name)',
default=basename(os.getcwd()))
parser.add_option("-f", "--version-file", metavar="[TYPE:]PATH",
action='append', dest="version_files",
help='The path to the project file holding the version info. Can be '
'specified multiple times if more than one file should be updated '
'with new version info. If excluded, it will be guessed.')
parser.add_option("-n", "--dry-run", action="store_true",
help='Do a dry-run', default=False)
opts, args = parser.parse_args()
log.setLevel(opts.log_level)
cutarelease(opts.project_name, opts.version_files, dry_run=opts.dry_run)
## {{{ http://code.activestate.com/recipes/577258/ (r5+)
if __name__ == "__main__":
try:
retval = main(sys.argv)
except KeyboardInterrupt:
sys.exit(1)
except SystemExit:
raise
except:
import traceback, logging
if not log.handlers and not logging.root.handlers:
logging.basicConfig()
skip_it = False
exc_info = sys.exc_info()
if hasattr(exc_info[0], "__name__"):
exc_class, exc, tb = exc_info
if isinstance(exc, IOError) and exc.args[0] == 32:
# Skip 'IOError: [Errno 32] Broken pipe': often a cancelling of `less`.
skip_it = True
if not skip_it:
tb_path, tb_lineno, tb_func = traceback.extract_tb(tb)[-1][:3]
log.error("%s (%s:%s in %s)", exc_info[1], tb_path,
tb_lineno, tb_func)
else: # string exception
log.error(exc_info[0])
if not skip_it:
if log.isEnabledFor(logging.DEBUG):
traceback.print_exception(*exc_info)
sys.exit(1)
else:
sys.exit(retval)
## end of http://code.activestate.com/recipes/577258/ }}}
|
{
"content_hash": "b90aeb100d6b8e68349594070865e1c4",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 87,
"avg_line_length": 36.251264755480605,
"alnum_prop": 0.5661720240033493,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "67ae02050fd1abb5d0d0084d4153c608cbf270f0",
"size": "21581",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dataset/python/cutarelease.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
}
|
from case import Case
class Case7_1_2(Case):
DESCRIPTION = """Send two close frames"""
EXPECTATION = """Clean close with normal code. Second close frame ignored."""
def init(self):
self.suppressClose = True
def onConnectionLost(self, failedByMe):
Case.onConnectionLost(self, failedByMe)
if self.behaviorClose == Case.WRONG_CODE:
self.behavior = Case.FAILED
self.passed = False
self.result = self.resultClose
def onOpen(self):
payload = "Hello World!"
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
self.p.sendFrame(opcode = 8)
self.p.killAfter(1)
|
{
"content_hash": "6d77a3d91d2e1d73d53f6c14afe5776c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 112,
"avg_line_length": 29.20689655172414,
"alnum_prop": 0.6139315230224321,
"repo_name": "frivoal/presto-testo",
"id": "26394a4fc76d07a0efb7ad4b441c211246f1b3b9",
"size": "1629",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/lib/python/autobahn/case/case7_1_2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2312"
},
{
"name": "ActionScript",
"bytes": "23470"
},
{
"name": "AutoHotkey",
"bytes": "8832"
},
{
"name": "Batchfile",
"bytes": "5001"
},
{
"name": "C",
"bytes": "116512"
},
{
"name": "C++",
"bytes": "219467"
},
{
"name": "CSS",
"bytes": "207914"
},
{
"name": "Erlang",
"bytes": "18523"
},
{
"name": "Groff",
"bytes": "674"
},
{
"name": "HTML",
"bytes": "103357488"
},
{
"name": "Haxe",
"bytes": "3874"
},
{
"name": "Java",
"bytes": "125658"
},
{
"name": "JavaScript",
"bytes": "22514682"
},
{
"name": "Makefile",
"bytes": "13409"
},
{
"name": "PHP",
"bytes": "531453"
},
{
"name": "Perl",
"bytes": "321672"
},
{
"name": "Python",
"bytes": "948191"
},
{
"name": "Ruby",
"bytes": "1006850"
},
{
"name": "Shell",
"bytes": "12140"
},
{
"name": "Smarty",
"bytes": "1860"
},
{
"name": "XSLT",
"bytes": "2567445"
}
],
"symlink_target": ""
}
|
"""
This file contains preprocessing tools based on polynomials.
"""
import collections
from numbers import Integral
from itertools import chain, combinations
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy.interpolate import BSpline
from scipy.special import comb
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.validation import check_is_fitted, FLOAT_DTYPES, _check_sample_weight
from ..utils.validation import _check_feature_names_in
from ..utils._param_validation import Interval, StrOptions
from ..utils.stats import _weighted_percentile
from ._csr_polynomial_expansion import _csr_polynomial_expansion
__all__ = [
"PolynomialFeatures",
"SplineTransformer",
]
class PolynomialFeatures(TransformerMixin, BaseEstimator):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Read more in the :ref:`User Guide <polynomial_features>`.
Parameters
----------
degree : int or tuple (min_degree, max_degree), default=2
If a single int is given, it specifies the maximal degree of the
polynomial features. If a tuple `(min_degree, max_degree)` is passed,
then `min_degree` is the minimum and `max_degree` is the maximum
polynomial degree of the generated features. Note that `min_degree=0`
and `min_degree=1` are equivalent as outputting the degree zero term is
determined by `include_bias`.
interaction_only : bool, default=False
If `True`, only interaction features are produced: features that are
products of at most `degree` *distinct* input features, i.e. terms with
power of 2 or higher of the same input feature are excluded:
- included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc.
- excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc.
include_bias : bool, default=True
If `True` (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
order : {'C', 'F'}, default='C'
Order of output array in the dense case. `'F'` order is faster to
compute, but may slow down subsequent estimators.
.. versionadded:: 0.21
Attributes
----------
powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`)
`powers_[i, j]` is the exponent of the jth input in the ith output.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
See Also
--------
SplineTransformer : Transformer that generates univariate B-spline bases
for features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import PolynomialFeatures
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
"""
_parameter_constraints: dict = {
"degree": [Interval(Integral, 0, None, closed="left"), "array-like"],
"interaction_only": ["boolean"],
"include_bias": ["boolean"],
"order": [StrOptions({"C", "F"})],
}
def __init__(
self, degree=2, *, interaction_only=False, include_bias=True, order="C"
):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
self.order = order
@staticmethod
def _combinations(
n_features, min_degree, max_degree, interaction_only, include_bias
):
comb = combinations if interaction_only else combinations_w_r
start = max(1, min_degree)
iter = chain.from_iterable(
comb(range(n_features), i) for i in range(start, max_degree + 1)
)
if include_bias:
iter = chain(comb(range(n_features), 0), iter)
return iter
@staticmethod
def _num_combinations(
n_features, min_degree, max_degree, interaction_only, include_bias
):
"""Calculate number of terms in polynomial expansion
This should be equivalent to counting the number of terms returned by
_combinations(...) but much faster.
"""
if interaction_only:
combinations = sum(
[
comb(n_features, i, exact=True)
for i in range(max(1, min_degree), min(max_degree, n_features) + 1)
]
)
else:
combinations = comb(n_features + max_degree, max_degree, exact=True) - 1
if min_degree > 0:
d = min_degree - 1
combinations -= comb(n_features + d, d, exact=True) - 1
if include_bias:
combinations += 1
return combinations
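    # Worked example of the closed-form count above: with n_features=2,
    # min_degree=0, max_degree=2, interaction_only=False, include_bias=True
    # we get comb(2 + 2, 2) - 1 = 5 non-bias terms plus the bias column,
    # i.e. 6 output features [1, x0, x1, x0^2, x0*x1, x1^2], matching the
    # class docstring example.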
@property
def powers_(self):
"""Exponent for each of the inputs in the output."""
check_is_fitted(self)
combinations = self._combinations(
n_features=self.n_features_in_,
min_degree=self._min_degree,
max_degree=self._max_degree,
interaction_only=self.interaction_only,
include_bias=self.include_bias,
)
return np.vstack(
[np.bincount(c, minlength=self.n_features_in_) for c in combinations]
)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features is None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
powers = self.powers_
input_features = _check_feature_names_in(self, input_features)
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join(
"%s^%d" % (input_features[ind], exp)
if exp != 1
else input_features[ind]
for ind, exp in zip(inds, row[inds])
)
else:
name = "1"
feature_names.append(name)
return np.asarray(feature_names, dtype=object)
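    # For the degree-2 example in the class docstring, the generated names
    # would be ["1", "x0", "x1", "x0^2", "x0 x1", "x1^2"] (or the provided
    # input feature names in place of "x0"/"x1").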
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
_, n_features = self._validate_data(X, accept_sparse=True).shape
if isinstance(self.degree, Integral):
if self.degree == 0 and not self.include_bias:
raise ValueError(
"Setting degree to zero and include_bias to False would result in"
" an empty output array."
)
self._min_degree = 0
self._max_degree = self.degree
elif (
isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2
):
self._min_degree, self._max_degree = self.degree
if not (
isinstance(self._min_degree, Integral)
and isinstance(self._max_degree, Integral)
and self._min_degree >= 0
and self._min_degree <= self._max_degree
):
raise ValueError(
"degree=(min_degree, max_degree) must "
"be non-negative integers that fulfil "
"min_degree <= max_degree, got "
f"{self.degree}."
)
elif self._max_degree == 0 and not self.include_bias:
raise ValueError(
"Setting both min_degree and max_degree to zero and include_bias to"
" False would result in an empty output array."
)
else:
raise ValueError(
"degree must be a non-negative int or tuple "
"(min_degree, max_degree), got "
f"{self.degree}."
)
self.n_output_features_ = self._num_combinations(
n_features=n_features,
min_degree=self._min_degree,
max_degree=self._max_degree,
interaction_only=self.interaction_only,
include_bias=self.include_bias,
)
# We also record the number of output features for
# _max_degree = 0
self._n_out_full = self._num_combinations(
n_features=n_features,
min_degree=0,
max_degree=self._max_degree,
interaction_only=self.interaction_only,
include_bias=self.include_bias,
)
return self
def transform(self, X):
"""Transform data to polynomial features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to transform, row by row.
Prefer CSR over CSC for sparse input (for speed), but CSC is
required if the degree is 4 or higher. If the degree is less than
4 and the input format is CSC, it will be converted to CSR, have
its polynomial features generated, then converted back to CSC.
If the degree is 2 or 3, the method described in "Leveraging
Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices
Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is
used, which is much faster than the method used on CSC input. For
this reason, a CSC input will be converted to CSR, and the output
will be converted back to CSC prior to being returned, hence the
preference of CSR.
Returns
-------
XP : {ndarray, sparse matrix} of shape (n_samples, NP)
The matrix of features, where `NP` is the number of polynomial
features generated from the combination of inputs. If a sparse
matrix is provided, it will be converted into a sparse
`csr_matrix`.
"""
check_is_fitted(self)
X = self._validate_data(
X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc")
)
n_samples, n_features = X.shape
if sparse.isspmatrix_csr(X):
if self._max_degree > 3:
return self.transform(X.tocsc()).tocsr()
to_stack = []
if self.include_bias:
to_stack.append(
sparse.csc_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype))
)
if self._min_degree <= 1 and self._max_degree > 0:
to_stack.append(X)
for deg in range(max(2, self._min_degree), self._max_degree + 1):
Xp_next = _csr_polynomial_expansion(
X.data, X.indices, X.indptr, X.shape[1], self.interaction_only, deg
)
if Xp_next is None:
break
to_stack.append(Xp_next)
if len(to_stack) == 0:
# edge case: deal with empty matrix
XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype)
else:
XP = sparse.hstack(to_stack, format="csr")
elif sparse.isspmatrix_csc(X) and self._max_degree < 4:
return self.transform(X.tocsr()).tocsc()
elif sparse.isspmatrix(X):
combinations = self._combinations(
n_features=n_features,
min_degree=self._min_degree,
max_degree=self._max_degree,
interaction_only=self.interaction_only,
include_bias=self.include_bias,
)
columns = []
for combi in combinations:
if combi:
out_col = 1
for col_idx in combi:
out_col = X[:, col_idx].multiply(out_col)
columns.append(out_col)
else:
bias = sparse.csc_matrix(np.ones((X.shape[0], 1)))
columns.append(bias)
XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
else:
# Do as if _min_degree = 0 and cut down array after the
# computation, i.e. use _n_out_full instead of n_output_features_.
XP = np.empty(
shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order
)
# What follows is a faster implementation of:
# for i, comb in enumerate(combinations):
# XP[:, i] = X[:, comb].prod(1)
# This implementation uses two optimisations.
# First one is broadcasting,
# multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1]
# multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2]
# ...
# multiply ([X[:, start:end], X[:, start]) -> ...
# Second optimisation happens for degrees >= 3.
# Xi^3 is computed reusing previous computation:
# Xi^3 = Xi^2 * Xi.
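            # Illustrative column layout for n_features=2, degree=2,
            # include_bias=True: XP columns end up as
            #   [1, x0, x1, x0^2, x0*x1, x1^2]
            # `index` records, for each feature, the first column of the
            # previous degree's block that involves only features with an
            # index >= that feature, so the broadcasted multiply below
            # produces each new degree-d term exactly once.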
# degree 0 term
if self.include_bias:
XP[:, 0] = 1
current_col = 1
else:
current_col = 0
if self._max_degree == 0:
return XP
# degree 1 term
XP[:, current_col : current_col + n_features] = X
index = list(range(current_col, current_col + n_features))
current_col += n_features
index.append(current_col)
# loop over degree >= 2 terms
for _ in range(2, self._max_degree + 1):
new_index = []
end = index[-1]
for feature_idx in range(n_features):
start = index[feature_idx]
new_index.append(current_col)
if self.interaction_only:
start += index[feature_idx + 1] - index[feature_idx]
next_col = current_col + end - start
if next_col <= current_col:
break
# XP[:, start:end] are terms of degree d - 1
# that exclude feature #feature_idx.
np.multiply(
XP[:, start:end],
X[:, feature_idx : feature_idx + 1],
out=XP[:, current_col:next_col],
casting="no",
)
current_col = next_col
new_index.append(current_col)
index = new_index
if self._min_degree > 1:
n_XP, n_Xout = self._n_out_full, self.n_output_features_
if self.include_bias:
Xout = np.empty(
shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order
)
Xout[:, 0] = 1
Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :]
else:
Xout = XP[:, n_XP - n_Xout :].copy()
XP = Xout
return XP
# TODO:
# - sparse support (either scipy or own cython solution)?
class SplineTransformer(TransformerMixin, BaseEstimator):
"""Generate univariate B-spline bases for features.
Generate a new feature matrix consisting of
`n_splines=n_knots + degree - 1` (`n_knots - 1` for
`extrapolation="periodic"`) spline basis functions
(B-splines) of polynomial order=`degree` for each feature.
Read more in the :ref:`User Guide <spline_transformer>`.
.. versionadded:: 1.0
Parameters
----------
n_knots : int, default=5
Number of knots of the splines if `knots` equals one of
        {'uniform', 'quantile'}. Must be greater than or equal to 2. Ignored if `knots`
is array-like.
degree : int, default=3
The polynomial degree of the spline basis. Must be a non-negative
integer.
knots : {'uniform', 'quantile'} or array-like of shape \
(n_knots, n_features), default='uniform'
Set knot positions such that first knot <= features <= last knot.
- If 'uniform', `n_knots` number of knots are distributed uniformly
from min to max values of the features.
- If 'quantile', they are distributed uniformly along the quantiles of
the features.
- If an array-like is given, it directly specifies the sorted knot
positions including the boundary knots. Note that, internally,
`degree` number of knots are added before the first knot, the same
after the last knot.
extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \
default='constant'
If 'error', values outside the min and max values of the training
features raises a `ValueError`. If 'constant', the value of the
splines at minimum and maximum value of the features is used as
constant extrapolation. If 'linear', a linear extrapolation is used.
If 'continue', the splines are extrapolated as is, i.e. option
`extrapolate=True` in :class:`scipy.interpolate.BSpline`. If
'periodic', periodic splines with a periodicity equal to the distance
between the first and last knot are used. Periodic splines enforce
equal function values and derivatives at the first and last knot.
For example, this makes it possible to avoid introducing an arbitrary
jump between Dec 31st and Jan 1st in spline features derived from a
naturally periodic "day-of-year" input feature. In this case it is
recommended to manually set the knot values to control the period.
include_bias : bool, default=True
If True (default), then the last spline element inside the data range
of a feature is dropped. As B-splines sum to one over the spline basis
functions for each data point, they implicitly include a bias term,
        i.e. a column of ones. It acts as an intercept term in a linear model.
order : {'C', 'F'}, default='C'
Order of output array. 'F' order is faster to compute, but may slow
down subsequent estimators.
Attributes
----------
bsplines_ : list of shape (n_features,)
List of BSplines objects, one for each feature.
n_features_in_ : int
The total number of input features.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_features_out_ : int
The total number of output features, which is computed as
`n_features * n_splines`, where `n_splines` is
the number of bases elements of the B-splines,
`n_knots + degree - 1` for non-periodic splines and
`n_knots - 1` for periodic ones.
If `include_bias=False`, then it is only
`n_features * (n_splines - 1)`.
See Also
--------
KBinsDiscretizer : Transformer that bins continuous data into intervals.
PolynomialFeatures : Transformer that generates polynomial and interaction
features.
Notes
-----
High degrees and a high number of knots can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import SplineTransformer
>>> X = np.arange(6).reshape(6, 1)
>>> spline = SplineTransformer(degree=2, n_knots=3)
>>> spline.fit_transform(X)
array([[0.5 , 0.5 , 0. , 0. ],
[0.18, 0.74, 0.08, 0. ],
[0.02, 0.66, 0.32, 0. ],
[0. , 0.32, 0.66, 0.02],
[0. , 0.08, 0.74, 0.18],
[0. , 0. , 0.5 , 0.5 ]])
"""
_parameter_constraints: dict = {
"n_knots": [Interval(Integral, 2, None, closed="left")],
"degree": [Interval(Integral, 0, None, closed="left")],
"knots": [StrOptions({"uniform", "quantile"}), "array-like"],
"extrapolation": [
StrOptions({"error", "constant", "linear", "continue", "periodic"})
],
"include_bias": ["boolean"],
"order": [StrOptions({"C", "F"})],
}
def __init__(
self,
n_knots=5,
degree=3,
*,
knots="uniform",
extrapolation="constant",
include_bias=True,
order="C",
):
self.n_knots = n_knots
self.degree = degree
self.knots = knots
self.extrapolation = extrapolation
self.include_bias = include_bias
self.order = order
@staticmethod
def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
"""Calculate base knot positions.
Base knots such that first knot <= feature <= last knot. For the
B-spline construction with scipy.interpolate.BSpline, 2*degree knots
beyond the base interval are added.
Returns
-------
knots : ndarray of shape (n_knots, n_features), dtype=np.float64
Knot positions (points) of base interval.
"""
if knots == "quantile":
percentiles = 100 * np.linspace(
start=0, stop=1, num=n_knots, dtype=np.float64
)
if sample_weight is None:
knots = np.percentile(X, percentiles, axis=0)
else:
knots = np.array(
[
_weighted_percentile(X, sample_weight, percentile)
for percentile in percentiles
]
)
else:
# knots == 'uniform':
# Note that the variable `knots` has already been validated and
# `else` is therefore safe.
# Disregard observations with zero weight.
mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
x_min = np.amin(X[mask], axis=0)
x_max = np.amax(X[mask], axis=0)
knots = np.linspace(
start=x_min,
stop=x_max,
num=n_knots,
endpoint=True,
dtype=np.float64,
)
return knots
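    # Small illustration (assumed inputs): for X = np.arange(6).reshape(6, 1),
    # knots="uniform" and n_knots=3 the base knots are evenly spaced over
    # [min(X), max(X)]:
    #   array([[0. ], [2.5], [5. ]])
    # With knots="quantile" they would instead follow the percentiles of X.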
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
        # The number of spline basis elements equals the number of columns of
        # the coefficient matrix; for periodic extrapolation the coefficient
        # matrix has extra rows, so shape[0] would over-count.
        n_splines = self.bsplines_[0].c.shape[1]
input_features = _check_feature_names_in(self, input_features)
feature_names = []
for i in range(self.n_features_in_):
for j in range(n_splines - 1 + self.include_bias):
feature_names.append(f"{input_features[i]}_sp_{j}")
return np.asarray(feature_names, dtype=object)
def fit(self, X, y=None, sample_weight=None):
"""Compute knot positions of splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default = None
Individual weights for each sample. Used to calculate quantiles if
`knots="quantile"`. For `knots="uniform"`, zero weighted
observations are ignored for finding the min and max of `X`.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
X = self._validate_data(
X,
reset=True,
accept_sparse=False,
ensure_min_samples=2,
ensure_2d=True,
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
_, n_features = X.shape
if isinstance(self.knots, str):
base_knots = self._get_base_knot_positions(
X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
)
else:
base_knots = check_array(self.knots, dtype=np.float64)
if base_knots.shape[0] < 2:
raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
elif base_knots.shape[1] != n_features:
raise ValueError("knots.shape[1] == n_features is violated.")
elif not np.all(np.diff(base_knots, axis=0) > 0):
raise ValueError("knots must be sorted without duplicates.")
# number of knots for base interval
n_knots = base_knots.shape[0]
if self.extrapolation == "periodic" and n_knots <= self.degree:
raise ValueError(
"Periodic splines require degree < n_knots. Got n_knots="
f"{n_knots} and degree={self.degree}."
)
# number of splines basis functions
if self.extrapolation != "periodic":
n_splines = n_knots + self.degree - 1
else:
# periodic splines have self.degree less degrees of freedom
n_splines = n_knots - 1
degree = self.degree
n_out = n_features * n_splines
# We have to add degree number of knots below, and degree number knots
# above the base knots in order to make the spline basis complete.
if self.extrapolation == "periodic":
# For periodic splines the spacing of the first / last degree knots
# needs to be a continuation of the spacing of the last / first
# base knots.
period = base_knots[-1] - base_knots[0]
knots = np.r_[
base_knots[-(degree + 1) : -1] - period,
base_knots,
base_knots[1 : (degree + 1)] + period,
]
else:
# Eilers & Marx in "Flexible smoothing with B-splines and
# penalties" https://doi.org/10.1214/ss/1038425655 advice
# against repeating first and last knot several times, which
# would have inferior behaviour at boundaries if combined with
# a penalty (hence P-Spline). We follow this advice even if our
# splines are unpenalized. Meaning we do not:
# knots = np.r_[
# np.tile(base_knots.min(axis=0), reps=[degree, 1]),
# base_knots,
# np.tile(base_knots.max(axis=0), reps=[degree, 1])
# ]
            # Instead, we reuse the distance of the 2 first/last knots.
dist_min = base_knots[1] - base_knots[0]
dist_max = base_knots[-1] - base_knots[-2]
knots = np.r_[
np.linspace(
base_knots[0] - degree * dist_min,
base_knots[0] - dist_min,
num=degree,
),
base_knots,
np.linspace(
base_knots[-1] + dist_max,
base_knots[-1] + degree * dist_max,
num=degree,
),
]
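# For example, for a single feature with degree=3 and base knots
# [0, 1, 2, 3] (so dist_min = dist_max = 1), the full knot vector becomes
# [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]: degree extra knots on each side,
# continuing the boundary spacing.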
# With a diagonal coefficient matrix, we get back the spline basis
# elements, i.e. the design matrix of the spline.
# Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
coef = np.eye(n_splines, dtype=np.float64)
if self.extrapolation == "periodic":
coef = np.concatenate((coef, coef[:degree, :]))
extrapolate = self.extrapolation in ["periodic", "continue"]
bsplines = [
BSpline.construct_fast(
knots[:, i], coef, self.degree, extrapolate=extrapolate
)
for i in range(n_features)
]
self.bsplines_ = bsplines
self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
return self
def transform(self, X):
"""Transform each feature data to B-splines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to transform.
Returns
-------
XBS : ndarray of shape (n_samples, n_features * n_splines)
The matrix of features, where n_splines is the number of basis
elements of the B-splines, n_knots + degree - 1.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True)
n_samples, n_features = X.shape
n_splines = self.bsplines_[0].c.shape[1]
degree = self.degree
# Note that scipy BSpline returns float64 arrays and converts input
# x=X[:, i] to c-contiguous float64.
n_out = self.n_features_out_ + n_features * (1 - self.include_bias)
if X.dtype in FLOAT_DTYPES:
dtype = X.dtype
else:
dtype = np.float64
XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order)
for i in range(n_features):
spl = self.bsplines_[i]
if self.extrapolation in ("continue", "error", "periodic"):
if self.extrapolation == "periodic":
# With periodic extrapolation we map x to the segment
# [spl.t[k], spl.t[n]].
# This is equivalent to BSpline(.., extrapolate="periodic")
# for scipy>=1.0.0.
n = spl.t.size - spl.k - 1
# Assign to new array to avoid inplace operation
x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % (
spl.t[n] - spl.t[spl.k]
)
else:
x = X[:, i]
XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x)
else:
xmin = spl.t[degree]
xmax = spl.t[-degree - 1]
mask = (xmin <= X[:, i]) & (X[:, i] <= xmax)
XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i])
# Note for extrapolation:
# 'continue' is already returned as is by scipy BSplines
if self.extrapolation == "error":
# BSpline with extrapolate=False does not raise an error, but
# outputs np.nan.
if np.any(np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)])):
raise ValueError(
"X contains values beyond the limits of the knots."
)
elif self.extrapolation == "constant":
# Set all values beyond xmin and xmax to the value of the
# spline basis functions at those two positions.
# Only the first degree and last degree number of splines
# have non-zero values at the boundaries.
# spline values at boundaries
f_min = spl(xmin)
f_max = spl(xmax)
mask = X[:, i] < xmin
if np.any(mask):
XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[
:degree
]
mask = X[:, i] > xmax
if np.any(mask):
XBS[
mask,
((i + 1) * n_splines - degree) : ((i + 1) * n_splines),
] = f_max[-degree:]
elif self.extrapolation == "linear":
# Continue the degree first and degree last spline bases
# linearly beyond the boundaries, with slope = derivative at
# the boundary.
# Note that all others have derivative = value = 0 at the
# boundaries.
# spline values at boundaries
f_min, f_max = spl(xmin), spl(xmax)
# spline derivatives = slopes at boundaries
fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1)
# Compute the linear continuation.
if degree <= 1:
# For degree=1, the derivative of 2nd spline is not zero at
# boundary. For degree=0 it is the same as 'constant'.
degree += 1
for j in range(degree):
mask = X[:, i] < xmin
if np.any(mask):
XBS[mask, i * n_splines + j] = (
f_min[j] + (X[mask, i] - xmin) * fp_min[j]
)
mask = X[:, i] > xmax
if np.any(mask):
k = n_splines - 1 - j
XBS[mask, i * n_splines + k] = (
f_max[k] + (X[mask, i] - xmax) * fp_max[k]
)
if self.include_bias:
return XBS
else:
# We throw away one spline basis per feature.
# We chose the last one.
indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0]
return XBS[:, indices]
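# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assuming the
# class above is sklearn.preprocessing.SplineTransformer, as the file path and
# attribute names suggest, this shows how each input column is expanded into
# B-spline basis columns and how the resulting columns are named.
if __name__ == "__main__":
    import numpy as np
    from sklearn.preprocessing import SplineTransformer

    X_demo = np.linspace(0.0, 10.0, num=20).reshape(-1, 1)
    spline = SplineTransformer(n_knots=4, degree=3, include_bias=False)
    XBS_demo = spline.fit_transform(X_demo)
    # n_knots + degree - 1 = 6 basis functions per feature; include_bias=False
    # drops one, so the single input column becomes 5 output columns.
    print(XBS_demo.shape)                  # (20, 5)
    print(spline.get_feature_names_out())  # names: x0_sp_0 ... x0_sp_4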
|
{
"content_hash": "a3c298d6dca5aa28cc6536fea0e5b628",
"timestamp": "",
"source": "github",
"line_count": 936,
"max_line_length": 88,
"avg_line_length": 38.48183760683761,
"alnum_prop": 0.5379660734612288,
"repo_name": "betatim/scikit-learn",
"id": "3e1dd4f6602b336a9539b1aedef7cbf7cec74fe9",
"size": "36019",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "sklearn/preprocessing/_polynomial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668499"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10504881"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
}
|
import ctypes
from ctypes import WinDLL, wintypes
shell32 = WinDLL("shell32")
kernel32 = WinDLL("kernel32")
shlwapi = WinDLL("shlwapi")
GetCommandLineW = kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = wintypes.LPCWSTR
CommandLineToArgvW = shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [
wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)]
CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR)
LocalFree = kernel32.LocalFree
LocalFree.argtypes = [wintypes.HLOCAL]
LocalFree.restype = wintypes.HLOCAL
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx
LPCTSTR = ctypes.c_wchar_p
LPWSTR = wintypes.LPWSTR
LPCWSTR = ctypes.c_wchar_p
LPTSTR = LPWSTR
PCWSTR = ctypes.c_wchar_p
PCTSTR = PCWSTR
PWSTR = ctypes.c_wchar_p
PTSTR = PWSTR
LPVOID = wintypes.LPVOID
WCHAR = wintypes.WCHAR
LPSTR = ctypes.c_char_p
BOOL = wintypes.BOOL
LPBOOL = ctypes.POINTER(BOOL)
UINT = wintypes.UINT
WORD = wintypes.WORD
DWORD = wintypes.DWORD
SHORT = wintypes.SHORT
HANDLE = wintypes.HANDLE
ULONG = wintypes.ULONG
LPCSTR = wintypes.LPCSTR
STD_INPUT_HANDLE = DWORD(-10)
STD_OUTPUT_HANDLE = DWORD(-11)
STD_ERROR_HANDLE = DWORD(-12)
INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
INTERNET_MAX_SCHEME_LENGTH = 32
INTERNET_MAX_PATH_LENGTH = 2048
INTERNET_MAX_URL_LENGTH = (
INTERNET_MAX_SCHEME_LENGTH + len("://") + INTERNET_MAX_PATH_LENGTH)
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_RED = 0x0004
FOREGROUND_INTENSITY = 0x0008
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_RED = 0x0040
BACKGROUND_INTENSITY = 0x0080
COMMON_LVB_REVERSE_VIDEO = 0x4000
COMMON_LVB_UNDERSCORE = 0x8000
UrlCreateFromPathW = shlwapi.UrlCreateFromPathW
UrlCreateFromPathW.argtypes = [
PCTSTR, PTSTR, ctypes.POINTER(DWORD), DWORD]
UrlCreateFromPathW.restype = ctypes.HRESULT
SetEnvironmentVariableW = kernel32.SetEnvironmentVariableW
SetEnvironmentVariableW.argtypes = [LPCTSTR, LPCTSTR]
SetEnvironmentVariableW.restype = wintypes.BOOL
GetEnvironmentVariableW = kernel32.GetEnvironmentVariableW
GetEnvironmentVariableW.argtypes = [LPCTSTR, LPTSTR, DWORD]
GetEnvironmentVariableW.restype = DWORD
GetEnvironmentStringsW = kernel32.GetEnvironmentStringsW
GetEnvironmentStringsW.argtypes = []
GetEnvironmentStringsW.restype = ctypes.c_void_p
FreeEnvironmentStringsW = kernel32.FreeEnvironmentStringsW
FreeEnvironmentStringsW.argtypes = [ctypes.c_void_p]
FreeEnvironmentStringsW.restype = ctypes.c_bool
GetStdHandle = kernel32.GetStdHandle
GetStdHandle.argtypes = [DWORD]
GetStdHandle.restype = HANDLE
class COORD(ctypes.Structure):
_fields_ = [
("X", SHORT),
("Y", SHORT),
]
class SMALL_RECT(ctypes.Structure):
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
GetConsoleScreenBufferInfo = kernel32.GetConsoleScreenBufferInfo
GetConsoleScreenBufferInfo.argtypes = [
HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
GetConsoleScreenBufferInfo.restype = BOOL
GetConsoleOutputCP = kernel32.GetConsoleOutputCP
GetConsoleOutputCP.argtypes = []
GetConsoleOutputCP.restype = UINT
SetConsoleOutputCP = kernel32.SetConsoleOutputCP
SetConsoleOutputCP.argtypes = [UINT]
SetConsoleOutputCP.restype = BOOL
GetConsoleCP = kernel32.GetConsoleCP
GetConsoleCP.argtypes = []
GetConsoleCP.restype = UINT
SetConsoleCP = kernel32.SetConsoleCP
SetConsoleCP.argtypes = [UINT]
SetConsoleCP.restype = BOOL
SetConsoleTextAttribute = kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [HANDLE, WORD]
SetConsoleTextAttribute.restype = BOOL
SetConsoleCursorPosition = kernel32.SetConsoleCursorPosition
SetConsoleCursorPosition.argtypes = [HANDLE, COORD]
SetConsoleCursorPosition.restype = BOOL
ReadConsoleW = kernel32.ReadConsoleW
ReadConsoleW.argtypes = [HANDLE, LPVOID, DWORD, ctypes.POINTER(DWORD), LPVOID]
ReadConsoleW.restype = BOOL
MultiByteToWideChar = kernel32.MultiByteToWideChar
MultiByteToWideChar.argtypes = [
UINT, DWORD, LPCSTR, ctypes.c_int, LPWSTR, ctypes.c_int]
MultiByteToWideChar.restype = ctypes.c_int
WideCharToMultiByte = kernel32.WideCharToMultiByte
WideCharToMultiByte.argtypes = [
UINT, DWORD, LPCWSTR, ctypes.c_int, LPSTR, ctypes.c_int, LPCSTR, LPBOOL]
WideCharToMultiByte.restype = ctypes.c_int
MoveFileW = kernel32.MoveFileW
MoveFileW.argtypes = [LPCTSTR, LPCTSTR]
MoveFileW.restype = BOOL
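# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; Windows only).
# It exercises two of the bindings declared above, GetStdHandle and
# GetConsoleScreenBufferInfo, to read the size of the current console buffer.
if __name__ == "__main__":
    handle = GetStdHandle(STD_OUTPUT_HANDLE)
    if handle and handle != INVALID_HANDLE_VALUE:
        info = CONSOLE_SCREEN_BUFFER_INFO()
        if GetConsoleScreenBufferInfo(handle, ctypes.byref(info)):
            # dwSize is a COORD: X is the buffer width, Y its height, in cells.
            print("console buffer: %dx%d" % (info.dwSize.X, info.dwSize.Y))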
|
{
"content_hash": "2db9e3a7246558adee1fde4ec3b377c0",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 27.579881656804734,
"alnum_prop": 0.7747264535507402,
"repo_name": "lazka/senf",
"id": "ab312e4317c63a5f1d2e00b5c41dfbaa436580e1",
"size": "5779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senf/_winapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108302"
},
{
"name": "Shell",
"bytes": "2839"
}
],
"symlink_target": ""
}
|
import json
import os
BASE_DIR = os.path.dirname(__file__)
def load_debater_rankings():
return json.load(
open(os.path.join(BASE_DIR, "debater_finished_scores.json"), "r"))
def load_team_rankings():
return json.load(
open(os.path.join(BASE_DIR, "team_finished_scores.json"), "r"))
|
{
"content_hash": "8f9b2965899c85a8f49a38d7c0bfea58",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.6516129032258065,
"repo_name": "jolynch/mit-tab",
"id": "fd5f7a4ddb6d2f45e9ffc85f6cc60241c6b05e41",
"size": "310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mittab/libs/tests/data/load_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17375"
},
{
"name": "HTML",
"bytes": "59858"
},
{
"name": "JavaScript",
"bytes": "13569"
},
{
"name": "Makefile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "262840"
},
{
"name": "Shell",
"bytes": "1469"
}
],
"symlink_target": ""
}
|
"""Excel IRTDServer implementation.
This module is a functional example of how to implement the IRTDServer interface
in python, using the pywin32 extensions. Further details, about this interface
and it can be found at:
http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dnexcl2k2/html/odc_xlrtdfaq.asp
"""
# Copyright (c) 2003-2004 by Chris Nilsson <chris@slort.org>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Christopher Nilsson (the author) not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
import pythoncom
import win32com.client
from win32com import universal
from win32com.client import gencache
from win32com.server.exception import COMException
import threading
import datetime # For the example classes...
# Typelib info for version 10 - aka Excel XP.
# This is the minimum version of excel that we can work with as this is when
# Microsoft introduced these interfaces.
EXCEL_TLB_GUID = '{00020813-0000-0000-C000-000000000046}'
EXCEL_TLB_LCID = 0
EXCEL_TLB_MAJOR = 1
EXCEL_TLB_MINOR = 4
# Import the excel typelib to make sure we've got early-binding going on.
# The "ByRef" parameters we use later won't work without this.
gencache.EnsureModule(EXCEL_TLB_GUID, EXCEL_TLB_LCID, \
EXCEL_TLB_MAJOR, EXCEL_TLB_MINOR)
# Tell pywin to import these extra interfaces.
# --
# QUESTION: Why? The interfaces seem to descend from IDispatch, so
# I'd have thought, for example, calling callback.UpdateNotify() (on the
# IRTDUpdateEvent callback excel gives us) would work without molestation.
# But the callback needs to be cast to a "real" IRTDUpdateEvent type. Hmm...
# This is where my small knowledge of the pywin framework / COM gets hazy.
# --
# Again, we feed in the Excel typelib as the source of these interfaces.
universal.RegisterInterfaces(EXCEL_TLB_GUID,
EXCEL_TLB_LCID, EXCEL_TLB_MAJOR, EXCEL_TLB_MINOR,
['IRtdServer','IRTDUpdateEvent'])
class ExcelRTDServer(object):
"""Base RTDServer class.
Provides most of the features needed to implement the IRtdServer interface.
Manages topic adding, removal, and packing up the values for excel.
Shouldn't be instantiated directly.
Instead, descendant classes should override the CreateTopic() method.
Topic objects only need to provide a GetValue() function to play nice here.
The values given need to be atomic (eg. string, int, float... etc).
Also note: nothing has been done within this class to ensure that we get
time to check our topics for updates. I've left that up to the subclass
since the ways, and needs, of refreshing your topics will vary greatly. For
example, the sample implementation uses a timer thread to wake itself up.
Whichever way you choose to do it, your class needs to be able to wake up
occasionally, since excel will never call your class without being asked to
first.
Excel will communicate with our object in this order:
1. Excel instantiates our object and calls ServerStart, providing us with
an IRTDUpdateEvent callback object.
2. Excel calls ConnectData when it wants to subscribe to a new "topic".
3. When we have new data to provide, we call the UpdateNotify method of the
callback object we were given.
4. Excel calls our RefreshData method, and receives a 2d SafeArray (row-major)
containing the Topic ids in the 1st dim, and the topic values in the
2nd dim.
5. When not needed anymore, Excel will call our DisconnectData to
unsubscribe from a topic.
6. When there are no more topics left, Excel will call our ServerTerminate
method to kill us.
Throughout, at undetermined periods, Excel will call our Heartbeat
method to see if we're still alive. It must return a non-zero value, or
we'll be killed.
NOTE: By default, excel will at most call RefreshData once every 2 seconds.
This is a setting that needs to be changed excel-side. To change this,
you can set the throttle interval like this in the excel VBA object model:
Application.RTD.ThrottleInterval = 1000 ' milliseconds
"""
_com_interfaces_ = ['IRtdServer']
_public_methods_ = ['ConnectData','DisconnectData','Heartbeat',
'RefreshData','ServerStart','ServerTerminate']
_reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
#_reg_clsid_ = "# subclass must provide this class attribute"
#_reg_desc_ = "# subclass should provide this description"
#_reg_progid_ = "# subclass must provide this class attribute"
ALIVE = 1
NOT_ALIVE = 0
def __init__(self):
"""Constructor"""
super(ExcelRTDServer, self).__init__()
self.IsAlive = self.ALIVE
self.__callback = None
self.topics = {}
def SignalExcel(self):
"""Use the callback we were given to tell excel new data is available."""
if self.__callback is None:
raise COMException(desc="Callback excel provided is Null")
self.__callback.UpdateNotify()
def ConnectData(self, TopicID, Strings, GetNewValues):
"""Creates a new topic out of the Strings excel gives us."""
try:
self.topics[TopicID] = self.CreateTopic(Strings)
except Exception as why:
raise COMException(desc=str(why))
GetNewValues = True
result = self.topics[TopicID]
if result is None:
result = "# %s: Waiting for update" % self.__class__.__name__
else:
result = result.GetValue()
# fire out internal event...
self.OnConnectData(TopicID)
# GetNewValues as per interface is ByRef, so we need to pass it back too.
return result, GetNewValues
def DisconnectData(self, TopicID):
"""Deletes the given topic."""
self.OnDisconnectData(TopicID)
if TopicID in self.topics:
self.topics[TopicID] = None
del self.topics[TopicID]
def Heartbeat(self):
"""Called by excel to see if we're still here."""
return self.IsAlive
def RefreshData(self, TopicCount):
"""Packs up the topic values. Called by excel when it's ready for an update.
Needs to:
* Return the current number of topics, via the "ByRef" TopicCount
* Return a 2d SafeArray of the topic data.
- 1st dim: topic numbers
- 2nd dim: topic values
We could do some caching, instead of repacking every time...
But this works for demonstration purposes."""
TopicCount = len(self.topics)
self.OnRefreshData()
# Grow the lists, so we don't need a heap of calls to append()
results = [[None] * TopicCount, [None] * TopicCount]
# Excel expects a 2-dimensional array. The first dim contains the
# topic numbers, and the second contains the values for the topics.
# In true VBA style (yuck), we need to pack the array in row-major format,
# which looks like:
# ( (topic_num1, topic_num2, ..., topic_numN), \
# (topic_val1, topic_val2, ..., topic_valN) )
for idx, topicdata in enumerate(self.topics.items()):
topicNum, topic = topicdata
results[0][idx] = topicNum
results[1][idx] = topic.GetValue()
# TopicCount is meant to be passed to us ByRef, so return it as well, as per
# the way pywin32 handles ByRef arguments.
return tuple(results), TopicCount
def ServerStart(self, CallbackObject):
"""Excel has just created us... We take its callback for later, and set up shop."""
self.IsAlive = self.ALIVE
if CallbackObject is None:
raise COMException(desc='Excel did not provide a callback')
# Need to "cast" the raw PyIDispatch object to the IRTDUpdateEvent interface
IRTDUpdateEventKlass = win32com.client.CLSIDToClass.GetClass('{A43788C1-D91B-11D3-8F39-00C04F3651B8}')
self.__callback = IRTDUpdateEventKlass(CallbackObject)
self.OnServerStart()
return self.IsAlive
def ServerTerminate(self):
"""Called when excel no longer wants us."""
self.IsAlive = self.NOT_ALIVE # On next heartbeat, excel will free us
self.OnServerTerminate()
def CreateTopic(self, TopicStrings=None):
"""Topic factory method. Subclass must override.
Topic objects need to provide:
* GetValue() method which returns an atomic value.
Will raise NotImplementedError if not overridden.
"""
raise NotImplementedError('Subclass must implement')
# Overridable class events...
def OnConnectData(self, TopicID):
"""Called when a new topic has been created, at excel's request."""
pass
def OnDisconnectData(self, TopicID):
"""Called when a topic is about to be deleted, at excel's request."""
pass
def OnRefreshData(self):
"""Called when excel has requested all current topic data."""
pass
def OnServerStart(self):
"""Called when excel has instanciated us."""
pass
def OnServerTerminate(self):
"""Called when excel is about to destroy us."""
pass
class RTDTopic(object):
"""Base RTD Topic.
Only method required by our RTDServer implementation is GetValue().
The others are more for convenience."""
def __init__(self, TopicStrings):
super(RTDTopic, self).__init__()
self.TopicStrings = TopicStrings
self.__currentValue = None
self.__dirty = False
def Update(self, sender):
"""Called by the RTD Server.
Gives us a chance to check if our topic data needs to be
changed (eg. check a file, quiz a database, etc)."""
raise NotImplementedError('subclass must implement')
def Reset(self):
"""Call when this topic isn't considered "dirty" anymore."""
self.__dirty = False
def GetValue(self):
return self.__currentValue
def SetValue(self, value):
self.__dirty = True
self.__currentValue = value
def HasChanged(self):
return self.__dirty
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
######################################
# Example classes
######################################
class TimeServer(ExcelRTDServer):
"""Example Time RTD server.
Sends time updates back to excel.
example of use, in an excel sheet:
=RTD("Python.RTD.TimeServer","","seconds","5")
This will cause a timestamp string to fill the cell, and update its value
every 5 seconds (or as close as possible depending on how busy excel is).
The empty string parameter denotes the com server is running on the local
machine. Otherwise, put in the hostname to look on. For more info
on this, lookup the Excel help for its "RTD" worksheet function.
Obviously, you'd want to wrap this kind of thing in a friendlier VBA
function.
Also, remember that the RTD function accepts a maximum of 28 arguments!
If you want to pass more, you may need to concatenate arguments into one
string, and have your topic parse them appropriately.
"""
# win32com.server setup attributes...
# Never copy the _reg_clsid_ value in your own classes!
_reg_clsid_ = '{EA7F2CF1-11A2-45E4-B2D5-68E240DB8CB1}'
_reg_progid_ = 'Python.RTD.TimeServer'
_reg_desc_ = "Python class implementing Excel IRTDServer -- feeds time"
# other class attributes...
INTERVAL = 0.5 # secs. Threaded timer will wake us up at this interval.
def __init__(self):
super(TimeServer, self).__init__()
# Simple timer thread to ensure we get to update our topics, and
# tell excel about any changes. This is a pretty basic and dirty way to
# do this. Ideally, there should be some sort of waitable (eg. either win32
# event, socket data event...) and be kicked off by that event triggering.
# As soon as we set up shop here, we _must_ return control back to excel.
# (ie. we can't block and do our own thing...)
self.ticker = threading.Timer(self.INTERVAL, self.Update)
def OnServerStart(self):
self.ticker.start()
def OnServerTerminate(self):
if not self.ticker.finished.isSet():
self.ticker.cancel() # Cancel our wake-up thread. Excel has killed us.
def Update(self):
# Get our wake-up thread ready...
self.ticker = threading.Timer(self.INTERVAL, self.Update)
try:
# Check if any of our topics have new info to pass on
if len(self.topics):
refresh = False
for topic in self.topics.values():
topic.Update(self)
if topic.HasChanged():
refresh = True
topic.Reset()
if refresh:
self.SignalExcel()
finally:
self.ticker.start() # Make sure we get to run again
def CreateTopic(self, TopicStrings=None):
"""Topic factory. Builds a TimeTopic object out of the given TopicStrings."""
return TimeTopic(TopicStrings)
class TimeTopic(RTDTopic):
"""Example topic for example RTD server.
Will accept some simple commands to alter how long to delay value updates.
Commands:
* seconds, delay_in_seconds
* minutes, delay_in_minutes
* hours, delay_in_hours
"""
def __init__(self, TopicStrings):
super(TimeTopic, self).__init__(TopicStrings)
try:
self.cmd, self.delay = self.TopicStrings
except Exception as E:
# We could simply return a "# ERROR" type string as the
# topic value, but explosions like this should be able to get handled by
# the VBA-side "On Error" stuff.
raise ValueError("Invalid topic strings: %s" % str(TopicStrings))
#self.cmd = str(self.cmd)
self.delay = float(self.delay)
# setup our initial value
self.checkpoint = self.timestamp()
self.SetValue(str(self.checkpoint))
def timestamp(self):
return datetime.datetime.now()
def Update(self, sender):
now = self.timestamp()
delta = now - self.checkpoint
refresh = False
if self.cmd == "seconds":
if delta.seconds >= self.delay:
refresh = True
elif self.cmd == "minutes":
if delta.minutes >= self.delay:
refresh = True
elif self.cmd == "hours":
if delta.hours >= self.delay:
refresh = True
else:
self.SetValue("#Unknown command: " + self.cmd)
if refresh:
self.SetValue(str(now))
self.checkpoint = now
if __name__ == "__main__":
import win32com.server.register
# Register/Unregister TimeServer example
# eg. at the command line: excelrtd.py --register
# Then type in an excel cell something like:
# =RTD("Python.RTD.TimeServer","","seconds","5")
win32com.server.register.UseCommandLine(TimeServer)
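# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original demo): the smallest useful
# subclass pair. EchoTopic just hands back the strings Excel passed in, so no
# wake-up timer is needed. The clsid/progid below are placeholders and would
# need real values before registering this server.
class EchoTopic(RTDTopic):
    def Update(self, sender):
        # Nothing to refresh; the value never changes after construction.
        pass

class EchoServer(ExcelRTDServer):
    _reg_clsid_ = "{00000000-0000-0000-0000-000000000000}"  # placeholder only
    _reg_progid_ = "Python.RTD.EchoServer"                  # placeholder only
    _reg_desc_ = "Minimal IRtdServer sketch that echoes its topic strings"

    def CreateTopic(self, TopicStrings=None):
        topic = EchoTopic(TopicStrings)
        topic.SetValue(" ".join(TopicStrings or ()))
        return topic
# In a worksheet this would be used the same way as TimeServer above, e.g.
# =RTD("Python.RTD.EchoServer","","hello","world") once registered.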
|
{
"content_hash": "8494596cc0a4cc33fe03134e039c6c19",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 106,
"avg_line_length": 37.960880195599024,
"alnum_prop": 0.6818240370990596,
"repo_name": "huguesv/PTVS",
"id": "fea0754fec63380fac06b42b697321be71fb0c51",
"size": "15526",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/demos/excelRTDServer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12464429"
},
{
"name": "C++",
"bytes": "211838"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "913395"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import re
import smartypants
def amp(text):
"""Wraps apersands in HTML with ``<span class="amp">`` so they can be
styled with CSS. Apersands are also normalized to ``&``. Requires
ampersands to have whitespace or an `` `` on both sides.
>>> amp('One & two')
u'One <span class="amp">&</span> two'
>>> amp('One & two')
u'One <span class="amp">&</span> two'
>>> amp('One & two')
u'One <span class="amp">&</span> two'
>>> amp('One & two')
u'One <span class="amp">&</span> two'
It won't mess up & that are already wrapped, in entities or URLs
>>> amp('One <span class="amp">&</span> two')
u'One <span class="amp">&</span> two'
>>> amp('“this” & <a href="/?that&test">that</a>')
u'“this” <span class="amp">&</span> <a href="/?that&test">that</a>'
It should ignore standalone amps that are in attributes
>>> amp('<link href="xyz.html" title="One & Two">xyz</link>')
u'<link href="xyz.html" title="One & Two">xyz</link>'
"""
text = unicode(text)
# tag_pattern from http://haacked.com/archive/2004/10/25/usingregularexpressionstomatchhtml.aspx
# it kinda sucks but it fixes the standalone amps in attributes bug
tag_pattern = '</?\w+((\s+\w+(\s*=\s*(?:".*?"|\'.*?\'|[^\'">\s]+))?)+\s*|\s*)/?>'
amp_finder = re.compile(r"(\s|&nbsp;)(&|&amp;|&\#38;)(\s|&nbsp;)")
intra_tag_finder = re.compile(r'(?P<prefix>(%s)?)(?P<text>([^<]*))(?P<suffix>(%s)?)' % (tag_pattern, tag_pattern))
def _amp_process(groups):
prefix = groups.group('prefix') or ''
text = amp_finder.sub(r"""\1<span class="amp">&amp;</span>\3""", groups.group('text'))
suffix = groups.group('suffix') or ''
return prefix + text + suffix
output = intra_tag_finder.sub(_amp_process, text)
return output
def caps(text):
"""Wraps multiple capital letters in ``<span class="caps">``
so they can be styled with CSS.
>>> caps("A message from KU")
u'A message from <span class="caps">KU</span>'
Uses the smartypants tokenizer to not screw with HTML or with tags it shouldn't.
>>> caps("<PRE>CAPS</pre> more CAPS")
u'<PRE>CAPS</pre> more <span class="caps">CAPS</span>'
>>> caps("A message from 2KU2 with digits")
u'A message from <span class="caps">2KU2</span> with digits'
>>> caps("Dotted caps followed by spaces should never include them in the wrap D.O.T. like so.")
u'Dotted caps followed by spaces should never include them in the wrap <span class="caps">D.O.T.</span> like so.'
All caps with apostrophes in them shouldn't break. Only handles dumb apostrophes though.
>>> caps("JIMMY'S")
u'<span class="caps">JIMMY\\'S</span>'
>>> caps("<i>D.O.T.</i>HE34T<b>RFID</b>")
u'<i><span class="caps">D.O.T.</span></i><span class="caps">HE34T</span><b><span class="caps">RFID</span></b>'
"""
text = unicode(text)
tokens = smartypants._tokenize(text)
result = []
in_skipped_tag = False
cap_finder = re.compile(r"""(
(\b[A-Z\d]* # Group 2: Any amount of caps and digits
[A-Z]\d*[A-Z] # A cap string must at least include two caps (but they can have digits between them)
[A-Z\d']*\b) # Any amount of caps and digits or dumb apostrophes
| (\b[A-Z]+\.\s? # OR: Group 3: Some caps, followed by a '.' and an optional space
(?:[A-Z]+\.\s?)+) # Followed by the same thing at least once more
(?:\s|\b|$))
""", re.VERBOSE)
def _cap_wrapper(matchobj):
"""This is necessary to keep dotted cap strings to pick up extra spaces"""
if matchobj.group(2):
return """<span class="caps">%s</span>""" % matchobj.group(2)
else:
if matchobj.group(3)[-1] == " ":
caps = matchobj.group(3)[:-1]
tail = ' '
else:
caps = matchobj.group(3)
tail = ''
return """<span class="caps">%s</span>%s""" % (caps, tail)
tags_to_skip_regex = re.compile("<(/)?(?:pre|code|kbd|script|math)[^>]*>", re.IGNORECASE)
for token in tokens:
if token[0] == "tag":
# Don't mess with tags.
result.append(token[1])
close_match = tags_to_skip_regex.match(token[1])
if close_match and close_match.group(1) == None:
in_skipped_tag = True
else:
in_skipped_tag = False
else:
if in_skipped_tag:
result.append(token[1])
else:
result.append(cap_finder.sub(_cap_wrapper, token[1]))
output = "".join(result)
return output
def initial_quotes(text):
"""Wraps initial quotes in ``class="dquo"`` for double quotes or
``class="quo"`` for single quotes. Works in these block tags ``(h1-h6, p, li, dt, dd)``
and also accounts for potential opening inline elements ``a, em, strong, span, b, i``
>>> initial_quotes('"With primes"')
u'<span class="dquo">"</span>With primes"'
>>> initial_quotes("'With single primes'")
u'<span class="quo">\\'</span>With single primes\\''
>>> initial_quotes('<a href="#">"With primes and a link"</a>')
u'<a href="#"><span class="dquo">"</span>With primes and a link"</a>'
>>> initial_quotes('“With smartypanted quotes”')
u'<span class="dquo">“</span>With smartypanted quotes”'
"""
text = unicode(text)
quote_finder = re.compile(r"""((<(p|h[1-6]|li|dt|dd)[^>]*>|^) # start with an opening p, h1-6, li, dd, dt or the start of the string
\s* # optional white space!
(<(a|em|span|strong|i|b)[^>]*>\s*)*) # optional opening inline tags, with more optional white space for each.
(("|“|&\#8220;)|('|‘|&\#8216;)) # Find me a quote! (only need to find the left quotes and the primes)
# double quotes are in group 7, singles in group 8
""", re.VERBOSE)
def _quote_wrapper(matchobj):
if matchobj.group(7):
classname = "dquo"
quote = matchobj.group(7)
else:
classname = "quo"
quote = matchobj.group(8)
return """%s<span class="%s">%s</span>""" % (matchobj.group(1), classname, quote)
output = quote_finder.sub(_quote_wrapper, text)
return output
def smartquotes(text):
"""Applies smarty pants to curl quotes.
>>> smartquotes('The "Green" man')
u'The “Green” man'
"""
text = unicode(text)
output = smartypants.smartyPants(text)
return output
def typogrify(text):
"""The super typography filter
Applies the following filters: widont, smartquotes, caps, amp, initial_quotes
>>> typogrify('<h2>"Jayhawks" & KU fans act extremely obnoxiously</h2>')
u'<h2><span class="dquo">“</span>Jayhawks” <span class="amp">&</span> <span class="caps">KU</span> fans act extremely obnoxiously</h2>'
"""
text = unicode(text)
text = amp(text)
text = widont(text)
text = smartquotes(text)
text = caps(text)
text = initial_quotes(text)
return text
def widont(text):
"""Replaces the space between the last two words in a string with `` ``
Works in these block tags ``(h1-h6, p, li, dd, dt)`` and also accounts for
potential closing inline elements ``a, em, strong, span, b, i``
>>> widont('A very simple test')
u'A very simple test'
Single word items shouldn't be changed
>>> widont('Test')
u'Test'
>>> widont(' Test')
u' Test'
>>> widont('<ul><li>Test</p></li><ul>')
u'<ul><li>Test</p></li><ul>'
>>> widont('<ul><li> Test</p></li><ul>')
u'<ul><li> Test</p></li><ul>'
>>> widont('<p>In a couple of paragraphs</p><p>paragraph two</p>')
u'<p>In a couple of paragraphs</p><p>paragraph two</p>'
>>> widont('<h1><a href="#">In a link inside a heading</i> </a></h1>')
u'<h1><a href="#">In a link inside a heading</i> </a></h1>'
>>> widont('<p>Some text <a href="#">nearly ends with a link</a>.</p>')
u'<p>Some text <a href="#">nearly ends with a link</a>.</p>'
>>> widont('<h1><a href="#">In a link</a> followed by other text</h1>')
u'<h1><a href="#">In a link</a> followed by other text</h1>'
Empty HTMLs shouldn't error
>>> widont('<h1><a href="#"></a></h1>')
u'<h1><a href="#"></a></h1>'
>>> widont('<div>Divs get no love!</div>')
u'<div>Divs get no love!</div>'
>>> widont('<pre>Neither do PREs</pre>')
u'<pre>Neither do PREs</pre>'
>>> widont('<div><p>But divs with paragraphs do!</p></div>')
u'<div><p>But divs with paragraphs do!</p></div>'
"""
text = unicode(text)
widont_finder = re.compile(r"""((?:</?(?:a|em|span|strong|i|b)[^>]*>)|[^<>\s]) # must be preceded by an approved inline opening or closing tag or a nontag/nonspace
\s+ # the space to replace
([^<>\s]+ # must be followed by non-tag non-space characters
\s* # optional white space!
(</(a|em|span|strong|i|b)>\s*)* # optional closing inline tags with optional white space after each
\.? # optional period
((</(p|h[1-6]|li|dt|dd)>)|$)) # end with a closing p, h1-6, li or the end of the string
""", re.VERBOSE)
output = widont_finder.sub(r'\1&nbsp;\2', text)
return output
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
{
"content_hash": "e6cef0023106799faeadd54e485f442d",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 168,
"avg_line_length": 42.118367346938776,
"alnum_prop": 0.532319023161159,
"repo_name": "mirisuzanne/sidesaddle",
"id": "f0517e960d1f476f5a53d848212593900c9208ed",
"size": "10319",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/typogrify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29795"
},
{
"name": "HTML",
"bytes": "7160"
},
{
"name": "JavaScript",
"bytes": "3034"
},
{
"name": "Makefile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "18473"
},
{
"name": "Ruby",
"bytes": "265"
}
],
"symlink_target": ""
}
|
"""
Code in ``conda.base`` is the lowest level of the application stack. It is loaded and executed
virtually every time the application is executed. Any code within, and any of its imports, must
be highly performant.
Conda modules importable from ``conda.base`` are
- ``conda._vendor``
- ``conda.base``
- ``conda.common``
Modules prohibited from importing ``conda.base`` are:
- ``conda._vendor``
- ``conda.common``
All other ``conda`` modules may import from ``conda.base``.
"""
|
{
"content_hash": "d51d4e84da87aa92914f02ddfd2ae267",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 95,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.7072164948453609,
"repo_name": "zooba/PTVS",
"id": "b5bc3ae1ad278385fe1bb12b03ec2d75b4cacc54",
"size": "584",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/base/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import dataclasses
import json # type: ignore
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
from .base import RegionUrlMapsTransport
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class RegionUrlMapsRestInterceptor:
"""Interceptor for RegionUrlMaps.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the RegionUrlMapsRestTransport.
.. code-block:: python
class MyCustomRegionUrlMapsInterceptor(RegionUrlMapsRestInterceptor):
def pre_delete(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete(response):
logging.log(f"Received response: {response}")
def pre_get(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get(response):
logging.log(f"Received response: {response}")
def pre_insert(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_insert(response):
logging.log(f"Received response: {response}")
def pre_list(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list(response):
logging.log(f"Received response: {response}")
def pre_patch(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_patch(response):
logging.log(f"Received response: {response}")
def pre_update(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update(response):
logging.log(f"Received response: {response}")
def pre_validate(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_validate(response):
logging.log(f"Received response: {response}")
transport = RegionUrlMapsRestTransport(interceptor=MyCustomRegionUrlMapsInterceptor())
client = RegionUrlMapsClient(transport=transport)
"""
def pre_delete(
self,
request: compute.DeleteRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.DeleteRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_get(
self,
request: compute.GetRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_get(self, response: compute.UrlMap) -> compute.UrlMap:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_insert(
self,
request: compute.InsertRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InsertRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListRegionUrlMapsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListRegionUrlMapsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_list(self, response: compute.UrlMapList) -> compute.UrlMapList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_patch(
self,
request: compute.PatchRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.PatchRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for patch
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_patch(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for patch
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_update(
self,
request: compute.UpdateRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.UpdateRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_update(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for update
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
def pre_validate(
self,
request: compute.ValidateRegionUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ValidateRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for validate
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionUrlMaps server.
"""
return request, metadata
def post_validate(
self, response: compute.UrlMapsValidateResponse
) -> compute.UrlMapsValidateResponse:
"""Post-rpc interceptor for validate
Override in a subclass to manipulate the response
after it is returned by the RegionUrlMaps server but before
it is returned to user code.
"""
return response
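# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated file): a concrete interceptor
# with full method signatures (the docstring example above omits ``self`` for
# brevity). Only the Get hooks are overridden; every other method keeps the
# no-op behaviour inherited from RegionUrlMapsRestInterceptor. It could be
# wired in with
#   RegionUrlMapsRestTransport(interceptor=_LoggingRegionUrlMapsInterceptor()).
import logging

class _LoggingRegionUrlMapsInterceptor(RegionUrlMapsRestInterceptor):
    def pre_get(
        self,
        request: compute.GetRegionUrlMapRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.GetRegionUrlMapRequest, Sequence[Tuple[str, str]]]:
        # Log the outgoing request before it reaches the RegionUrlMaps server.
        logging.getLogger(__name__).debug("RegionUrlMaps.Get request: %s", request)
        return request, metadata

    def post_get(self, response: compute.UrlMap) -> compute.UrlMap:
        # Inspect the UrlMap before it is handed back to user code.
        logging.getLogger(__name__).debug("RegionUrlMaps.Get response: %s", response.name)
        return response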
@dataclasses.dataclass
class RegionUrlMapsRestStub:
_session: AuthorizedSession
_host: str
_interceptor: RegionUrlMapsRestInterceptor
class RegionUrlMapsRestTransport(RegionUrlMapsTransport):
"""REST backend transport for RegionUrlMaps.
The RegionUrlMaps API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via an issue in this
library's source repository. Thank you!
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[RegionUrlMapsRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via a GitHub issue in
this library's repository. Thank you!
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or RegionUrlMapsRestInterceptor()
self._prep_wrapped_messages(client_info)
class _Delete(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
pb_request = compute.DeleteRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMap:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMap:
Represents a URL Map resource. Compute Engine has two
URL Map resources: \*
`Global </compute/docs/reference/rest/v1/urlMaps>`__ \*
`Regional </compute/docs/reference/rest/v1/regionUrlMaps>`__
A URL map resource is a component of certain types of
cloud load balancers and Traffic Director: \* urlMaps
are used by external HTTP(S) load balancers and Traffic
Director. \* regionUrlMaps are used by internal HTTP(S)
load balancers. For a list of supported URL map features
by the load balancer type, see the Load balancing
features: Routing and traffic management table. For a
list of supported URL map features for Traffic Director,
see the Traffic Director features: Routing and traffic
management table. This resource defines mappings from
hostnames and URL paths to either a backend service or a
backend bucket. To use the global urlMaps resource, the
backend service must have a loadBalancingScheme of
either EXTERNAL or INTERNAL_SELF_MANAGED. To use the
regionUrlMaps resource, the backend service must have a
loadBalancingScheme of INTERNAL_MANAGED. For more
information, read URL Map Concepts.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
pb_request = compute.GetRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMap()
pb_resp = compute.UrlMap.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
pb_request = compute.InsertRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_insert(resp)
return resp
class _List(RegionUrlMapsRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListRegionUrlMapsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMapList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionUrlMapsRequest):
The request object. A request message for
RegionUrlMaps.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMapList:
Contains a list of UrlMap resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
pb_request = compute.ListRegionUrlMapsRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMapList()
pb_resp = compute.UrlMapList.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list(resp)
return resp
class _Patch(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Patch")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.PatchRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the patch method over HTTP.
Args:
request (~.compute.PatchRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Patch. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_patch(request, metadata)
pb_request = compute.PatchRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_patch(resp)
return resp
class _Update(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Update")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.UpdateRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the update method over HTTP.
Args:
request (~.compute.UpdateRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Update. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "put",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_update(request, metadata)
pb_request = compute.UpdateRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update(resp)
return resp
class _Validate(RegionUrlMapsRestStub):
def __hash__(self):
return hash("Validate")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ValidateRegionUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMapsValidateResponse:
r"""Call the validate method over HTTP.
Args:
request (~.compute.ValidateRegionUrlMapRequest):
The request object. A request message for
RegionUrlMaps.Validate. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMapsValidateResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}/validate",
"body": "region_url_maps_validate_request_resource",
},
]
request, metadata = self._interceptor.pre_validate(request, metadata)
pb_request = compute.ValidateRegionUrlMapRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMapsValidateResponse()
pb_resp = compute.UrlMapsValidateResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_validate(resp)
return resp
@property
def delete(
self,
) -> Callable[[compute.DeleteRegionUrlMapRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Delete(self._session, self._host, self._interceptor) # type: ignore
@property
def get(self) -> Callable[[compute.GetRegionUrlMapRequest], compute.UrlMap]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Get(self._session, self._host, self._interceptor) # type: ignore
@property
def insert(
self,
) -> Callable[[compute.InsertRegionUrlMapRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Insert(self._session, self._host, self._interceptor) # type: ignore
@property
def list(self) -> Callable[[compute.ListRegionUrlMapsRequest], compute.UrlMapList]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._List(self._session, self._host, self._interceptor) # type: ignore
@property
def patch(self) -> Callable[[compute.PatchRegionUrlMapRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Patch(self._session, self._host, self._interceptor) # type: ignore
@property
def update(
self,
) -> Callable[[compute.UpdateRegionUrlMapRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Update(self._session, self._host, self._interceptor) # type: ignore
@property
def validate(
self,
) -> Callable[
[compute.ValidateRegionUrlMapRequest], compute.UrlMapsValidateResponse
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Validate(self._session, self._host, self._interceptor) # type: ignore
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("RegionUrlMapsRestTransport",)
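# Illustrative usage sketch (not part of the generated transport module above): this REST
# transport is normally consumed indirectly through the high-level RegionUrlMaps client.
# The project and region names below are placeholders, and the sketch assumes the
# google-cloud-compute package is installed.
if __name__ == "__main__":
    from google.cloud import compute_v1
    client = compute_v1.RegionUrlMapsClient(transport="rest")
    for url_map in client.list(project="my-project", region="us-central1"):
        print(url_map.name)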
|
{
"content_hash": "fdcef2f381bb998ff63f54c5cb2cf6a2",
"timestamp": "",
"source": "github",
"line_count": 1144,
"max_line_length": 105,
"avg_line_length": 40.25699300699301,
"alnum_prop": 0.5802101880401268,
"repo_name": "googleapis/python-compute",
"id": "8b1ac042a56a28e04ae5b6fad83a80c922b49449",
"size": "46655",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/region_url_maps/transports/rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
import six.moves.builtins as __builtin__
setattr(__builtin__, '_', lambda x: x)
# Set up logging to output debugging
import logging
logger = logging.getLogger()
hdlr = logging.FileHandler('run_tests.log', 'w')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
|
{
"content_hash": "2c485807f5eaf90165d8edfea55fed62",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 33.27272727272727,
"alnum_prop": 0.7459016393442623,
"repo_name": "tantexian/sps-2014-12-4",
"id": "b1f12689adcacc3a739339b2cb1835bd1c9a5cbb",
"size": "1131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sps/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "807539"
},
{
"name": "Shell",
"bytes": "7396"
}
],
"symlink_target": ""
}
|
"""Common parameter types for validating a request reference."""
boolean = {
'type': 'boolean',
'enum': [True, False]
}
# NOTE(lbragstad): Be mindful of this pattern as it might require changes
# once this is used on user names, LDAP-based user names specifically since
# commas aren't allowed in the following pattern. Here we are only going to
# check the length of the name and ensure that it's a string. Right now we are
# not going to validate on a naming pattern for issues with
# internationalization.
name = {
'type': 'string',
'minLength': 1,
'maxLength': 255
}
id_string = {
'type': 'string',
'minLength': 1,
'maxLength': 64,
# TODO(lbragstad): Find a way to make this configurable such that the end
# user chooses how much control they want over id_strings with a regex
'pattern': '^[a-zA-Z0-9-]+$'
}
description = {
'type': 'string'
}
url = {
'type': 'string',
'minLength': 0,
'maxLength': 225,
# NOTE(lbragstad): Using a regular expression here instead of the
# FormatChecker object that is built into jsonschema. The FormatChecker
# can validate URI formats but it depends on rfc3987 to do that
# validation, and rfc3987 is GPL licensed. For our purposes here we will
# use a regex and not rely on rfc3987 to validate URIs.
'pattern': '^https?://'
'(?:(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)'
'+[a-zA-Z]{2,6}\.?|'
'localhost|'
'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
'(?::\d+)?'
'(?:/?|[/?]\S+)$'
}
email = {
'type': 'string',
'format': 'email'
}
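# Illustrative only (not part of the original module): a minimal sketch of plugging the
# parameter types above into a JSON-Schema validator. The jsonschema package is an assumed
# external dependency for this example; the module itself does not import it.
if __name__ == '__main__':
    import jsonschema
    user_schema = {
        'type': 'object',
        'properties': {'name': name, 'description': description},
        'required': ['name'],
        'additionalProperties': False,
    }
    jsonschema.validate({'name': 'example-user', 'description': 'demo'}, user_schema)
    try:
        jsonschema.validate({'name': ''}, user_schema)  # violates minLength of 1
    except jsonschema.ValidationError as exc:
        print(exc.message)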
|
{
"content_hash": "5821ac0820cfe0acc33dc5df1551fc57",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 30.72222222222222,
"alnum_prop": 0.596745027124774,
"repo_name": "hughsaunders/keystone",
"id": "4ee08e1320835b89d1a266938368672e3cb26566",
"size": "2199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/common/validation/parameter_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import statistics
from typing import TextIO, Tuple, Dict
from aoc2019.intcode import Computer, read_program
def render_screen(computer: Computer, screen: Dict[Tuple[int, int], int]):
while computer.output:
x = computer.output.popleft()
y = computer.output.popleft()
val = computer.output.popleft()
screen[x, y] = val
def part1(data: TextIO) -> int:
computer = Computer(read_program(data))
computer.run()
screen: Dict[Tuple[int, int], int] = {}
render_screen(computer, screen)
return sum(1 for val in screen.values() if val == 2)
def part2(data: TextIO) -> int:
computer = Computer(read_program(data))
computer.program[0] = 2
screen: Dict[Tuple[int, int], int] = {}
finished = False
while not finished:
try:
computer.run()
finished = True
except IndexError:
# Waiting for input
pass
render_screen(computer, screen)
ball_x = next(x for x, y in screen if screen[x, y] == 4)
paddle_x = statistics.mean(x for x, y in screen if screen[x, y] == 3)
if ball_x < paddle_x:
computer.input.append(-1)
elif ball_x > paddle_x:
computer.input.append(1)
else:
computer.input.append(0)
return screen[-1, 0]
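# Illustrative runner (not part of the original solution): the input filename below is an
# assumption made for the example; the real project presumably wires part1/part2 up elsewhere.
if __name__ == "__main__":
    with open("day13.txt") as puzzle_input:
        print("part 1:", part1(puzzle_input))
        puzzle_input.seek(0)  # rewind so part2 can re-read the same Intcode program
        print("part 2:", part2(puzzle_input))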
|
{
"content_hash": "6ac0a2bb011fdef4ac4d69e29d909eed",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 23.473684210526315,
"alnum_prop": 0.5896860986547086,
"repo_name": "bertptrs/adventofcode",
"id": "5aea8bd0de99df8b348638b785891cc2a7aa1d05",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2019/aoc2019/day13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "531"
},
{
"name": "C",
"bytes": "1729"
},
{
"name": "C#",
"bytes": "2880"
},
{
"name": "C++",
"bytes": "449"
},
{
"name": "Clojure",
"bytes": "2454"
},
{
"name": "CoffeeScript",
"bytes": "1765"
},
{
"name": "Go",
"bytes": "856"
},
{
"name": "Groovy",
"bytes": "1457"
},
{
"name": "Haskell",
"bytes": "603"
},
{
"name": "Java",
"bytes": "3238"
},
{
"name": "JavaScript",
"bytes": "1210"
},
{
"name": "Julia",
"bytes": "1144"
},
{
"name": "Kotlin",
"bytes": "1589"
},
{
"name": "Lex",
"bytes": "310"
},
{
"name": "Lua",
"bytes": "2480"
},
{
"name": "MATLAB",
"bytes": "646"
},
{
"name": "Makefile",
"bytes": "983"
},
{
"name": "PHP",
"bytes": "622"
},
{
"name": "Perl",
"bytes": "589"
},
{
"name": "Python",
"bytes": "92178"
},
{
"name": "R",
"bytes": "560"
},
{
"name": "Ruby",
"bytes": "738"
},
{
"name": "Rust",
"bytes": "372899"
},
{
"name": "Scala",
"bytes": "776"
},
{
"name": "Shell",
"bytes": "1957"
},
{
"name": "Swift",
"bytes": "337"
}
],
"symlink_target": ""
}
|
"""
WSGI config for opencmdb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opencmdb.settings")
application = get_wsgi_application()
|
{
"content_hash": "4400c21483e69d2bb4f00d18826185e2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.5625,
"alnum_prop": 0.7709923664122137,
"repo_name": "unixhot/opencmdb",
"id": "a3263e7a6b8f57886ac4c6e12755e12e23b57662",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencmdb/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "61198"
},
{
"name": "HTML",
"bytes": "44545"
},
{
"name": "JavaScript",
"bytes": "503133"
},
{
"name": "Python",
"bytes": "244232"
},
{
"name": "Vue",
"bytes": "95038"
}
],
"symlink_target": ""
}
|
from ..core import Dimensioned, AttrTree
try:
import pandas
from .pandas import DFrame # pyflakes:ignore (API import)
except:
pandas = None
try:
import seaborn
from .seaborn import * # pyflakes:ignore (API import)
except:
seaborn = None
from .collector import * # pyflakes:ignore (API import)
def public(obj):
if not isinstance(obj, type): return False
baseclasses = [Dimensioned, Collector, AttrTree]
return any([issubclass(obj, bc) for bc in baseclasses])
__all__ = list(set([_k for _k, _v in locals().items() if public(_v)]))
|
{
"content_hash": "641a075f7b17a2dba203169ac863fe87",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 25.26086956521739,
"alnum_prop": 0.6678141135972461,
"repo_name": "mjabri/holoviews",
"id": "7b90d8bbe095f802db9b35a5faa87b9ad049c506",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/interface/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "747"
},
{
"name": "HTML",
"bytes": "12364"
},
{
"name": "JavaScript",
"bytes": "11329"
},
{
"name": "Python",
"bytes": "1088079"
}
],
"symlink_target": ""
}
|
from flask import current_app, request
from flask_appbuilder.security.sqla.models import Permission, Role
from marshmallow import ValidationError
from sqlalchemy import func
from airflow._vendor.connexion import NoContent
from airflow.api_connexion import security
from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.role_and_permission_schema import (
ActionCollection,
RoleCollection,
action_collection_schema,
role_collection_schema,
role_schema,
)
from airflow.security import permissions
def _check_action_and_resource(sm, perms):
"""
    Checks if the action or resource exists and raises 400 if not.
    This function is intended for use in the REST API because it raises 400.
"""
for item in perms:
if not sm.get_action(item[0]):
raise BadRequest(detail=f"The specified action: '{item[0]}' was not found")
if not sm.get_resource(item[1]):
raise BadRequest(detail=f"The specified resource: '{item[1]}' was not found")
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE)])
def get_role(role_name):
"""Get role"""
ab_security_manager = current_app.appbuilder.sm
role = ab_security_manager.find_role(name=role_name)
if not role:
raise NotFound(title="Role not found", detail=f"The Role with name `{role_name}` was not found")
return role_schema.dump(role)
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE)])
@format_parameters({'limit': check_limit})
def get_roles(limit, order_by='name', offset=None):
"""Get roles"""
appbuilder = current_app.appbuilder
session = appbuilder.get_session
total_entries = session.query(func.count(Role.id)).scalar()
to_replace = {"role_id": "id"}
allowed_filter_attrs = ['role_id', 'name']
query = session.query(Role)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
roles = query.offset(offset).limit(limit).all()
return role_collection_schema.dump(RoleCollection(roles=roles, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PERMISSION)])
@format_parameters({'limit': check_limit})
def get_permissions(limit=None, offset=None):
"""Get permissions"""
session = current_app.appbuilder.get_session
total_entries = session.query(func.count(Permission.id)).scalar()
query = session.query(Permission)
actions = query.offset(offset).limit(limit).all()
return action_collection_schema.dump(ActionCollection(actions=actions, total_entries=total_entries))
@security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_ROLE)])
def delete_role(role_name):
"""Delete a role"""
ab_security_manager = current_app.appbuilder.sm
role = ab_security_manager.find_role(name=role_name)
if not role:
raise NotFound(title="Role not found", detail=f"The Role with name `{role_name}` was not found")
ab_security_manager.delete_role(role_name=role_name)
return NoContent, 204
@security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE)])
def patch_role(role_name, update_mask=None):
"""Update a role"""
appbuilder = current_app.appbuilder
security_manager = appbuilder.sm
body = request.json
try:
data = role_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
role = security_manager.find_role(name=role_name)
if not role:
        raise NotFound(title="Role not found", detail=f"Role with name: `{role_name}` was not found")
if update_mask:
update_mask = [i.strip() for i in update_mask]
data_ = {}
for field in update_mask:
            if field in data and field != "permissions":
data_[field] = data[field]
elif field == "actions":
data_["permissions"] = data['permissions']
else:
raise BadRequest(detail=f"'{field}' in update_mask is unknown")
data = data_
perms = data.get("permissions", [])
if perms:
perms = [
(item['permission']['name'], item['view_menu']['name']) for item in data['permissions'] if item
]
_check_action_and_resource(security_manager, perms)
security_manager.update_role(pk=role.id, name=data['name'])
security_manager.init_role(role_name=data['name'], perms=perms or role.permissions)
return role_schema.dump(role)
@security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_ROLE)])
def post_role():
"""Create a new role"""
appbuilder = current_app.appbuilder
security_manager = appbuilder.sm
body = request.json
try:
data = role_schema.load(body)
except ValidationError as err:
raise BadRequest(detail=str(err.messages))
role = security_manager.find_role(name=data['name'])
if not role:
perms = [
(item['permission']['name'], item['view_menu']['name']) for item in data['permissions'] if item
]
_check_action_and_resource(security_manager, perms)
security_manager.init_role(role_name=data['name'], perms=perms)
return role_schema.dump(role)
raise AlreadyExists(
detail=f"Role with name `{role.name}` already exist. Please update with patch endpoint"
)
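# Illustrative only (not part of the endpoint module): a standalone sketch of how an
# update_mask is folded into the deserialized body, mirroring the field mapping in
# patch_role above, where the public field name "actions" is stored internally under
# "permissions". The sample payload is an assumption made for the demonstration.
if __name__ == "__main__":
    sample_data = {
        "name": "Viewer",
        "permissions": [{"permission": {"name": "can_read"}, "view_menu": {"name": "Website"}}],
    }
    sample_mask = ["name", "actions"]
    merged = {}
    for field in (f.strip() for f in sample_mask):
        if field in sample_data and field != "permissions":
            merged[field] = sample_data[field]
        elif field == "actions":
            merged["permissions"] = sample_data["permissions"]
    print(merged)  # keeps "name" and maps "actions" onto the internal "permissions" key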
|
{
"content_hash": "d4eecb184657f0ffad5b022491b89ce4",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 107,
"avg_line_length": 40.713235294117645,
"alnum_prop": 0.6866534224309193,
"repo_name": "dhuang/incubator-airflow",
"id": "8996a189fd26d6460ebafdcfe959dc64c24f259c",
"size": "6323",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/api_connexion/endpoints/role_and_permission_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
"""
MoinMoin - init "logging" system
WARNING
-------
logging must be configured VERY early, before the code in log.getLogger
gets executed. Thus, logging is configured either by:
a) an environment variable MOINLOGGINGCONF that contains the path/filename
of a logging configuration file - this method overrides all following
methods (except if it can't read or use that configuration, then it
will use c))
b) by an explicit call to MoinMoin.log.load_config('logging.conf') -
you need to do this very early or a) or c) will happen before
c) by using a builtin fallback logging conf
If logging is not yet configured, log.getLogger will do an implicit
configuration call - then a) or c) is done.
Usage (for wiki server admins)
------------------------------
Either use something like this in some shell script:
MOINLOGGINGCONF=/path/to/logging.conf
export MOINLOGGINGCONF
Or, modify your server adaptor script (e.g. moin.cgi) to do this:
from MoinMoin import log
log.load_config('wiki/config/logging/logfile') # XXX please fix this path!
You have to fix that path to use a logging configuration matching your
needs (we provide some examples in the path given there, it is relative to
the uncompressed moin distribution archive - if you use some moin package,
    you may find it under /usr/share/moin/).
It is likely that you also have to edit the sample logging configurations
we provide (e.g. to fix the logfile location).
Usage (for developers)
----------------------
If you write code for moin, do this at top of your module:
from MoinMoin import log
logging = log.getLogger(__name__)
This will create a logger with 'MoinMoin.your.module' as name.
The logger can optionally get configured in the logging configuration.
    If you don't configure it, some upper-level logger (e.g. the root logger)
will do the logging.
@copyright: 2008 MoinMoin:ThomasWaldmann,
2007 MoinMoin:JohannesBerg
@license: GNU GPL, see COPYING for details.
"""
# This is the "last resort" fallback logging configuration for the case
# that load_config() is either not called at all or with a non-working
# logging configuration.
# See http://www.python.org/doc/lib/logging-config-fileformat.html
# We just use stderr output by default, if you want anything else,
# you will have to configure logging.
logging_config = """\
[DEFAULT]
# Default loglevel, to adjust verbosity: DEBUG, INFO, WARNING, ERROR, CRITICAL
loglevel=INFO
[loggers]
keys=root
[handlers]
keys=stderr
[formatters]
keys=default
[logger_root]
level=%(loglevel)s
handlers=stderr
[handler_stderr]
class=StreamHandler
level=NOTSET
formatter=default
args=(sys.stderr, )
[formatter_default]
format=%(asctime)s %(levelname)s %(name)s:%(lineno)d %(message)s
datefmt=
class=logging.Formatter
"""
import os
import logging
import logging.config
import logging.handlers # needed for handlers defined there being configurable in logging.conf file
configured = False
fallback_config = False
import warnings
# 'CacheNeedsUpdate' string exception in Page.py is supported for backwards compat reasons:
warnings.filterwarnings('ignore', r'catching of string exceptions is deprecated', module='MoinMoin.Page')
# TODO: subprocess was added in python 2.4, we now can refactor the code to use it and remove this:
warnings.filterwarnings('ignore', r'The popen\d? module is deprecated. Use the subprocess module.')
def _log_warning(message, category, filename, lineno, file=None, line=None):
# for warnings, we just want to use the logging system, not stderr or other files
msg = "%s:%s: %s: %s" % (filename, lineno, category.__name__, message)
logger = getLogger(__name__)
logger.warning(msg) # Note: the warning will look like coming from here,
# but msg contains info about where it really comes from
def load_config(conf_fname=None):
""" load logging config from conffile """
global configured
err_msg = None
conf_fname = os.environ.get('MOINLOGGINGCONF', conf_fname)
if conf_fname:
try:
conf_fname = os.path.abspath(conf_fname)
# we open the conf file here to be able to give a reasonable
# error message in case of failure (if we give the filename to
# fileConfig(), it silently ignores unreadable files and gives
# unhelpful error msgs like "No section: 'formatters'"):
f = open(conf_fname)
try:
logging.config.fileConfig(f)
finally:
f.close()
configured = True
l = getLogger(__name__)
l.info('using logging configuration read from "%s"' % conf_fname)
warnings.showwarning = _log_warning
except Exception, err: # XXX be more precise
err_msg = str(err)
if not configured:
# load builtin fallback logging config
from StringIO import StringIO
f = StringIO(logging_config)
try:
logging.config.fileConfig(f)
finally:
f.close()
configured = True
l = getLogger(__name__)
if err_msg:
l.warning('load_config for "%s" failed with "%s".' % (conf_fname, err_msg))
l.info('using logging configuration read from built-in fallback in MoinMoin.log module')
warnings.showwarning = _log_warning
import MoinMoin
code_path = os.path.dirname(MoinMoin.__file__)
from MoinMoin.version import project, release, revision
l.info('Running %s %s %s code from %s' % (project, release, revision, code_path))
def getLogger(name):
""" wrapper around logging.getLogger, so we can do some more stuff:
- preprocess logger name
- patch loglevel constants into logger object, so it can be used
instead of the logging module
"""
if not configured:
load_config()
logger = logging.getLogger(name)
for levelnumber, levelname in logging._levelNames.items():
if isinstance(levelnumber, int): # that list has also the reverse mapping...
setattr(logger, levelname, levelnumber)
return logger
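# Illustrative only (not part of the original module): the developer-facing pattern from the
# module docstring above, runnable as a script. The first getLogger() call triggers the
# implicit load_config() fallback described earlier.
if __name__ == '__main__':
    example_logger = getLogger('MoinMoin.log.example')
    example_logger.info('logging configured; this message goes to the configured handlers')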
|
{
"content_hash": "eb9ea55e17f6026511410540b8eeb59c",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 105,
"avg_line_length": 37.383720930232556,
"alnum_prop": 0.6575427682737169,
"repo_name": "Glottotopia/aagd",
"id": "92ca2aa33d28f7763b337e9aec4f3ed41d13aab4",
"size": "6460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetworkpeering_facts
version_added: "2.8"
short_description: Get facts of Azure Virtual Network Peering.
description:
- Get facts of Azure Virtual Network Peering.
options:
resource_group:
description:
- Name of a resource group where the vnet exists.
required: True
virtual_network:
description:
            - The name of the virtual network.
            - It can be the name of the virtual network.
            - It can be the virtual network resource id.
required: True
name:
description:
- Name of the virtual network peering.
extends_documentation_fragment:
- azure
author:
- Yunge Zhu (@yungezz)
'''
EXAMPLES = '''
- name: Get virtual network peering by name
azure_rm_virtualnetworkpeering_facts:
resource_group: myResourceGroup
virtual_network: myVnet1
name: myVnetPeer
- name: List virtual network peering of virtual network
  azure_rm_virtualnetworkpeering_facts:
resource_group: myResourceGroup
virtual_network: myVnet1
'''
RETURN = '''
vnetpeerings:
description: A list of Virtual Network Peering facts.
returned: always
type: complex
contains:
id:
description: Id of current Virtual Network peering.
returned: always
type: str
sample:
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/virtualNetworkPeerings/peer1"
name:
description: Name of Virtual Network peering.
returned: always
type: str
sample: myPeering
remote_virtual_network:
description: Id of remote Virtual Network to be peered to.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet2
remote_address_space:
description: The reference of the remote Virtual Network address space.
type: complex
contains:
address_prefixes:
description: A list of address blocks reserved for this Virtual Network in CIDR notation.
type: list
sample: 10.1.0.0/16
peering_state:
description: The status of the virtual network peering.
returned: always
type: str
sample: Connected
provisioning_state:
description: The provisioning state of the resource.
returned: always
type: str
sample: Succeeded
allow_forwarded_traffic:
description: Whether the forwarded traffic from the VMs in the remote Virtual Network will be allowed/disallowed.
returned: always
type: bool
sample: False
allow_gateway_transit:
description: If gateway links can be used in remote Virtual Networking to link to this Virtual Network.
returned: always
type: bool
sample: False
allow_virtual_network_access:
description: Whether the VMs in the linked Virtual Network space would be able to access all the VMs in local Virtual Network space.
returned: always
type: bool
sample: False
use_remote_gateways:
description: If remote gateways can be used on this Virtual Network.
returned: always
type: bool
sample: False
'''
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
def vnetpeering_to_dict(vnetpeering):
'''
Convert a virtual network peering object to a dict.
'''
results = dict(
id=vnetpeering.id,
name=vnetpeering.name,
remote_virtual_network=vnetpeering.remote_virtual_network.id,
remote_address_space=dict(
address_prefixes=vnetpeering.remote_address_space.address_prefixes
),
peering_state=vnetpeering.peering_state,
provisioning_state=vnetpeering.provisioning_state,
use_remote_gateways=vnetpeering.use_remote_gateways,
allow_gateway_transit=vnetpeering.allow_gateway_transit,
allow_forwarded_traffic=vnetpeering.allow_forwarded_traffic,
allow_virtual_network_access=vnetpeering.allow_virtual_network_access
)
return results
class AzureRMVirtualNetworkPeeringFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str'
),
virtual_network=dict(
type='raw',
required=True
)
)
self.resource_group = None
self.name = None
self.virtual_network = None
self.results = dict(changed=False)
super(AzureRMVirtualNetworkPeeringFacts, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
# parse virtual_network
self.virtual_network = self.parse_resource_to_dict(self.virtual_network)
if self.virtual_network['resource_group'] != self.resource_group:
self.fail('Resource group of virtual_network is not same as param resource_group')
self.results['vnetpeerings'] = []
# get vnet peering
if self.name:
self.results['vnetpeerings'] = self.get_by_name()
else:
self.results['vnetpeerings'] = self.list_by_vnet()
return self.results
def get_by_name(self):
'''
Gets the Virtual Network Peering.
:return: List of Virtual Network Peering
'''
self.log(
"Get Virtual Network Peering {0}".format(self.name))
results = []
try:
response = self.network_client.virtual_network_peerings.get(resource_group_name=self.resource_group,
virtual_network_name=self.virtual_network['name'],
virtual_network_peering_name=self.name)
self.log("Response : {0}".format(response))
results.append(vnetpeering_to_dict(response))
except CloudError:
self.log('Did not find the Virtual Network Peering.')
return results
def list_by_vnet(self):
'''
Lists the Virtual Network Peering in specific Virtual Network.
:return: List of Virtual Network Peering
'''
self.log(
"List Virtual Network Peering in Virtual Network {0}".format(self.virtual_network['name']))
results = []
try:
response = self.network_client.virtual_network_peerings.list(resource_group_name=self.resource_group,
virtual_network_name=self.virtual_network['name'])
self.log("Response : {0}".format(response))
if response:
for p in response:
results.append(vnetpeering_to_dict(p))
except CloudError:
self.log('Did not find the Virtual Network Peering.')
return results
def main():
"""Main execution"""
AzureRMVirtualNetworkPeeringFacts()
if __name__ == '__main__':
main()
|
{
"content_hash": "b9394fdb5cc35540e46562fe943d20fb",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 180,
"avg_line_length": 34.82203389830509,
"alnum_prop": 0.5975906546605013,
"repo_name": "SergeyCherepanov/ansible",
"id": "7af9e30cc25f592233c7b18cbf328bd13919161c",
"size": "8376",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import re
from pynmea.utils import checksum_calc
class NMEASentence(object):
""" Base sentence class. This is used to pull apart a sentence.
It will not have any real reference to what things mean. Things that
subclass this base class should all the additional functionality.
"""
def __init__(self, parse_map):
self.sen_type = None
self.parse_map = parse_map
def _parse(self, nmea_str):
""" Tear the sentence apart, grabbing the name on the way. Create a
parts attribute on the class and fill in the sentence type in
sen_type
"""
self.nmea_sentence = nmea_str
self.parts = nmea_str.split(',')
chksum_regex = re.compile(r".+((\*{1})(?i)(?P<chksum>[0-9a-f]{2}))$")
m = chksum_regex.match(nmea_str)
if m:
self.checksum = m.groupdict()['chksum']
d, par, ck = self.parts.pop().rpartition('*')
self.parts.extend([d])
#if '*' in self.parts[-1]:
#d, par, ck = self.parts.pop().rpartition('*')
#self.parts.extend([d])
self.sen_type = self.parts[0]
if self.parts[0].startswith('$'):
self.parts[0] = self.parts[0][1:]
self.sen_type = self.parts[0]
def parse(self, nmea_str, ignore_err=False):
""" Use the parse map. Parse map should be in the format:
(('Field name', 'field_name'),
('Field name', 'field_name'))
Where the first entry in the tuple is the human readable name
and the second is the parameter name
"""
self._parse(nmea_str)
#assert len(self.parts[1:]) <= len(self.parse_map)
parts_len = len(self.parts) - 1
for index, item in enumerate(self.parse_map):
if index + 1 > parts_len:
break
setattr(self, item[1], self.parts[index + 1])
#for index, item in enumerate(self.parts[1:]):
#setattr(self, self.parse_map[index][1], item)
def check_chksum(self):
# If there is no checksum, raise AssertionError
assert hasattr(self, 'checksum')
result = checksum_calc(self.nmea_sentence)
return (result.upper() == self.checksum.upper())
# ---------------------------------------------------------------------------- #
# Here are all the currently supported sentences. All should eventually be
# supported. They are being added as properties and other useful functions are
# implimented. Unit tests are also provided.
# ---------------------------------------------------------------------------- #
class GPAAM(NMEASentence):
""" Waypoint Arrival Alarm
"""
def __init__(self):
parse_map = (
("Arrival Circle Entered", "arrival_circ_entered"),
("Perpendicular Passed", "perp_passed"),
("Circle Radius", "circle_rad"),
("Nautical Miles", "circle_rad_unit"),
("Waypoint ID", "waypoint_id"))
super(GPAAM, self).__init__(parse_map)
class GPALM(NMEASentence):
""" GPS Almanac data
"""
def __init__(self):
parse_map = (("Total number of messages", "total_num_msgs"),
("Message number", "msg_num"),
("Satellite PRN number", "sat_prn_num"), # 01 - 32
("GPS week number", "gps_week_num"), # Week since Jan 6 1980
("SV Health, bits 17-24 of each almanac page", "sv_health"),
("Eccentricity", "eccentricity"),
("Almanac Reference Time", "alamanac_ref_time"),
("Inclination Angle", "inc_angle"),
("Rate of right ascension", "rate_right_asc"),
("Root of semi-major axis", "root_semi_major_axis"),
("Argument of perigee", "arg_perigee"),
("Longitude of ascension node", "lat_asc_node"),
("Mean anomaly", "mean_anom"),
("F0 Clock parameter", "f0_clock_param"),
("F1 Clock parameter", "f1_clock_param"))
super(GPALM, self).__init__(parse_map)
class GPAPA(NMEASentence):
""" Autopilot Sentence "A"
"""
def __init__(self):
parse_map = (
("General Status", "status_gen"),
("Cycle lock Status", "status_cycle_lock"),
("Cross Track Error Magnitude", "cross_track_err_mag"),
("Direction to Steer (L or R)", "dir_steer"),
("Cross Track Units (Nautical Miles or KM)", "cross_track_unit"),
("Arrival Circle Entered", "arr_circle_entered"), # A = True
("Perpendicular passed at waypoint", "perp_passed"), # A = True
("Bearing origin to destination", "bearing_to_dest"),
("Bearing type", "bearing_type"), # M = Magnetic, T = True
("Destination waypoint ID", "dest_waypoint_id"))
super(GPAPA, self).__init__(parse_map)
class GPAPB(NMEASentence):
""" Autopilot Sentence "B"
"""
def __init__(self):
parse_map = (
("General Status", "status_gen"),
("Cycle lock Status", "status_cycle_lock"),
("Cross Track Error Magnitude", "cross_track_err_mag"),
("Direction to Steer (L or R)", "dir_steer"),
("Cross Track Units (Nautical Miles or KM)", "cross_track_unit"),
("Arrival Circle Entered", "arr_circle_entered"), # A = True
("Perpendicular passed at waypoint", "perp_passed"), # A = True
("Bearing origin to destination", "bearing_to_dest"),
("Bearing type", "bearing_type"), # M = Magnetic, T = True
("Destination waypoint ID", "dest_waypoint_id"),
("Bearing, present position to dest", "bearing_pres_dest"),
("Bearing to destination, type", "bearing_pres_dest_type"), # M = Magnetic, T = True
("Heading to steer to destination", "heading_to_dest"),
("Heading to steer to destination type", "heading_to_dest_type")) # M = Magnetic, T = True
super(GPAPB, self).__init__(parse_map)
class GPBEC(NMEASentence):
""" Bearing & Distance to Waypoint, Dead Reckoning
"""
def __init__(self):
parse_map = (
("Timestamp", "timestamp"),
("Waypoint Latitude", "waypoint_lat"),
("Waypoint Latitude direction", "waypoint_lat_dir"),
("Waypoint Longitude", "waypoint_lon"),
("Waypoint Longitude direction", "waypoint_lon_dir"),
("Bearing, true", "bearing_true"),
("Bearing True symbol", "bearing_true_sym"), # T = true
("Bearing Magnetic", "bearing_mag"),
("Bearing Magnetic symbol", "bearing_mag_sym"),
("Nautical Miles", "nautical_miles"),
("Nautical Miles symbol", "nautical_miles_sym"),
("Waypoint ID", "waypoint_id"),
("FAA mode indicator", "faa_mode"))
super(GPBEC, self).__init__(parse_map)
class GPBOD(NMEASentence):
def __init__(self):
# 045.,T,023.,M,DEST,START
parse_map = (('Bearing True', 'bearing_t'),
('Bearing True Type', 'bearing_t_type'),
('Bearing Magnetic', 'bearing_mag'),
('Bearing Magnetic Type', 'bearing_mag_type'),
('Destination', 'dest'),
('Start', 'start'))
super(GPBOD, self).__init__(parse_map)
@property
def bearing_true(self):
return ','.join([self.bearing_t, self.bearing_t_type])
@property
def bearing_magnetic(self):
return ','.join([self.bearing_mag, self.bearing_mag_type])
@property
def destination(self):
return self.dest
@property
def origin(self):
return self.start
class GPBWC(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude of next Waypoint', 'lat_next'),
('Latitude of next Waypoint Direction', 'lat_next_direction'),
('Longitude of next Waypoint', 'lon_next'),
('Longitude of next Waypoint Direction', 'lon_next_direction'),
('True track to waypoint', 'true_track'),
('True Track Symbol', 'true_track_sym'),
('Magnetic track to waypoint', 'mag_track'),
('Magnetic Symbol', 'mag_sym'),
('Range to waypoint', 'range_next'),
('Unit of range', 'range_unit'),
('Waypoint Name', 'waypoint_name'))
#('Checksum', 'checksum'))
super(GPBWC, self).__init__(parse_map)
class GPBWR(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude of next Waypoint', 'lat_next'),
('Latitude of next Waypoint Direction', 'lat_next_direction'),
('Longitude of next Waypoint', 'lon_next'),
('Longitude of next Waypoint Direction', 'lon_next_direction'),
('True track to waypoint', 'true_track'),
('True Track Symbol', 'true_track_sym'),
('Magnetic track to waypoint', 'mag_track'),
('Magnetic Symbol', 'mag_sym'),
('Range to waypoint', 'range_next'),
('Unit of range', 'range_unit'),
('Waypoint Name', 'waypoint_name'))
#('Checksum', 'checksum'))
super(GPBWR, self).__init__(parse_map)
class RDENS(NMEASentence):
""" Sentence created by winriver in n files - sentence looks like
$RDENS,254,2269966,PC
"""
def __init__(self):
parse_map = (
('Ensemble', 'ensemble'),
('Computer time', 'time_hundredths'),
('Source', 'source'))
super(RDENS, self).__init__(parse_map)
class GPGGA(NMEASentence):
def __init__(self):
parse_map = (
('Timestamp', 'timestamp'),
('Latitude', 'latitude'),
('Latitude Direction', 'lat_direction'),
('Longitude', 'longitude'),
('Longitude Direction', 'lon_direction'),
('GPS Quality Indicator', 'gps_qual'),
('Number of Satellites in use', 'num_sats'),
('Horizontal Dilution of Precision', 'horizontal_dil'),
('Antenna Alt above sea level (mean)', 'antenna_altitude'),
('Units of altitude (meters)', 'altitude_units'),
('Geoidal Separation', 'geo_sep'),
('Units of Geoidal Separation (meters)', 'geo_sep_units'),
('Age of Differential GPS Data (secs)', 'age_gps_data'),
('Differential Reference Station ID', 'ref_station_id'))
#('Checksum', 'checksum'))
super(GPGGA, self).__init__(parse_map)
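# Illustrative only (not part of the original module): a minimal sketch of the parse-map
# mechanism described in NMEASentence.parse, applied to the GPGGA class above with a
# well-known example sentence. Guarded so it only runs when this file is executed directly.
if __name__ == '__main__':
    _gga = GPGGA()
    _gga.parse("$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47")
    # Each entry in the parse map became an attribute holding the raw string field.
    print("time=%s sats=%s alt=%s checksum_ok=%s" % (
        _gga.timestamp, _gga.num_sats, _gga.antenna_altitude, _gga.check_chksum()))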
class GPBWW(NMEASentence):
""" Bearing, Waypoint to Waypoint
"""
def __init__(self):
parse_map = (
("Bearing degrees True", "bearing_deg_true"),
("Bearing degrees True Symbol", "bearing_deg_true_sym"),
("Bearing degrees Magnitude", "bearing_deg_mag"),
("Bearing degrees Magnitude Symbol", "bearing_deg_mag_sym"),
("Destination Waypoint ID", "waypoint_id_dest"),
("Origin Waypoint ID", "waypoint_id_orig"))
super(GPBWW, self).__init__(parse_map)
class GPGLL(NMEASentence):
def __init__(self):
parse_map = (
('Latitude', 'lat'),
('Latitude Direction', 'lat_dir'),
('Longitude', 'lon'),
('Longitude Direction', 'lon_dir'),
('Timestamp', 'timestamp'),
('Data Validity', "data_valid"))
super(GPGLL, self).__init__(parse_map)
self._use_data_validity = False
#def _parse(self, nmea_str):
#""" GPGGL Allows for a couple of different formats.
#The all have lat,direction,lon,direction
#but one may have timestamp,data_validity
#while the other has only checksum
#We shall treat data_validity as a checksum and always
#add in a timestamp field
#"""
#self.nmea_sentence = nmea_str
#self.parts = nmea_str.split(',')
#chksum_regex = re.compile(r".+((\*{1})(?i)(?P<chksum>[0-9a-f]{2}))$")
#m = chksum_regex.match(nmea_str)
#if m:
#self.checksum = m.groupdict()['chksum']
##if '*' in self.parts[-1]:
### There is a checksum but no timestamp + data_validity.
### Add an empty field for the timestamp and indicate that when
### validating the checksum, we should use validity, not a
### calculation
##d, par, ck = self.parts.pop().rpartition('*')
##self.parts.extend([d, ''])
##self._use_data_validity = True
#self.sen_type = self.parts[0]
#if self.parts[0].startswith('$'):
#self.parts[0] = self.parts[0][1:]
#self.sen_type = self.parts[0]
#def check_chksum(self):
#""" Override check_checksum. If it has been detected that
#the checksum field contains "A" for valid data and something else
#for invalid, do a check based on thsi information. Otherwise, call
#to original checksum code from the superclass
#"""
## If we are looking for an "A" character
#if self._use_data_validity:
#if self.checksum == 'A':
#return True
#else:
#return False
#else:
## Otherwise, call the superclass version
#return super(GPGLL, self).check_chksum()
@property
def latitude(self):
return float(self.lat)
@property
def longitude(self):
return float(self.lon)
@property
def lat_direction(self):
mapping = {'N': 'North', 'S': 'South'}
return mapping[self.lat_dir.upper()]
@property
def lon_direction(self):
mapping = {"E": "East", "W": "West"}
return mapping[self.lon_dir.upper()]
class GPGSA(NMEASentence):
def __init__(self):
parse_map = (
('Mode', 'mode'),
('Mode fix type', 'mode_fix_type'),
('SV ID01', 'sv_id01'),
('SV ID02', 'sv_id02'),
('SV ID03', 'sv_id03'),
('SV ID04', 'sv_id04'),
('SV ID05', 'sv_id05'),
('SV ID06', 'sv_id06'),
('SV ID07', 'sv_id07'),
('SV ID08', 'sv_id08'),
('SV ID09', 'sv_id09'),
('SV ID10', 'sv_id10'),
('SV ID11', 'sv_id11'),
('SV ID12', 'sv_id12'),
('PDOP (Dilution of precision)', 'pdop'),
('HDOP (Horizontal DOP)', 'hdop'),
('VDOP (Vertical DOP)', 'vdop'))
#('Checksum', 'checksum'))
super(GPGSA, self).__init__(parse_map)
class GPGSV(NMEASentence):
def __init__(self):
parse_map = (
('Number of messages of type in cycle', 'num_messages'),
('Message Number', 'msg_num'),
('Total number of SVs in view', 'num_sv_in_view'),
('SV PRN number 1', 'sv_prn_num_1'),
('Elevation in degrees 1', 'elevation_deg_1'), # 90 max
            ('Azimuth, deg from true north 1', 'azimuth_1'), # 000 to 359
('SNR 1', 'snr_1'), # 00-99 dB
('SV PRN number 2', 'sv_prn_num_2'),
('Elevation in degrees 2', 'elevation_deg_2'), # 90 max
            ('Azimuth, deg from true north 2', 'azimuth_2'), # 000 to 359
('SNR 2', 'snr_2'), # 00-99 dB
('SV PRN number 3', 'sv_prn_num_3'),
('Elevation in degrees 3', 'elevation_deg_3'), # 90 max
            ('Azimuth, deg from true north 3', 'azimuth_3'), # 000 to 359
('SNR 3', 'snr_3'), # 00-99 dB
('SV PRN number 4', 'sv_prn_num_4'),
('Elevation in degrees 4', 'elevation_deg_4'), # 90 max
            ('Azimuth, deg from true north 4', 'azimuth_4'), # 000 to 359
('SNR 4', 'snr_4')) # 00-99 dB
#('Checksum', 'checksum'))
super(GPGSV, self).__init__(parse_map)
class GPHDG(NMEASentence):
""" NOTE! This is a GUESS as I cannot find an actual spec
telling me the fields. Updates are welcome!
"""
def __init__(self):
parse_map = (
("Heading", "heading"),
("Deviation", "deviation"),
("Deviation Direction", "dev_dir"),
("Variation", "variation"),
("Variation Direction", "var_dir"))
#("Checksum", "checksum"))
super(GPHDG, self).__init__(parse_map)
class GPHDT(NMEASentence):
def __init__(self):
parse_map = (
("Heading", "heading"),
("True", "hdg_true"))
#("Checksum", "checksum"))
super(GPHDT, self).__init__(parse_map)
class GPR00(NMEASentence):
def __init__(self):
parse_map = (
("Waypoint List", "waypoint_list"),)
#("Checksum", "checksum"))
super(GPR00, self).__init__(parse_map)
def parse(self, nmea_str):
""" As the length of the sentence is variable (there can be many or few
waypoints), parse is overridden to do something special with the
different parts
"""
self._parse(nmea_str)
new_parts = [self.parts[0]]
new_parts.append(self.parts[1:])
#new_parts.append(self.parts[-1])
self.parts = new_parts
for index, item in enumerate(self.parts[1:]):
setattr(self, self.parse_map[index][1], item)
class GPRMA(NMEASentence):
def __init__(self):
parse_map = (
("Data status", "data_status"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Not Used 1", "not_used_1"),
("Not Used 2", "not_used_2"),
("Speed over ground", "spd_over_grnd"), # Knots
("Course over ground", "crse_over_grnd"),
("Variation", "variation"),
("Variation Direction", "var_dir"))
#("Checksum", "checksum"))
super(GPRMA, self).__init__(parse_map)
class GPRMB(NMEASentence):
""" Recommended Minimum Navigation Information
"""
def __init__(self):
parse_map = (
("Data Validity", "validity"),
("Cross Track Error", "cross_track_error"), # nautical miles, 9.9 max
("Cross Track Error, direction to corrent", "cte_correction_dir"),
("Origin Waypoint ID", "origin_waypoint_id"),
("Destination Waypoint ID", "dest_waypoint_id"),
("Destination Waypoint Latitude", "dest_lat"),
("Destination Waypoint Lat Direction", "dest_lat_dir"),
("Destination Waypoint Longitude", "dest_lon"),
("Destination Waypoint Lon Direction", "dest_lon_dir"),
("Range to Destination", "dest_range"), # Nautical Miles
("True Bearing to Destination", "dest_true_bearing"),
("Velocity Towards Destination", "dest_velocity"), # Knots
("Arrival Alarm", "arrival_alarm")) # A = Arrived, V = Not arrived
#("Checksum", "checksum"))
super(GPRMB, self).__init__(parse_map)
class GPRMC(NMEASentence):
""" Recommended Minimum Specific GPS/TRANSIT Data
"""
def __init__(self):
parse_map = (("Timestamp", "timestamp"),
("Data Validity", "data_validity"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Speed Over Ground", "spd_over_grnd"),
("True Course", "true_course"),
("Datestamp", "datestamp"),
("Magnetic Variation", "mag_variation"),
("Magnetic Variation Direction", "mag_var_dir"))
#("Checksum", "checksum"))
super(GPRMC, self).__init__(parse_map)
class GPRTE(NMEASentence):
""" Routes
"""
def __init__(self):
parse_map = (
("Number of sentences in sequence", "num_in_seq"),
("Sentence Number", "sen_num"),
("Start Type", "start_type"), # The first in the list is either current route or waypoint
("Name or Number of Active Route", "active_route_id"),
("Waypoint List", "waypoint_list"))
#("Checksum", "checksum"))
super(GPRTE, self).__init__(parse_map)
def parse(self, nmea_str):
""" As the length of the sentence is variable (there can be many or few
waypoints), parse is overridden to do something special with the
different parts
"""
self._parse(nmea_str)
new_parts = []
new_parts.extend(self.parts[0:5])
new_parts.append(self.parts[5:])
self.parts = new_parts
for index, item in enumerate(self.parts[1:]):
setattr(self, self.parse_map[index][1], item)
class GPSTN(NMEASentence):
""" NOTE: No real data could be found for examples of the actual spec so
it is a guess that there may be a checksum on the end
"""
def __init__(self):
parse_map = (
("Talker ID Number", "talker_id"),) # 00 - 99
#("Checksum", "checksum"))
super(GPSTN, self).__init__(parse_map)
class GPTRF(NMEASentence):
""" Transit Fix Data
"""
def __init__(self):
parse_map = (
("Timestamp (UTC)", "timestamp"),
("Date (DD/MM/YY", "date"),
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Elevation Angle", "ele_angle"),
("Number of Iterations", "num_iterations"),
("Number of Doppler Intervals", "num_doppler_intervals"),
("Update Distance", "update_dist"), # Nautical Miles
("Satellite ID", "sat_id"))
super(GPTRF, self).__init__(parse_map)
class GPVBW(NMEASentence):
""" Dual Ground/Water Speed
"""
def __init__(self):
parse_map = (
("Longitudinal Water Speed", "lon_water_spd"), # Knots
("Transverse Water Speed", "trans_water_spd"), # Knots
("Water Speed Data Validity", "data_validity_water_spd"),
("Longitudinal Ground Speed", "lon_grnd_spd"), # Knots
("Transverse Ground Speed", "trans_grnd_spd"), # Knots
("Ground Speed Data Validity", "data_validity_grnd_spd"))
#("Checksum", "checksum"))
super(GPVBW, self).__init__(parse_map)
class GPVTG(NMEASentence):
""" Track Made Good and Ground Speed
"""
def __init__(self):
parse_map = (
("True Track made good", "true_track"),
("True Track made good symbol", "true_track_sym"),
("Magnetic Track made good", "mag_track"),
("Magnetic Track symbol", "mag_track_sym"),
("Speed over ground knots", "spd_over_grnd_kts"),
("Speed over ground symbol", "spd_over_grnd_kts_sym"),
("Speed over ground kmph", "spd_over_grnd_kmph"),
("Speed over ground kmph symbol", "spd_over_grnd_kmph_sym"))
super(GPVTG, self).__init__(parse_map)
class GPWCV(NMEASentence):
""" Waypoint Closure Velocity
"""
def __init__(self):
parse_map = (
("Velocity", "velocity"),
("Velocity Units", "vel_units"), # Knots
("Waypoint ID", "waypoint_id"))
super(GPWCV, self).__init__(parse_map)
class GPWNC(NMEASentence):
""" Distance, Waypoint to Waypoint
"""
def __init__(self):
parse_map = (
("Distance, Nautical Miles", "dist_nautical_miles"),
("Distance Nautical Miles Unit", "dist_naut_unit"),
("Distance, Kilometers", "dist_km"),
("Distance, Kilometers Unit", "dist_km_unit"),
("Origin Waypoint ID", "waypoint_origin_id"),
("Destination Waypoint ID", "waypoint_dest_id"))
super(GPWNC, self).__init__(parse_map)
class GPWPL(NMEASentence):
""" Waypoint Location
"""
def __init__(self):
parse_map = (
("Latitude", "lat"),
("Latitude Direction", "lat_dir"),
("Longitude", "lon"),
("Longitude Direction", "lon_dir"),
("Waypoint ID", "waypoint_id"))
super(GPWPL, self).__init__(parse_map)
class GPXTE(NMEASentence):
""" Cross-Track Error, Measured
"""
def __init__(self):
parse_map = (("General Warning Flag", "warning_flag"),
("Lock flag (Not Used)", "lock_flag"),
("Cross Track Error Distance", "cross_track_err_dist"),
("Correction Direction (L or R)", "correction_dir"),
("Distance Units", "dist_units"))
super(GPXTE, self).__init__(parse_map)
class GPZDA(NMEASentence):
def __init__(self):
parse_map = (
("Timestamp", "timestamp"), # hhmmss.ss = UTC
("Day", "day"), # 01 to 31
("Month", "month"), # 01 to 12
("Year", "year"), # Year = YYYY
("Local Zone Description", "local_zone"), # 00 to +/- 13 hours
("Local Zone Minutes Description", "local_zone_minutes")) # same sign as hours
#("Checksum", "checksum"))
super(GPZDA, self).__init__(parse_map)
# ---------------------------------- Not Yet Implemented --------------------- #
# ---------------------------------------------------------------------------- #
#class GPDBT(NMEASentence):
# """ Depth Below Transducer
# """
# def __init__(self):
# parse_map = ()
# super(GPDBT).__init__(parse_map)
#class GPDPT(NMEASentence):
# """ Heading - Deviation and Variation
# """
# def __init__(self):
# parse_map = ()
# super(GPDPT).__init__(parse_map)
#class GPFSI(NMEASentence):
# """ Frequency Set Information
# """
# def __init__(self):
# parse_map = ()
# super(GPFSI).__init__(parse_map)
#class GPGLC(NMEASentence):
# """ Geographic Position, Loran-C
# """
# def __init__(self):
# parse_map = ()
# super(GPGLC).__init__(parse_map)
#class GPGXA(NMEASentence):
# """ TRANSIT Position
# """
# def __init__(self):
# parse_map = ()
# super(GPGXA).__init__(parse_map)
#class GPHSC(NMEASentence):
# """ Heading Steering Command
# """
# def __init__(self):
# parse_map = ()
# super(GPHSC).__init__(parse_map)
#class GPLCD(NMEASentence):
# """ Loran-C Signal Data
# """
# def __init__(self):
# parse_map = ()
# super(GPLCD).__init__(parse_map)
#class GPMTA(NMEASentence):
# """ Air Temperature (to be phased out)
# """
# def __init__(self):
# parse_map = ()
# super(GPMTA).__init__(parse_map)
#class GPMTW(NMEASentence):
# """ Water Temperature
# """
# def __init__(self):
# parse_map = ()
# super(GPMTW).__init__(parse_map)
#class GPMWD(NMEASentence):
# """ Wind Direction
# """
# def __init__(self):
# parse_map = ()
# super(GPMWD).__init__(parse_map)
#class GPMWV(NMEASentence):
# """ Wind Speed and Angle
# """
# def __init__(self):
# parse_map = ()
# super(GPMWV).__init__(parse_map)
#class GPOLN(NMEASentence):
# """ Omega Lane Numbers
# """
# def __init__(self):
# parse_map = ()
# super(GPOLN).__init__(parse_map)
#class GPOSD(NMEASentence):
# """ Own Ship Data
# """
# def __init__(self):
# parse_map = ()
# super(GPOSD).__init__(parse_map)
#class GPROT(NMEASentence):
# """ Rate of Turn
# """
# def __init__(self):
# parse_map = ()
# super(GPROT).__init__(parse_map)
#class GPRPM(NMEASentence):
# """ Revolutions
# """
# def __init__(self):
# parse_map = ()
# super(GPRPM).__init__(parse_map)
#class GPRSA(NMEASentence):
# """ Rudder Sensor Angle
# """
# def __init__(self):
# parse_map = ()
# super(GPRSA).__init__(parse_map)
#class GPRSD(NMEASentence):
# """ RADAR System Data
# """
# def __init__(self):
# parse_map = ()
# super(GPRSD).__init__(parse_map)
#class GPSFI(NMEASentence):
# """ Scanning Frequency Information
# """
# def __init__(self):
# parse_map = ()
# super(GPSFI).__init__(parse_map)
#class GPTTM(NMEASentence):
# """ Tracked Target Message
# """
# def __init__(self):
# parse_map = ()
# super(GPTTM).__init__(parse_map)
#class GPVDR(NMEASentence):
# """ Set and Drift
# """
# def __init__(self):
# parse_map = ()
# super(GPVDR).__init__(parse_map)
#class GPVHW(NMEASentence):
# """ Water Speed and Heading
# """
# def __init__(self):
# parse_map = ()
# super(GPVHW).__init__(parse_map)
#class GPVLW(NMEASentence):
# """ Distance Traveled through the Water
# """
# def __init__(self):
# parse_map = ()
# super(GPVLW).__init__(parse_map)
#class GPVPW(NMEASentence):
# """ Speed, Measured Parallel to Wind
# """
# def __init__(self):
# parse_map = ()
# super(GPVPW).__init__(parse_map)
#class GPXDR(NMEASentence):
# """ Transducer Measurements
# """
# def __init__(self):
# parse_map = ()
# super(GPXDR).__init__(parse_map)
#class GPXTR(NMEASentence):
# """ Cross-Track Error, Dead Reckoning
# """
# def __init__(self):
# parse_map = ()
# super(GPXTR).__init__(parse_map)
#class GPZFO(NMEASentence):
# """ UTC & Time from Origin Waypoint
# """
# def __init__(self):
# parse_map = ()
# super(GPZFO).__init__(parse_map)
#class GPZTG(NMEASentence):
# """ UTC & Time to Destination Waypoint
# """
# def __init__(self):
# parse_map = ()
# super(GPZTG).__init__(parse_map)
# ---------------------------------------------------------------------------- #
# -------------------------- Unknown Formats --------------------------------- #
# ---------------------------------------------------------------------------- #
#class GPASD(NMEASentence):
# """ Auto-pilot system data (Unknown format)
# """
# def __init__(self):
# parse_map = ()
# super(GPASD).__init__()
# ---------------------------------------------------------------------------- #
# -------------------------- Obsolete Formats -------------------------------- #
# ---------------------------------------------------------------------------- #
#class GPDCN(NMEASentence):
# """ Decca Position (obsolete)
# """
# def __init__(self):
# parse_map = ()
# super(GPDCN).__init__(parse_map)
# PROPRIETRY SENTENCES
# -- GARMIN -- #
class PGRME(NMEASentence):
""" GARMIN Estimated position error
"""
def __init__(self):
parse_map = (("Estimated Horiz. Position Error", "hpe"),
("Estimated Horiz. Position Error Unit (M)", "hpe_unit"),
("Estimated Vert. Position Error", "vpe"),
("Estimated Vert. Position Error Unit (M)", "vpe_unit"),
("Estimated Horiz. Position Error", "osepe"),
("Overall Spherical Equiv. Position Error", "osepe_unit"))
super(PGRME, self).__init__(parse_map)
class PGRMM(NMEASentence):
""" GARMIN Map Datum
"""
def __init__(self):
parse_map = (('Currently Active Datum', 'datum'),)
super(PGRMM, self).__init__(parse_map)
class PGRMZ(NMEASentence):
""" GARMIN Altitude Information
"""
def __init__(self):
parse_map = (("Altitude", "altitude"),
("Altitude Units (Feet)", "altitude_unit"),
("Positional Fix Dimension (2=user, 3=GPS)",
"pos_fix_dim"))
super(PGRMZ, self).__init__(parse_map)
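# Usage sketch (an illustrative assumption, not part of the original module: it
# relies on the NMEASentence base class defined earlier in this file exposing a
# `parse(nmea_str)` method, as the overridden parse() methods above imply):
#
#   rmc = GPRMC()
#   rmc.parse("$GPRMC,081836,A,3751.65,S,14507.36,E,000.0,360.0,130998,011.3,E*62")
#   rmc.lat, rmc.lat_dir        # expected: '3751.65', 'S'
#   rmc.spd_over_grnd           # expected: '000.0'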
|
{
"content_hash": "4b52a965ba35509097c9674095b2c849",
"timestamp": "",
"source": "github",
"line_count": 959,
"max_line_length": 102,
"avg_line_length": 33.56934306569343,
"alnum_prop": 0.5133724722765819,
"repo_name": "esatel/ADCPy",
"id": "2d44398f129d77ceb3119cb73f95985fa6e69a72",
"size": "32193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adcpy/pynmea/nmea.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "399234"
},
{
"name": "Shell",
"bytes": "6652"
}
],
"symlink_target": ""
}
|
import pandas as pd, numpy as np, os, gc
# LOAD AND FREQUENCY-ENCODE
FE = ['EngineVersion','AppVersion','AvSigVersion','Census_OSVersion']
# LOAD AND ONE-HOT-ENCODE
OHE = [ 'RtpStateBitfield','IsSxsPassiveMode','DefaultBrowsersIdentifier',
'AVProductStatesIdentifier','AVProductsInstalled', 'AVProductsEnabled',
'CountryIdentifier', 'CityIdentifier',
'GeoNameIdentifier', 'LocaleEnglishNameIdentifier',
'Processor', 'OsBuild', 'OsSuite',
'SmartScreen','Census_MDC2FormFactor',
'Census_OEMNameIdentifier',
'Census_ProcessorCoreCount',
'Census_ProcessorModelIdentifier',
'Census_PrimaryDiskTotalCapacity', 'Census_PrimaryDiskTypeName',
'Census_HasOpticalDiskDrive',
'Census_TotalPhysicalRAM', 'Census_ChassisTypeName',
'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_InternalPrimaryDisplayResolutionHorizontal',
'Census_InternalPrimaryDisplayResolutionVertical',
'Census_PowerPlatformRoleName', 'Census_InternalBatteryType',
'Census_InternalBatteryNumberOfCharges',
'Census_OSEdition', 'Census_OSInstallLanguageIdentifier',
'Census_GenuineStateName','Census_ActivationChannel',
'Census_FirmwareManufacturerIdentifier',
'Census_IsTouchEnabled', 'Census_IsPenCapable',
'Census_IsAlwaysOnAlwaysConnectedCapable', 'Wdft_IsGamer',
'Wdft_RegionIdentifier']
# LOAD ALL AS CATEGORIES
dtypes = {}
for x in FE+OHE: dtypes[x] = 'category'
dtypes['MachineIdentifier'] = 'str'
dtypes['HasDetections'] = 'int8'
# LOAD CSV FILE
df_train = pd.read_csv('../input/train.csv', usecols=dtypes.keys(), dtype=dtypes)
print ('Loaded',len(df_train),'rows of TRAIN.CSV!')
# DOWNSAMPLE
# sm = 2000000
# df_train = df_train.sample(sm)
# print ('Only using',sm,'rows to train and validate')
x=gc.collect()
# # Statistically Encode Variables
# All four variables in the Python variable list `FE` will get frequency encoded and all thirty-nine variables in list `OHE` will get statistically one-hot-encoded. In total, forty-three variables are imported from the training csv while thirty-nine were ignored.
#
# Among all our category variables, there are a combined 211,562 values! So we can't one-hot-encode them all. (Note that this count excludes Census_OEMModelIdentifier's 175,366 values and Census_SystemVolumeTotalCapacity's 536,849.) We will use a trick from statistics. First we'll assume we have a random sample. (Which we don't actually have, but let's pretend.) Then for each value, we will test the following hypotheses
#
# $$H_0: \text{Prob(HasDetections=1 given value is present)} = 0.5 $$
# $$H_A: \text{Prob(HasDetections=1 given value is present)} \ne 0.5$$
#
# The test statistic z-score equals \\( \hat{p} \\) (the observed HasDetections rate given the value is present) minus 0.5, all divided by the standard deviation of \\( \hat{p} \\). The Central Limit Theorem tells us
#
# $$\text{z-score} = \frac{\hat{p}-0.5}{SD(\hat{p})} = 2 (\hat{p} - 0.5)\sqrt{n} $$
#
# where \\(n\\) is the number of occurences of the value. If the absolute value of \\(z\\) is greater than 2.0, we are 95% confident that Prob(HasDetections=1 given value is present) is not equal 0.5 and we will include a boolean for this value in our model. Actually, we'll use a \\(z\\) threshold of 5.0 and require \\( 10^{-7}n>0.005 \\). This adds 350 new boolean variables (instead of naively one-hot-encoding 211,562!).
#
# ## Example - Census_FirmwareManufacturerIdentifier
# In the plots below, the dotted lines use the right y-axis and solid lines/bars use the left. The top plot below shows 20 values of variable `Census_FirmwareManufacturerIdentifier`. Notice that I consider NAN a value. Each of these values contains over 0.5% of the data. And all the variables together contain 97% of the data. Value=93 has a HasDetections rate of 52.5% while value=803 has a HasDetections rate of 35.4%. Their z-scores are \\(22.2 = 2\times(0.5253-0.5)\times\sqrt{192481} \text{ }\\) and \\(-71.3 = 2\times(0.3535-0.5)\times\sqrt{59145}\text{ }\\) respectively! The probability that value=93 and value=803 have a HasDetections rate of 50% and what we are observing is due to chance is close to nothing. Additionally from the bottom plot, you see that these two values have consistently been high and low throughout all of the year 2018. We can trust that this trend will continue into the test set's October and November computers.
#
# 
#
# ## Python Code
# To see the Python encoding functions, click 'see code' to the right.
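# A short sanity check of the z-score arithmetic from the example above. This is
# an illustrative addition (the helper name is made up); it mirrors the test that
# encode_OHE below performs: |2 * (p_hat - m) * sqrt(n)| must exceed the z threshold.
import math
def _z_score(p_hat, n, m=0.5):
    # Equivalent to (p_hat - m) / SD(p_hat) with SD(p_hat) = 0.5 / sqrt(n)
    return 2.0 * (p_hat - m) * math.sqrt(n)
print(round(_z_score(0.5253, 192481), 1))  # ~ 22.2 for value=93
print(round(_z_score(0.3535, 59145), 1))   # ~ -71.3 for value=803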
# In[ ]:
import math
# CHECK FOR NAN
def nan_check(x):
if isinstance(x,float):
if math.isnan(x):
return True
return False
# FREQUENCY ENCODING
def encode_FE(df,col,verbose=1):
d = df[col].value_counts(dropna=False)
n = col+"_FE"
df[n] = df[col].map(d)/d.max()
if verbose==1:
print('FE encoded',col)
return [n]
# ONE-HOT-ENCODE ALL CATEGORY VALUES THAT COMPRISE MORE THAN A
# "FILTER" FRACTION OF TOTAL DATA AND HAVE SIGNIFICANCE GREATER THAN "ZSCORE"
def encode_OHE(df, col, filter, zscore, tar='HasDetections', m=0.5, verbose=1):
cv = df[col].value_counts(dropna=False)
cvd = cv.to_dict()
vals = len(cv)
th = filter * len(df)
sd = zscore * 0.5/ math.sqrt(th)
#print(sd)
n = []; ct = 0; d = {}
for x in cv.index:
try:
if cv[x]<th: break
sd = zscore * 0.5/ math.sqrt(cv[x])
except:
if cvd[x]<th: break
sd = zscore * 0.5/ math.sqrt(cvd[x])
if nan_check(x): r = df[df[col].isna()][tar].mean()
else: r = df[df[col]==x][tar].mean()
if abs(r-m)>sd:
nm = col+'_BE_'+str(x)
if nan_check(x): df[nm] = (df[col].isna()).astype('int8')
else: df[nm] = (df[col]==x).astype('int8')
n.append(nm)
d[x] = 1
ct += 1
if (ct+1)>=vals: break
if verbose==1:
print('OHE encoded',col,'- Created',len(d),'booleans')
return [n,d]
# ONE-HOT-ENCODING from dictionary
def encode_OHE_test(df,col,dt):
n = []
for x in dt:
n += encode_BE(df,col,x)
return n
# BOOLEAN ENCODING
def encode_BE(df,col,val):
n = col+"_BE_"+str(val)
if nan_check(val):
df[n] = df[col].isna()
else:
df[n] = df[col]==val
df[n] = df[n].astype('int8')
return [n]
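# Hypothetical mini-demo of encode_FE (not part of the original pipeline): each
# value is mapped to its count divided by the count of the most frequent value,
# so the most common value always encodes to 1.0.
_fe_demo = pd.DataFrame({'v': ['a', 'a', 'b', 'c']})
encode_FE(_fe_demo, 'v', verbose=0)
print(_fe_demo['v_FE'].tolist())  # -> [1.0, 1.0, 0.5, 0.5]
del _fe_demo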
# In[ ]:
cols = []; dd = []
# ENCODE NEW
for x in FE:
cols += encode_FE(df_train,x)
for x in OHE:
tmp = encode_OHE(df_train,x,0.005,5)
cols += tmp[0]; dd.append(tmp[1])
print('Encoded',len(cols),'new variables')
# REMOVE OLD
for x in FE+OHE:
del df_train[x]
print('Removed original',len(FE+OHE),'variables')
x = gc.collect()
# ## Example - Census_OEMModelIdentifier
# Below is variable `Census_OEMModelIdentifier`. Observe how NAN is treated like a category value and that it has consistently had the lowest HasDetections rate all of year 2018. Also notice how value=245824 has consistently been high. Finally note that value=188345 and 248045 are high and low respectively in August and September but earlier in the year their positions were reversed! What will their positions be in the test set's October and November computers??
#
# 
# # Build and Train Network
# We will build a 3-layer fully connected network with hidden layers of 500, 250, and 100 neurons (matching the code below). We will use ReLU activation, Batch Normalization, 20% Dropout, the Adam optimizer, and a decaying learning rate. Unfortunately we don't have an AUC loss function, so we will use cross entropy instead. After each epoch, we will call a custom Keras callback to display the current AUC and continually save the best model.
# In[ ]:
from keras import callbacks
from sklearn.metrics import roc_auc_score
class printAUC(callbacks.Callback):
def __init__(self, X_train, y_train):
super(printAUC, self).__init__()
self.bestAUC = 0
self.X_train = X_train
self.y_train = y_train
def on_epoch_end(self, epoch, logs={}):
pred = self.model.predict(np.array(self.X_train))
auc = roc_auc_score(self.y_train, pred)
print("Train AUC: " + str(auc))
pred = self.model.predict(self.validation_data[0])
auc = roc_auc_score(self.validation_data[1], pred)
print ("Validation AUC: " + str(auc))
if (self.bestAUC < auc) :
self.bestAUC = auc
self.model.save("bestNet.h5", overwrite=True)
return
# In[ ]:
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization, Activation
from keras.callbacks import LearningRateScheduler
from keras.optimizers import Adam
#SPLIT TRAIN AND VALIDATION SET
X_train, X_val, Y_train, Y_val = train_test_split(
df_train[cols], df_train['HasDetections'], test_size = 0.5)
# BUILD MODEL
model = Sequential()
model.add(Dense(500,input_dim=len(cols)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(250,input_dim=len(cols)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer=Adam(lr=0.01), loss="binary_crossentropy", metrics=["accuracy"])
annealer = LearningRateScheduler(lambda x: 1e-2 * 0.95 ** x)
# TRAIN MODEL
# Change batch_size and epochs as per your GPU.
model.fit(X_train,Y_train, batch_size=32, epochs = 20, callbacks=[annealer,
printAUC(X_train, Y_train)], validation_data = (X_val,Y_val), verbose=2)
# # Predict Test and Submit to Kaggle
# Even after deleting the training data, our network still needs a lot of our available RAM, so we'll need to load test.csv in chunks and predict in chunks. Click the 'see code' button to see how this is done.
# In[ ]:
del df_train
del X_train, X_val, Y_train, Y_val
x = gc.collect()
# LOAD BEST SAVED NET
from keras.models import load_model
model = load_model('bestNet.h5')
pred = np.zeros((7853253,1))
id = 1
chunksize = 2000000
for df_test in pd.read_csv('../input/test.csv', chunksize = chunksize, usecols=list(dtypes.keys())[0:-1], dtype=dtypes):
print ('Loaded',len(df_test),'rows of TEST.CSV!')
# ENCODE TEST
cols = []
for x in FE:
cols += encode_FE(df_test,x,verbose=0)
for x in range(len(OHE)):
cols += encode_OHE_test(df_test,OHE[x],dd[x])
# PREDICT TEST
end = (id)*chunksize
if end>7853253: end = 7853253
pred[(id-1)*chunksize:end] = model.predict_proba(df_test[cols])
print(' encoded and predicted part',id)
id += 1
# In[ ]:
# SUBMIT TO KAGGLE
df_test = pd.read_csv('../input/test.csv', usecols=['MachineIdentifier'])
df_test['HasDetections'] = pred
df_test.to_csv('submission.csv', index=False)
# 
# # Conclusion
# In this kernel, we saw how to build and train a neural network with Keras. We also saw how to statistically one-hot-encode categorical variables. Our validation AUC was 0.703 and our LB was 0.671. So it appears that we are not generalizing well enough over time to the test set. Furthermore, other users are getting higher CV scores, so we should be able to improve our AUC by adding more variables and tuning our network more.
#
# If anyone forks this kernel and improves its AUC, let me know. All comments and suggestions are welcome. Thanks for reading.
|
{
"content_hash": "042cd0e7012dfe71e9a3e7aaf6d9db3a",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 956,
"avg_line_length": 41.910071942446045,
"alnum_prop": 0.682087374474294,
"repo_name": "AdityaSoni19031997/Machine-Learning",
"id": "6dbea4bd6b7f4304e701dc21660bc69c09da9a92",
"size": "12040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggle/microsoft_malware_competition/trivial_nn_entity_embeddings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "77547639"
},
{
"name": "Python",
"bytes": "903037"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
}
|
class ConnectionResolverInterface(object):
def connection(self, name=None):
raise NotImplementedError()
def get_default_connection(self):
raise NotImplementedError()
def set_default_connection(self, name):
raise NotImplementedError()
|
{
"content_hash": "7734fdfcb3684eae4d22030ccb45cfb5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 27.3,
"alnum_prop": 0.706959706959707,
"repo_name": "sdispater/eloquent",
"id": "d9329b90e1fbac6360e55d1002b8fb8560da082b",
"size": "299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eloquent/connections/connection_resolver_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741617"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
import djgeojson.fields
def forwards_func(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Organization = apps.get_model("organizations", "Organization")
db_alias = schema_editor.connection.alias
Organization.objects.using(db_alias).update(pos=None)
class Migration(migrations.Migration):
dependencies = [
('organizations', '0006_auto_20161222_0252'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='pos',
field=djgeojson.fields.PointField(blank=True, null=True, verbose_name='Position'),
),
migrations.RunPython(forwards_func),
]
|
{
"content_hash": "b6f85147e9e190a7f8fb61cf53e08a6f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 94,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.6731707317073171,
"repo_name": "watchdogpolska/watchdog-kj-kultura",
"id": "6a2b9e794345f4f685c0aa50a4bac50adffe284e",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watchdog_kj_kultura/organizations/migrations/0007_auto_20161230_1852.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "182971"
},
{
"name": "HTML",
"bytes": "93827"
},
{
"name": "JavaScript",
"bytes": "3024"
},
{
"name": "Python",
"bytes": "168593"
}
],
"symlink_target": ""
}
|
'''
libqp test script
'''
import libqp
print 'test hand and card:'
cd = libqp.card_t()
cd.suit = libqp.cdSuitHeart
cd.rank = libqp.cdRankAce
h = libqp.hand_new(20)
print 'push a card to hand'
libqp.hand_push(h,cd)
cd.rank = libqp.cdRank2
print 'push a card to hand'
libqp.hand_push(h,cd)
print 'print a hand'
libqp.hand_print(h,10)
|
{
"content_hash": "c6c1c03f8ad5e748ce3551d730ebcb83",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 29,
"avg_line_length": 18.666666666666668,
"alnum_prop": 0.7053571428571429,
"repo_name": "huangtao/qipai",
"id": "be960e504cb85fb5c14739a0af269d0da95203e4",
"size": "336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swig/test_hand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "175"
},
{
"name": "C",
"bytes": "225081"
},
{
"name": "C++",
"bytes": "18893"
},
{
"name": "CMake",
"bytes": "98398"
},
{
"name": "Makefile",
"bytes": "2288"
},
{
"name": "Python",
"bytes": "31174"
},
{
"name": "QMake",
"bytes": "1668"
},
{
"name": "Shell",
"bytes": "2927"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BillingContactInfo'
db.create_table('accounting_billingcontactinfo', (
('account', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['accounting.BillingAccount'], unique=True, primary_key=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('emails', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)),
('phone_number', self.gf('django.db.models.fields.CharField')(max_length=20, null=True)),
('company_name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('first_line', self.gf('django.db.models.fields.CharField')(max_length=50)),
('second_line', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('state_province_region', self.gf('django.db.models.fields.CharField')(max_length=50)),
('postal_code', self.gf('django.db.models.fields.CharField')(max_length=20)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('accounting', ['BillingContactInfo'])
# Adding model 'BillingAccountAdmin'
db.create_table('accounting_billingaccountadmin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('web_user', self.gf('django.db.models.fields.CharField')(unique=True, max_length=80, db_index=True)),
))
db.send_create_signal('accounting', ['BillingAccountAdmin'])
# Deleting field 'BillingAccount.web_user_contact'
db.delete_column(u'accounting_billingaccount', 'web_user_contact')
# Adding M2M table for field billing_admins on 'BillingAccount'
db.create_table('accounting_billingaccount_billing_admins', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('billingaccount', models.ForeignKey(orm['accounting.billingaccount'], null=False)),
('billingaccountadmin', models.ForeignKey(orm['accounting.billingaccountadmin'], null=False))
))
db.create_unique('accounting_billingaccount_billing_admins', ['billingaccount_id', 'billingaccountadmin_id'])
# Adding field 'CreditAdjustment.web_user'
db.add_column('accounting_creditadjustment', 'web_user', self.gf('django.db.models.fields.CharField')(max_length=80, null=True), keep_default=False)
def backwards(self, orm):
# Deleting model 'BillingContactInfo'
db.delete_table('accounting_billingcontactinfo')
# Deleting model 'BillingAccountAdmin'
db.delete_table('accounting_billingaccountadmin')
# Adding field 'BillingAccount.web_user_contact'
db.add_column(u'accounting_billingaccount', 'web_user_contact', self.gf('django.db.models.fields.CharField')(max_length=80, null=True), keep_default=False)
# Removing M2M table for field billing_admins on 'BillingAccount'
db.delete_table('accounting_billingaccount_billing_admins')
# Deleting field 'CreditAdjustment.web_user'
db.delete_column('accounting_creditadjustment', 'web_user')
models = {
'accounting.billingaccount': {
'Meta': {'object_name': 'BillingAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Currency']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
},
'accounting.billingaccountadmin': {
'Meta': {'object_name': 'BillingAccountAdmin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'web_user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'})
},
'accounting.billingcontactinfo': {
'Meta': {'object_name': 'BillingContactInfo'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'accounting.billingrecord': {
'Meta': {'object_name': 'BillingRecord'},
'date_emailed': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Invoice']"}),
'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'})
},
'accounting.creditadjustment': {
'Meta': {'object_name': 'CreditAdjustment'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.CreditLine']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Invoice']", 'null': 'True'}),
'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.LineItem']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
'accounting.creditline': {
'Meta': {'object_name': 'CreditLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.BillingAccount']"}),
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.FeatureRate']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwareProductRate']", 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
},
'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': '1.0', 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'accounting.defaultproductplan': {
'Meta': {'object_name': 'DefaultProductPlan'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwarePlan']"}),
'product_type': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'accounting.feature': {
'Meta': {'object_name': 'Feature'},
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'accounting.featurerate': {
'Meta': {'object_name': 'FeatureRate'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Feature']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
},
'accounting.invoice': {
'Meta': {'object_name': 'Invoice'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Subscription']"}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
},
'accounting.lineitem': {
'Meta': {'object_name': 'LineItem'},
'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.FeatureRate']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Invoice']"}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwareProductRate']", 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'accounting.softwareplan': {
'Meta': {'object_name': 'SoftwarePlan'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
},
'accounting.softwareplanversion': {
'Meta': {'object_name': 'SoftwarePlanVersion'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwarePlan']"}),
'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_prbac.Role']"})
},
'accounting.softwareproduct': {
'Meta': {'object_name': 'SoftwareProduct'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
'accounting.softwareproductrate': {
'Meta': {'object_name': 'SoftwareProductRate'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwareProduct']"})
},
'accounting.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'})
},
'accounting.subscription': {
'Meta': {'object_name': 'Subscription'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.BillingAccount']"}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.SoftwarePlanVersion']"}),
'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounting.Subscriber']"})
},
'django_prbac.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
}
}
complete_apps = ['accounting']
|
{
"content_hash": "fd178d3db43595446bc5ba3a6b5b49c1",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 175,
"avg_line_length": 76.34412955465586,
"alnum_prop": 0.5713528132788884,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "816bcf251739414e095cd9acb709a854481a4010",
"size": "18875",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/migrations/0004_auto__add_billingcontactinfo__add_billingaccountadmin__del_field_billi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.core.exceptions import ValidationError
from lists.models import (
ListModel
)
User = get_user_model()
class ListModelTestCase(TestCase):
fixtures = [
'users.json',
'lists.json',
]
def test_we_have_lists(self):
"""Check that we can query for lists."""
listscount = ListModel.objects.count()
self.assertEqual(
listscount,
3
)
    def test_omitting_title_save_fails(self):
        """If we omit the title for the model, it should fail."""
new_list = ListModel(
status=True
)
with self.assertRaises(ValidationError):
new_list.full_clean()
new_list.save()
def test_we_can_toggle_a_list(self):
"""
Test that we can toggle a list within the DB.
"""
prev_list_count = ListModel.objects.get_active_lists().count()
list = ListModel.objects.get_active_lists().all()[0]
list.toggle_list()
listcount = ListModel.objects.get_active_lists().count()
self.assertGreater(
prev_list_count,
listcount
)
def test_str_returns_title(self):
"""Test that __str__ returns the title of the list."""
list = ListModel.objects.all()[0]
self.assertEqual(
list.__str__(),
list.title
)
def test_get_archived_lists_works(self):
"""Test we have results when calling get_archived_lists."""
listcount = ListModel.objects.get_archived_lists().count()
self.assertEqual(
listcount,
1
)
def test_get_active_lists_works(self):
"""Test we have results when calling get_active_lists."""
listcount = ListModel.objects.get_active_lists().count()
self.assertEqual(
listcount,
2
)
def test_user_has_lists(self):
"""Test that we can fetch the lists owned by the user."""
userid = 1
listcount = ListModel.objects.get_user_lists(userid).count()
self.assertEqual(
listcount,
3
)
def test_user_has_no_lists(self):
"""
Test that get_user_lists returns nothing for a user with no
lists.
"""
userid = 99999
listcount = ListModel.objects.get_user_lists(userid).count()
self.assertEqual(
listcount,
0
)
|
{
"content_hash": "7d48e1567221f8a0f9f44d50ea9d727d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 70,
"avg_line_length": 27.87912087912088,
"alnum_prop": 0.5652345289712258,
"repo_name": "petkostas/Todo-List",
"id": "7705caaeedc9d3b71ebe3821be8d303885526d0a",
"size": "2537",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "todolist/lists/tests/test_listmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34581"
},
{
"name": "HTML",
"bytes": "12512"
},
{
"name": "JavaScript",
"bytes": "5365"
},
{
"name": "Python",
"bytes": "33920"
},
{
"name": "Shell",
"bytes": "1292"
},
{
"name": "TypeScript",
"bytes": "22353"
}
],
"symlink_target": ""
}
|
"""
Given a string s consists of upper/lower-case alphabets and empty space
characters ' ', return the length of last word in the string.
If the last word does not exist, return 0.
Note: A word is defined as a character sequence consists of non-space
characters only.
For example, Given s = "Hello World",
return 5.
"""
class Solution(object):
def lengthOfLastWord(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
i = n - 1
res = 0
while i >= 0:
if s[i] != ' ':
res += 1
else:
if res != 0:
break
i -= 1
return res
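# A minimal self-check based on the example in the docstring above.
if __name__ == '__main__':
    assert Solution().lengthOfLastWord("Hello World") == 5  # "World" has 5 letters
    assert Solution().lengthOfLastWord("") == 0             # no last word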
|
{
"content_hash": "9b0b14828da7c69194c02fee37b431c0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 22.9,
"alnum_prop": 0.512372634643377,
"repo_name": "shichao-an/leetcode-python",
"id": "772576bd912e7fbc985681a29d4d94c9a573e628",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "length_of_last_word/solution3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "228552"
},
{
"name": "Shell",
"bytes": "353"
}
],
"symlink_target": ""
}
|
def fizz_buzz(number):
if number % 15 == 0:
return "Fizz Buzz"
elif number % 3 == 0:
return "Fizz"
elif number % 5 == 0:
return "Buzz"
return str(number)
if __name__ == '__main__':
# These using only for self-checking and not necessary for auto-testing
assert fizz_buzz(15) == "Fizz Buzz", "15 is divisible by 3 and 5"
assert fizz_buzz(6) == "Fizz", "6 is divisible by 3"
assert fizz_buzz(5) == "Buzz", "5 is divisible by 5"
assert fizz_buzz(7) == "7", "7 is not divisible by 3 or 5"
|
{
"content_hash": "e17e218f2be5e64374e94aea3caa7525",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 34.125,
"alnum_prop": 0.5824175824175825,
"repo_name": "kpbochenek/empireofcode",
"id": "d00b6d65e835fb5a3172a6fc25431892244800a0",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fizz_buzz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41446"
}
],
"symlink_target": ""
}
|
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import itertools
import threading
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.util import tf_decorator
def string_test(actual, expected):
np.testing.assert_array_equal(actual, expected)
def numeric_test(actual, expected):
np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=1e-6)
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
@test_util.disable_cudnn_autotune
def layer_test(layer_cls,
kwargs=None,
input_shape=None,
input_dtype=None,
input_data=None,
expected_output=None,
expected_output_dtype=None,
expected_output_shape=None,
validate_training=True,
adapt_data=None,
custom_objects=None,
test_harness=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Numpy array of the expected output.
expected_output_dtype: Data type expected for the output.
expected_output_shape: Shape tuple for the expected shape of the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
custom_objects: Optional dictionary mapping name strings to custom objects
in the layer class. This is helpful for testing custom layers.
test_harness: The Tensorflow test, if any, that this function is being
called in.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
if dtypes.as_dtype(expected_output_dtype) == dtypes.string:
if test_harness:
assert_equal = test_harness.assertAllEqual
else:
assert_equal = string_test
else:
if test_harness:
assert_equal = test_harness.assertAllClose
else:
assert_equal = numeric_test
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
# test get_weights , set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
  # test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, backend.dtype(y),
expected_output_dtype, kwargs))
def assert_shapes_equal(expected, actual):
"""Asserts that the output shape from the layer matches the actual shape."""
if len(expected) != len(actual):
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
for expected_dim, actual_dim in zip(expected, actual):
if isinstance(expected_dim, tensor_shape.Dimension):
expected_dim = expected_dim.value
if isinstance(actual_dim, tensor_shape.Dimension):
actual_dim = actual_dim.value
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
if expected_output_shape is not None:
assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
y.shape)
# check shape inference
model = models.Model(x, y)
computed_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
computed_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
assert_shapes_equal(computed_output_shape, actual_output_shape)
assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
if computed_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
computed_output_signature.dtype, kwargs))
if expected_output is not None:
assert_equal(actual_output, expected_output)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = models.Model.from_config(model_config, custom_objects)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
assert_equal(output, actual_output)
# test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train_on_batch(). See b/120160788 for more details. This should be mitigated
  # after 2.0.
layer_weights = layer.get_weights() # Get the layer weights BEFORE training.
if validate_training:
model = models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = models.Sequential()
model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype))
model.add(layer)
layer.set_weights(layer_weights)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(computed_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
computed_output_shape,
kwargs))
if expected_output is not None:
assert_equal(actual_output, expected_output)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = models.Sequential.from_config(model_config, custom_objects)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
assert_equal(output, actual_output)
# for further checks in the caller function
return actual_output
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.saved_model_format = None
_thread_local_data.save_kwargs = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.run_eagerly = previous_value
@tf_contextlib.contextmanager
def use_keras_tensors_scope(value):
"""Provides a scope within which we use KerasTensors in the func. API or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should build functional models
using KerasTensors in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = keras_tensor._KERAS_TENSORS_ENABLED # pylint: disable=protected-access
try:
keras_tensor._KERAS_TENSORS_ENABLED = value # pylint: disable=protected-access
yield value
finally:
# Restore KerasTensor usage to initial value.
keras_tensor._KERAS_TENSORS_ENABLED = previous_value # pylint: disable=protected-access
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
@tf_contextlib.contextmanager
def saved_model_format_scope(value, **kwargs):
"""Provides a scope within which the savde model format to test is `value`.
The saved model format gets restored to its original value upon exiting the
scope.
Arguments:
value: saved model format value
**kwargs: optional kwargs to pass to the save function.
Yields:
The provided value.
"""
previous_format = _thread_local_data.saved_model_format
previous_kwargs = _thread_local_data.save_kwargs
try:
_thread_local_data.saved_model_format = value
_thread_local_data.save_kwargs = kwargs
yield
finally:
# Restore saved model format to initial value.
_thread_local_data.saved_model_format = previous_format
_thread_local_data.save_kwargs = previous_kwargs
def get_save_format():
if _thread_local_data.saved_model_format is None:
raise ValueError(
'Cannot call `get_save_format()` outside of a '
'`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
'decorator.')
return _thread_local_data.saved_model_format
def get_save_kwargs():
if _thread_local_data.save_kwargs is None:
raise ValueError(
'Cannot call `get_save_kwargs()` outside of a '
'`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
'decorator.')
return _thread_local_data.save_kwargs or {}
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
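# Illustrative sketch added for clarity; not part of the original module. It pairs a
# scope context manager with its getter, mirroring how the test decorators use them.
def _example_model_type_scope():
  """Returns 'sequential' because the getter is called inside the scope."""
  with model_type_scope('sequential'):
    return get_model_type()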
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = models.Sequential()
if input_dim:
model.add(layers.Dense(num_hidden, activation='relu', input_dim=input_dim))
else:
model.add(layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = layers.Input(shape=(input_dim,))
outputs = layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = layers.Dense(num_classes, activation=activation)(outputs)
return models.Model(inputs, outputs)
class SmallSubclassMLP(models.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes, use_bn=False, use_dp=False):
super(SmallSubclassMLP, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.layer_a = layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = layers.Dense(num_classes, activation=activation)
if self.use_dp:
self.dp = layers.Dropout(0.5)
if self.use_bn:
self.bn = layers.BatchNormalization(axis=-1)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(models.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(models.Model):
"""A Keras subclass model."""
def __init__(self, model_layers, *args, **kwargs):
"""Instantiate a model.
Args:
model_layers: a list of layers to be added to the model.
*args: Model's args
      **kwargs: Model's keyword args; at most one extra keyword, `input_tensor`,
        the input tensor required for ragged/sparse input.
"""
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
    # Note that clone and build don't support lists of layers in subclassed
    # models. Adding each layer directly here.
for i, layer in enumerate(model_layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(model_layers)
if inputs is not None:
self._set_inputs(inputs)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(models.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func, *args, **kwargs):
super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
model_layers = []
for layer in self._layer_generating_func():
model_layers.append(layer)
self.all_layers = model_layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(model_layers,
input_shape=None,
input_dtype=None,
name=None,
input_ragged=None,
input_sparse=None):
"""Builds a model from a sequence of layers.
Args:
model_layers: The layers used to build the network.
input_shape: Shape tuple of the input or 'TensorShape' instance.
input_dtype: Datatype of the input.
name: Name for the model.
input_ragged: Boolean, whether the input data is a ragged tensor.
input_sparse: Boolean, whether the input data is a sparse tensor.
Returns:
A Keras model.
"""
model_type = get_model_type()
if model_type == 'subclass':
inputs = None
if input_ragged or input_sparse:
inputs = layers.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
return _SubclassModel(model_layers, name=name, input_tensor=inputs)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: model_layers
return _SubclassModelCustomBuild(layer_generating_func, name=name)
if model_type == 'sequential':
model = models.Sequential(name=name)
if input_shape:
model.add(
layers.InputLayer(
input_shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse))
for layer in model_layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = layers.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
outputs = inputs
for layer in model_layers:
outputs = layer(outputs)
return models.Model(inputs, outputs, name=name)
raise ValueError('Unknown model type {}'.format(model_type))
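# Illustrative sketch added for clarity; not part of the original module. It shows
# how `get_model_from_layers` is typically driven by a model-type scope. The layer
# sizes and the (4,) input shape are arbitrary choices for the example.
def _example_model_from_layers():
  """Builds a tiny two-Dense-layer model of the 'functional' model type."""
  with model_type_scope('functional'):
    return get_model_from_layers([layers.Dense(3), layers.Dense(1)],
                                 input_shape=(4,))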
class Bias(layers.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
class _MultiIOSubclassModel(models.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None, name=None):
super(_MultiIOSubclassModel, self).__init__(name=name)
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
elif isinstance(inputs, dict):
a = inputs['input_1']
b = inputs['input_2']
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(models.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
  model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, specified
by the different branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return models.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
  This is only necessary until v2 optimizers are the default, as we are testing
  in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings
  alone should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
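# Illustrative sketch added for clarity; not part of the original module. The
# `learning_rate` keyword is an assumption about the v2 optimizer constructors.
def _example_get_v2_optimizer():
  """Returns an RMSprop v2 optimizer via the string-to-class mapping above."""
  return get_v2_optimizer('rmsprop', learning_rate=0.001)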
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
"""Decorator for enabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
"""Decorator for disabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return tf_decorator.make_decorator(fn, wrapper)
@contextlib.contextmanager
def device(should_use_gpu):
"""Uses gpu when requested and available."""
if should_use_gpu and test_util.is_gpu_available():
dev = '/device:GPU:0'
else:
dev = '/device:CPU:0'
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(should_use_gpu=True):
yield
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith('test') and (name !=
'test_session'):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
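# Illustrative sketch added for clarity; not part of the original module. It applies
# the decorator above to a plain function; in practice it would wrap a TestCase method.
def _example_without_tensor_float_32():
  """Runs a check with TensorFloat-32 disabled for the duration of the call."""
  @run_without_tensor_float_32('uses tight numerical tolerances')
  def check(self):
    # Inside the wrapped call, TensorFloat-32 execution is disabled.
    assert not config.tensor_float_32_execution_enabled()
  check(None)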
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
See go/tf-test-decorator-cheatsheet for the decorators to use in different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator the can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError('`run_v2_only` only supports test methods.')
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest('Test is only compatible with v2')
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
combinations = [collections.OrderedDict(result)
for result in itertools.product(*combinations)]
named_combinations = []
for combination in combinations:
assert isinstance(combination, collections.OrderedDict)
name = ''.join([
'_{}_{}'.format(''.join(filter(str.isalnum, key)),
''.join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
collections.OrderedDict(
list(combination.items()) +
[('testcase_name', '_test{}'.format(name))]))
return named_combinations
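# Illustrative sketch added for clarity; not part of the original module. It shows
# the shape of the combinations produced above; the option names are arbitrary.
def _example_combinations():
  """Each returned entry is an OrderedDict including a generated 'testcase_name'."""
  combos = generate_combinations_with_testcase_name(mode=['eager', 'graph'], size=2)
  # e.g. {'mode': 'eager', 'size': 2, 'testcase_name': '_test_mode_eager_size_2'}
  return combos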
|
{
"content_hash": "74a28987ce4ef809dced4a3b0de36d98",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 92,
"avg_line_length": 34.04516129032258,
"alnum_prop": 0.6731367930913127,
"repo_name": "aam-at/tensorflow",
"id": "fecf52e71b358090d194c7d15b430a143eff0cc1",
"size": "37628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/testing_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
import os
def command():
# TODO: make this look for entry_points so third-party templates can be used
# TODO: make this verify that there's a manifest.json present
# TODO: make this display manifest.json's description along side template names
import armstrong.cli.templates
p = armstrong.cli.templates.__path__[0]
templates = [a for a in os.listdir(p) if os.path.isdir(os.path.join(p, a))]
print "The following templates are available:"
print "\n %s" % "\n ".join(templates)
|
{
"content_hash": "78a3d0467cf740b10b765083d53dcb95",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 43,
"alnum_prop": 0.687984496124031,
"repo_name": "armstrong/armstrong.cli",
"id": "b5f2080a54faf3a6c8bdaf647be60020c9d75271",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armstrong/cli/commands/list_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "70078"
}
],
"symlink_target": ""
}
|
class Collection(object):
def __init__(self, items=None):
"""
Creates a new Collection
:param items: The collection items
:type items: dict or list or Collection
:rtype: None
"""
if items is None:
items = []
else:
items = self._get_items(items)
if not isinstance(items, (list, dict, tuple)):
self._items = [items]
else:
self._items = items
@classmethod
def make(cls, items=None):
"""
Create a new Collection instance if the value isn't one already
:param items: The collection items
:type items: dict or list or Collection
:return: A Collection instance
:rtype: Collection
"""
if isinstance(items, Collection):
return items
return cls(items)
def all(self):
"""
Get all of the items in the collection
:return: The items in the collections
:type: mixed
"""
return self._items
def collapse(self):
"""
Collapse the collection items into a single element (dict or list)
:return: A new Collection instance with collapsed items
:rtype: Collection
"""
        results = []
        items = self._items
        if isinstance(items, dict):
            items = items.values()
        for values in items:
if isinstance(values, Collection):
values = values.all()
results += values
return Collection(results)
def contains(self, key, value=None):
"""
Determine if an element is in the collection
:param key: The element
:type key: int or str
:param value: The value of the element
:type value: mixed
:return: Whether the element is in the collection
:rtype: bool
"""
if value is not None:
if isinstance(self._items, list):
return key in self._items and self._items[self._items.index(key)] == value
return self._items.get(key) == value
return key in self._items
def __contains__(self, item):
return self.contains(item)
def diff(self, items):
"""
Diff the collections with the given items
:param items: The items to diff with
:type items: mixed
:return: A Collection instance
:rtype: Collection
"""
pass
def first(self, default=None):
"""
Get the first item of the collection.
:param default: The default value
:type default: mixed
"""
if len(self._items) > 0:
return self._items[0]
else:
return default
def lists(self, value, key=None):
"""
Get a list with the values of a given key
:rtype: list
"""
results = map(lambda x: x[value], self._items)
return list(results)
def map(self, callback):
"""
Run a map over each of the item.
:param callback: The map function
:type callback: callable
:rtype: Collection
"""
if isinstance(self._items, dict):
return Collection(list(map(callback, self._items.values())))
return Collection(list(map(callback, self._items)))
def unique(self):
"""
Return only unique items from the collection list.
:rtype: Collection
"""
seen = set()
seen_add = seen.add
return Collection([x for x in self._items if not (x in seen or seen_add(x))])
def _get_items(self, items):
if isinstance(items, Collection):
items = items.all()
        elif hasattr(items, 'to_list'):
            items = items.to_list()
        elif hasattr(items, 'to_dict'):
            items = items.to_dict()
return items
def to_dict(self):
return list(map(lambda value: value.to_dict() if hasattr(value, 'to_dict') else value,
self._items))
def __len__(self):
return len(self._items)
def __iter__(self):
for item in self._items:
yield item
def __getitem__(self, item):
return self._items[item]
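# Illustrative usage sketch added for clarity; not part of the original module.
def _collection_example():
    """Exercises a few of the Collection helpers defined above on a plain list."""
    items = Collection.make([1, 2, 2, 3])
    assert items.contains(2)
    assert items.unique().all() == [1, 2, 3]
    assert items.map(lambda x: x * 2).all() == [2, 4, 4, 6]
    return items.first()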
|
{
"content_hash": "b8d1be28098166ce394913162523d263",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 94,
"avg_line_length": 24.8546511627907,
"alnum_prop": 0.5396491228070175,
"repo_name": "sdispater/eloquent",
"id": "5ccf84b1c7c2275c97f46884eb88d0ac40b0dea8",
"size": "4301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eloquent/support/collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741617"
}
],
"symlink_target": ""
}
|
import sys
import os
import os.path as P
import subprocess
import shutil
from subprocess import PIPE
crates = {
"ndarray": ("docs", ""),
"ndarray-rand": ("", "ndarray-rand"),
#"ndarray-rblas": ("", "ndarray-rblas"),
}
def crate_name(s):
"""Return crate name (with underscores) """
return s.replace("-", "_")
def manifest_path(crate):
home = P.join(P.dirname(sys.argv[0]), "..")
return P.normpath(P.join(home, crates[crate][1], "Cargo.toml"))
def run_get(cmd):
print(cmd)
return subprocess.Popen(cmd, stdout=PIPE).communicate()[0]
def run(cmd):
print(cmd)
subprocess.check_call(cmd)
def run_shell(cmd_string):
print(cmd_string)
subprocess.getoutput(cmd_string)
def version(crate):
manifest = manifest_path(crate)
pkgid = run_get(["cargo", "pkgid", "--manifest-path", manifest])
pkgid = pkgid.decode("utf-8").strip()
last = pkgid.rsplit("#")[-1]
return last.rsplit(":")[-1]
def target_dir():
home = P.join(P.dirname(sys.argv[0]), "..")
return P.join(home, "target")
def dest_dir():
home = P.join(P.dirname(sys.argv[0]), "..")
return P.join(home, "master")
def doc_home():
return P.dirname(sys.argv[0])
def image_dir():
return P.join(doc_home(), "images")
def mkdocs():
for crate in crates:
run(["cargo", "doc", "-v", "--no-deps",
"--manifest-path", manifest_path(crate),
"--features", crates[crate][0]])
docdir = P.join(target_dir(), "doc", crate_name(crate))
run_shell(r'find %s -name "*.html" -exec sed -i -e "s/<title>\(.*\) - Rust/<title>%s - \1 - Rust/g" {} \;'
% (docdir, version(crate)))
dest = dest_dir()
target_doc = P.join(target_dir(), "doc")
run_shell("rm -rvf ./%s" % dest)
run_shell("cp -r %s %s" % (target_doc, dest))
run_shell("cat %s/custom.css >> %s/main.css" % (doc_home(), dest))
# remove empty files
run_shell("find %s -size 0 -delete" % (dest, ))
def main():
path = sys.argv[0]
os.putenv("CARGO_TARGET_DIR", target_dir())
mkdocs()
if __name__ == "__main__":
main()
|
{
"content_hash": "b352f0fca24cb0c9e72c021f3d7b9777",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 114,
"avg_line_length": 27.38961038961039,
"alnum_prop": 0.5765765765765766,
"repo_name": "Pireax/rust-ndarray",
"id": "a207f65962fa999c6d9c104c347be2876e944113",
"size": "2133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docgen/mkdocs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2133"
},
{
"name": "Rust",
"bytes": "379445"
}
],
"symlink_target": ""
}
|
from django.contrib.postgres.indexes import BrinIndex, GinIndex
from django.db import connection
from django.test import skipUnlessDBFeature
from . import PostgreSQLTestCase
from .models import CharFieldModel, IntegerArrayModel
@skipUnlessDBFeature('has_brin_index_support')
class BrinIndexTests(PostgreSQLTestCase):
def test_repr(self):
index = BrinIndex(fields=['title'], pages_per_range=4)
another_index = BrinIndex(fields=['title'])
self.assertEqual(repr(index), "<BrinIndex: fields='title', pages_per_range=4>")
self.assertEqual(repr(another_index), "<BrinIndex: fields='title'>")
def test_not_eq(self):
index = BrinIndex(fields=['title'])
index_with_page_range = BrinIndex(fields=['title'], pages_per_range=16)
self.assertNotEqual(index, index_with_page_range)
def test_deconstruction(self):
index = BrinIndex(fields=['title'], name='test_title_brin')
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.BrinIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_brin', 'pages_per_range': None})
def test_deconstruction_with_pages_per_range(self):
index = BrinIndex(fields=['title'], name='test_title_brin', pages_per_range=16)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.BrinIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_brin', 'pages_per_range': 16})
def test_invalid_pages_per_range(self):
with self.assertRaisesMessage(ValueError, 'pages_per_range must be None or a positive integer'):
BrinIndex(fields=['title'], name='test_title_brin', pages_per_range=0)
class GinIndexTests(PostgreSQLTestCase):
def test_repr(self):
index = GinIndex(fields=['title'])
self.assertEqual(repr(index), "<GinIndex: fields='title'>")
def test_eq(self):
index = GinIndex(fields=['title'])
same_index = GinIndex(fields=['title'])
another_index = GinIndex(fields=['author'])
self.assertEqual(index, same_index)
self.assertNotEqual(index, another_index)
def test_name_auto_generation(self):
index = GinIndex(fields=['field'])
index.set_name_with_model(IntegerArrayModel)
self.assertEqual(index.name, 'postgres_te_field_def2f8_gin')
def test_deconstruction(self):
index = GinIndex(fields=['title'], name='test_title_gin')
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.GinIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_gin'})
class SchemaTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_gin_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(IntegerArrayModel._meta.db_table))
# Add the index
index_name = 'integer_array_model_field_gin'
index = GinIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(IntegerArrayModel, index)
constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
# Check gin index was added
self.assertEqual(constraints[index_name]['type'], 'gin')
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(IntegerArrayModel, index)
self.assertNotIn(index_name, self.get_constraints(IntegerArrayModel._meta.db_table))
@skipUnlessDBFeature('has_brin_index_support')
def test_brin_index(self):
index_name = 'char_field_model_field_brin'
index = BrinIndex(fields=['field'], name=index_name, pages_per_range=4)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], 'brin')
self.assertEqual(constraints[index_name]['options'], ['pages_per_range=4'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
|
{
"content_hash": "ca32f71fb1eda783cdd79ed316d11cc3",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 107,
"avg_line_length": 45.11538461538461,
"alnum_prop": 0.6681585677749361,
"repo_name": "mattseymour/django",
"id": "9298b86e7387cb857bdc722a9d5db51ce1fb4f1d",
"size": "4692",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/postgres_tests/test_indexes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182963"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11845544"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
API file
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
__all__ = ["run_scan", "run_in_console"]
import time
# Import data
from .lib.data import *
from .lib.scan_tools import scan_tcp_ports
# ----------------------------------------------------------------------
def run_scan(input_parameters):
"""
    Scan each target's TCP ports and return the results.
:param input_parameters: Parameters object with config
:type input_parameters: Parameters
:return: dict with IPs and their results object: {TARGET: Results()}
:rtype: dict(str: Results)
:raises: TypeError
"""
if not isinstance(input_parameters, Parameters):
raise TypeError("Expected Parameters, got '%s' instead" % type(input_parameters))
results = {}
#
# Scan each target
#
for target in input_parameters.targets:
start = time.time()
# Run!
r = scan_tcp_ports(target, input_parameters)
end = time.time()
taken_time = end - start
results[target] = Results(ports=r,
scan_time=taken_time)
return results
# ----------------------------------------------------------------------
def run_in_console(input_parameters):
"""
    Run for the command line interface. It performs all the steps of the tool:
:param input_parameters: Parameters object with config
:type input_parameters: Parameters
:raises: TypeError
"""
print("\nStarting OMSTD-HH-001 port scan.")
results = run_scan(input_parameters)
start = time.time()
# Run!
for target, res in results.items():
print("\nCompleted Connect Scan, %ss elapsed (%s total ports)" % (len(res.scan_time), len(res.ports)))
print("Scan report for %s" % target)
print("Not shown: %s closed ports" % abs(len(res.open_ports) - len(res.ports)))
print("PORT STATE SERVICE")
# Show open ports
for port, status in res.ports.items():
if input_parameters.only_open is True and status == "closed":
continue
print("%s/tcp %s" % (port, status))
end = time.time()
taken_time = '{number:.8f}'.format(number=(end - start))
print("Done: %s target%s scanned in %s seconds\n" % (
len(results),
"s" if len(results) > 1 else "",
taken_time))
|
{
"content_hash": "9c68dfe600c33a1e06aefb68d9bd86af",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 755,
"avg_line_length": 36.94392523364486,
"alnum_prop": 0.663546673412598,
"repo_name": "goyoregalado/OMSTD",
"id": "bacb354ced18b7edc48f58dcefaf60708a90e4a6",
"size": "3978",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/hacking/hh-001/omstd_hh_001/api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "161510"
}
],
"symlink_target": ""
}
|
import sys
import os
import subprocess
from optparse import OptionParser
def main(argv=None):
parser = OptionParser(usage="Usage: %prog [options] [--] VAR=VALUE... command [options] arg1 arg2...")
parser.add_option("-i", "--ignore-environment",
action="store_true", default=False,
help="Start with an empty environment (do not inherit current environment)")
(options, args) = parser.parse_args(args=argv)
if options.ignore_environment:
new_env = {}
else:
new_env = os.environ.copy()
# pull out each name value pair
while len(args):
z = args[0].split("=", 1)
if len(z) != 2:
break # done with env args
if len(z[0]) == 0:
raise Exception("Error: incorrect format for env var: '%s'" % str(args[x]))
del args[0]
if len(z[1]) == 0:
# value is not present, so delete it
if z[0] in new_env:
del new_env[z[0]]
else:
new_env[z[0]] = z[1]
if len(args) == 0 or len(args[0]) == 0:
raise Exception("Error: syntax error in command arguments")
p = subprocess.Popen(args, env=new_env)
return p.wait()
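# Illustrative sketch added for clarity; not part of the original script. The
# 'printenv' command is an assumption (any executable on PATH works the same way).
def _example_invocation():
    """Sets GREETING, then runs `printenv GREETING` under the modified environment."""
    return main(["GREETING=hello", "printenv", "GREETING"])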
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "477d2d1ed73bb4c9999ed62f0213910b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 106,
"avg_line_length": 29,
"alnum_prop": 0.5501567398119123,
"repo_name": "irinabov/debian-qpid-proton",
"id": "443bdcac68ecd07aa47ffa87a4b13f744e4df5d2",
"size": "2147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1612165"
},
{
"name": "C++",
"bytes": "1432324"
},
{
"name": "CMake",
"bytes": "119386"
},
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Go",
"bytes": "322530"
},
{
"name": "Objective-C",
"bytes": "6108"
},
{
"name": "Python",
"bytes": "826215"
},
{
"name": "Ruby",
"bytes": "365171"
},
{
"name": "SWIG",
"bytes": "45104"
},
{
"name": "Shell",
"bytes": "23005"
}
],
"symlink_target": ""
}
|
import pytest
from numpy.testing import assert_equal
from spacy import util
from spacy.training import Example
from spacy.lang.en import English
from spacy.language import Language
from spacy.tests.util import make_tempdir
from spacy.morphology import Morphology
from spacy.attrs import MORPH
from spacy.tokens import Doc
def test_label_types():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("Feat=A")
with pytest.raises(ValueError):
morphologizer.add_label(9)
TRAIN_DATA = [
(
"I like green eggs",
{
"morphs": ["Feat=N", "Feat=V", "Feat=J", "Feat=N"],
"pos": ["NOUN", "VERB", "ADJ", "NOUN"],
},
),
# test combinations of morph+POS
("Eat blue ham", {"morphs": ["Feat=V", "", ""], "pos": ["", "ADJ", ""]}),
]
def test_no_label():
nlp = Language()
nlp.add_pipe("morphologizer")
with pytest.raises(ValueError):
nlp.initialize()
def test_implicit_label():
nlp = Language()
nlp.add_pipe("morphologizer")
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
nlp.initialize(get_examples=lambda: train_examples)
def test_no_resize():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "NOUN")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "VERB")
nlp.initialize()
# this throws an error because the morphologizer can't be resized after initialization
with pytest.raises(ValueError):
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "ADJ")
def test_initialize_examples():
nlp = Language()
morphologizer = nlp.add_pipe("morphologizer")
morphologizer.add_label("POS" + Morphology.FIELD_SEP + "NOUN")
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
# you shouldn't really call this more than once, but for testing it should be fine
nlp.initialize()
nlp.initialize(get_examples=lambda: train_examples)
with pytest.raises(TypeError):
nlp.initialize(get_examples=lambda: None)
with pytest.raises(TypeError):
nlp.initialize(get_examples=train_examples)
def test_overfitting_IO():
# Simple test to try and quickly overfit the morphologizer - ensuring the ML models work correctly
nlp = English()
nlp.add_pipe("morphologizer")
train_examples = []
for inst in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(inst[0]), inst[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["Feat=N", "Feat=V", "", ""]
gold_pos_tags = ["NOUN", "VERB", "ADJ", ""]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert [str(t.morph) for t in doc2] == gold_morphs
assert [t.pos_ for t in doc2] == gold_pos_tags
# Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
texts = [
"Just a sentence.",
"Then one more sentence about London.",
"Here is another one.",
"I like London.",
]
batch_deps_1 = [doc.to_array([MORPH]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([MORPH]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([MORPH]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
# Test without POS
nlp.remove_pipe("morphologizer")
nlp.add_pipe("morphologizer")
for example in train_examples:
for token in example.reference:
token.pos_ = ""
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# Test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["Feat=N", "Feat=V", "", ""]
gold_pos_tags = ["", "", "", ""]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
# Test overwrite+extend settings
# (note that "" is unset, "_" is set and empty)
morphs = ["Feat=V", "Feat=N", "_"]
doc = Doc(nlp.vocab, words=["blue", "ham", "like"], morphs=morphs)
orig_morphs = [str(t.morph) for t in doc]
orig_pos_tags = [t.pos_ for t in doc]
morphologizer = nlp.get_pipe("morphologizer")
# don't overwrite or extend
morphologizer.cfg["overwrite"] = False
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == orig_morphs
assert [t.pos_ for t in doc] == orig_pos_tags
# overwrite and extend
morphologizer.cfg["overwrite"] = True
morphologizer.cfg["extend"] = True
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", ""])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=N|That=A|This=A", "Feat=V"]
# extend without overwriting
morphologizer.cfg["overwrite"] = False
morphologizer.cfg["extend"] = True
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", "That=B"])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=A|That=A|This=A", "Feat=V|That=B"]
# overwrite without extending
morphologizer.cfg["overwrite"] = True
morphologizer.cfg["extend"] = False
doc = Doc(nlp.vocab, words=["I", "like"], morphs=["Feat=A|That=A|This=A", ""])
doc = morphologizer(doc)
assert [str(t.morph) for t in doc] == ["Feat=N", "Feat=V"]
# Test with unset morph and partial POS
nlp.remove_pipe("morphologizer")
nlp.add_pipe("morphologizer")
for example in train_examples:
for token in example.reference:
if token.text == "ham":
token.pos_ = "NOUN"
else:
token.pos_ = ""
token.set_morph(None)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
print(nlp.get_pipe("morphologizer").labels)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["morphologizer"] < 0.00001
# Test the trained model
test_text = "I like blue ham"
doc = nlp(test_text)
gold_morphs = ["", "", "", ""]
gold_pos_tags = ["NOUN", "NOUN", "NOUN", "NOUN"]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
|
{
"content_hash": "08a46ee4f92d4915b4d53f4abe7d1f10",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 102,
"avg_line_length": 35.64824120603015,
"alnum_prop": 0.6265858471948125,
"repo_name": "honnibal/spaCy",
"id": "11d6f0477d49a7f52187cd30aa5d4998bdfb04d4",
"size": "7094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/tests/pipeline/test_morphologizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "729544"
},
{
"name": "HTML",
"bytes": "26303"
},
{
"name": "JavaScript",
"bytes": "234039"
},
{
"name": "Jinja",
"bytes": "10482"
},
{
"name": "Makefile",
"bytes": "1576"
},
{
"name": "Python",
"bytes": "3361067"
},
{
"name": "Sass",
"bytes": "56639"
},
{
"name": "Shell",
"bytes": "984"
}
],
"symlink_target": ""
}
|
from .conf import settings
from .interrogate import (
node_installed, node_version, node_version_raw, raise_if_node_missing, raise_if_node_version_less_than, run_command
)
def ensure_node_installed():
raise_if_node_missing()
def ensure_node_version_gte(required_version):
ensure_node_installed()
raise_if_node_version_less_than(required_version)
def node_run(*args):
ensure_node_installed()
return run_command((settings.PATH,) + tuple(args))
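# Illustrative sketch added for clarity; not part of the original module. It assumes
# a Node.js binary is available at the configured settings.PATH.
def _example_node_version():
    """Invokes `node --version` via the wrapper above and returns the command result."""
    return node_run('--version')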
|
{
"content_hash": "0a46fbe77375a33657ba330020e5a799",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 119,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.7282377919320594,
"repo_name": "markfinger/python-nodejs",
"id": "c6e77ed79831edba915176e54066d7a168dcde40",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodejs/bindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "34"
},
{
"name": "Python",
"bytes": "6803"
}
],
"symlink_target": ""
}
|
from typing import Dict, List, Tuple
from collections import Counter
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from overrides import overrides
from allennlp.training.metrics.metric import Metric
@Metric.register("conll_coref_scores")
class ConllCorefScores(Metric):
def __init__(self) -> None:
self.scorers = [Scorer(m) for m in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]
@overrides
def __call__(self, top_spans, antecedent_indices, predicted_antecedents, metadata_list):
top_spans, antecedent_indices, predicted_antecedents = self.unwrap_to_tensors(top_spans,
antecedent_indices,
predicted_antecedents)
for i, metadata in enumerate(metadata_list):
gold_clusters, mention_to_gold = self.get_gold_clusters(metadata["clusters"])
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_spans[i],
antecedent_indices,
predicted_antecedents[i])
for scorer in self.scorers:
scorer.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
@overrides
def get_metric(self, reset: bool = False) -> Tuple[float, float, float]:
metrics = (lambda e: e.get_precision(), lambda e: e.get_recall(), lambda e: e.get_f1())
precision, recall, f1_score = tuple(sum(metric(e) for e in self.scorers) / len(self.scorers)
for metric in metrics)
if reset:
self.reset()
return precision, recall, f1_score
@overrides
def reset(self):
self.scorers = [Scorer(metric) for metric in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe)]
@staticmethod
def get_gold_clusters(gold_clusters):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gold_cluster in gold_clusters:
for mention in gold_cluster:
mention_to_gold[mention] = gold_cluster
return gold_clusters, mention_to_gold
@staticmethod
def get_predicted_clusters(top_spans, antecedent_indices, predicted_antecedents):
predicted_clusters_to_ids: Dict[Tuple[int, int], int] = {}
clusters: List[List[Tuple[int, int]]] = []
for i, predicted_antecedent in enumerate(predicted_antecedents):
if predicted_antecedent < 0:
continue
# Find predicted index in the antecedent spans.
predicted_index = antecedent_indices[i, predicted_antecedent]
# Must be a previous span.
assert i > predicted_index
antecedent_span = tuple(top_spans[predicted_index])
# Check if we've seen the span before.
if antecedent_span in predicted_clusters_to_ids.keys():
predicted_cluster_id: int = predicted_clusters_to_ids[antecedent_span]
else:
# We start a new cluster.
predicted_cluster_id = len(clusters)
clusters.append([antecedent_span])
predicted_clusters_to_ids[antecedent_span] = predicted_cluster_id
mention = tuple(top_spans[i])
clusters[predicted_cluster_id].append(mention)
predicted_clusters_to_ids[mention] = predicted_cluster_id
# finalise the spans and clusters.
clusters = [tuple(cluster) for cluster in clusters]
# Return a mapping of each mention to the cluster containing it.
predicted_clusters_to_ids: Dict[Tuple[int, int], List[Tuple[int, int]]] = \
{mention: clusters[cluster_id] for mention, cluster_id
in predicted_clusters_to_ids.items()}
return clusters, predicted_clusters_to_ids
class Scorer:
"""
Mostly borrowed from <https://github.com/clarkkev/deep-coref/blob/master/evaluation.py>
"""
def __init__(self, metric):
self.precision_numerator = 0
self.precision_denominator = 0
self.recall_numerator = 0
self.recall_denominator = 0
self.metric = metric
def update(self, predicted, gold, mention_to_predicted, mention_to_gold):
if self.metric == self.ceafe:
p_num, p_den, r_num, r_den = self.metric(predicted, gold)
else:
p_num, p_den = self.metric(predicted, mention_to_gold)
r_num, r_den = self.metric(gold, mention_to_predicted)
self.precision_numerator += p_num
self.precision_denominator += p_den
self.recall_numerator += r_num
self.recall_denominator += r_den
def get_f1(self):
precision = 0 if self.precision_denominator == 0 else \
self.precision_numerator / float(self.precision_denominator)
recall = 0 if self.recall_denominator == 0 else \
self.recall_numerator / float(self.recall_denominator)
return 0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)
def get_recall(self):
if self.recall_numerator == 0:
return 0
else:
return self.recall_numerator / float(self.recall_denominator)
def get_precision(self):
if self.precision_numerator == 0:
return 0
else:
return self.precision_numerator / float(self.precision_denominator)
def get_prf(self):
return self.get_precision(), self.get_recall(), self.get_f1()
@staticmethod
def b_cubed(clusters, mention_to_gold):
"""
Averaged per-mention precision and recall.
<https://pdfs.semanticscholar.org/cfe3/c24695f1c14b78a5b8e95bcbd1c666140fd1.pdf>
"""
numerator, denominator = 0, 0
for cluster in clusters:
if len(cluster) == 1:
continue
gold_counts = Counter()
correct = 0
for mention in cluster:
if mention in mention_to_gold:
gold_counts[tuple(mention_to_gold[mention])] += 1
for cluster2, count in gold_counts.items():
if len(cluster2) != 1:
correct += count * count
numerator += correct / float(len(cluster))
denominator += len(cluster)
return numerator, denominator
@staticmethod
def muc(clusters, mention_to_gold):
"""
Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<http://aclweb.org/anthology/M/M95/M95-1005.pdf>
"""
true_p, all_p = 0, 0
for cluster in clusters:
all_p += len(cluster) - 1
true_p += len(cluster)
linked = set()
for mention in cluster:
if mention in mention_to_gold:
linked.add(mention_to_gold[mention])
else:
true_p -= 1
true_p -= len(linked)
return true_p, all_p
@staticmethod
def phi4(gold_clustering, predicted_clustering):
"""
Subroutine for ceafe. Computes the mention F measure between gold and
predicted mentions in a cluster.
"""
return 2 * len([mention for mention in gold_clustering if mention in predicted_clustering]) \
/ float(len(gold_clustering) + len(predicted_clustering))
@staticmethod
def ceafe(clusters, gold_clusters):
"""
        Computes the Constrained Entity-Alignment F-Measure (CEAF) for evaluating coreference.
Gold and predicted mentions are aligned into clusterings which maximise a metric - in
this case, the F measure between gold and predicted clusters.
<https://www.semanticscholar.org/paper/On-Coreference-Resolution-Performance-Metrics-Luo/de133c1f22d0dfe12539e25dda70f28672459b99>
"""
clusters = [cluster for cluster in clusters if len(cluster) != 1]
scores = np.zeros((len(gold_clusters), len(clusters)))
for i, gold_cluster in enumerate(gold_clusters):
for j, cluster in enumerate(clusters):
scores[i, j] = Scorer.phi4(gold_cluster, cluster)
matching = linear_assignment(-scores)
similarity = sum(scores[matching[:, 0], matching[:, 1]])
return similarity, len(clusters), similarity, len(gold_clusters)
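# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It assumes the module-level imports of the original file (Counter,
# numpy, and sklearn's linear_assignment) are available. Clusters are tuples of
# (start, end) spans and the mention_to_* dicts map each span to the cluster
# containing it; with identical predicted and gold clusterings every metric
# should report perfect precision, recall and F1.
if __name__ == "__main__":
    toy_cluster = ((0, 1), (2, 3))
    predicted_clusters = [toy_cluster]
    gold_clusters = [toy_cluster]
    mention_to_predicted = {mention: toy_cluster for mention in toy_cluster}
    mention_to_gold = {mention: toy_cluster for mention in toy_cluster}
    for metric in (Scorer.muc, Scorer.b_cubed, Scorer.ceafe):
        scorer = Scorer(metric)
        scorer.update(predicted_clusters, gold_clusters,
                      mention_to_predicted, mention_to_gold)
        # Each line should print (1.0, 1.0, 1.0).
        print(metric.__name__, scorer.get_prf())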
|
{
"content_hash": "573d93544bff0d9c61ca37700207e03f",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 138,
"avg_line_length": 43.85858585858586,
"alnum_prop": 0.5971902349147858,
"repo_name": "nafitzgerald/allennlp",
"id": "1428f76981c90879ae3579d43cd57d642eec1071",
"size": "8684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allennlp/training/metrics/conll_coref_scores.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5564"
},
{
"name": "Cuda",
"bytes": "18610"
},
{
"name": "Jupyter Notebook",
"bytes": "36610"
},
{
"name": "Makefile",
"bytes": "1478"
},
{
"name": "Perl",
"bytes": "43067"
},
{
"name": "Python",
"bytes": "1247761"
},
{
"name": "Shell",
"bytes": "13919"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
def index(request):
return render(request, 'index.html', {
'foo': 'bar',
})
|
{
"content_hash": "b4c551d0486bd7fbf3397b5742a79b70",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.5970149253731343,
"repo_name": "Semprini/cbe-utilities",
"id": "4a5405a9e735075417a313f4d0c888eae34e1c10",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "26017"
},
{
"name": "Shell",
"bytes": "1181"
}
],
"symlink_target": ""
}
|
import sys, os
def copydata(outfile, infile):
while 1:
data = infile.read(512)
if (data):
outfile.write(data)
else:
break
def alignoffset(outfile):
offset = outfile.tell()
outfile.seek((offset + 511) & ~511)
return outfile.tell()
def appendimage(outfile, infile):
offset = alignoffset(outfile)
copydata(outfile, infile)
length = alignoffset(outfile) - offset
assert (offset % 512 == 0)
assert (length % 512 == 0)
return (offset/512, length/512)
if len(sys.argv) < 4:
print "Usage:", sys.argv[0], "output kernel boot [recovery]"
sys.exit(1)
outfile = open(sys.argv[1], 'wb')
kernel = open(sys.argv[2], 'r')
boot = open(sys.argv[3], 'r')
recovery = None
if (len(sys.argv) == 5):
recovery = open(sys.argv[4], 'r')
offset_table = "\n\nBOOT_IMAGE_OFFSETS\n"
copydata(outfile, kernel)
table_loc = alignoffset(outfile)
outfile.write('\x00' * 512)
offset_table += "boot_offset=%d;boot_len=%d;" % appendimage(outfile, boot)
if recovery:
offset_table += "recovery_offset=%d;recovery_len=%d;" % appendimage(outfile, recovery)
offset_table += "\n\n"
outfile.seek(table_loc)
outfile.write(offset_table)
outfile.flush()
os.fsync(outfile.fileno())
outfile.close()
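# --- Editorial sketch (not part of the original script). The writer above
# reserves one 512-byte sector after the kernel and fills it with a text table:
#   \n\nBOOT_IMAGE_OFFSETS\nboot_offset=N;boot_len=N;[recovery_offset=N;recovery_len=N;]\n\n
# where all values are in 512-byte sectors. A minimal reader for that table,
# assuming the marker string does not also occur inside the kernel image,
# could look like this:
def read_offset_table(path):
    with open(path, 'rb') as f:
        data = f.read()
    marker = b"BOOT_IMAGE_OFFSETS"
    start = data.index(marker) + len(marker)
    end = data.index(b"\n\n", start)  # table ends at the first blank line
    fields = {}
    for pair in data[start:end].strip().split(b";"):
        if pair:
            key, value = pair.split(b"=")
            fields[key.decode()] = int(value)
    return fields  # e.g. {'boot_offset': ..., 'boot_len': ...}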
|
{
"content_hash": "a26234297397bddc69e7373f43ddccd9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 27.347826086956523,
"alnum_prop": 0.6446740858505564,
"repo_name": "ghostkim-sc/SMG920T_profiling_enabled",
"id": "dcc82949c3232c7fac9bb9eabc0b133c7c2d3ee1",
"size": "1280",
"binary": false,
"copies": "84",
"ref": "refs/heads/master",
"path": "utilities/mkshbootimg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "4528"
},
{
"name": "Assembly",
"bytes": "9791460"
},
{
"name": "Awk",
"bytes": "18681"
},
{
"name": "C",
"bytes": "518034272"
},
{
"name": "C++",
"bytes": "13105745"
},
{
"name": "GDB",
"bytes": "18113"
},
{
"name": "Lex",
"bytes": "40805"
},
{
"name": "M4",
"bytes": "3388"
},
{
"name": "Makefile",
"bytes": "1522326"
},
{
"name": "Objective-C",
"bytes": "1278363"
},
{
"name": "Perl",
"bytes": "372361"
},
{
"name": "Python",
"bytes": "22590"
},
{
"name": "Roff",
"bytes": "22012"
},
{
"name": "Scilab",
"bytes": "21433"
},
{
"name": "Shell",
"bytes": "218756"
},
{
"name": "SourcePawn",
"bytes": "2711"
},
{
"name": "Stata",
"bytes": "4176"
},
{
"name": "UnrealScript",
"bytes": "6113"
},
{
"name": "Yacc",
"bytes": "83091"
}
],
"symlink_target": ""
}
|
"""Utility functions specifically for airdialogue model."""
import codecs
import random
import re
import numpy as np
import tensorflow.compat.v1 as tf
from airdialogue.evaluator import infer_utils
from airdialogue.evaluator.metrics import f1
from airdialogue.evaluator.metrics.flight_distance import generate_scaled_flight
from airdialogue.evaluator.metrics.flight_distance import split_flight
from utils import misc_utils as utils
from utils import vocab_utils
mode_self_play_mutable = 'self_play_mutable'
mode_self_play_immutable = 'self_play_immutable'
self_play_modes = [
mode_self_play_mutable,
mode_self_play_immutable,
]
mode_self_play_dialogue_train = 'self_play_train'
mode_self_play_dialogue_eval = 'self_play_eval'
task_TRAINEVAL = 'TRAINEVAL'
task_INFER = 'INFER'
task_SP_EVAL = 'SP_EVAL'
task_SP_DISTRIBUTED = 'SP_DISTRIBUTED'
def compute_reward(predicted_action,
actual_action,
flight_db,
alpha=0.5,
beta=0.2,
gamma=0.3,
debug=False):
"""here we compute the scaled reward."""
predicted_name, predicted_flight, predicted_state = parse_action(
predicted_action)
actual_name, actual_flight, actual_state = parse_action(actual_action)
  # this will do normalization, including lowercasing and punctuation/space
  # removal
score1 = f1.f1_score(predicted_name, actual_name)
score2 = 1 - generate_scaled_flight(predicted_flight, actual_flight,
flight_db)
score3 = float(predicted_state == actual_state)
reward_compliment = score1 * 0.2 + score2 * 0.5 + score3 * 0.3
acc1 = score1
acc2 = score2
acc3 = score3
return reward_compliment, acc1, acc2, acc3
def parse_action(action):
"""parse the action and consider multiple name scenario.
name will also appear first.
"""
name = ' '.join(action[0:-2])
flight = action[-2]
state = action[-1]
return name, flight, state
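# Editorial note (illustrative, not part of the original module): an action is
# a flat token list with the flight and state tokens last, e.g.
#   parse_action(['emily', 'nguyen', '<fl_1012>', '<st_book>'])
# returns ('emily nguyen', '<fl_1012>', '<st_book>'); the '<fl_*>'/'<st_*>'
# token forms follow the regexes used in decode_and_evaluate below.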
def compute_01_score(predicted_action, actual_action):
"""here we compute the 0/1 score."""
predicted_name, predicted_flight, predicted_state = parse_action(
predicted_action)
actual_name, actual_flight, actual_state = parse_action(actual_action)
# name score discrete
predicted_names = predicted_name.lower().split(' ')
actual_names = actual_name.lower().split(' ')
ds1_name = ((predicted_names[0].strip() == actual_names[0].strip()) +
(predicted_names[1].strip() == actual_names[1].strip())) / 2.0
# flight score discrete
truth_idx_arr = split_flight(actual_flight)
predicted_flight = predicted_flight.strip()
if '<fl_empty>' in truth_idx_arr:
assert len(truth_idx_arr) == 1
ds2_flight = int(predicted_flight == '<fl_empty>')
else:
ds2_flight = int(predicted_flight in truth_idx_arr)
# ds2_flight = predicted_flight.strip() == actual_flight.strip()
# state score discrete
ds3_state = predicted_state.strip() == actual_state.strip()
# total score
ds_total = 0.2 * ds1_name + 0.5 * ds2_flight + 0.3 * ds3_state
return ds_total, ds1_name, ds2_flight, ds3_state
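# Editorial note: the discrete name score above compares the first two
# space-separated tokens of each name, so it assumes both the predicted and
# the actual name contain at least two tokens (first and last name); a
# single-token name would raise an IndexError here.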
def get_training_reward(hparams, s1, s2, s3, d1, d2, d3):
"""Calcualte the reward for training."""
if hparams.train_reward_type == 'scaled':
return 0.2 * s1 + 0.5 * s2 + 0.3 * s3
elif hparams.train_reward_type == 'discrete':
return 0.2 * d1 + 0.5 * d2 + 0.3 * d3
elif hparams.train_reward_type == 'combined':
    return 0.2 * (s1 + d1) / 2.0 + 0.5 * (s2 + d2) / 2.0 + 0.3 * (s3 + d3) / 2.0
elif hparams.train_reward_type == 'extreme':
return d1 * 1000 + d2 * 10 + d3 * 1
else:
raise ValueError('invalid reward type')
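# Editorial example (illustrative): with scaled scores s1=1.0, s2=0.5, s3=1.0
# and discrete scores d1=1.0, d2=0.0, d3=1.0, the reward types above give
#   'scaled'   -> 0.2*1.0 + 0.5*0.5 + 0.3*1.0 = 0.75
#   'discrete' -> 0.2*1.0 + 0.5*0.0 + 0.3*1.0 = 0.50
#   'combined' -> 0.2*(2.0/2) + 0.5*(0.5/2) + 0.3*(2.0/2) = 0.625
#   'extreme'  -> 1.0*1000 + 0.0*10 + 1.0*1 = 1001.0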
def compute_reward_batch(utterance,
predicted_action,
actual_action_concat,
flight_db,
hparams,
alpha=0.5,
beta=0.2,
gamma=0.3):
"""Calcualte the reward for a batch."""
rewards = []
acc1 = []
acc2 = []
acc3 = []
discrete_score = []
ds1_arr = []
ds2_arr = []
ds3_arr = []
train_rw_arr = []
for pa, aa_con, fl in zip(predicted_action, actual_action_concat, flight_db):
aa = aa_con.split(' ')
rw, ac1, ac2, ac3 = compute_reward(pa, aa, fl)
rewards.append(rw)
acc1.append(ac1)
acc2.append(ac2)
acc3.append(ac3)
ds, ds1, ds2, ds3 = compute_01_score(pa, aa)
discrete_score.append(ds)
ds1_arr.append(ds1)
ds2_arr.append(ds2)
ds3_arr.append(ds3)
train_rw_arr.append(
get_training_reward(hparams, ac1, ac2, ac3, ds1, ds2, ds3))
return train_rw_arr, rewards, acc1, acc2, acc3, discrete_score, ds1_arr, ds2_arr, ds3_arr
def calculate_reward_metrics(batch_rewards):
train_rw_arr, rewards, acc1, acc2, acc3, discrete_score, ds1_arr, ds2_arr, ds3_arr = batch_rewards
# print ('acc3',acc3,np.mean(acc3))
return {
'train_rw_arr': np.mean(train_rw_arr),
      'rewards': np.mean(rewards),
'rw1_name': np.mean(acc1),
'rw2_flight_num': np.mean(acc2),
'rw3_action_state': np.mean(acc3),
'discrete_score': np.mean(discrete_score),
'ds1_name': np.mean(ds1_arr),
'ds2_flight': np.mean(ds2_arr),
'ds3_state': np.mean(ds3_arr),
}
def extract_best_beam_response(response):
"""Make sure outputs is of shape [batch_size, time] when using beam search."""
new_response = [
extract_best_beam_single(response[0]),
extract_best_beam_single(response[1]),
extract_best_beam_single(response[2])
]
return new_response
def extract_best_beam_single(sample_words):
"""Extract the best beam from the sampled words."""
if sample_words.ndim == 3: # if this is beam search
# Original beam search output is in [batch_size, time, beam_width] shape.
sample_words = sample_words.transpose([2, 0, 1])
# After extraction it would be [batch_size, time]
best_beam = sample_words[0, :, :]
return best_beam
else:
return sample_words
def decode_and_evaluate(name,
model,
data_iterator_handle,
sess,
trans_file,
ref_file,
metrics,
hparams,
infer_src_data=None,
decode=True):
"""Decode a test set and compute a score according to the evaluation task."""
# Decode
cnt = 0
last_cnt = 0
if decode:
with codecs.getwriter('utf-8')(tf.gfile.GFile(trans_file,
mode='wb')) as trans_f:
trans_f.write('') # Write empty string to ensure file is created.
while True:
try:
ut1, ut2, action = model.generate_infer_utterance(
sess, data_iterator_handle)
batch_size = ut1.shape[0]
for sent_id in range(batch_size):
src = infer_src_data[cnt]
speaker = get_speaker(src)
nmt_outputs = [ut1, ut2][speaker]
translation = get_translation_cut_both(nmt_outputs, sent_id,
hparams.t1.encode(),
hparams.t2.encode())
translation = translation.decode('utf-8')
if hparams.self_play_start_turn == 'agent':
if '<eod>' in translation:
ac_arr = [w.decode('utf-8') for w in action[sent_id]]
name = ac_arr[0] + ' ' + ac_arr[1]
flight = re.match(r'<fl_(\d+)>', ac_arr[2])
flight = flight.group(1) if flight else ''
status = re.match(r'<st_(\w+)>', ac_arr[3])
status = status.group(1) if status else ''
translation += '|' + '|'.join([name, flight, status])
else:
translation += '|||'
trans_f.write(translation + '\n')
cnt += 1
            if cnt - last_cnt >= 10000:  # 400k in total
utils.print_out('cnt= ' + str(cnt))
last_cnt += 10000
except tf.errors.OutOfRangeError:
break
# Evaluation
evaluation_scores = {}
if ref_file and tf.gfile.Exists(trans_file):
for metric in metrics:
score = infer_utils.evaluate(ref_file, trans_file, metric)
evaluation_scores[metric] = score
utils.print_out(' %s %s: %.1f' % (metric, name, score))
return evaluation_scores
def load_data(inference_input_file):
"""Load inference data.
  Note: the dialogue context might contain multiple
  flights connected with underscores, e.g. flight1_flight2.
"""
with codecs.getreader('utf-8')(tf.gfile.GFile(
inference_input_file, mode='rb')) as f:
text_data = f.read().splitlines()
return text_data
def get_translation(nmt_outputs, sent_id, tgt_eos):
"""Given batch decoding outputs, select a sentence and turn to text."""
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
if tgt_eos in output:
output = output[:output.index(tgt_eos)]
translation = utils.format_text(output)
return translation
def get_translation_cut_both(nmt_outputs, sent_id, start_token, end_token):
"""Given batch decoding outputs, select a sentence and turn to text."""
# Select a sentence
output = nmt_outputs[sent_id, :].tolist()
if start_token in output:
output = output[:output.index(start_token)]
if end_token in output:
output = output[:output.index(end_token)]
translation = utils.format_text(output)
return translation
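# Editorial note (illustrative): unlike get_translation above, this variant
# trims at both a start-of-turn token and an end token before joining, so with
# a hypothetical start_token of b'<t1>' an output row of
# [b'hi', b'there', b'<t1>', b'junk'] would be cut to [b'hi', b'there'] before
# utils.format_text turns it into text.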
def get_speaker(text_data):
turn_token = text_data.split('|')[-1].split(' ')[-1]
if turn_token == vocab_utils.start_of_turn1:
return 0
if turn_token == vocab_utils.start_of_turn2:
return 1
raise ValueError('invalid ending for dialogue : ' + turn_token)
def _sample_decode(model, global_step, iterator_handle, sess, hparams,
real_iterator, sample_src_data, sample_tar_data, sample_kb,
iterator_src_placeholder, iterator_kb_placeholder,
iterator_batch_size_placeholder):
"""Pick a sentence and decode."""
decode_id = random.randint(0, len(sample_src_data) - 1)
utils.print_out(' # %d' % decode_id)
speaker = get_speaker(sample_src_data[decode_id])
iterator_feed_dict = {
iterator_src_placeholder: [sample_src_data[decode_id]],
iterator_kb_placeholder: [sample_kb[decode_id]],
iterator_batch_size_placeholder: 1,
}
sess.run(real_iterator.initializer, feed_dict=iterator_feed_dict)
nmt_outputs, _, source = model.generate_utterance(sess, speaker,
iterator_handle)
if hparams.beam_width > 0:
nmt_outputs = nmt_outputs[0]
translation = get_translation(
nmt_outputs,
sent_id=0, # there is only one sentence because batch size is 1
tgt_eos=None)
src = get_translation(
source,
sent_id=0, # there is only one sentence because batch size is 1
tgt_eos=None)
src_dialogue = src
utils.print_out(' src: %s' % src_dialogue)
if sample_tar_data:
tar_dialogue = sample_tar_data[decode_id].split('|')[-1]
utils.print_out(' ref: %s' % tar_dialogue)
utils.print_out(' ours: ' + str(translation) + ' (speaker' + str(speaker) +
')')
|
{
"content_hash": "ecfe01e399b99189b3a5390f047ebe38",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 100,
"avg_line_length": 34.551829268292686,
"alnum_prop": 0.6052236830495015,
"repo_name": "google/airdialogue_model",
"id": "864832e52b4a945412d96ec9119b0a7085d150f1",
"size": "11908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/dialogue_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "197887"
},
{
"name": "Shell",
"bytes": "11468"
}
],
"symlink_target": ""
}
|