text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'ben'
# Build the metaData<phase>.json file consumed by the MODA web app from a
# folder of pre-rendered EEG epoch images.  Filenames are expected to be
# '-'-separated: e<epoch>-b<batch>-<u1>-<u2>-<u3>-smp<sample>.<ext>
# NOTE: Python 2 script (print statement, 'wb' mode for json.dump).
from pprint import pprint
import os
import json
import pandas as pd
from os import walk
import os  # NOTE(review): duplicate import; 'os' is already imported above
import sys
data = {}
phase = 'phase1'
prac = False  # phase1 images are real (non-practice) images
mypath = '../build/img/' + phase + '/900'
# Batch bookkeeping for the app; 'subjects' is filled in while scanning files.
data['batchMeta'] = {
    'numBatches':404,
    'imgPerSet':10,
    'batchPerSet':2,
    'imgPerBatch':5,
    'subjects':0
}
# mypath = '../build/img/training1/test'
# data['batchMeta'] = {
#     'numBatches':1,
#     'imgPerSet':5,
#     'batchPerSet':1,
#     'imgPerBatch':5,
# }
# walk() with an immediate break: take only the top-level file listing.
for (dirpath, dirnames, filenames) in walk(mypath):
    break
print os.getcwd()
#gsData = pd.read_csv(os.getcwd()+'/GoldStandardData/'+phase+'/GS.txt', sep='\t')
markerIndex = 0
for image in filenames:
    # e.g. 'e12-b1-<id1>-<id2>-<id3>-smp12345.png'
    [e, b, u1, u2, u3, smp] = image.split('-')
    [smp, u4] = smp.split('.')  # strip the file extension
    subID = u1 + '-' + u2 + '-' + u3
    if subID not in data:
        # First image seen for this subject: register it and init block maps.
        data[subID] = {}
        data['batchMeta']['subjects'] += 1
        data[subID]['blockIdxs'] = {}
        data[subID]['blockIdx'] = 0
    if int(b[1:]) not in data[subID]['blockIdxs']: #map to block indexs from actual blocks
        # New batch number for this subject: assign the next dense block index.
        data[subID]['blockIdxs'][int(b[1:])] = data[subID]['blockIdx']
        data[subID]['blockIdx'] += 1
        data[subID][data[subID]['blockIdxs'][int(b[1:])]] = {}
        data[subID][data[subID]['blockIdxs'][int(b[1:])]]['imgs'] = {}
        data[subID][data[subID]['blockIdxs'][int(b[1:])]]['idx'] = 0
    blcIdx = data[subID]['blockIdxs'][int(b[1:])]
    # Position of this image within its batch (epoch numbers are 1-based).
    imgIdx = (int(e[1:])-1) % data['batchMeta']['imgPerBatch']
    # NOTE(review): this membership test is probably meant to be on
    # data[subID][blcIdx]['imgs']; as written it is effectively always True,
    # but the assignment below makes that harmless.
    if imgIdx not in data[subID][blcIdx]:
        data[subID][blcIdx]['imgs'][imgIdx] = {}
    data[subID][blcIdx]['imgs'][imgIdx]['epoch'] = int(e[1:])
    data[subID][blcIdx]['imgs'][imgIdx]['batch'] = int(b[1:])
    data[subID][blcIdx]['imgs'][imgIdx]['filename'] = image
    # 'smp<sample>' start sample; each image spans 25 seconds (in ms units).
    data[subID][blcIdx]['imgs'][imgIdx]['start'] = int(smp[3:])
    data[subID][blcIdx]['imgs'][imgIdx]['end'] = int(smp[3:]) + 25*1000
    data[subID][blcIdx]['imgs'][imgIdx]['stage'] = 2
    data[subID][blcIdx]['imgs'][imgIdx]['loadedViews'] = 0
    data[subID][blcIdx]['imgs'][imgIdx]['backNextViews'] = 0
    data[subID][blcIdx]['imgs'][imgIdx]['subID'] = u1 + '-' + u2 + '-' + u3
    data[subID][blcIdx]['imgs'][imgIdx]['markers'] = []
    data[subID][blcIdx]['imgs'][imgIdx]['noMarkers'] = False
    data[subID][blcIdx]['imgs'][imgIdx]['prac'] = prac
    data[subID][blcIdx]['imgs'][imgIdx]['phase'] = phase
    markers = []
    #gsMarkerData = gsData.loc[gsData.filename.isin({image})];
    #if gsMarkerData.empty:
    # Gold-standard markers disabled for this phase; always empty.
    data[subID][blcIdx]['imgs'][imgIdx]['gsMarkers'] = []
    # else:
    #     for i in gsMarkerData.index:
    #         print gsMarkerData.ix[i]
    #         markers.append({
    #             'startPercent': gsMarkerData.ix[i].startPercent,
    #             'durationPercent': gsMarkerData.ix[i].durationPercent,
    #             'startSecs': gsMarkerData.ix[i].startSecs,
    #             'durationSecs': gsMarkerData.ix[i].durationSecs,
    #             'scoreConfidence': gsMarkerData.ix[i].scoreConfidence,
    #             'markerIndex': 'GS' + str(markerIndex)
    #         })
    #         markerIndex += 1
    #     data[subID][blcIdx]['imgs'][imgIdx]['gsMarkers'] = markers;
    # print markers
# Flatten the per-subject block tree into a dense batch list for the app.
batchIdx = 0
dataOut = {}
for subID in data:
    if subID == 'batchMeta':
        dataOut['batchMeta'] = data['batchMeta']
        continue
    for batch in data[subID]:
        # Skip the bookkeeping keys; only numeric block indices carry images.
        if batch == 'blockIdx' or batch == 'blockIdxs':
            continue
        dataOut[batchIdx] = data[subID][batch]
        batchIdx += 1
with open('../app/Assets/metaData' + phase + '.json', 'wb') as fp:
    pprint(data)
    pprint(dataOut)
    json.dump(dataOut, fp)
    fp.close()  # NOTE(review): redundant inside the with-block
| {
"repo_name": "bdyetton/MODA",
"path": "Tools/parseFolderOfImagesToMetaJson.py",
"copies": "1",
"size": "3681",
"license": "mit",
"hash": 1664541129081770000,
"line_mean": 32.1621621622,
"line_max": 90,
"alpha_frac": 0.5707688128,
"autogenerated": false,
"ratio": 2.9237490071485306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39945178199485304,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ben'
# Build metaDatapractice.json for the MODA practice set: images are split
# into a fixed 'easy' and 'hard' group (5 images each) and paired with
# gold-standard spindle markers read from GS.txt.
# NOTE: Python 2 script (print statement, 'wb' mode for json.dump,
# deprecated DataFrame.ix accessor).
from pprint import pprint
import os
import json
import pandas as pd
from os import walk
import os  # NOTE(review): duplicate import; 'os' is already imported above
import sys
data = {}
phase = 'practice'
# Epoch numbers assigned to the easy/hard practice groups.
easyPrac = [10,12,17,30,34]
hardPrac = [25,26,35,37,42]
mypath = '../build/img/' + phase + '/900'
prac = True
data['batchMeta'] = {
    'numBatches':2,
    'imgPerSet':10,
    'batchPerSet':2,
    'imgPerBatch':5,
    'subjects':0
}
# mypath = '../build/img/training1/test'
# data['batchMeta'] = {
#     'numBatches':1,
#     'imgPerSet':5,
#     'batchPerSet':1,
#     'imgPerBatch':5,
# }
# walk() with an immediate break: take only the top-level file listing.
for (dirpath, dirnames, filenames) in walk(mypath):
    break
print os.getcwd()
gsData = pd.read_csv(os.getcwd()+'/GoldStandardData/'+phase+'/GS.txt', sep=',')
markerIndex = 0
# Pre-allocate the five image slots per difficulty group.
data['easy'] = {'imgs':{0:{},1:{},2:{},3:{},4:{}}}
data['hard'] = {'imgs':{0:{},1:{},2:{},3:{},4:{}}}
hardIdx = -1
easyIdx = -1
for image in filenames:
    # e.g. 'e12-b1-<id1>-<id2>-<id3>-smp12345.png'
    [e, b, u1, u2, u3, smp] = image.split('-')
    if int(e[1:]) in easyPrac:
        dif = 'easy'
        easyIdx += 1
        idx = easyIdx
    elif int(e[1:]) in hardPrac:
        dif = 'hard'
        hardIdx += 1
        idx = hardIdx
    else:
        # Epoch not in either practice list: ignore the file.
        print 'Extra File in prac: ' + image
        continue
    [smp, u4] = smp.split('.')  # strip the file extension
    subID = u1 + '-' + u2 + '-' + u3
    data[dif]['imgs'][idx]['idx'] = 0
    data[dif]['imgs'][idx]['loadedViews'] = 0
    data[dif]['imgs'][idx]['backNextViews'] = 0
    data[dif]['imgs'][idx]['epoch'] = int(e[1:])
    data[dif]['imgs'][idx]['batch'] = int(b[1:])
    data[dif]['imgs'][idx]['filename'] = image
    # 'smp<sample>' start sample; each image spans 25 seconds (in ms units).
    data[dif]['imgs'][idx]['start'] = int(smp[3:])
    data[dif]['imgs'][idx]['end'] = int(smp[3:]) + 25*1000
    data[dif]['imgs'][idx]['stage'] = 2
    data[dif]['imgs'][idx]['subID'] = u1 + '-' + u2 + '-' + u3
    data[dif]['imgs'][idx]['markers'] = []
    data[dif]['imgs'][idx]['noMarkers'] = False
    data[dif]['imgs'][idx]['prac'] = prac
    data[dif]['imgs'][idx]['phase'] = phase
    markers = []
    # Gold-standard rows whose filename column matches this image.
    gsMarkerData = gsData.loc[gsData.filename.isin({image})];
    if gsMarkerData.empty:
        data[dif]['imgs'][idx]['gsMarkers'] = []
    else:
        for i in gsMarkerData.index:
            print gsMarkerData.ix[i]
            markers.append({
                'xP': gsMarkerData.ix[i].startPercent,
                'wP': gsMarkerData.ix[i].durationPercent,
                'xSecs': gsMarkerData.ix[i].startSecs,
                'wSecs': gsMarkerData.ix[i].durationSecs,
                'scoreConfidence': gsMarkerData.ix[i].scoreConfidence,
                'markerIndex': markerIndex
            })
            markerIndex += 1
        data[dif]['imgs'][idx]['gsMarkers'] = markers
# Emit two fixed batches: '0' = easy set, '1' = hard set.
batchIdx = 0
dataOut = {}
dataOut['batchMeta'] = data['batchMeta']
dataOut['0'] = data['easy']
dataOut['1'] = data['hard']
with open('../app/Assets/metaData' + phase + '.json', 'wb') as fp:
    pprint(data)
    pprint(dataOut)
    json.dump(dataOut, fp)
    fp.close()  # NOTE(review): redundant inside the with-block
| {
"repo_name": "bdyetton/MODA",
"path": "Tools/parseFolderOfImagesPracSet.py",
"copies": "1",
"size": "2905",
"license": "mit",
"hash": 3011582155490640400,
"line_mean": 26.6666666667,
"line_max": 79,
"alpha_frac": 0.5449225473,
"autogenerated": false,
"ratio": 2.902097902097902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3947020449397902,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben'
import urllib2
import json
from collections import defaultdict
import time
import random
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
GCM_URL = 'https://android.googleapis.com/gcm/send'
class GCMException(Exception):
    """Base class for every GCM client error."""
    pass
class GCMMalformedJsonException(GCMException):
    """Request body could not be parsed as JSON (HTTP 400)."""
    pass
class GCMConnectionException(GCMException):
    """Network-level failure while contacting the GCM service."""
    pass
class GCMAuthenticationException(GCMException):
    """The sender account / API key was rejected (HTTP 401)."""
    pass
class GCMTooManyRegIdsException(GCMException):
    """More than 1000 registration ids in a single JSON request."""
    pass
class GCMInvalidTtlException(GCMException):
    """time_to_live is outside the accepted 0..2419200 seconds range."""
    pass
# Exceptions from Google responses
class GCMMissingRegistrationException(GCMException):
    """No registration id(s) were supplied for the request."""
    pass
class GCMMismatchSenderIdException(GCMException):
    """Registration id belongs to a different group of senders."""
    pass
class GCMNotRegisteredException(GCMException):
    """Registration id is no longer valid (app uninstalled, etc.)."""
    pass
class GCMMessageTooBigException(GCMException):
    """Message payload exceeds the 4096-byte limit."""
    pass
class GCMInvalidRegistrationException(GCMException):
    """Registration id is malformed / invalid."""
    pass
class GCMUnavailableException(GCMException):
    """GCM is temporarily unavailable (HTTP 503 or 'Unavailable' error)."""
    pass
def group_response(response, registration_ids, key):
    """Group per-registration-id results from a GCM JSON response.

    Parameters
    ----------
    response : dict
        Parsed JSON response; its 'results' list is positionally aligned
        with ``registration_ids``.
    registration_ids : list
        Registration ids in the order the request was sent.
    key : str
        Result field to extract ('error' or 'registration_id').

    Returns
    -------
    For key 'registration_id': dict mapping old reg_id -> canonical reg_id.
    For any other key (errors): dict mapping value -> list of reg_ids.
    None when no result entry carries ``key``.
    """
    # Pair up results and reg_ids, keeping only entries that carry `key`.
    mapping = zip(registration_ids, response['results'])
    filtered = ((reg_id, res[key]) for reg_id, res in mapping if key in res)
    # BUG FIX: the original used `key is 'registration_id'`, an identity
    # comparison that only worked through CPython string interning; compare
    # by equality instead.
    if key == 'registration_id':
        # Canonical-id updates: one replacement id per old id.
        grouping = dict(filtered)
    else:
        # Errors: collect the reg_ids that share each error value.
        grouping = defaultdict(list)
        for k, v in filtered:
            grouping[v].append(k)
    return grouping or None
def urlencode_utf8(params):
    """
    UTF-8 safe variant of urllib.urlencode.
    Accepts a mapping or an iterable of (key, value) pairs.
    http://stackoverflow.com/a/8152242
    """
    if hasattr(params, 'items'):
        params = params.items()
    encoded_pairs = []
    for key, value in params:
        encoded_key = quote_plus(key.encode('utf8'), safe='/')
        encoded_value = quote_plus(value.encode('utf8'), safe='/')
        encoded_pairs.append(encoded_key + '=' + encoded_value)
    return '&'.join(encoded_pairs)
class GCM(object):
    """Minimal client for the Google Cloud Messaging (GCM) HTTP endpoint.

    Supports plaintext (single registration id) and JSON (multicast)
    requests, with exponential backoff retries on 'Unavailable' errors.
    NOTE: Python 2 code (urllib2, basestring).
    """
    # Timeunit is milliseconds.
    BACKOFF_INITIAL_DELAY = 1000
    MAX_BACKOFF_DELAY = 1024000
    def __init__(self, api_key = 'AIzaSyB_YcUTTKUI2x51g9HiqApT1qpaQ5nWR3o', url=GCM_URL, proxy=None):
        """ api_key : google api key
            url: url of gcm service.
            proxy: can be string "http://host:port" or dict {'https':'host:port'}
        """
        # NOTE(review): a real-looking API key is hardcoded as the default --
        # this is a credential leak; require it from configuration instead.
        self.api_key = api_key
        self.url = url
        if proxy:
            if isinstance(proxy,basestring):
                # Bare "host:port" string: key it by the scheme of the URL.
                protocol = url.split(':')[0]
                proxy={protocol:proxy}
            auth = urllib2.HTTPBasicAuthHandler()
            # NOTE: installs a process-wide opener, affecting all urllib2 users.
            opener = urllib2.build_opener(urllib2.ProxyHandler(proxy), auth, urllib2.HTTPHandler)
            urllib2.install_opener(opener)
    def construct_payload(self, registration_ids, data=None, collapse_key=None,
                          delay_while_idle=False, time_to_live=None, is_json=True, dry_run=False):
        """
        Construct the dictionary mapping of parameters.
        Encodes the dictionary into JSON if for json requests.
        Helps appending 'data.' prefix to the plaintext data: 'hello' => 'data.hello'
        :return constructed dict or JSON payload
        :raises GCMInvalidTtlException: if time_to_live is invalid
        """
        if time_to_live:
            # GCM accepts a TTL of at most 4 weeks (2419200 seconds).
            if time_to_live > 2419200 or time_to_live < 0:
                raise GCMInvalidTtlException("Invalid time to live value")
        if is_json:
            payload = {'registration_ids': registration_ids}
            if data:
                payload['data'] = data
        else:
            payload = {'registration_id': registration_ids}
            if data:
                plaintext_data = data.copy()
                # Iterate over a snapshot of the keys (a list in Python 2)
                # while renaming them in place with the 'data.' prefix.
                for k in plaintext_data.keys():
                    plaintext_data['data.%s' % k] = plaintext_data.pop(k)
                payload.update(plaintext_data)
        if delay_while_idle:
            payload['delay_while_idle'] = delay_while_idle
        # NOTE(review): relies on Python 2's `None >= 0` evaluating to False
        # when time_to_live is unset; on Python 3 this would raise TypeError.
        if time_to_live >= 0:
            payload['time_to_live'] = time_to_live
        if collapse_key:
            payload['collapse_key'] = collapse_key
        if dry_run:
            payload['dry_run'] = True
        if is_json:
            payload = json.dumps(payload)
        return payload
    def make_request(self, data, is_json=True):
        """
        Makes a HTTP request to GCM servers with the constructed payload
        :param data: return value from construct_payload method
        :raises GCMMalformedJsonException: if malformed JSON request found
        :raises GCMAuthenticationException: if there was a problem with authentication, invalid api key
        :raises GCMConnectionException: if GCM is screwed
        """
        headers = {
            'Authorization': 'key=%s' % self.api_key,
        }
        # Default Content-Type is defaulted to application/x-www-form-urlencoded;charset=UTF-8
        if is_json:
            headers['Content-Type'] = 'application/json'
        if not is_json:
            data = urlencode_utf8(data)
        req = urllib2.Request(self.url, data, headers)
        try:
            response = urllib2.urlopen(req).read()
        except urllib2.HTTPError as e:
            # Map GCM HTTP status codes onto the exception hierarchy.
            if e.code == 400:
                raise GCMMalformedJsonException("The request could not be parsed as JSON")
            elif e.code == 401:
                raise GCMAuthenticationException("There was an error authenticating the sender account")
            elif e.code == 503:
                raise GCMUnavailableException("GCM service is unavailable")
            else:
                error = "GCM service error: %d" % e.code
                raise GCMUnavailableException(error)
        except urllib2.URLError as e:
            raise GCMConnectionException("There was an internal error in the GCM server while trying to process the request")
        if is_json:
            response = json.loads(response)
        return response
    def raise_error(self, error):
        """Map a GCM error string onto the matching exception and raise it.

        NOTE(review): unknown error strings fall through without raising.
        """
        if error == 'InvalidRegistration':
            raise GCMInvalidRegistrationException("Registration ID is invalid")
        elif error == 'Unavailable':
            # Plain-text requests will never return Unavailable as the error code.
            # http://developer.android.com/guide/google/gcm/gcm.html#error_codes
            raise GCMUnavailableException("Server unavailable. Resent the message")
        elif error == 'NotRegistered':
            raise GCMNotRegisteredException("Registration id is not valid anymore")
        elif error == 'MismatchSenderId':
            raise GCMMismatchSenderIdException("A Registration ID is tied to a certain group of senders")
        elif error == 'MessageTooBig':
            raise GCMMessageTooBigException("Message can't exceed 4096 bytes")
    def handle_plaintext_response(self, response):
        """Parse a key=value plaintext GCM response.

        Raises via raise_error on an Error line; otherwise returns the
        canonical registration id when present, else None.
        """
        # Split response by line
        response_lines = response.strip().split('\n')
        # Split the first line by =
        key, value = response_lines[0].split('=')
        if key == 'Error':
            self.raise_error(value)
        else:
            if len(response_lines) == 2:
                # Second line carries a canonical registration id update.
                return response_lines[1].split('=')[1]
            return
    def handle_json_response(self, response, registration_ids):
        """Summarize a JSON response as {'errors': ..., 'canonical': ...}.

        Either key is omitted when empty; an empty dict means full success.
        """
        errors = group_response(response, registration_ids, 'error')
        canonical = group_response(response, registration_ids, 'registration_id')
        info = {}
        if errors:
            info.update({'errors': errors})
        if canonical:
            info.update({'canonical': canonical})
        return info
    def extract_unsent_reg_ids(self, info):
        """Return the reg_ids that failed with 'Unavailable' (retryable)."""
        if 'errors' in info and 'Unavailable' in info['errors']:
            return info['errors']['Unavailable']
        return []
    def plaintext_request(self, registration_id, data=None, collapse_key=None,
                          delay_while_idle=False, time_to_live=None, retries=5, dry_run=False):
        """
        Makes a plaintext request to GCM servers
        :param registration_id: string of the registration id
        :param data: dict mapping of key-value pairs of messages
        :return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
        :raises GCMMissingRegistrationException: if registration_id is not provided

        NOTE(review): the :return above looks copied from json_request; this
        actually returns handle_plaintext_response's result (canonical reg id
        or None).
        """
        if not registration_id:
            raise GCMMissingRegistrationException("Missing registration_id")
        payload = self.construct_payload(
            registration_id, data, collapse_key,
            delay_while_idle, time_to_live, False, dry_run
        )
        attempt = 0
        backoff = self.BACKOFF_INITIAL_DELAY
        for attempt in range(retries):
            try:
                response = self.make_request(payload, is_json=False)
                return self.handle_plaintext_response(response)
            except GCMUnavailableException:
                # Exponential backoff with jitter before the next attempt.
                sleep_time = backoff / 2 + random.randrange(backoff)
                time.sleep(float(sleep_time) / 1000)
                if 2 * backoff < self.MAX_BACKOFF_DELAY:
                    backoff *= 2
        raise IOError("Could not make request after %d attempts" % attempt)
    def json_request(self, registration_ids, data=None, collapse_key=None,
                     delay_while_idle=False, time_to_live=None, retries=5, dry_run=False):
        """
        Makes a JSON request to GCM servers
        :param registration_ids: list of the registration ids
        :param data: dict mapping of key-value pairs of messages
        :return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
        :raises GCMMissingRegistrationException: if the list of registration_ids is empty
        :raises GCMTooManyRegIdsException: if the list of registration_ids exceeds 1000 items

        NOTE(review): if retries <= 0 the loop body never runs and the final
        `return info` raises NameError.
        """
        if not registration_ids:
            raise GCMMissingRegistrationException("Missing registration_ids")
        if len(registration_ids) > 1000:
            raise GCMTooManyRegIdsException("Exceded number of registration_ids")
        attempt = 0
        backoff = self.BACKOFF_INITIAL_DELAY
        for attempt in range(retries):
            payload = self.construct_payload(
                registration_ids, data, collapse_key,
                delay_while_idle, time_to_live, True, dry_run
            )
            response = self.make_request(payload, is_json=True)
            info = self.handle_json_response(response, registration_ids)
            # Retry only the ids that failed with the retryable 'Unavailable'.
            unsent_reg_ids = self.extract_unsent_reg_ids(info)
            if unsent_reg_ids:
                registration_ids = unsent_reg_ids
                sleep_time = backoff / 2 + random.randrange(backoff)
                time.sleep(float(sleep_time) / 1000)
                if 2 * backoff < self.MAX_BACKOFF_DELAY:
                    backoff *= 2
            else:
                break
        return info
| {
"repo_name": "itielshwartz/BackendApi",
"path": "gcm.py",
"copies": "1",
"size": "10846",
"license": "apache-2.0",
"hash": -5261743307571401000,
"line_mean": 33.4317460317,
"line_max": 125,
"alpha_frac": 0.6177392587,
"autogenerated": false,
"ratio": 4.281879194630872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399618453330872,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben'
import logging
from gcm import *
# NOTE(review): hardcoded GCM API key checked into source control -- this is
# a credential leak; move it to configuration/environment and revoke the key.
API_KEY = 'AIzaSyB_YcUTTKUI2x51g9HiqApT1qpaQ5nWR3o'
URL = 'https://android.googleapis.com/gcm/send'
#basic util for gcm
def sendMessageToServer(registration_ids,messageType, data=None):
    """Push a GCM message of `messageType` to `registration_ids` and log it.

    On HTTP failure the payload and status code are logged; on success the
    raw GCM response body is logged.  Nothing is returned either way.
    NOTE(review): urllib2/json come from `from gcm import *` (Python 2).
    """
    headers = {'Authorization': 'key=%s' % API_KEY}
    headers['Content-Type'] = 'application/json'
    payload = dict()
    payload['messageType'] = messageType
    payload['registration_ids'] =registration_ids
    if data:
        payload['data'] = data
    try:
        req = urllib2.Request(URL, json.dumps(payload), headers)
        response = urllib2.urlopen(req)
    except urllib2.HTTPError as err:
        # Log the failed payload and the HTTP status code for debugging.
        logging.info(payload)
        logging.info(err.code)
    else:
        logging.info(response.read())
def sendMessageToClients(messageType, registration_ids,is_json=True,data = None):
    """Push a GCM message of `messageType` to client `registration_ids`.

    Logs the raw GCM response body; returns nothing.
    NOTE(review): `is_json` is accepted but never used, and unlike
    sendMessageToServer there is no HTTPError handling, so HTTP failures
    propagate to the caller.
    """
    headers = {'Authorization': 'key=%s' % API_KEY}
    headers['Content-Type'] = 'application/json'
    payload = dict()
    payload['messageType'] = messageType
    payload['registration_ids'] = registration_ids
    if data:
        payload['data'] = data
    req = urllib2.Request(URL, json.dumps(payload), headers)
    response = urllib2.urlopen(req)
    output = response.read()
    logging.info(output)
| {
"repo_name": "itielshwartz/BackendApi",
"path": "Utilities.py",
"copies": "1",
"size": "1258",
"license": "apache-2.0",
"hash": 7210750328800122000,
"line_mean": 26.347826087,
"line_max": 81,
"alpha_frac": 0.666136725,
"autogenerated": false,
"ratio": 3.465564738292011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631701463292011,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben'
"""
Polynomial manipulations.
Polynomials are represented as lists of coefficients, 0 order first.
"""
def evaluate(x, poly):
    """
    Evaluate the polynomial at the value x.

    Uses Horner's rule iteratively, walking the coefficients from the
    highest order down to the lowest; this is equivalent to the recursive
    definition poly[0] + x * evaluate(x, poly[1:]).
    :param x: Argument at which to evaluate
    :param poly: The polynomial coefficients, lowest order to highest
    :return: The result of evaluating the polynomial at x
    """
    result = 0
    for coefficient in reversed(poly):
        result = result * x + coefficient
    return result
def bisection(a, b, poly, tolerance):
    """
    Locate an approximate root of poly on [a, b] by interval halving.

    Requires poly(a) <= 0 and poly(b) >= 0.  The interval is repeatedly
    halved, keeping the sign invariant, until its width is within
    tolerance; the midpoint is then returned.
    :param a: poly(a) <= 0
    :param b: poly(b) >= 0
    :param poly: polynomial coefficients, low order first
    :param tolerance: greater than 0
    :return: an approximate root of the polynomial
    """
    if evaluate(a, poly) > 0:
        raise Exception("poly(a) must be <= 0")
    if evaluate(b, poly) < 0:
        raise Exception("poly(b) must be >= 0")
    # Iterative halving; traces identically to the recursive formulation.
    while True:
        mid = (a + b) / 2
        if abs(b - a) <= tolerance:
            return mid
        if evaluate(mid, poly) <= 0:
            a = mid
        else:
            b = mid
| {
"repo_name": "dhruvaldarji/InternetProgramming",
"path": "Assignment_7/polynomials.py",
"copies": "1",
"size": "1365",
"license": "mit",
"hash": -6992419932681066000,
"line_mean": 27.4375,
"line_max": 82,
"alpha_frac": 0.5926739927,
"autogenerated": false,
"ratio": 3.5454545454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9623425873998535,
"avg_score": 0.0029405328312021423,
"num_lines": 48
} |
__author__ = 'Ben'
"""
Polynomial manipulations.
Polynomials are represented as lists of coefficients, 0 order first.
"""
def evaluate(x, poly):
    """
    Evaluate the polynomial at the value x.

    Uses Horner's rule iteratively, walking the coefficients from the
    highest order down to the lowest; this is equivalent to the recursive
    definition poly[0] + x * evaluate(x, poly[1:]).
    :param x: Argument at which to evaluate
    :param poly: The polynomial coefficients, lowest order to highest
    :return: The result of evaluating the polynomial at x
    """
    result = 0
    for coefficient in reversed(poly):
        result = result * x + coefficient
    return result
def bisection(a, b, poly, tolerance):
    """
    Locate an approximate root of poly on [a, b] by interval halving.

    Requires poly(a) <= 0 and poly(b) >= 0.  The interval is repeatedly
    halved, keeping the sign invariant, until its width is within
    tolerance; the midpoint is then returned.
    :param a: poly(a) <= 0
    :param b: poly(b) >= 0
    :param poly: polynomial coefficients, low order first
    :param tolerance: greater than 0
    :return: an approximate root of the polynomial
    """
    if evaluate(a, poly) > 0:
        raise Exception("poly(a) must be <= 0")
    if evaluate(b, poly) < 0:
        raise Exception("poly(b) must be >= 0")
    # Iterative halving; traces identically to the recursive formulation.
    while True:
        mid = (a + b) / 2
        if abs(b - a) <= tolerance:
            return mid
        if evaluate(mid, poly) <= 0:
            a = mid
        else:
            b = mid
| {
"repo_name": "dhruvaldarji/InternetProgramming",
"path": "Assignment_7/Assignment7/polynomials.py",
"copies": "1",
"size": "1366",
"license": "mit",
"hash": 2906812206988880000,
"line_mean": 26.8775510204,
"line_max": 82,
"alpha_frac": 0.5922401171,
"autogenerated": false,
"ratio": 3.548051948051948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9630326012598056,
"avg_score": 0.001993210510778319,
"num_lines": 49
} |
"""
Build arguments parser for the scripts (mapper, reducers and command builder).
"""
import argparse
def get_map_argparser():
    """Build the command line parser for a mapper script.

    The resulting parser is compatible with the commands builder workflows.
    """
    parser = argparse.ArgumentParser()
    # Positional path arguments, in the order the workflow builder emits them.
    for arg_name, arg_help in (
            ("crossval", "JSON file to configure cross validation scheme"),
            ("method", "JSON file to configure the method"),
            ("dataset", "Joblib file with data and folds"),
            ("out", "Filename to output the results")):
        parser.add_argument(arg_name, help=arg_help)
    # Fold identifiers: the outer id is required, the inner one optional.
    parser.add_argument("outer", type=int, help="Outer CV Id")
    parser.add_argument("--inner", type=int, help="Inner CV Id")
    # verbose mode
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    return parser
def get_ired_argparser():
    """Build the command line parser for an inner reducer script.

    The resulting parser is compatible with the commands builder workflows.
    """
    parser = argparse.ArgumentParser()
    # Positional path arguments, in the order the workflow builder emits them.
    # Note: the "in" argument must be read back with getattr(args, "in").
    for arg_name, arg_help in (
            ("crossval", "JSON file to configure cross validation scheme"),
            ("method", "JSON file to configure the method"),
            ("dataset", "Joblib file with data and folds"),
            ("out", "Filename to output the results"),
            ("in", "Filename template for input files")):
        parser.add_argument(arg_name, help=arg_help)
    parser.add_argument("outer", type=int, help="Outer CV Id")
    # verbose mode
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    return parser
def get_ored_argparser():
    """Build the command line parser for an outer reducer script.

    The resulting parser is compatible with the commands builder workflows.
    Note: the "in" argument must be read back with getattr(args, "in").
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("out", help="Filename to output the results")
    parser.add_argument("in", help="Filename template for input files")
    # verbose mode
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose mode")
    return parser
| {
"repo_name": "BenoitDamota/mempamal",
"path": "mempamal/arguments.py",
"copies": "1",
"size": "2611",
"license": "bsd-3-clause",
"hash": 886761547384171100,
"line_mean": 32.4743589744,
"line_max": 78,
"alpha_frac": 0.5928762926,
"autogenerated": false,
"ratio": 4.712996389891697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
def dynamic_import(str_import):
    """Take a string representing a python function or class and import it.

    Parameters:
    -----------
    str_import : str
        the string representing the import (e.g. "sklearn.metrics.f1_score")
    """
    # Split "pkg.module.attr" into its module path and terminal attribute.
    module_path, attr_name = str_import.rsplit('.', 1)
    module = __import__(module_path, fromlist=[str(attr_name)])
    return getattr(module, attr_name)
def _get_step(step):
    """Import class of a given step and return a sklearn.pipeline.Pipeline step
    """
    step_name = step[0]
    import_path = step[1][0]
    init_kwargs = step[1][1]
    # Resolve the dotted class path into the actual class object.
    return (step_name, (dynamic_import(import_path), init_kwargs))
def construct_pipeline(cfg):
    """Construct the pipeline steps for sklearn.pipeline.Pipeline.

    Construct the pipeline steps for sklearn.pipeline.Pipeline and
    return the steps and the parameter to optimize (e.g. "logit__C" for
    the parameter "C" of the step named "logit").

    Parameters:
    -----------
    cfg : dict,
        method configuration describing the steps of a pipelined estimator
    """
    steps = cfg["steps"]
    # "est_param" is optional; dict.get replaces the try/except KeyError of
    # the original and yields None when the key is absent (same behavior).
    est_param = cfg.get("est_param")
    # Resolve each step's dotted class path into a Pipeline step.
    pipe = [_get_step(step) for step in steps]
    return {'steps': pipe}, est_param
def get_score_func(cfg, cv="crossval_score"):
    """Import score function and kwargs from the CV configuration.

    Parameters
    ----------
    cfg : dict,
        configuration dict for cross-validation.
    cv : str, optional(default="crossval_score")
        section of the configuration dict to search for a score function.
    """
    # funcMetric is a pair: [dotted import path, kwargs dict].
    func_spec = cfg[cv]["funcMetric"]
    return dynamic_import(func_spec[0]), func_spec[1]
| {
"repo_name": "BenoitDamota/mempamal",
"path": "mempamal/dynamic.py",
"copies": "1",
"size": "1796",
"license": "bsd-3-clause",
"hash": -958779197670906000,
"line_mean": 27.0625,
"line_max": 79,
"alpha_frac": 0.631403118,
"autogenerated": false,
"ratio": 3.8376068376068377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969009955606838,
"avg_score": 0,
"num_lines": 64
} |
"""
Simple GridSearch for a pipelined estimator (without warm restart).
"""
import numpy as np
class GenericGridSearch(object):
    """Simple GridSearch for a pipelined estimator.

    Fits one fresh pipeline per grid point and scores each prediction;
    no warm restart between grid points.
    Note: see sklearn.pipeline.Pipeline
    NOTE: Python 2 code (dict.iteritems in fit).
    """
    def __init__(self, est, params, score_func,
                 est_kwargs=None,
                 score_kwargs=None):
        """
        Parameters
        ----------
        est : estimator,
            Estimator class/factory, called with the list of pipeline steps
            (e.g. sklearn.pipeline.Pipeline).
        params : array, shape(n_parameters)
            Grid of parameter dicts (or None for a single unparameterized fit)
        score_func : func,
            scoring function (e.g. sklearn.metrics.f1_score)
        est_kwargs : dict, optional (default=None)
            keywords arguments for the estimator; must carry a 'steps' list
        score_kwargs : dict, optional (default=None)
            keywords arguments for the scoring function
        """
        # A None grid degenerates to a single fit with default parameters.
        if params is None:
            params = [None]
        self.params = params
        self.est = est
        self.res = {}  # fitted estimator per grid point, keyed by tuple(p.values())
        self.score_func = score_func
        if score_kwargs is not None:
            self.score_kwargs = score_kwargs
        else:
            self.score_kwargs = {}
        if est_kwargs is not None:
            self.est_kwargs = est_kwargs
        else:
            self.est_kwargs = {}
    def fit(self, X, y):
        """Fit the estimator on each parameter of the grid.

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            features array
        y : array, shape (n_samples, n_targets)
            targets array
        """
        for p in self.params:
            param_kwargs = p if p is not None else {}
            # Key fitted models by the tuple of parameter values; predict()
            # rebuilds the same key, so ordering stays consistent per dict.
            p_ = tuple(p.values()) if p is not None else "None"
            steps = self.est_kwargs['steps']
            # Instantiate a fresh copy of every step: each step spec is
            # (name, (class, kwargs)); kwargs are set as attributes.
            pipe_steps = []
            for s in steps:
                s_i = (s[1][0])()
                for k, v in (s[1][1]).iteritems():
                    s_i.__setattr__(k, v)
                pipe_steps.append((s[0], s_i))
            self.res[p_] = self.est(pipe_steps)
            self.res[p_].set_params(**param_kwargs)
            self.res[p_].fit(X, y)
    def predict(self, X):
        """Predict the targets from X for each parameter of the grid

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            features array

        Returns an array stacking one prediction row per grid point, in
        self.params order.
        """
        y_pred = []
        for p in self.params:
            p_ = tuple(p.values()) if p is not None else "None"
            y_pred.append(self.res[p_].predict(X))
        return np.asarray(y_pred)
    def score(self, y_test, y_pred):
        """Apply the scoring function for each prediction

        Parameters
        ----------
        y_test : array, shape (n_samples, n_targets)
            Real targets values
        y_pred : array, shape (n_parameters, n_samples, n_targets)
            Targets prediction to score.

        Returns the transposed array of per-grid-point scores.
        """
        scores = []
        for yp in y_pred:
            scores.append(self.score_func(y_test, yp, **self.score_kwargs))
        return np.asarray(scores).T
| {
"repo_name": "BenoitDamota/mempamal",
"path": "mempamal/gridsearch.py",
"copies": "1",
"size": "3108",
"license": "bsd-3-clause",
"hash": -4484260092727330300,
"line_mean": 29.7722772277,
"line_max": 75,
"alpha_frac": 0.5215572716,
"autogenerated": false,
"ratio": 4.025906735751295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 101
} |
"""
Workflow generation.
"""
import os.path as path
import numpy as np
def _create_generic(folds_dic, cv_cfg, method_cfg, in_out_dir,
                    mapper="./scripts/mapper.py",
                    i_red="./scripts/inner_reducer.py",
                    o_red="./scripts/outer_reducer.py",
                    verbose=False):
    """Create a workflow (list of commands and dependancies).

    Builds one mapper command per (outer, inner) fold, one inner-reduce
    command per outer fold (when model selection is enabled), and a single
    final outer-reduce command that everything feeds into.
    Note: internal function (see create_wf)
    NOTE: Python 2 code (xrange).
    """
    # construct paths
    cv = path.join(in_out_dir, cv_cfg["src"])
    folds = path.join(in_out_dir, folds_dic["src"])
    method = path.join(in_out_dir, method_cfg["src"])
    # results filenames ({outer}/{inner} placeholders filled per fold)
    m_out = path.join(in_out_dir, "map_res_{outer}_{inner}.pkl")
    ri_out = path.join(in_out_dir, "red_res_{outer}.pkl")
    ro_out = path.join(in_out_dir, "final_res.pkl")
    # number of folds
    n_o = folds_dic["n_outer"]
    n_i = folds_dic["n_inner"] if cv_cfg["modelSelection"] else None
    # a workflow is a collection of commands and dependancies
    all_cmd = {}
    # dependancies are tuples (cmd_nameA, cmd_nameB) stored in a dict
    # with cmd_B that waits for cmd_A completion
    dependancies = []
    # Job-name templates; the "|-" prefixes encode depth for readable sorting.
    name_ired = "|--- Inner reduce outer={}"
    name_ored = "|- Final reduce"
    for i in xrange(n_o):
        cmd_mapper = ["python", mapper, cv, method, folds]
        name_cur_ired = name_ired.format(i)
        if cv_cfg["modelSelection"]:
            # One mapper per inner fold, all feeding this outer's inner reduce.
            for k in xrange(n_i):
                cur_cmd = (cmd_mapper +
                           [m_out.format(inner=k, outer=i), repr(i),
                            "--inner", repr(k)])
                name = "|----- Map outer={} inner={}".format(i, k)
                all_cmd[name] = cur_cmd
                dependancies.append((name, name_cur_ired))
                if verbose:
                    print(" ".join(cur_cmd))
            # Inner reduce: aggregates the inner-fold map results via the
            # '{inner}' filename template (left unformatted on purpose).
            cmd_i_red = ["python", i_red, cv, method, folds,
                         ri_out.format(outer=i),
                         m_out.format(outer=i, inner="{inner}"),
                         repr(i)]
            all_cmd[name_cur_ired] = cmd_i_red
            dependancies.append((name_cur_ired, name_ored))
            if verbose:
                print("\n{}\n".format(" ".join(cmd_i_red)))
        else:
            # No model selection: one mapper per outer fold feeds the final
            # reduce directly.
            cur_cmd = cmd_mapper + [ri_out.format(outer=i), repr(i)]
            name = "|--- Map outer={}".format(i)
            all_cmd[name] = cur_cmd
            dependancies.append((name, name_ored))
            if verbose:
                print(" ".join(cur_cmd))
    # Final reduce over all per-outer results ('{outer}' template unformatted).
    cmd_o_red = ["python", o_red, ro_out, ri_out]
    all_cmd[name_ored] = cmd_o_red
    if verbose:
        print(" ".join(cmd_o_red))
    return all_cmd, dependancies
def create_wf(folds_dic, cv_cfg, method_cfg, in_out_dir, verbose=False):
    """Create a workflow (list of commands and dependancies).

    The returned command list is a dictionnary mapping a job name to a
    command (a list of strings).  Dependancies are a list of
    (cmd_nameA, cmd_nameB) tuples where cmd_B waits for cmd_A completion.

    Parameters:
    -----------
    folds_dic : dict,
        The dictionnary with all the folds.
    cv_cfg : dict,
        Configuration for cross-validation.
    method_cfg : dict,
        Configuration of the method (provides the mapper/reducer scripts).
    in_out_dir : str,
        Directory holding the configuration files and results.
    verbose : boolean, optional (default=False)
        verbose mode.
    """
    # Delegate to the generic builder, overriding the default script
    # locations with the ones declared by the method configuration.
    return _create_generic(folds_dic, cv_cfg, method_cfg, in_out_dir,
                           mapper=method_cfg["mapper"],
                           i_red=method_cfg["inner_reducer"],
                           o_red=method_cfg["outer_reducer"],
                           verbose=verbose)
def save_wf(wf, output_file, mode="soma-workflow"):
    """Save the workflow in a file.

    Support simple JSON commands list (cmd-list) or soma-workflow.
    NOTE: mutates wf[0] in place (commands are replaced by Job objects or
    joined strings).  Python 2 code (dict.iteritems).

    Parameters:
    ----------
    wf : tuple (cmd-dict, dependancies),
        Workflow to save.
    output_file : str,
        filename for the workflow.
    mode : str in ["soma-workflow", "cmd-list"],
        optional (default="soma-workflow")
        format to save the workflow.
    """
    cmd = wf[0]
    dep_orig = wf[1]
    if mode == "soma-workflow":
        from soma_workflow.client import Job, Workflow, Helper
        # Wrap each command in a soma-workflow Job, keyed by its job name.
        for k, v in cmd.iteritems():
            cmd[k] = Job(command=v, name=k)
        dep = [((cmd[a], cmd[b])) for a, b in dep_orig]
        # Sort jobs by name; relies on keys()/values() sharing the same
        # iteration order (safe here since cmd is not mutated in between).
        jobs = np.asarray(cmd.values())[np.argsort(cmd.keys())]
        workflow = Workflow(jobs=jobs.tolist(),
                            dependencies=dep)
        Helper.serialize(output_file, workflow)
        return workflow
    elif mode == "cmd-list":
        import json
        # Serialize each command as a single shell-like string.
        for k, v in cmd.iteritems():
            cmd[k] = " ".join(v)
        with open(output_file, 'w') as fd:
            json.dump(dict(cmd=cmd, dep=dep_orig), fd, indent=True)
        return cmd
    else:
        raise TypeError("Invalid workflow mode \'{}\'".format(mode))
| {
"repo_name": "BenoitDamota/mempamal",
"path": "mempamal/workflow.py",
"copies": "1",
"size": "5092",
"license": "bsd-3-clause",
"hash": -7485452908564282000,
"line_mean": 35.1134751773,
"line_max": 72,
"alpha_frac": 0.5542026709,
"autogenerated": false,
"ratio": 3.471029311520109,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9525231982420109,
"avg_score": 0,
"num_lines": 141
} |
__author__ = 'Benoit'
#Computer attempts to guess a number you choose between 1 and 100 in 10 tries
answer = 'yes'
print ("Please, think of a number between 1 and 100. I am about to try to guess it in 10 tries.")
while answer == "yes":
    NumOfTry = 10
    NumToGuess = 50  # first guess: middle of the 1-100 range
    LimitLow = 1
    LimitHigh = 100
    while NumOfTry != 0:
        try:
            print ("I try: ",NumToGuess)
            print ("Please type: 1 for my try is too high")
            print ("             2 for my try is too low")
            print ("             3 I guessed your number")
            HumanAnswer = int (input("So did I guess right?"))
        except ValueError:
            # Non-numeric input: re-prompt.  The original bare
            # `except: break` swallowed every exception (including Ctrl+C)
            # and silently restarted the whole game.
            print ("Please enter a valid answer. 1, 2 and 3 are the valid choice")
            continue
        # BUG FIX: the original test `1 < HumanAnswer > 3` means
        # `1 < HumanAnswer and HumanAnswer > 3`, so 0 and negative answers
        # were never rejected.  It also *incremented* NumOfTry, granting an
        # extra attempt on invalid input; an invalid answer now neither
        # consumes nor grants an attempt.
        if HumanAnswer < 1 or HumanAnswer > 3:
            print ("Please enter a valid answer. 1, 2 and 3 are the valid choice")
        elif HumanAnswer == 1:
            # Guess was too high: narrow the upper bound and bisect.
            LimitHigh = NumToGuess
            print ("Hmm, so your number is between ",LimitLow, "and ", LimitHigh)
            NumOfTry = NumOfTry - 1
            print (NumOfTry, "attempts left")
            NumToGuess = int (((LimitHigh - LimitLow)/2) + LimitLow)
            if NumToGuess <= LimitLow:
                NumToGuess = NumToGuess + 1
            if LimitHigh - LimitLow == 2:
                NumToGuess = LimitLow + 1
        elif HumanAnswer == 2:
            # Guess was too low: narrow the lower bound and bisect.
            LimitLow = NumToGuess
            print ("Hmm, so your number is between ",LimitLow, "and ", LimitHigh)
            NumOfTry = NumOfTry - 1
            print (NumOfTry, "attempts left")
            NumToGuess = int (((LimitHigh - LimitLow)/2) + LimitLow)
            if NumToGuess <= LimitLow:
                NumToGuess = NumToGuess + 1
            if LimitHigh - LimitLow == 2:
                NumToGuess = LimitLow + 1
        elif HumanAnswer == 3:
            print ("Woo hoo! I won")
            NumOfTry = 0
    else:
        # Runs when the round ends normally (win or out of attempts).
        answer = input ('Do you want to play again? (yes/no)')
else:
    print ("Thank you for playing. Goodbye")
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578963_Guess_number_2__computer_attempts_guess_your/recipe-578963.py",
"copies": "1",
"size": "2068",
"license": "mit",
"hash": -5295704188604435000,
"line_mean": 41.2040816327,
"line_max": 97,
"alpha_frac": 0.5101547389,
"autogenerated": false,
"ratio": 4.160965794768612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171120533668612,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Benoit'
# guess a number between 1 and 100 in ten tries
import random
answer = 'yes'
while answer == "yes":
    NumToGuess = random.randint(1, 100)
    NumOfTry = 10
    print ("Try to guess a number between 1 and 100 in 10 tries")
    while NumOfTry != 0:
        try:
            x = int (input ("Please enter a number between 1 and 100"))
        except ValueError:
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only bad numeric input should
            # be trapped.  (Also fixed the "an 44" typo in the message.)
            print ("Please enter a valid number. For example 1, 5 and 44 are valid numbers to input.")
            continue
        if x > NumToGuess:
            print (x,"is too high")
            NumOfTry = NumOfTry - 1
            print (NumOfTry, "attempt(s) left")
            print ("")
        elif x < NumToGuess:
            print (x,"is too low")
            NumOfTry = NumOfTry - 1
            print (NumOfTry, "attempt(s) left")
            print ("")
        elif x == NumToGuess:
            print ("You Win, Congratulations!!!")
            NumOfTry = 0
    else:
        # Runs after every round (win or lose): reveal the number and ask
        # whether to play again.
        print ("The number to guess was: ", NumToGuess)
        answer = input ('Do you want to play again? (yes/no)')
else:
    print ("Thank you for playing. Goodbye")
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578962_Guess_a_number/recipe-578962.py",
"copies": "1",
"size": "1152",
"license": "mit",
"hash": 9121099768657498000,
"line_mean": 36.1612903226,
"line_max": 101,
"alpha_frac": 0.5243055556,
"autogenerated": false,
"ratio": 3.972413793103448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9930053913323591,
"avg_score": 0.013333087075971323,
"num_lines": 31
} |
__author__ = 'Benqing'
# Sample ratings data (from "A Programmer's Guide to Data Mining"):
# maps user name -> {band name -> rating}.  Not every user has rated every
# band, so the distance functions below compare shared keys only.
users = {
    "Angelica": {"Blues Traveler": 3.5, "Broken Bells": 2.0, "Norah Jones": 4.5, "Phoenix": 5.0,
                 "Slightly Stoopid": 1.5, "The Strokes": 2.5, "Vampire Weekend": 2.0},
    "Bill": {"Blues Traveler": 2.0, "Broken Bells": 3.5, "Deadmau5": 4.0, "Phoenix": 2.0, "Slightly Stoopid": 3.5,
             "Vampire Weekend": 3.0},
    "Chan": {"Blues Traveler": 5.0, "Broken Bells": 1.0, "Deadmau5": 1.0, "Norah Jones": 3.0, "Phoenix": 5,
             "Slightly Stoopid": 1.0},
    "Dan": {"Blues Traveler": 3.0, "Broken Bells": 4.0, "Deadmau5": 4.5, "Phoenix": 3.0, "Slightly Stoopid": 4.5,
            "The Strokes": 4.0, "Vampire Weekend": 2.0},
    "Hailey": {"Broken Bells": 4.0, "Deadmau5": 1.0, "Norah Jones": 4.0, "The Strokes": 4.0, "Vampire Weekend": 1.0},
    "Jordyn": {"Broken Bells": 4.5, "Deadmau5": 4.0, "Norah Jones": 5.0, "Phoenix": 5.0, "Slightly Stoopid": 4.5,
               "The Strokes": 4.0, "Vampire Weekend": 4.0},
    "Sam": {"Blues Traveler": 5.0, "Broken Bells": 2.0, "Norah Jones": 3.0, "Phoenix": 5.0, "Slightly Stoopid": 4.0,
            "The Strokes": 5.0},
    "Veronica": {"Blues Traveler": 3.0, "Norah Jones": 5.0, "Phoenix": 4.0, "Slightly Stoopid": 2.5, "The Strokes": 3.0}
}
# Compute the Euclidean Distance between Hailey and Veronica
import math
def minkowski_dist(user_ratings1, user_ratings2, r):
    """Minkowski distance of order r between two users' rating dicts.

    Only items rated by both users contribute.  As elsewhere in this
    module, non-dict arguments abort the process via exit().
    """
    if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
        exit()
    total = sum((abs(user_ratings1[item] - user_ratings2[item]) ** r
                 for item in user_ratings1 if item in user_ratings2), 0.0)
    return math.pow(total, 1.0 / r)
def euclidean_dist(user_ratings1, user_ratings2):
    """Euclidean (L2) distance between two users' rating dicts.

    Only items rated by both users contribute.  As elsewhere in this
    module, non-dict arguments abort the process via exit().
    """
    if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
        exit()
    squared_total = 0.0
    for item, rating in user_ratings1.items():
        if item in user_ratings2:
            delta = rating - user_ratings2[item]
            squared_total += delta * delta
    return math.sqrt(squared_total)
def manhattan_dist(user_ratings1, user_ratings2):
    """Manhattan (L1) distance between two users' rating dicts.

    Only items rated by both users contribute.  As elsewhere in this
    module, non-dict arguments abort the process via exit().
    """
    if not (isinstance(user_ratings1, dict) and isinstance(user_ratings2, dict)):
        exit()
    return sum((abs(score - user_ratings2[item])
                for item, score in user_ratings1.items()
                if item in user_ratings2), 0.0)
def compute_nearest_neighbor(username, users_in):
    """Return a list of (distance, user) pairs sorted closest-first.

    Distance is the order-2 Minkowski (Euclidean) distance between each
    other user's ratings and username's ratings.
    """
    pairs = [(minkowski_dist(users_in[other], users_in[username], 2), other)
             for other in users_in if other != username]
    pairs.sort()
    return pairs
def pearson(user_ratings1, user_ratings2):
    """An approximation of the Pearson correlation of two rating dicts.

    Only items rated by both users are used.

    Parameters:
        user_ratings1, user_ratings2: dict mapping item name -> numeric rating.

    Returns:
        float in [-1, 1].  Returns 0.0 when the users share no items or
        when either user's shared ratings have zero variance -- the
        correlation is undefined in both cases, and the original code
        raised ZeroDivisionError instead.
    """
    n = 0  # number of items rated by both users
    sum_of_products = 0.0
    sum_of_user1 = 0.0
    sum_of_user2 = 0.0
    sum_of_user1_sqr = 0.0
    sum_of_user2_sqr = 0.0
    for k in user_ratings1:
        if k in user_ratings2:
            r1 = user_ratings1[k]
            r2 = user_ratings2[k]
            sum_of_products += r1 * r2
            sum_of_user1 += r1
            sum_of_user2 += r2
            sum_of_user1_sqr += r1 * r1
            sum_of_user2_sqr += r2 * r2
            n += 1
    if n == 0:
        # No overlap: correlation is undefined; report no correlation.
        return 0.0
    # Clamp at 0.0: floating-point residue can leave a tiny negative value
    # where the true variance is zero, which would crash math.sqrt.
    var1 = max(sum_of_user1_sqr - sum_of_user1 * sum_of_user1 / n, 0.0)
    var2 = max(sum_of_user2_sqr - sum_of_user2 * sum_of_user2 / n, 0.0)
    denominator = math.sqrt(var1) * math.sqrt(var2)
    if denominator == 0:
        # Zero variance on either side: correlation undefined.
        return 0.0
    return (sum_of_products - sum_of_user1 * sum_of_user2 / n) / denominator
if __name__ == '__main__':
    # Ad-hoc smoke test of the distance helpers above.
    # NOTE: Python 2 print statements; the "tesing..." typo is part of the
    # runtime output and is deliberately left untouched here.
    print 'tesing...'
    # my_dict1 = {'a': 1, 'b': 2}
    # print my_dict1
    # for k in my_dict1:
    # print k
    # print type(my_dict1)
    # print type(my_dict1) == dict
    # print euclidean_dist(users['Hailey'], users['Veronica'])
    # print euclidean_dist(users['Hailey'], users['Jordyn'])
    # print manhattan_dist(users['Hailey'], users['Veronica'])
    # print manhattan_dist(users['Hailey'], users['Jordyn'])
    # print minkowski_dist(users['Hailey'], users['Veronica'], 4)
    # print compute_nearest_neighbor('Hailey', users)
    # print users['Hailey'].values()
    # print type(users['Hailey'].values())
    print pearson(users['Angelica'], users['Bill'])
    print pearson(users['Angelica'], users['Hailey'])
    print pearson(users['Angelica'], users['Jordyn'])
"repo_name": "timmyshen/Guide_To_Data_Mining",
"path": "Chapter2/SharpenYourPencil/distance.py",
"copies": "1",
"size": "4916",
"license": "mit",
"hash": -1601900371452491800,
"line_mean": 40.6694915254,
"line_max": 120,
"alpha_frac": 0.6061838893,
"autogenerated": false,
"ratio": 2.7556053811659194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3861789270465919,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben, Ryan'
# -*- coding: utf-8 -*-
import numpy as np
import time
import math
from scipy import stats
from matplotlib import pylab as plt
import stockrollover
def time_stamp(t):
    """Prints the difference between the parameter and current time.

    This is useful for timing program execution if timestamps are
    periodically saved.

    Parameters:
        t: float, a reference time as returned by time.time()
          (the original docstring mislabelled this parameter as `a`)
    Returns:
        current time: float
    """
    # Python 2 print statement.
    print "%(time).4f seconds to execute \n" % {"time": time.time() - t}
    return time.time()
# Survival shape parameters for weibull decay functions. Methodology adapted from
# https://www.aceee.org/files/proceedings/2010/data/papers/1977.pdf
np.set_printoptions(precision=2)
# Vintage years 1990..2050 inclusive.
# NOTE(review): np.int / np.float aliases were removed in numpy >= 1.24;
# this script assumes an older numpy.
vintage_start, vintage_stop, vintage_step = 1990, 2050, 1
vintages = np.arange(vintage_start, vintage_stop + 1, vintage_step, dtype=np.int)
year_start, year_stop, year_step = 1990, 2050, 1
years = np.arange(year_start, year_stop + 1, year_step, dtype=np.int)
starting_stock = 5000
# annual_new = np.array(100*(1+np.arange(len(vintages))*.5), dtype=np.int)
# New additions per vintage grow linearly; the initial stock is folded into
# the first vintage below.
annual_new = np.array(1000 * (1 + np.arange(len(vintages)) * .5), dtype=np.float)
# annual_new = np.zeros(len(vintages))
annual_new[0] += starting_stock
weibull_shape = 2.34
weibull_meanlife = 20
t = time.time()
# Run the rollover 10 times, timing each pass via time_stamp above.
for n in range(10):
    stock = stockrollover.stockrollover(years, vintages, annual_new, weibull_shape, weibull_meanlife)
    t = time_stamp(t)
# test = cdf[-1::-1]*stock[-1,-1]
# vs
# stock[-1]
# Add in existing stock
#
| {
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"path": "energyPATHWAYS/_obsolete/tests/test_stockrollover.py",
"copies": "1",
"size": "1520",
"license": "mit",
"hash": 2980738695666142000,
"line_mean": 23.5161290323,
"line_max": 101,
"alpha_frac": 0.6881578947,
"autogenerated": false,
"ratio": 2.9174664107485606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.410562430544856,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bensoer'
from crypto.algorithms.algorithminterface import AlgorithmInterface
from tools.argparcer import ArgParcer
'''
CaesarCipher is an Algorithm using the CaesarCipher encryption technique.
Letters are replaced with equivalent letters in the alphabet shifted by a certain offset. For example A is replaced with D
with an offset of 3. Decryption simply reverses this process on the received string.
'''
class CaesarCipher(AlgorithmInterface):
    """Classic Caesar shift cipher.

    Every character's code point is moved forward by a fixed offset on
    encryption and moved back by the same offset on decryption.
    """

    def __init__(self, arguments):
        # Honour an optional "-o" argument as the shift amount; default 3.
        offset = ArgParcer.getValue(arguments, "-o")
        self.offset = 3 if offset == "" else int(offset)

    def encryptString(self, unencryptedMessage):
        '''
        Shift every character of the message forward by self.offset code
        points and return the resulting text encoded as bytes.

        :param unencryptedMessage: String - the unencrypted message
        :return: Bytes[] - the CaesarCipher encrypted message as bytes
        '''
        shifted = (chr(ord(character) + self.offset)
                   for character in unencryptedMessage)
        return ''.join(shifted).encode()

    def decryptString(self, encryptedMessage):
        '''
        Reverse of encryptString: decode the received bytes and shift every
        character back by self.offset code points.

        :param encryptedMessage: Bytes[] - the encrypted message as a byte array
        :return: String - the CaesarCipher unencrypted message
        '''
        text = encryptedMessage.decode()
        return ''.join(chr(ord(character) - self.offset) for character in text)
| {
"repo_name": "bensoer/pychat",
"path": "crypto/algorithms/caesarcipher.py",
"copies": "1",
"size": "2611",
"license": "mit",
"hash": -7700421505056912000,
"line_mean": 48.2641509434,
"line_max": 121,
"alpha_frac": 0.7127537342,
"autogenerated": false,
"ratio": 4.889513108614232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6102266842814232,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bensoer'
from socket import *
import threading
import sys
bufferSize = 2048          # max UDP datagram size read per recvfrom
serverName = 'localhost'
serverPort = 1400          # port this peer listens on
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind((serverName, serverPort))
canCheck = 1               # polled by the receive timer; 0 stops rescheduling
def checkForReceiving():
    # Blocking receive; re-schedules itself every second via threading.Timer.
    # NOTE(review): reads module-global canCheck without synchronization --
    # worst case one extra poll after shutdown.
    message, clientAddress = serverSocket.recvfrom(bufferSize)
    print("Response: " + message.decode())
    if canCheck == 1:
        t = threading.Timer(1, checkForReceiving)
        t.start()
clientPort = 1200          # port of the peer we send to
clientName = 'localhost'
#child = threading.Thread(target=timerManager)
#child.start()
t = threading.Timer(1, checkForReceiving)
t.start()
print("Server is ready to recieve")
while True:
    clientSocket = socket(AF_INET, SOCK_DGRAM)
    message = input()
    # NOTE(review): substring test -- any message *containing* "-1" quits;
    # presumably `message == '-1'` was intended.  Verify before changing.
    if '-1' in message:
        print("Quiting..Bye")
        canCheck = 0
        clientSocket.close()
        t.cancel()
        break
    else:
        clientSocket.sendto(message.encode(), (clientName, clientPort))
        #response, serverAddress = clientSocket.recvfrom(2048)
        clientSocket.close()
print("made it here")
canCheck = 0
t.cancel()
| {
"repo_name": "bensoer/pychat",
"path": "example/client.py",
"copies": "1",
"size": "1099",
"license": "mit",
"hash": 724141581404880900,
"line_mean": 19.7358490566,
"line_max": 71,
"alpha_frac": 0.6715195632,
"autogenerated": false,
"ratio": 3.4559748427672954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4627494405967295,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bensoer'
import select
from tools.commandtype import CommandType
class ListenerMultiProcess:
    """Child-process UDP listener that delegates all crypto operations to
    the parent process over a multiprocessing pipe.

    Identical control flow to ListenerProcess, except every call on the
    decryptor is replaced by a [CommandType, payload] request on
    __child_conn_pipe followed by a blocking recv() for the reply.
    """

    # NOTE(review): these are class-level attributes, so e.g. __connections
    # is one dict shared by all instances; fine for the intended
    # one-listener-per-process usage.
    __keepListening = True
    __connections = {}
    __firstMessageReceived = False
    __firstMessage = b''
    __rejectFirstMessageMatches = False
    __replySent = False

    def __init__(self, socket, decryptor, child_conn_pipe):
        '''
        constructor. This sets up all attributes needed for the listener process to function
        :param socket: Socket - the socket the listener process will listen on
        :param decryptor: Decryptor - the decryption instance the recieved message will be put through before
        printing to screen
        :param child_conn_pipe: Pipe endpoint used to forward crypto requests
        to the parent process (see CommandType)
        :return: void
        '''
        self.__socket = socket
        self.__socket.setblocking(0)  # non-blocking: required for epoll use
        self.__decryptor = decryptor
        self.__child_conn_pipe = child_conn_pipe
        'in python this is apparently the only way to remember what file descriptor belongs to what socket'
        fileno = self.__socket.fileno()
        self.__connections[fileno] = self.__socket
        print("Listener Process Initialized")

    def start(self):
        '''
        start is called by the parent process to initialize the child process task. This method is simply the kickoff
        point of the child process, and auto contains it within a managed object
        :return: void
        '''
        epoll = select.epoll()
        # edge-triggered read events plus error/hangup notifications
        epoll.register(self.__socket, (select.EPOLLIN | select.EPOLLERR | select.EPOLLHUP | select.EPOLLET))
        while self.__keepListening:
            events = epoll.poll()
            for fd, eventType in events:
                if eventType & (select.EPOLLHUP|select.EPOLLERR):
                    print("SystemError Recieving Message. Epoll Errored. Closing Descriptor")
                    epoll.unregister(fd)
                else:
                    socket = self.__connections[fd]
                    message, address = socket.recvfrom(2048)
                    encryptedMessage = message
                    #if we haven't recieved the first message yet then this one is it
                    if self.__firstMessageReceived == False:
                        self.__firstMessageReceived = True
                        # give the message first to the algorithm to determine whether we print it or not
                        #writeToConsole = self.__decryptor.giveFirstMessage(encryptedMessage)
                        self.__child_conn_pipe.send([CommandType.GiveFirstMessage, encryptedMessage])
                        writeToConsole = self.__child_conn_pipe.recv()[0]
                        # check if a reply has been sent
                        if self.__replySent == False:
                            #firstMessageToBeSent = self.__decryptor.getInitializationMessage()
                            self.__child_conn_pipe.send([CommandType.GetInitializationMessage])
                            firstMessageToBeSent = self.__child_conn_pipe.recv()[0]
                            # if first message does exist then send it
                            if len(firstMessageToBeSent) > 0:
                                socket.sendto(firstMessageToBeSent, address)
                                self.__replySent = True
                        # keep track of this first message
                        self.__firstMessage = encryptedMessage
                        if writeToConsole == True:
                            # if you wanted to write to console, then everything should be able to write to console
                            self.__rejectFirstMessageMatches = False
                            #if we are to write to console then decrypt the message using algorithms decryptor
                            #decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                            self.__child_conn_pipe.send([CommandType.Decrypt, encryptedMessage])
                            decryptedMessage = self.__child_conn_pipe.recv()[0]
                            #if the message is empty though don't bother printing it
                            if decryptedMessage != "":
                                print(decryptedMessage)
                        else:
                            # if you don't want firstMessage to write to console, assume u never want it to
                            self.__rejectFirstMessageMatches = True
                    else:
                        # drop anything that looks like the first message if rejection is set
                        if self.__rejectFirstMessageMatches:
                            if encryptedMessage != self.__firstMessage:
                                #decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                                self.__child_conn_pipe.send([CommandType.Decrypt, encryptedMessage])
                                decryptedMessage = self.__child_conn_pipe.recv()[0]
                                print(decryptedMessage)
                        else:
                            # decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                            self.__child_conn_pipe.send([CommandType.Decrypt, encryptedMessage])
                            decryptedMessage = self.__child_conn_pipe.recv()[0]
                            print(decryptedMessage)
        '''
        try:
            message, address = self.__socket.recvfrom(2048)
            encryptedMessage = message.decode()
            decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
            print(decryptedMessage)
        except Exception:
            pass
        '''
"repo_name": "bensoer/pychat",
"path": "client/listenermultiprocess.py",
"copies": "1",
"size": "5668",
"license": "mit",
"hash": -1604016339900006000,
"line_mean": 46.6386554622,
"line_max": 117,
"alpha_frac": 0.5462244178,
"autogenerated": false,
"ratio": 5.32206572769953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.636829014549953,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bensoer'
import select
class ListenerProcess:
    """UDP listener loop that decrypts and prints incoming messages.

    The first message received gets special treatment: it is handed to the
    decryptor (which may be a key-exchange step), optionally answered with
    an initialization message, and optionally suppressed from the console.
    """

    # NOTE(review): class-level attributes -- __connections is one dict
    # shared by all instances; fine for the intended single-listener usage.
    __keepListening = True
    __connections = {}
    __firstMessageReceived = False
    __firstMessage = b''
    __rejectFirstMessageMatches = False
    __replySent = False

    def __init__(self, socket, decryptor):
        '''
        constructor. This sets up all attributes needed for the listener process to function
        :param socket: Socket - the socket the listener process will listen on
        :param decryptor: Decryptor - the decryption instance the recieved message will be put through before
        printing to screen
        :return: void
        '''
        self.__socket = socket
        self.__socket.setblocking(0)  # non-blocking: required for epoll use
        self.__decryptor = decryptor
        'in python this is apparently the only way to remember what file descriptor belongs to what socket'
        fileno = self.__socket.fileno()
        self.__connections[fileno] = self.__socket
        print("Listener Process Initialized")

    def start(self):
        '''
        start is called by the parent process to initialize the child process task. This method is simply the kickoff
        point of the child process, and auto contains it within a managed object
        :return: void
        '''
        epoll = select.epoll()
        # edge-triggered read events plus error/hangup notifications
        epoll.register(self.__socket, (select.EPOLLIN | select.EPOLLERR | select.EPOLLHUP | select.EPOLLET))
        while self.__keepListening:
            events = epoll.poll()
            for fd, eventType in events:
                if eventType & (select.EPOLLHUP|select.EPOLLERR):
                    print("SystemError Recieving Message. Epoll Errored. Closing Descriptor")
                    epoll.unregister(fd)
                else:
                    socket = self.__connections[fd]
                    message, address = socket.recvfrom(2048)
                    encryptedMessage = message
                    #if we haven't recieved the first message yet then this one is it
                    if self.__firstMessageReceived == False:
                        self.__firstMessageReceived = True
                        # give the message first to the algorithm to determine whether we print it or not
                        writeToConsole = self.__decryptor.giveFirstMessage(encryptedMessage)
                        # check if a reply has been sent
                        if self.__replySent == False:
                            firstMessageToBeSent = self.__decryptor.getInitializationMessage()
                            # if first message does exist then send it
                            if len(firstMessageToBeSent) > 0:
                                socket.sendto(firstMessageToBeSent, address)
                                self.__replySent = True
                        # keep track of this first message
                        self.__firstMessage = encryptedMessage
                        if writeToConsole == True:
                            # if you wanted to write to console, then everything should be able to write to console
                            self.__rejectFirstMessageMatches = False
                            #if we are to write to console then decrypt the message using algorithms decryptor
                            decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                            #if the message is empty though don't bother printing it
                            if decryptedMessage != "":
                                print(decryptedMessage)
                        else:
                            # if you don't want firstMessage to write to console, assume u never want it to
                            self.__rejectFirstMessageMatches = True
                    else:
                        # drop anything that looks like the first message if rejection is set
                        if self.__rejectFirstMessageMatches:
                            if encryptedMessage != self.__firstMessage:
                                decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                                print(decryptedMessage)
                        else:
                            decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
                            print(decryptedMessage)
        '''
        try:
            message, address = self.__socket.recvfrom(2048)
            encryptedMessage = message.decode()
            decryptedMessage = self.__decryptor.decrypt(encryptedMessage)
            print(decryptedMessage)
        except Exception:
            pass
        '''
"repo_name": "bensoer/pychat",
"path": "client/listenerprocess.py",
"copies": "1",
"size": "4653",
"license": "mit",
"hash": 2518791073674094600,
"line_mean": 42.9056603774,
"line_max": 117,
"alpha_frac": 0.544594885,
"autogenerated": false,
"ratio": 5.526128266033254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6570723151033255,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bensoer'
from crypto.algorithms.algorithminterface import AlgorithmInterface
from tools.argparcer import ArgParcer
from collections import deque
import sys
import string
class TranspositionCipher(AlgorithmInterface):
    """Columnar transposition cipher keyed by the "-k" argument.

    The message is written row by row under the key and read out column by
    column in the key's alphabetical order; '*' marks padding cells and is
    stripped from all output.
    """

    __key = ""
    '''
    __mapper stores the dynamic mapping of letters and values for encryption and decryption
    Index Information:
    [0] A Character list of the key
    [1] An Integer list giving the alphabetical order index of the key. Note this list is ordered in order of
        appearance in the character array so although the numbers are alphabetical order, they themselves may not
        be in order
    [2+] Messages being encoded or decoded. Depending on length and the length of the key this may span several
        rows
    '''
    __mapper = None

    def __init__(self, arguments):
        # "-k" is mandatory: the cipher is meaningless without a key.
        key = ArgParcer.getValue(arguments, "-k")
        if key == "":
            raise AttributeError("Key Is Required Parameter For Transposition Cipher")
        else:
            self.__key = key
        # add the key as a list
        self.__mapper = list()
        self.__mapper.append(list(self.__key))
        positionsList = self.__createAlphabeticalIndexListOfKey(self.__key)
        #print(positionsList)
        self.__mapper.append(positionsList)
        #print(self.__mapper)

    def __createAlphabeticalIndexListOfKey(self, key):
        # Assign each key letter its 1-based rank in alphabetical order,
        # breaking ties left-to-right.  NOTE(review): assumes the key is
        # alphabetic; non-letters would keep a None rank -- confirm callers.
        alphabet = "abcdefghijklmnopqrstuvwxyz"
        positions = [None] * len(key)
        letterValue = 1
        for letter in alphabet:
            for kIndex, kLetter in enumerate(key):
                #print(letter + " vs " + kLetter.lower())
                if letter == kLetter.lower():
                    #print("Found Match: " + letter + " is given position: " + str(letterValue))
                    positions[kIndex] = letterValue
                    letterValue = letterValue + 1
        return positions

    def __buildMapperWithMessage(self, message):
        # Write the message row by row under the key, padding the final
        # partial row with '*' cells.
        keyLength = len(self.__key)
        messageLength = len(message)
        rows = messageLength / keyLength
        extra = messageLength % keyLength
        #print("key length: " + str(keyLength))
        #print("message length: " + str(messageLength))
        #print("rows needed: " + str(rows))
        #print("extra bits: " + str(extra))
        messageIndex = 0
        while (messageIndex * keyLength) < len(message):
            segment = list()
            for i in range(keyLength):
                if (i + (messageIndex * keyLength)) >= len(message):
                    segment.append("*")
                else:
                    segment.append(message[i + (messageIndex * keyLength)])
            self.__mapper.append(segment)
            messageIndex = messageIndex + 1

    def __clearMapper(self):
        # Drop all message rows, keeping the key row [0] and index row [1].
        for i in range(2, len(self.__mapper)):
            self.__mapper.pop(2)

    def __getLettersUnderAlphabetIndex(self,index):
        # Return the column of message characters under the key letter with
        # the given alphabetical rank, or None if the rank is absent.
        numOfRows = len(self.__mapper)
        for position, alphaIndex in enumerate(self.__mapper[1]):
            if alphaIndex == index:
                letters = list()
                for i in range(2, numOfRows):
                    letters.append(self.__mapper[i][position])
                return letters

    def encryptString(self, unencryptedMessage):
        # Read columns in alphabetical key order and concatenate them,
        # dropping the '*' padding; returns bytes.
        self.__buildMapperWithMessage(unencryptedMessage)
        #print(self.__mapper)
        #print("mooving on")
        keyLength = len(self.__key)
        joinedLists = list()
        encryptedMessage = ""
        for i in range(1, keyLength+1):
            listOfLetters = self.__getLettersUnderAlphabetIndex(i)
            #print("index: " + str(i) + " has letters: ")
            #print(listOfLetters)
            joinedLists = joinedLists + listOfLetters
        #print(joinedLists)
        encryptedMessage = ''.join(joinedLists)
        encryptedMessage = encryptedMessage.replace('*', '')
        #print(encryptedMessage)
        self.__clearMapper()
        return encryptedMessage.encode()

    def __insertLettersAtAlphabetIndex(self, index, letters):
        # Write the given letters down the column whose key letter has the
        # given alphabetical rank.
        lettersList = list(letters)
        for pos, alphaIndex in enumerate(self.__mapper[1]):
            if alphaIndex == index:
                for position, letter in enumerate(lettersList):
                    self.__mapper[2+position][pos] = letter
                break

    def __fillEndSpaces(self, numberOfSpaces):
        # Mark the trailing cells of the last row as '*' padding.
        endRow = len(self.__mapper) - 1
        rowlen = len(self.__mapper[endRow])
        for i in range(0, numberOfSpaces):
            #print("Inserting star into index: " + str(endRow) + str((rowlen-1)-i))
            self.__mapper[endRow][(rowlen - 1) - i] = '*'

    def __getSegmentForColumn(self, message, mappos, msgStartIndex):
        # Slice out of `message` exactly the number of non-padding cells the
        # column at alphabetical rank mappos+1 can hold.
        mappos = mappos + 1
        letters = self.__getLettersUnderAlphabetIndex(int(mappos))
        numOfStars = 0
        if letters is not None:
            numOfStars = letters.count('*')
        endpos = msgStartIndex + len(letters) - numOfStars
        segment = message[msgStartIndex:endpos]
        return segment

    def decryptString(self, encryptedMessage):
        # Rebuild the padded grid, pour the ciphertext back into the columns
        # in alphabetical key order, then read the rows out in sequence.
        strEncryptedMessage = encryptedMessage.decode()
        keyLength = len(self.__key)
        messageLength = len(strEncryptedMessage)
        #figure out how many stars need to be added to the message
        stars = messageLength - keyLength
        if stars <= 0:
            # means the message is smaller then the key
            stars = abs(stars)
        else:
            # means the message is larger then the key
            largeEnoughKey = keyLength
            #so largen our key in increments of our key length to find something large enough
            while messageLength > largeEnoughKey:
                largeEnoughKey = largeEnoughKey + keyLength
            stars = messageLength - largeEnoughKey
            stars = abs(stars)
        # if there is more then 0 stars needed, add them as part of the message length
        totalMessageLength = messageLength
        if stars > 0:
            totalMessageLength = messageLength + stars
        #print(totalMessageLength)
        # now determine how many rows will be needed to store this entire message
        rows = int(totalMessageLength) / int(keyLength)
        extra = int(totalMessageLength) % int(keyLength)
        # if there is extra letters to be included add a row for it
        if extra > 0:
            #print("There is extra: " + str(extra))
            rows = rows + 1
        # create the appropriate number of rows needed on the mapper to hold the message
        for row in range(0, int(rows)):
            newRow = [None] * keyLength
            self.__mapper.append(newRow)
        # fill extra spaces at the end with stars
        self.__fillEndSpaces(stars)
        #print(self.__mapper)
        # insert the message into the mapper
        msgpos = 0
        mappos = 0
        while msgpos < messageLength:
            segment = self.__getSegmentForColumn(strEncryptedMessage, mappos, msgpos)
            #print(segment)
            # put this letter segment in the column of the alphabet index
            self.__insertLettersAtAlphabetIndex(mappos + 1, segment)
            mappos = mappos + 1
            msgpos = msgpos + len(segment)
        #print(self.__mapper)
        # reconstruct from the message from each row in the mapper
        fullLists = list()
        for i in range(2, len(self.__mapper)):
            #print("loop " + str(i))
            mapRow = self.__mapper[i]
            #print(mapRow)
            fullLists = fullLists + mapRow
        fullSegment = ''.join(fullLists)
        fullSegment = fullSegment.replace('*', '')
        # cleanup the mapper for next message
        self.__clearMapper()
        return fullSegment
| {
"repo_name": "bensoer/pychat",
"path": "crypto/algorithms/transpositioncipher.py",
"copies": "1",
"size": "7792",
"license": "mit",
"hash": -5323237938713533000,
"line_mean": 30.8040816327,
"line_max": 113,
"alpha_frac": 0.5939425051,
"autogenerated": false,
"ratio": 4.384918401800788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5478860906900788,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bergundy'
class LayerDict(object):
    """Stack of keyed dict layers where higher keys shadow lower ones.

    set_layer / del_layer return only the effective changes: keys whose
    visible value actually changed after accounting for shadowing by
    higher layers.  (Python 2: relies on dict.viewkeys().)
    """

    def __init__(self):
        self._layers = {}
        """
        :type self._layers: dict(dict)
        """

    def set_layer(self, key, dct):
        # Replace (or create) the layer at `key` and report effective changes.
        prev = self._layers.get(key, {})
        self._layers[key] = dct
        return self._calc_changes(key, prev, dct)

    def del_layer(self, key):
        # Remove the layer at `key` (no-op if absent) and report effective changes.
        prev = self._layers.pop(key, {})
        return self._calc_changes(key, prev, {})

    def _calc_changes(self, key, prev, curr):
        # Start from the raw diff, then discard any key that is shadowed by
        # a layer with a higher key -- those changes are not visible.
        added, removed, modified = calc_diff(prev, curr)
        updated = added | removed | modified
        sorted_layers = (self._layers[k] for k in sorted(self._layers) if k > key)
        for l in sorted_layers:
            updated -= l.viewkeys()
            if not updated:
                break
        # Removed keys map to None (curr has no entry for them).
        return {k: curr.get(k) for k in updated}
def calc_diff(a, b):
    """Return (added, removed, modified) key sets between dicts a and b.

    added    -- keys present in b but not a
    removed  -- keys present in a but not b
    modified -- shared keys whose values differ
    (Python 2: relies on dict.viewkeys().)
    """
    keys_a = a.viewkeys()
    keys_b = b.viewkeys()
    shared = keys_a & keys_b
    modified = {key for key in shared if a[key] != b[key]}
    return keys_b - keys_a, keys_a - keys_b, modified
| {
"repo_name": "pombredanne/click-config",
"path": "click_config/inotify/layers.py",
"copies": "2",
"size": "1173",
"license": "bsd-2-clause",
"hash": -5305270114808743000,
"line_mean": 26.2790697674,
"line_max": 82,
"alpha_frac": 0.5481670929,
"autogenerated": false,
"ratio": 3.3706896551724137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4918856748072414,
"avg_score": null,
"num_lines": null
} |
# German localization strings for Omnitool.  Every name in this module is
# looked up by attribute from the UI code, so names must not be changed
# (note: `exit` and `open` intentionally shadow builtins here — they are
# module-level message keys, not calls).
__author__ = "Berserker66"
langversion = 1
langname = "German"
##updater
# text construct: "Version "+version+available+changelog
# example: Version 3 available, click here to download, or for changelog click here
# BUG FIX: "clicke" is a misspelling; correct German is "klicke".
available = " verfügbar, klicke hier für den Download"
changelog = ", oder hier für den Changelog"
##world gen
worldify = "Welt aus Bild"
planetoids = "Planetoiden & Terra"
arena = "Kerker Arena"
flat = "Flachwelt"
new = "Neue Welt:"
##mainmenu
#omnitool
settings = "Einstellungen"
report_issue = "Problem melden"
exit = "Verlassen"
#start
start = "Start"
terraria = "Terraria"
steamfree = "Terraria Steamfrei"
#open
open = "Öffne"
imagefolder = "Weltbilder"
backupfolder = "Weltbackups"
themes = "Omnitool themes"
#visit
visit = "Besuche"
donate = "Spenden"
homepage = "Omnitool"
TO = "Terraria Online"
wiki = "Terraria Wiki"
##world thumbnail
label = "Welt: "
##settings menu
warning = "Einige Änderungen benötigen einen Neustart"
none = "Keine"
tiny = "Winzig" #unused
small = "Klein"
medium = "Mittel"
large = "Groß"
very_large = "XXL"
theme_select = "Skin Auswahl:"
# NOTE(review): "größte" ("biggest") looks like a typo for "Größe" ("size")
# — confirm with the maintainer before changing the visible text.
thumbsize = "Welt Vorschau größte:"
mk_backups = "Erstelle Backups"
world_columns = "Weltspalten:"
##world interaction menu
wa_worldactionmenu = "Action für {}:"
wa_imageopen = "Bild öffnen"
wa_renderopen = "Welt darstellen"
wa_teditopen = "In TEdit öffnen"
wa_update = "Bild aktualisieren"
wa_super = "Super-Bild generieren"
##planetoids & terra
pt_start = 'Start Generation!'
pt_name = "Name: "
pt_mode = "Modus: "
pt_small = "kleine Planetoids"
pt_medium = "mittlere Planetoids"
pt_large = "große Planetoids"
pt_square = "quadratische Planetoids"
pt_both = "große Planetoids & Terra"
pt_square_terra = "quadratische Terra"
pt_start_sel = "Start: "
pt_morning = "Morgen"
pt_day = "Tag"
pt_night = "Nacht"
pt_bloodmoon = "Blutmond"
pt_extras = "Extras: "
pt_sun = "Sonne: "
pt_atlantis = "Atlantis: "
pt_merchant = "Händler: "
pt_lloot = "Weniger Loot: "
pt_mirror = "Gespiegelt: "
pt_pre = "Item Prefixes: "
##worldify
w_start = "Start Weltifizierung!"
w_cont = "Weiter"
w_name = "Name: "
w_rgb = "RGB"
w_hsv = "gewichtetes HSV"
w_method = "Methode: "
w_priority = "Prioritätsauswahl"
w_hue = "Farbton: "
w_saturation = "Sättigung: "
w_brightness = "Helligkeit: "
##arena
a_start = "Start Generation!"
a_name = "Name: "
a_rooms = "Räume: "
a_sidelen = "Raum seitenlänge: "
a_corlen = "Korridorlänge: "
a_chest = "Kisten: "
a_itemchest = "Items pro Kiste: "
a_light = "Beleuchtung: "
a_chances = "Raum Chancen: "
a_standard = "Standard: "
a_cross = "Kreuz Korridor: "
##torch
at_chances = "Farbchancen:"
at_full = "Vollspektrum"
at_blue = "Blau"
at_red = "Rot"
at_green = "Grün"
at_pink = "Pink"
at_white = "Weiß"
at_yellow = "Gelb"
at_purple = "Lila"
# NOTE(review): "Verflucht" means "cursed", but the key is "lime" —
# presumably deliberate (cursed torch is green); confirm.
at_lime = "Verflucht"
##plugins
pl_start = "Start Plugin"
pl_rec = "Wähle eine Welt zum einlesen"
pl_mod = "Wähle eine Welt zum modifizieren"
pl_trans = "Wähle zwei Welten für einen Transfer"
pl_trans_source = "Quelle"
pl_trans_target = "Ziel"
##flatworld
fw_size = "Weltgröße:"
fw_tiny = "winzig"
fw_square = "quadratisch"
fw_small = "klein"
fw_medium = "mittel"
fw_large = "groß"
fw_tile = "Block Typ:"
fw_wall = "Mauer Typ:"
fw_surf = "Oberflächentyp:"
| {
"repo_name": "Berserker66/omnitool",
"path": "omnitool/Language/german.py",
"copies": "1",
"size": "3251",
"license": "mit",
"hash": -6408815863629089000,
"line_mean": 19.9155844156,
"line_max": 83,
"alpha_frac": 0.6895374107,
"autogenerated": false,
"ratio": 2.2571829011913103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8393725029756418,
"avg_score": 0.010599056426978505,
"num_lines": 154
} |
### VMware advanced memory stats
### Displays memory stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from VMware Tools needs to be installed
class dstat_plugin(dstat):
    """Advanced VMware guest memory statistics.

    Reads hypervisor-side memory counters from inside a VMware VM via the
    vmGuestLib API (the python-vmguestlib module from VMware Tools).
    """
    def __init__(self):
        self.name = 'vmware advanced memory'
        self.vars = ('active', 'ballooned', 'mapped', 'overhead', 'saved',
                     'shared', 'swapped', 'targetsize', 'used')
        self.nick = ('active', 'balln', 'mappd', 'ovrhd', 'saved',
                     'shard', 'swapd', 'targt', 'used')
        self.type = 'd'
        self.width = 5
        self.scale = 1024
    def check(self):
        # Any failure to import/initialise vmguestlib means the plugin
        # cannot run here.
        try:
            global vmguestlib
            import vmguestlib
            self.gl = vmguestlib.VMGuestLib()
        except:
            raise Exception('Needs python-vmguestlib module')
    def extract(self):
        # Refresh the guestlib snapshot, then convert every MB counter
        # reported by the hypervisor into bytes.
        self.gl.UpdateInfo()
        megabyte = 1024 ** 2
        readers = {
            'active': self.gl.GetMemActiveMB,
            'ballooned': self.gl.GetMemBalloonedMB,
            'mapped': self.gl.GetMemMappedMB,
            'overhead': self.gl.GetMemOverheadMB,
            'saved': self.gl.GetMemSharedSavedMB,
            'shared': self.gl.GetMemSharedMB,
            'swapped': self.gl.GetMemSwappedMB,
            'targetsize': self.gl.GetMemTargetSizeMB,
            'used': self.gl.GetMemUsedMB,
        }
        for key in self.vars:
            self.val[key] = readers[key]() * megabyte
# vim:ts=4:sw=4 | {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_vm_mem_adv.py",
"copies": "4",
"size": "1514",
"license": "apache-2.0",
"hash": -657400665017646800,
"line_mean": 39.9459459459,
"line_max": 117,
"alpha_frac": 0.5964332893,
"autogenerated": false,
"ratio": 3.349557522123894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006564051380006739,
"num_lines": 37
} |
### VMware cpu stats
### Displays CPU stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from VMware Tools needs to be installed
class dstat_plugin(dstat):
    """VMware guest CPU statistics (used/stolen percentages).

    Reads hypervisor-side CPU counters from inside a VMware VM via the
    vmGuestLib API.  'elapsed' is tracked for the delta computation but
    not displayed (only two nicks are defined).
    """
    def __init__(self):
        self.name = 'vm cpu'
        self.vars = ('used', 'stolen', 'elapsed')
        self.nick = ('usd', 'stl')
        self.type = 'p'
        self.width = 3
        self.scale = 100
        self.cpunr = getcpunr()
    def check(self):
        # Any failure to import/initialise vmguestlib means the plugin
        # cannot run here.
        try:
            global vmguestlib
            import vmguestlib
            self.gl = vmguestlib.VMGuestLib()
        except:
            raise Exception('Needs python-vmguestlib module')
    def extract(self):
        self.gl.UpdateInfo()
        # Snapshot the raw millisecond counters in the same order as the
        # original implementation.
        counters = (('elapsed', self.gl.GetElapsedMs),
                    ('stolen', self.gl.GetCpuStolenMs),
                    ('used', self.gl.GetCpuUsedMs))
        for key, read in counters:
            self.set2[key] = read()
        # Percentage of wall time per CPU since the previous sample.
        for key in ('stolen', 'used'):
            self.val[key] = (self.set2[key] - self.set1[key]) * 100 / (self.set2['elapsed'] - self.set1['elapsed']) / self.cpunr
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4 | {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_vm_cpu.py",
"copies": "4",
"size": "1168",
"license": "apache-2.0",
"hash": -602730013198174800,
"line_mean": 29.7631578947,
"line_max": 131,
"alpha_frac": 0.5761986301,
"autogenerated": false,
"ratio": 3.308781869688385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5884980499788386,
"avg_score": null,
"num_lines": null
} |
### VMware ESX kernel interrupt stats
### Displays kernel interrupt statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -I argument.
# EXAMPLES:
# # dstat --vmkint -I 0x46,0x5a
# You can even combine the Linux and VMkernel interrupt stats
# # dstat --vmkint -i -I 14,0x5a
# Look at /proc/vmware/interrupts to see which interrupt is linked to which function
class dstat_plugin(dstat):
    """VMware ESX vmkernel interrupt statistics.

    Parses /proc/vmware/interrupts and reports per-interrupt rates.
    NOTE: relies on dstat-framework behavior — the framework appears to
    replace self.vars/self.discover with their computed results before
    display (TODO confirm against the dstat core), which is why methods
    below read them as sequences.
    """
    def __init__(self):
        self.name = 'vmkint'
        self.type = 'd'
        self.width = 4
        self.scale = 1000
        # self.open stores the file object(s) in self.fd (dstat framework)
        self.open('/proc/vmware/interrupts')
#        self.intmap = self.intmap()
#    def intmap(self):
#        ret = {}
#        for line in dopen('/proc/vmware/interrupts').readlines():
#            l = line.split()
#            if len(l) <= self.vmkcpunr: continue
#            l1 = l[0].split(':')[0]
#            l2 = ' '.join(l[vmkcpunr()+1:]).split(',')
#            ret[l1] = l1
#            for name in l2:
#                ret[name.strip().lower()] = l1
#        return ret
    def vmkcpunr(self):
        #the service console sees only one CPU, so cpunr == 1, only the vmkernel sees all CPUs
        # NOTE(review): the `ret = []` below is immediately overwritten and dead.
        ret = []
        # default cpu number is 2
        ret = 2
        # NOTE(review): file objects have no .splitlines() — this looks like
        # it was meant to be .read().splitlines() or .readlines(); and
        # l[0] is a single character, so it can never equal 'Vector'.  As
        # written this always returns the default of 2 — confirm intent.
        for l in self.fd[0].splitlines():
            if l[0] == 'Vector':
                ret = int( int( l[-1] ) + 1 )
        return ret
    def discover(self):
        #interrupt names are not decimal numbers, but rather hexadecimal numbers like 0x7e
        # Returns the names of interrupts with more than 20 events summed
        # over the vmkernel CPUs.
        ret = []
        self.fd[0].seek(0)
        for line in self.fd[0].readlines():
            l = line.split()
            if l[0] == 'Vector': continue
            if len(l) < self.vmkcpunr()+1: continue
            name = l[0].split(':')[0]
            amount = 0
            for i in l[1:1+self.vmkcpunr()]:
                amount = amount + long(i)
            if amount > 20: ret.append(str(name))
        return ret
    def vars(self):
        # Use the -I list when given, otherwise everything discover found.
        ret = []
        if op.intlist:
            list = op.intlist
        else:
            list = self.discover
#            len(list) > 5: list = list[-5:]
        for name in list:
            if name in self.discover:
                ret.append(name)
#            elif name.lower() in self.intmap.keys():
#                ret.append(self.intmap[name.lower()])
        return ret
    def check(self):
        # Only usable on a VMware ESX host with /proc/vmware present.
        try:
            os.listdir('/proc/vmware')
        except:
            raise Exception, 'Needs VMware ESX'
        info(1, 'The vmkint module is an EXPERIMENTAL module.')
    def extract(self):
        # Sum each watched interrupt's counters over the vmkernel CPUs,
        # then report the per-second rate since the previous sample.
        self.fd[0].seek(0)
        for line in self.fd[0].readlines():
            l = line.split()
            if len(l) < self.vmkcpunr()+1: continue
            name = l[0].split(':')[0]
            if name in self.vars:
                self.set2[name] = 0
                for i in l[1:1+self.vmkcpunr()]:
                    self.set2[name] = self.set2[name] + long(i)
        for name in self.set2.keys():
            self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/dstat_for_server/plugins/dstat_vmk_int.py",
"copies": "3",
"size": "3205",
"license": "apache-2.0",
"hash": -8144581516616198000,
"line_mean": 32.3854166667,
"line_max": 94,
"alpha_frac": 0.5235569423,
"autogenerated": false,
"ratio": 3.349007314524556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006052301521876284,
"num_lines": 96
} |
### VMware ESX kernel vmhba stats
### Displays kernel vmhba statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -D argument.
# EXAMPLES:
# # dstat --vmkhba -D vmhba1,vmhba2,total
# # dstat --vmkhba -D vmhba0
# You can even combine the Linux and VMkernel diskstats (but the "total" argument
# will be used by both).
# # dstat --vmkhba -d -D sda,vmhba1
class dstat_plugin(dstat):
    """VMware ESX vmkernel HBA (storage adapter) read/write statistics.

    Parses /proc/vmware/scsi/<vmhba>/stats and reports per-adapter and
    total read/write rates.  NOTE: relies on the dstat framework
    replacing self.vars/self.discover with computed lists before display
    (TODO confirm against the dstat core).
    """
    def __init__(self):
        self.name = 'vmkhba'
        self.nick = ('read', 'writ')
        self.cols = 2
    def discover(self, *list):
        # discover will list all vmhba's found.
        # we might want to filter out the unused vmhba's (read stats, compare with ['0', ] * 13)
        # NOTE(review): `name` is appended once per qualifying stats line,
        # so an adapter may appear multiple times in the result — confirm
        # whether duplicates matter to the caller.
        ret = []
        try:
            list = os.listdir('/proc/vmware/scsi/')
        except:
            raise Exception, 'Needs VMware ESX'
        for name in list:
            for line in dopen('/proc/vmware/scsi/%s/stats' % name).readlines():
                l = line.split()
                if len(l) < 13: continue
                if l[0] == 'cmds': continue
                if l == ['0', ] * 13: continue
                ret.append(name)
        return ret
    def vars(self):
        # vars will take the argument list - when implemented - , use total, or will use discover + total
        ret = []
        if op.disklist:
            list = op.disklist
        #elif not op.full:
        #    list = ('total', )
        else:
            list = self.discover
            list.sort()
        for name in list:
            if name in self.discover + ['total']:
                ret.append(name)
        return ret
    def check(self):
        # Only usable on a VMware ESX host with /proc/vmware present.
        try:
            os.listdir('/proc/vmware')
        except:
            raise Exception, 'Needs VMware ESX'
        info(1, 'The vmkhba module is an EXPERIMENTAL module.')
    def extract(self):
        # Columns l[2]/l[4] are read/write counters (presumably KiB, given
        # the * 1024.0 below — TODO confirm against ESX proc format).
        self.set2['total'] = (0, 0)
        for name in self.vars:
            self.set2[name] = (0, 0)
        for name in os.listdir('/proc/vmware/scsi/'):
            for line in dopen('/proc/vmware/scsi/%s/stats' % name).readlines():
                l = line.split()
                if len(l) < 13: continue
                if l[0] == 'cmds': continue
                if l[2] == '0' and l[4] == '0': continue
                if l == ['0', ] * 13: continue
                self.set2['total'] = ( self.set2['total'][0] + long(l[2]), self.set2['total'][1] + long(l[4]) )
                if name in self.vars and name != 'total':
                    self.set2[name] = ( long(l[2]), long(l[4]) )
        for name in self.set2.keys():
            self.val[name] = (
                (self.set2[name][0] - self.set1[name][0]) * 1024.0 / elapsed,
                (self.set2[name][1] - self.set1[name][1]) * 1024.0 / elapsed,
            )
        if step == op.delay:
            self.set1.update(self.set2)
| {
"repo_name": "dongyoungy/dbseer_middleware",
"path": "rs-sysmon2/plugins/dstat_vmk_hba.py",
"copies": "1",
"size": "2966",
"license": "apache-2.0",
"hash": -7434976194438366000,
"line_mean": 35.1707317073,
"line_max": 111,
"alpha_frac": 0.5148347943,
"autogenerated": false,
"ratio": 3.4976415094339623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4512476303733962,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bert'
import sys
import requests
import csv
import json
import handle_json
import handle_csv
import compare
import logging
from datetime import datetime
# Set up logging file to receive Update record
stake_list = [
'Garden',
'Grove Creek',
'Lindon',
'Lindon Central',
'Lindon West',
'Mount Mahogany',
'North Field',
'Pleasant Grove',
'Pleasant Grove East',
'Pleasant Grove West',
'Timpanogos'
]
numfiles = len(sys.argv)
if (numfiles > 0):
compare_file = sys.argv[1]
if compare_file in stake_list:
stake = compare_file
else:
stake = 'all'
else:
stake = ''
logging.basicConfig(filename='data/results/Update {0}-{1}.txt'.format(compare_file, str(datetime.today()).split()[0]), level=logging.DEBUG)
log1 = logging.getLogger('Update')
logging.debug('\n*** {0} ***'.format(compare_file))
# Load in saved database and convert to JSON fuzzy format for comparison
counselors = handle_csv.load_counselors('data/MBC_db.csv', stake)
fuzzy_comparable_counselors = handle_csv.make_fuzzy_comparable_counselor_list(counselors)
fuzzy_results = []
"""Load in one data file and convert to JSON fuzzy format for comparison.
Find matching entries for each MB Counselor in the database, and return
with information. Write to the log all info that needs to be updated online.
"""
if (compare_file == 'MBC'):
new_counselors = handle_csv.load_counselors('data/MBC.csv', stake)
new_fuzzy_comparable_counselors = handle_csv.make_fuzzy_comparable_counselor_list(new_counselors)
for c in fuzzy_comparable_counselors:
fuzzy_results.append(compare.find_match(c, new_fuzzy_comparable_counselors))
elif (compare_file == 'YPT'):
dist_scouters = handle_csv.load_dist_ypt('data/YPT.csv')
fuzzy_comparable_scouters = handle_csv.make_fuzzy_comparable_district_list(dist_scouters)
for c in fuzzy_comparable_counselors:
fuzzy_results.append(compare.find_match(c, fuzzy_comparable_scouters))
elif len(stake):
all_households = handle_json.read_in_stake('data/{0}.json'.format(stake))
fuzzy_comparable_members = handle_json.make_fuzzy_comparable_member_list(all_households)
for c in fuzzy_comparable_counselors:
fuzzy_results.append(compare.fuzzy_compare(c, fuzzy_comparable_members))
else:
logging.info('No stake selected for matching')
if len(fuzzy_results):
for c in fuzzy_results:
counselor = c['counselor']
match = c['match']
if (c['match_type'] == 'moved '):
logging.info('\n{0} : {1}, {2} \t{3}'.format(c['match_type'], counselor['last_name'], counselor['first_name'], counselor['street']))
logging.info('new owner: {0}, {1} {2}\n'.format(match['last_name'], match['first_name'], match['street']))
elif (c['match_type'] == 'unknown ' or c['match_type'] == 'possible'):
logging.info('\n{0}'.format(c['match_type']))
logging.info('{0}, {1}\t{2} {3} {4}'.format(counselor['last_name'], counselor['first_name'], counselor['street'], counselor['phones'], counselor['emails']))
# logging.info('{0}, {1}\t{2} {3} {4}'.format( match['last_name'], match['first_name'], match['street'], match['phones'], match['emails']))
else:
logging.info('\n{0} : {1} {2} ({3}) : {4}'.format(c['match_type'], counselor['last_name'], counselor['first_name'], match['first_name'] if (c['match_type'] == 'probable')else '',match['full_name']))
logging.info('Addresses: {0} <--> {1}'.format(counselor['street'], match['street']))
logging.info('Ward: {0}'.format(match['ward']))
if (compare_file == 'YPT'):
logging.info(match)
if match['phones']['phone1']:
if ((match['phones']['phone1'] != counselor['phones']['phone1']) and
(match['phones']['phone1'] != counselor['phones']['phone2'])):
logging.info('New Phone* {0} (was {1})'.format(match['phones']['phone1'],counselor['phones']['phone1']))
if match['phones']['phone2']:
if ((match['phones']['phone2'] != counselor['phones']['phone1']) and
(match['phones']['phone2'] != counselor['phones']['phone2'])):
logging.info('New Phone* {0} (was {1})'.format(match['phones']['phone2'],counselor['phones']['phone2']))
if len(match['emails']['email1']):
if ((match['emails']['email1'] != counselor['emails']['email1']) and
(match['emails']['email1'] != counselor['emails']['email2'])):
logging.info('New eMail* {0} (was {1})'.format(match['emails']['email1'],counselor['emails']['email1']))
if len(match['emails']['email2']):
if ((match['emails']['email2'] != counselor['emails']['email1']) and
(match['emails']['email2'] != counselor['emails']['email2'])):
logging.info('New eMail* {0} (was {1})'.format(match['emails']['email2'],counselor['emails']['email2']))
# get exact matches
exact_matches = [x for x in fuzzy_results if (x['match_type'] == 'matched ')]
# get widened matches
probable_matches = [x for x in fuzzy_results if (x['match_type'] == 'probable')]
# get mismatches
possible_matches = [x for x in fuzzy_results if (x['match_type'] == 'possible')]
# get no matches
not_matches = [x for x in fuzzy_results if (x['match_type'] == 'moved ')]
unknown_matches = [x for x in fuzzy_results if (x['match_type'] == 'unknown ')]
print '\n ***** RESULTS *****\n'
print '\n'
print 'exact matches: {0}' .format(len(exact_matches))
print 'probable matches: {0}' .format(len(probable_matches))
print 'possible matches: {0}' .format(len(possible_matches))
print 'total matches: {0}' .format(len(exact_matches) + len(probable_matches) + len(possible_matches))
print 'move outs: {0}' .format(len(not_matches))
print 'total unknown: {0}' .format(len(unknown_matches))
print 'total searched: {0}' .format(len(fuzzy_comparable_counselors))
print 'find percentage: {0}'.format(float(len(exact_matches) + len(probable_matches) + len(possible_matches))/(float(len(fuzzy_comparable_counselors))))
with open('matches-{0}.json'.format(stake), 'wb') as f:
f.write(json.dumps(fuzzy_results))
print 'File write complete' | {
"repo_name": "hisPeople/ducking-avenger",
"path": "Comp2MBCdb.py",
"copies": "1",
"size": "6427",
"license": "mit",
"hash": -8294596487308976000,
"line_mean": 45.2446043165,
"line_max": 210,
"alpha_frac": 0.615995021,
"autogenerated": false,
"ratio": 3.2824310520939735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4398426073093974,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from ..util import assert_Xy
__all__ = ['split_data']
def split_data(X, y=None, frac=2/3):
    """Randomly partition a dataset into two complementary subsets.

    The first subset holds ``floor(frac * len(X))`` samples, the second
    holds the rest.

    Parameters
    ----------
    X : array_like
        input features, shape=(n_samples, n_features).
    y : array_like, optional
        targets, shape=(n_samples,).
    frac : float in (0, 1), optional
        relative size of the first subset.

    Returns
    -------
    X1, [y1,] X2 [, y2] : arrays
        the two subsets; the ``y`` parts are only returned when `y`
        is given.

    Raises
    ------
    TypeError, ValueError
    """
    if not 0. < frac < 1.:
        raise ValueError("'frac' is not larger than 0 and smaller than 1.")
    n_samples = len(X)
    n_first = int(np.floor(frac * n_samples))
    # One random permutation drives both subsets, so they are disjoint
    # and together cover the whole dataset.
    order = np.random.permutation(np.arange(n_samples))
    first, second = order[:n_first], order[n_first:]
    if y is None:
        X = np.asarray(X)
        return X[first], X[second]
    X, y = assert_Xy(X, y)
    return X[first], y[first], X[second], y[second]
| {
"repo_name": "bertrand-l/LearnML",
"path": "learnml/cross_validation/data_utils.py",
"copies": "1",
"size": "1362",
"license": "bsd-3-clause",
"hash": 6066509997623786000,
"line_mean": 25.1923076923,
"line_max": 82,
"alpha_frac": 0.5660792952,
"autogenerated": false,
"ratio": 3.4307304785894206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9495651293715277,
"avg_score": 0.00023169601482854495,
"num_lines": 52
} |
__author__ = 'besta'
class BestaPlayer:
    """Connect-4 ("puissance 4") move chooser.

    Reads a 6x7 board from a text file (one row per line, '0' = empty,
    otherwise a player's digit; self.grille is a list of raw line
    *strings*, trailing newline included) and picks a column to play by
    scanning rows, columns and diagonals for runs of tokens.
    """
    def __init__(self, fichier, player):
        # fichier: path of the text file holding the board state
        # player: sequence of the two players' numbers — presumably
        #         (self, opponent); confirm with the caller.
        self.fichier = fichier
        self.grille = self.getFirstGrid()
        self.best_hit = 0
        self.players = player
    def getFirstGrid(self):
        """
        Implements function to get the first grid.
        :return: the grid.
        """
        # Returns a list of the file's lines (strings, newline kept).
        li = []
        with open(self.fichier, 'r') as fi:
            for line in fi.readlines():
                li.append(line)
        return li
    def updateGrid(self):
        """
        Implements function to update the grid to alter n-1
        round values
        """
        # NOTE(review): this looks broken as written — `i = 0` is reset for
        # every line and `j = 0` for every character, so every write targets
        # cell [0][0]; moreover self.grille holds strings, which do not
        # support item assignment (TypeError).  Confirm whether this method
        # is ever called.
        with open(self.fichier, 'r') as fi:
            for line in fi.readlines():
                i = 0
                for car in line:
                    j = 0
                    if car != '\n':
                        self.grille[i][j] = car
                        j += 1
                i += 1
    def grilleEmpty(self):
        """
        Implement function to check if the grid is empty.
        """
        # The last character of each line (the newline) is skipped.
        for line in self.grille:
            for car in line[:len(line) - 1]:
                if car != '0':
                    return False
        return True
    def checkLines(self, player, inARow):
        """
        Implements function to check the current lines setup to evaluate best combinaison.
        :param player: check for your numbers (your player number) or those of your opponent.
        :param inARow: how many tokens in a row (3 or 2).
        :return: true or false
        """
        # NOTE(review): `flag` is never reset to False between runs/rows,
        # so a run may be counted as continuing across gaps — confirm
        # whether that is intended.
        count = 0
        flag = False
        for line_number, line in enumerate(self.grille):
            count = 0
            for car_pos, car in enumerate(line[:len(line) - 1]):
                if int(car) == player and not flag:
                    count = 1
                    flag = True
                elif int(car) == player and flag:
                    count += 1
                    if count == inARow:
                        if car_pos - inARow >= 0 and self.canPlayLine(line_number, car_pos - inARow):
                            return True, car_pos - inARow
                        if car_pos + 1 <= 6 and self.canPlayLine(line_number, car_pos + 1):
                            return True, car_pos + 1
                else:
                    count = 0
        return False, 0
    def canPlayLine(self, line, col):
        """
        Function to check if we can fill the line with a token.
        :param line: which line
        :param col: which column
        :return: true or false
        """
        # A cell is playable if it is empty and either on the bottom row
        # (line 5) or directly above an occupied cell.
        if line == 5:
            return self.grille[line][col] == '0'
        else:
            return self.grille[line][col] == '0' and self.grille[line + 1][col] != '0'
    def changeColumnInLines(self):
        """
        Implements function to transform columns in lines to make tests eaiser.
        :return: a reverse matrice
        """
        # Transposes the 6x7 board into 7 column-strings (Python 2: xrange).
        column = []
        for x in xrange(7):
            col = ''
            for y in xrange(6):
                col += self.grille[y][x]
            column.append(col)
        return column
    def checkColumns(self, player, inARow):
        """
        Implements function to check the current columns setup to evaluate best combinaison.
        :param player: check for your numbers (your player number) or those of your opponent.
        :param inARow: how many tokens in a row (3 or 2).
        :return: true or false
        """
        column = self.changeColumnInLines()
        count = 0
        flag = False
        for col_number, line in enumerate(column):
            count = 0
            for car_pos, car in enumerate(line):
                if int(car) == player and not flag:
                    count = 1
                    flag = True
                elif int(car) == player and flag:
                    count += 1
                    if count == inARow and car_pos - inARow >= 0 and self.grille[car_pos - inARow][col_number] == '0':
                        return True, col_number
                else:
                    count = 0
        return False, 0
    def checkDiagonalLeftToRight(self, player, inARow):
        """
        Implements function to check the current diagonal to evaluate best combinaison.
        :param player: check for your numbers or opponent ones.
        :param inARow: how many tokens in a row (3 or 2).
        :return:
        """
        # First pass: diagonals starting on the left edge (rows 3..5).
        x = 3
        flag = False
        while x < 6:
            count = 0
            x_int = x
            y_int = 0
            while x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y_int + 1] != '0':
                        return True, y_int + 1
                else:
                    count = 0
                    flag = False
                x_int -= 1
                y_int += 1
            x += 1
        # Second pass: diagonals starting on the bottom row (columns 1..3).
        y = 1
        flag = False
        while y <= 3:
            count = 0
            x_int = 5
            y_int = y
            while y_int <= 6 and x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    # NOTE(review): `self.grille[x_int][y + 1]` uses the outer
                    # loop variable `y`; the matching test in the first pass
                    # uses `y_int + 1` — suspected typo, confirm.
                    if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y + 1] != '0':
                        return True, y_int + 1
                else:
                    count = 0
                    # NOTE(review): `flage` is a typo for `flag`; as written
                    # the flag is never reset in this branch.
                    flage = False
                x_int -= 1
                y_int += 1
            y += 1
        return False, 0
    def checkDiagonalRightToLeft(self, player, inARow):
        """
        Implements function to check the current diagonal to evaluate best combinaison.
        :param player: check for your numbers or opponent ones.
        :param inARow: how many tokens in a row (3 or 2).
        :return:
        """
        # First pass: anti-diagonals starting on the right edge (rows 3..5).
        x = 3
        flag = False
        while x < 6:
            count = 0
            x_int = x
            y_int = 6
            while x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y_int - 1] != '0':
                        return True, y_int - 1
                else:
                    count = 0
                    flag = False
                x_int -= 1
                y_int -= 1
            x += 1
        # Second pass over bottom-row anti-diagonals.
        y = 5
        flag = False
        # NOTE(review): `y` starts at 5, so `while y <= 3` never executes —
        # this whole pass is dead code; presumably `while y >= 3` was meant
        # (the loop decrements y).  Confirm before fixing.
        while y <= 3:
            count = 0
            x_int = 5
            y_int = y
            while y_int >= 3 and x_int >= 0:
                if int(self.grille[x_int][y_int]) == player and not flag:
                    count = 1
                    flag = True
                elif int(self.grille[x_int][y_int]) == player and flag:
                    count += 1
                    # NOTE(review): `self.grille[x_int][y - 1]` uses the outer
                    # loop variable `y`; suspected typo for `y_int - 1`.
                    if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y - 1] != '0':
                        return True, y_int - 1
                else:
                    count = 0
                    # NOTE(review): `flage` is a typo for `flag`.
                    flage = False
                x_int -= 1
                y_int -= 1
            y -= 1
        return False, 0
    def checkDiagonals(self, player, inARow):
        """
        Calls two diagonal functional.
        :return: an int, representing the column where to play or 0 and False if there is no pattern search.
        """
        check = self.checkDiagonalLeftToRight(player, inARow)
        if check[0]:
            return check
        else:
            return self.checkDiagonalRightToLeft(player, inARow)
    def playSomeColumn(self, player, inARow):
        """
        Call all function for a player and a number of tokens given.
        :param player: which player
        :param inARow: how many token
        :return: true or false (col number if true)
        """
        # Dict iteration order decides which check runs first — the result
        # may therefore depend on it; confirm that is acceptable.
        methods = {'checklines': self.checkLines, 'checkcolumn': self.checkColumns, 'checkdiagonal': self.checkDiagonals}
        for key, function in methods.items():
            which_col = function(player, inARow)
            if which_col[0]:
                return which_col
        return False, 0
    def findFirstColumnEmpty(self):
        """
        Implements function to get the first column where a slot remain.
        :return: the column
        """
        # Scans the top row left-to-right; -1 means the board is full.
        for col in xrange(7):
            if self.grille[0][col] == '0':
                return col
        return -1
    def decideColumn(self):
        """
        Implements main function : to decide what is the better hit to do.
        :return: an int, representing the column where we play
        """
        # Prefer longer runs first (3, then 2, then 1), checking both
        # players at each length; fall back to the first open column.
        if self.grilleEmpty():
            return 3
        li_sequence = [3, 2, 1]
        li_players = [self.players[0], self.players[1]]
        for sequence in li_sequence:
            for player in li_players:
                choosen_col = self.playSomeColumn(player, sequence)
                if choosen_col[0]:
                    return choosen_col[1]
        return self.findFirstColumnEmpty()
| {
"repo_name": "KeserOner/puissance4",
"path": "bestaplayer.py",
"copies": "1",
"size": "9518",
"license": "mit",
"hash": 922085073882328200,
"line_mean": 31.9342560554,
"line_max": 121,
"alpha_frac": 0.4655389788,
"autogenerated": false,
"ratio": 4.202207505518764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5167746484318764,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bethard'
import argparse
import collections
import copy
import functools
import glob
import logging
import os
import re
import anafora
import anafora.select
class Scores(object):
    """Running precision/recall/F1 tallies over sets of annotations."""

    def __init__(self):
        # counts of reference annotations, predicted annotations, and
        # predictions that exactly matched a reference
        self.reference = 0
        self.predicted = 0
        self.correct = 0

    def add(self, reference, predicted):
        """
        :param set reference: the reference annotations
        :param set predicted: the predicted annotations
        """
        self.reference += len(reference)
        self.predicted += len(predicted)
        self.correct += len(reference & predicted)

    def update(self, other):
        """
        :param Scores other: scores to merge into this one
        """
        self.reference += other.reference
        self.predicted += other.predicted
        self.correct += other.correct

    def precision(self):
        """
        :return float: the fraction of predicted annotations that were correct
        """
        if self.predicted == 0:
            return 1.0
        return self.correct / float(self.predicted)

    def recall(self):
        """
        :return float: the fraction of reference annotations that were found
        """
        if self.reference == 0:
            return 1.0
        return self.correct / float(self.reference)

    def f1(self):
        """
        :return float: the harmonic mean of precision and recall
        """
        p, r = self.precision(), self.recall()
        if p + r == 0.0:
            return 0.0
        return 2 * p * r / (p + r)

    def __repr__(self):
        return "{0}(reference={1}, predicted={2}, correct={3})".format(
            self.__class__.__name__, self.reference, self.predicted, self.correct
        )
class DebuggingScores(Scores):
    """Scores variant that also records every mismatched annotation."""

    def __init__(self):
        Scores.__init__(self)
        # (annotation, reason) pairs, sorted within each add() call
        self.errors = []

    def add(self, reference, predicted):
        """
        :param set reference: the reference annotations
        :param set predicted: the predicted annotations
        """
        Scores.add(self, reference, predicted)
        mismatches = [(item, "not in predicted") for item in reference - predicted]
        mismatches += [(item, "not in reference") for item in predicted - reference]
        mismatches.sort()
        self.errors.extend(mismatches)

    def update(self, other):
        """
        :param DebuggingScores other: scores to merge into this one
        """
        Scores.update(self, other)
        self.errors.extend(other.errors)
class TemporalClosureScores(object):
def __init__(self):
self.reference = 0
self.predicted = 0
self.precision_correct = 0
self.recall_correct = 0
@property
def correct(self):
return self.precision_correct, self.recall_correct
def add(self, reference, predicted):
"""
:param set reference: the reference annotations
:param set predicted: the predicted annotations
"""
reference = {a for a in reference if self._is_valid(a)}
predicted = {a for a in predicted if self._is_valid(a)}
reference = self._remove_duplicate_relations(reference)
predicted = self._remove_duplicate_relations(predicted)
self.reference += len(reference)
self.predicted += len(predicted)
self.precision_correct += len(self._closure(reference) & predicted)
self.recall_correct += len(reference & self._closure(predicted))
def update(self, other):
"""
:param TemporalClosureScores other: scores to merge into this one
"""
self.reference += other.reference
self.predicted += other.predicted
self.precision_correct += other.precision_correct
self.recall_correct += other.recall_correct
def precision(self):
"""
:return float: the fraction of predicted annotations that were correct (or inferable)
"""
return 1.0 if self.predicted == 0 else self.precision_correct / float(self.predicted)
def recall(self):
"""
:return float: the fraction of reference annotations that were found (or inferable)
"""
return 1.0 if self.reference == 0 else self.recall_correct / float(self.reference)
def f1(self):
"""
:return float: the harmonic mean of precision and recall
"""
p = self.precision()
r = self.recall()
return 0.0 if p + r == 0.0 else 2 * p * r / (p + r)
def __repr__(self):
return "{0}(reference={1}, predicted={2}, precision_correct={3}, recall_correct={4})".format(
self.__class__.__name__, self.reference, self.predicted, self.precision_correct, self.recall_correct
)
def _is_valid(self, annotation):
# temporal closure only makes sense with binary relations
try:
(start, end), _, prop = annotation
except (TypeError, ValueError):
msg = "temporal closure requires binary spans, found {0}"
raise RuntimeError(msg.format(annotation))
try:
name, value = prop
except (TypeError, ValueError):
msg = "temporal closure requires a single property, found {0}"
raise RuntimeError(msg.format(annotation))
# temporal closure only works on a defined set of temporal relations
if value not in self._interval_to_point:
logging.warning("invalid relation for temporal closure {0}".format(annotation))
return False
# otherwise, temporal closure should work
return True
def _remove_duplicate_relations(self, annotations):
seen_point_relations = set()
result_annotations = set()
for annotation in annotations:
# only include this annotation if no previous annotation expanded to the same point relations
point_relations = frozenset(self._to_point_relations(annotation))
if point_relations not in seen_point_relations:
seen_point_relations.add(point_relations)
result_annotations.add(annotation)
# return the filtered annotations
return result_annotations
def _to_point_relations(self, annotation):
start = self._start
end = self._end
# converts an interval relation to point relations
point_relations = set()
intervals, _, (_, value) = annotation
interval1, interval2 = intervals
# the start of an interval is always before its end
point_relations.add(((interval1, start), "<", (interval1, end)))
point_relations.add(((interval2, start), "<", (interval2, end)))
# use the interval-to-point lookup table to add the necessary point relations
for index1, side1, relation, index2, side2 in self._interval_to_point[value]:
point1 = (intervals[index1], side1)
point2 = (intervals[index2], side2)
point_relations.add((point1, relation, point2))
# for reflexive point relations, add them in the other direction too
if relation == "=":
point_relations.add((point2, relation, point1))
# return the collected relations
return point_relations
    def _to_interval_relations(self, point_relations, annotations):
        """Convert a set of point relations back into interval-relation annotations.

        :param point_relations: set of ((interval, side), relation, (interval, side)) triples
        :param annotations: the original annotations, used to recover the (type, property)
            names under which each interval was annotated
        :return set: (interval-pair, type-name, (property-name, relation)) tuples for every
            interval pair whose point relations satisfy some entry of _interval_to_point
        """
        # map intervals to names
        interval_names = collections.defaultdict(set)
        for spans, type_name, (prop_name, _) in annotations:
            for span in spans:
                interval_names[span].add((type_name, prop_name))
        # find all pairs of intervals that have some point relation between them (and whose names match)
        pair_names = {}
        for ((interval1, _), _, (interval2, _)) in point_relations:
            names = interval_names[interval1] & interval_names[interval2]
            if names:
                # record both orderings so each direction of the pair is checked below
                pair_names[(interval1, interval2)] = names
                pair_names[(interval2, interval1)] = names
        # for each interval pair, see if it satisfies the point-wise requirements for any interval relations
        interval_relations = set()
        for pair in pair_names:
            names = pair_names[pair]
            for relation, requirements in self._interval_to_point.items():
                if all(((pair[i1], s1), r, (pair[i2], s2)) in point_relations
                       for i1, s1, r, i2, s2 in requirements):
                    for type_name, prop_name in names:
                        interval_relations.add((pair, type_name, (prop_name, relation)))
        # return the collected relations
        return interval_relations
    def _closure(self, annotations):
        """Compute the temporal closure of a set of interval-relation annotations.

        Expands all annotations into point relations, saturates them under the
        point transitivity table (a fixed-point iteration), and converts the
        result back into interval relations.
        """
        # convert interval relations to point relations
        new_relations = {r for a in annotations for r in self._to_point_relations(a)}
        # repeatedly apply point transitivity rules until no new relations can be inferred
        point_relations = set()
        point_relations_index = collections.defaultdict(set)
        while new_relations:
            # update the result and the index with any new relations found on the last iteration
            point_relations.update(new_relations)
            for point_relation in new_relations:
                point_relations_index[point_relation[0]].add(point_relation)
            # infer any new transitive relations, e.g., if A < B and B < C then A < C
            new_relations = set()
            for point1, relation12, point2 in point_relations:
                for _, relation23, point3 in point_relations_index[point2]:
                    relation13 = self._point_transitions[relation12][relation23]
                    new_relation = (point1, relation13, point3)
                    if new_relation not in point_relations:
                        new_relations.add(new_relation)
        # convert the point relations back to interval relations
        return self._to_interval_relations(point_relations, annotations)
# constants representing the start point and end point of an interval
_start = 0
_end = 1
# mapping from interval relation names to point relations
# for example, BEFORE means that the first interval's end is before the second interval's start
_interval_to_point = {
"BEFORE": [(0, _end, "<", 1, _start)],
"AFTER": [(1, _end, "<", 0, _start)],
"IBEFORE": [(0, _end, "=", 1, _start)],
"IAFTER": [(0, _start, "=", 1, _end)],
"CONTAINS": [(0, _start, "<", 1, _start), (1, _end, "<", 0, _end)],
"INCLUDES": [(0, _start, "<", 1, _start), (1, _end, "<", 0, _end)],
"IS_INCLUDED": [(1, _start, "<", 0, _start), (0, _end, "<", 1, _end)],
"BEGINS-ON": [(0, _start, "=", 1, _start)],
"ENDS-ON": [(0, _end, "=", 1, _end)],
"BEGINS": [(0, _start, "=", 1, _start), (0, _end, "<", 1, _end)],
"BEGUN_BY": [(0, _start, "=", 1, _start), (1, _end, "<", 0, _end)],
"ENDS": [(1, _start, "<", 0, _start), (0, _end, "=", 1, _end)],
"ENDED_BY": [(0, _start, "<", 1, _start), (0, _end, "=", 1, _end)],
"SIMULTANEOUS": [(0, _start, "=", 1, _start), (0, _end, "=", 1, _end)],
"IDENTITY": [(0, _start, "=", 1, _start), (0, _end, "=", 1, _end)],
"DURING": [(0, _start, "=", 1, _start), (0, _end, "=", 1, _end)],
"DURING_INV": [(0, _start, "=", 1, _start), (0, _end, "=", 1, _end)],
"OVERLAP": [(0, _start, "<", 1, _end), (1, _start, "<", 0, _end)],
}
# transitivity table for point relations
_point_transitions = {
"<": {"<": "<", "=": "<"},
"=": {"<": "<", "=": "="},
}
@functools.total_ordering
class _OverlappingSpans(object):
def __init__(self, spans):
self.spans = spans
def __iter__(self):
return iter(self.spans)
def __eq__(self, other):
for self_start, self_end in self.spans:
for other_start, other_end in other.spans:
if self_start < other_end and other_start < self_end:
return True
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return 0
def __lt__(self, other):
return self.spans < other.spans
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, self.spans)
class ToSet(object):
    """Converts annotations into hashable (spans, type, properties) tuples for one "view".

    A view is either all annotations ("*"), one annotation type, one
    type:property pair, or one type:property:value triple; `accept` filters
    annotations to the view and `key` normalizes them for set comparison.
    """
    def __init__(self,
                 select,
                 spans_type=None,
                 type_name="*",
                 prop_name="*",
                 prop_value="*"):
        # select: callable implementing the include/exclude rules
        # spans_type: optional wrapper applied to entity spans (e.g. _OverlappingSpans)
        # type_name / prop_name / prop_value: "*" means "any"; prop_name=None
        # selects the spans-only ("<span>") view
        self.select = select
        self.spans_type = spans_type
        self.type_name = type_name
        self.prop_name = prop_name
        self.prop_value = prop_value
    def accept(self, annotation):
        """Return True if the annotation belongs in this view."""
        if self.select(annotation.type, self.prop_name, self.prop_value) or \
                self.select(annotation.type, "<span>"):
            if self.type_name == "*" or annotation.type == self.type_name:
                if self.prop_name == "*" or self.prop_value == "*":
                    return True
                if self.prop_name is not None:
                    if self.prop_name in annotation.properties:
                        if annotation.properties[self.prop_name] == self.prop_value:
                            return True
        return False
    def key(self, annotation):
        """Return a hashable (spans, type, properties) key for the annotation.

        Non-annotation values (plain property values) are returned unchanged;
        annotation-valued properties are recursively converted to keys.
        """
        if not isinstance(annotation, anafora.AnaforaAnnotation):
            return annotation
        spans = self._spans(annotation)
        props = None
        if self.prop_name == "*":
            # all-properties view: keep every selected, non-None property
            props = []
            for name in sorted(annotation.properties):
                value = annotation.properties[name]
                if value is None:
                    continue
                if annotation.type == self.type_name:
                    if not self.select(annotation.type, name, value):
                        continue
                if isinstance(value, anafora.AnaforaAnnotation):
                    if self.select.is_excluded(value.type):
                        continue
                props.append((name, self.key(value)))
            props = tuple(props)
        elif self.prop_name is not None and annotation.type == self.type_name:
            # single-property view: keep just that property
            if self.select(annotation.type, self.prop_name, self.prop_value):
                if self.prop_name in annotation.properties:
                    value = self.key(annotation.properties[self.prop_name])
                    props = self.prop_name, value
        return spans, annotation.type, props
    def _spans(self, annotation):
        """Return the (possibly wrapped) spans of an entity, or the tuple of
        entity spans reachable from a relation's properties."""
        if isinstance(annotation, anafora.AnaforaEntity):
            spans = annotation.spans
            if self.spans_type is not None:
                spans = self.spans_type(spans)
        elif isinstance(annotation, anafora.AnaforaRelation):
            spans = tuple(
                self._spans(annotation.properties[prop_name])
                for prop_name in sorted(annotation.properties)
                if isinstance(annotation.properties[prop_name], anafora.AnaforaAnnotation))
            # unwrap single-argument relations so their spans look like entity spans
            if len(spans) == 1:
                spans = spans[0]
        else:
            raise ValueError("unknown annotation type: {0}".format(annotation))
        return spans
    def __call__(self, iterable):
        """Return the set of keys for all accepted annotations."""
        return {self.key(x) for x in iterable if self.accept(x)}
def score_data(reference_data, predicted_data, include=None, exclude=None,
               scores_type=Scores, spans_type=None):
    """
    :param AnaforaData reference_data: reference ("gold standard") Anafora data
    :param AnaforaData predicted_data: predicted (system-generated) Anafora data
    :param set include: types of annotations to include (others will be excluded); may be type names,
        (type-name, property-name) tuples, (type-name, property-name, property-value) tuples
    :param set exclude: types of annotations to exclude; may be type names, (type-name, property-name) tuples,
        (type-name, property-name, property-value) tuples
    :param type scores_type: type for calculating matches between predictions and reference
    :param type spans_type: wrapper object to apply to annotation spans
    :return dict: mapping from (annotation type[, property name[, property value]]) to Scores object
    """
    # returns true if this type:property:value is accepted by includes= and excludes=
    select = anafora.select.Select(include, exclude)
    # get reference and predicted annotations
    reference_annotations = reference_data.annotations
    predicted_annotations = [] if predicted_data is None else predicted_data.annotations
    # determines available views by examining all the annotations
    span = "<span>"
    views = {}
    # global views covering all annotation types
    if select("*"):
        views["*"] = ToSet(select=select,
                           spans_type=spans_type)
    if select("*", span):
        views["*", span] = ToSet(select=select,
                                 spans_type=spans_type,
                                 prop_name=None)
    # per-type, per-property and per-property-value views, discovered from the data
    for annotations in [reference_annotations, predicted_annotations]:
        for ann in annotations:
            if ann.type not in views:
                if select(ann.type):
                    views[ann.type] = ToSet(select=select,
                                            spans_type=spans_type,
                                            type_name=ann.type)
            if (ann.type, span) not in views:
                if select(ann.type, span):
                    views[ann.type, span] = ToSet(select=select,
                                                  spans_type=spans_type,
                                                  type_name=ann.type,
                                                  prop_name=None)
            for prop_name, prop_value in ann.properties.items():
                if (ann.type, prop_name) not in views:
                    if select(ann.type, prop_name):
                        views[ann.type, prop_name] = ToSet(
                            select=select,
                            spans_type=spans_type,
                            type_name=ann.type,
                            prop_name=prop_name)
                # annotation-valued properties get no per-value view
                if not isinstance(prop_value, anafora.AnaforaAnnotation):
                    if (ann.type, prop_name, prop_value) not in views:
                        if select(ann.type, prop_name, prop_value):
                            if prop_value is not None:
                                views[ann.type, prop_name, prop_value] = ToSet(
                                    select=select,
                                    spans_type=spans_type,
                                    type_name=ann.type,
                                    prop_name=prop_name,
                                    prop_value=prop_value)
    # fill a mapping from a name (type, type:property or type:property:value) to the corresponding scores
    result = collections.defaultdict(lambda: scores_type())
    for view_name in sorted(views, key=lambda x: x if isinstance(x, tuple) else (x,)):
        to_set = views[view_name]
        set1 = to_set(reference_annotations)
        set2 = to_set(predicted_annotations)
        result[view_name].add(set1, set2)
    # return the collected scores
    return result
def _load(xml_path):
"""
Tries to load data from an Anafora XML file, issuing errors on failure.
:param xml_path: the path to an Anafora XML file
:return AnaforaData: the data loaded from the XML, or None if there was a failure
"""
if not os.path.exists(xml_path):
logging.warn("%s: no such file", xml_path)
return None
try:
data = anafora.AnaforaData.from_file(xml_path)
except anafora.ElementTree.ParseError:
logging.warn("%s: ignoring invalid XML", xml_path)
return None
else:
return data
def score_dirs(reference_dir, predicted_dir, xml_name_regex="[.]xml$", text_dir=None,
               include=None, exclude=None, scores_type=Scores, spans_type=None):
    """
    :param string reference_dir: directory containing reference ("gold standard") Anafora XML directories
    :param string predicted_dir: directory containing predicted (system-generated) Anafora XML directories
    :param xml_name_regex: regular expression matching the files to be compared
    :param string text_dir: directory containing the raw texts corresponding to the Anafora XML
        (if None, texts are assumed to be in the reference dir)
    :param set include: types of annotations to include (others will be excluded); may be type names,
        (type-name, property-name) tuples, (type-name, property-name, property-value) tuples
    :param set exclude: types of annotations to exclude; may be type names, (type-name, property-name) tuples,
        (type-name, property-name, property-value) tuples
    :param type scores_type: type for calculating matches between predictions and reference
    :param type spans_type: wrapper object to apply to annotation spans
    :return iter: an iterator of (file-name, name-to-scores) where name-to-scores is a mapping from
        (annotation type[, property name[, property value]]) to a Scores object
    """
    # walks through the reference Anafora XML directories, scoring each and adding those to the overall scores
    for sub_dir, text_name, reference_xml_names in anafora.walk(reference_dir, xml_name_regex):
        # load the reference data from its Anafora XML
        try:
            [reference_xml_name] = reference_xml_names
        except ValueError:
            logging.warning("expected one reference file for %s, found %s", text_name, reference_xml_names)
            if not reference_xml_names:
                continue
            reference_xml_name = reference_xml_names[0]
        reference_xml_path = os.path.join(reference_dir, sub_dir, reference_xml_name)
        reference_data = _load(reference_xml_path)
        # bug fix: _load returns None for missing/invalid XML; skip such files
        # rather than crashing on reference_data.annotations below
        if reference_data is None:
            continue
        # check for self-references in the annotations, which cause equality and hashing to fail
        self_reference = reference_data.annotations.find_self_referential()
        if self_reference is not None:
            msg = "skipping reference file %s with self-referential annotation %s"
            logging.warning(msg, reference_xml_path, self_reference.id)
            continue
        # find and load the corresponding predicted data from its Anafora XML
        predicted_xml_glob = os.path.join(predicted_dir, sub_dir, text_name + "*.xml")
        predicted_xml_paths = [f for f in glob.glob(predicted_xml_glob) if re.search(xml_name_regex, f) is not None]
        try:
            [predicted_xml_path] = predicted_xml_paths
            predicted_data = _load(predicted_xml_path)
        except ValueError:
            logging.warning("expected one predicted file at %s, found %s", predicted_xml_glob, predicted_xml_paths)
            if not predicted_xml_paths:
                predicted_xml_path = None
                predicted_data = anafora.AnaforaData()
            else:
                predicted_xml_path = predicted_xml_paths[0]
                predicted_data = _load(predicted_xml_path)
        # bug fix: treat an unloadable predicted file as an empty prediction
        # rather than crashing on predicted_data.annotations below
        if predicted_data is None:
            predicted_data = anafora.AnaforaData()
        # check for self-references in the annotations, which cause equality and hashing to fail
        self_reference = predicted_data.annotations.find_self_referential()
        if self_reference is not None:
            msg = "skipping predicted file %s with self-referential annotation %s"
            logging.warning(msg, predicted_xml_path, self_reference.id)
            predicted_data = anafora.AnaforaData()
        # determine the path for the raw text source file
        if text_dir is None:
            text_path = os.path.join(reference_dir, sub_dir, text_name)
        else:
            text_path = os.path.join(text_dir, text_name)
        # if no raw text was found, then asking for the text of an annotation is an error
        if not os.path.exists(text_path) or not os.path.isfile(text_path):
            def _span_text(_):
                raise RuntimeError("no text file found at {0}".format(text_path))
        # otherwise, the text of an annotation can be extracted based on its spans
        else:
            with open(text_path) as text_file:
                text = text_file.read()

            def _flatten(items):
                # yield (start, end) pairs from arbitrarily nested span tuples
                if isinstance(items, tuple) and isinstance(items[0], int):
                    yield items
                else:
                    for item in items:
                        for flattened_items in _flatten(item):
                            yield flattened_items

            def _span_text(spans):
                return "...".join(text[start:end] for start, end in _flatten(spans))
        # score this data and update the overall scores
        named_scores = score_data(reference_data, predicted_data, include, exclude,
                                  scores_type=scores_type, spans_type=spans_type)
        for name, scores in named_scores.items():
            # if there were some predictions, and if we're using scores that keep track of errors, log the errors
            if predicted_xml_paths:
                for annotation, message in getattr(scores, "errors", []):
                    spans, _, _ = annotation
                    # bug fix: the original format string had an unbalanced trailing quote
                    logging.debug('%s: %s: "%s" %s', text_name, message, _span_text(spans), annotation)
        # generate the file name and the resulting scores
        yield text_name, named_scores
def score_annotators(anafora_dir, xml_name_regex, include=None, exclude=None,
                     scores_type=Scores, spans_type=None):
    """
    :param anafora_dir: directory containing Anafora XML directories
    :param xml_name_regex: regular expression matching the annotator files to be compared
    :param include: types of annotations to include (others will be excluded); may be type names,
        (type-name, property-name) tuples, (type-name, property-name, property-value) tuples
    :param set exclude: types of annotations to exclude; may be type names, (type-name, property-name) tuples,
        (type-name, property-name, property-value) tuples
    :param type scores_type: type for calculating matches between predictions and reference
    :param type spans_type: wrapper object to apply to annotation spans
    :return iter: an iterator of (file-name, name-to-scores) where name-to-scores is a mapping from
        (annotation type[, property name[, property value]]) to a Scores object
    """
    # pattern for extracting the annotator name from the Anafora XML file name
    annotator_name_regex = "([^.]*)[.][^.]*[.]xml$"

    # function for getting a canonical prefix corresponding to a pair of annotators
    def make_prefix(annotators):
        return "{0}-vs-{1}".format(*sorted(annotators))

    # walks through the Anafora XML directories, scoring each and adding those to the overall scores
    for sub_dir, text_name, xml_names in anafora.walk(anafora_dir, xml_name_regex):
        # load the data from each Anafora XML file
        annotator_data = []
        for xml_name in xml_names:
            # ignore in-progress annotations and automatic pre-annotations
            if '.inprogress.' in xml_name or '.preannotation.' in xml_name:
                continue
            # ignore empty files
            xml_path = os.path.join(anafora_dir, sub_dir, xml_name)
            if os.stat(xml_path).st_size == 0:
                continue
            # load the data and add it to the list
            data = _load(xml_path)
            # bug fix: _load returns None for invalid XML; skip such files
            # instead of crashing in score_data below
            if data is None:
                continue
            # bug fix: guard against file names the annotator pattern cannot parse
            match = re.search(annotator_name_regex, xml_name)
            if match is None:
                logging.warning("%s: cannot extract annotator name from %s", text_name, xml_name)
                continue
            annotator_data.append((match.group(1), data))
        # at least 2 annotators are needed for annotator agreement
        if len(annotator_data) < 2:
            logging.warning("%s: found fewer than 2 annotators: %s", text_name, xml_names)
            continue
        # pair each annotator with each other annotator
        annotator_named_scores = collections.defaultdict(lambda: scores_type())
        for i in range(len(annotator_data)):
            annotator1, data1 = annotator_data[i]
            for j in range(i + 1, len(annotator_data)):
                annotator2, data2 = annotator_data[j]
                # make a prefix for this specific pair of annotators
                prefix = make_prefix([annotator1, annotator2])
                # make a prefix where non-gold annotators are just called "annotator"
                general_prefix = make_prefix(
                    a if a == "gold" else "annotator" for a in [annotator1, annotator2])
                # perform the comparison of the two annotation sets and update the overall scores
                named_scores = score_data(data1, data2, include, exclude,
                                          scores_type=scores_type, spans_type=spans_type)
                # add annotators as prefixes
                for name, scores in named_scores.items():
                    if not isinstance(name, tuple):
                        name = name,
                    annotator_named_scores[(prefix,) + name].update(scores)
                    annotator_named_scores[(general_prefix,) + name].update(scores)
        # generate the filename and the resulting scores
        yield text_name, annotator_named_scores
def _print_document_scores(file_named_scores):
    """Print one precision/recall/F1 row per (document, score name) pair."""
    def _score_name(name):
        return ":".join(name) if isinstance(name, tuple) else name

    header = "{0:40}\t{1:40}\t{2:^5}\t{3:^5}\t{4:^5}\t{5:^5}\t{6:^5}\t{7:^5}"
    print(header.format("", "", "ref", "pred", "corr", "P", "R", "F1"))
    row = "{0!s:40}\t{1!s:40}\t{2!s:5}\t{3!s:5}\t{4!s:5}\t{5:5.3f}\t{6:5.3f}\t{7:5.3f}"
    for file_name, named_scores in file_named_scores:
        for name, scores in named_scores.items():
            print(row.format(file_name, _score_name(name),
                             scores.reference, scores.predicted, scores.correct,
                             scores.precision(), scores.recall(), scores.f1()))
def _print_merged_scores(file_named_scores, scores_type):
    """Aggregate scores across all documents, then print one row per score name."""
    merged = collections.defaultdict(scores_type)
    for _, named_scores in file_named_scores:
        for name, scores in named_scores.items():
            merged[name].update(scores)

    def _score_name(name):
        return ":".join(name) if isinstance(name, tuple) else name

    header = "{0:40}\t{1:^5}\t{2:^5}\t{3:^5}\t{4:^5}\t{5:^5}\t{6:^5}"
    print(header.format("", "ref", "pred", "corr", "P", "R", "F1"))
    row = "{0!s:40}\t{1!s:5}\t{2!s:5}\t{3!s:5}\t{4:5.3f}\t{5:5.3f}\t{6:5.3f}"
    for name in sorted(merged, key=_score_name):
        scores = merged[name]
        print(row.format(_score_name(name),
                         scores.reference, scores.predicted, scores.correct,
                         scores.precision(), scores.recall(), scores.f1()))
if __name__ == "__main__":
    def split_tuple_on_colons(string):
        """Parse "type:prop:value" into a tuple, leaving bare names as plain strings."""
        result = tuple(string.split(":"))
        return result[0] if len(result) == 1 else result

    parser = argparse.ArgumentParser(description="""%(prog)s compares one directory of Anafora XML annotations to
        another and prints statistics such as precision, recall and F-measure. It can also be used with a single
        Anafora XML directory to compute inter-annotator agreement.""")
    parser.set_defaults(scores_type=Scores)
    parser.add_argument("-r", "--reference", metavar="DIR", dest="reference_dir", required=True,
                        help="The root of a set of Anafora XML directories representing reference annotations.")
    parser.add_argument("-p", "--predicted", metavar="DIR", dest="predicted_dir",
                        help="The root of a set of Anafora XML directories representing system-predicted annotations.")
    parser.add_argument("-t", "--text", metavar="DIR", dest="text_dir",
                        help="A flat directory containing the raw text. By default, the reference directory is " +
                             "assumed to contain the raw text. (Text is typically only needed with --verbose.)")
    parser.add_argument("-i", "--include", metavar="EXPR", nargs="+", type=split_tuple_on_colons,
                        help="An expression identifying types of annotations to be included in the evaluation. " +
                             "The expression takes the form type[:property[:value]. For example, TLINK would only " +
                             "include TLINK annotations (and TLINK properties and property values) in the " +
                             "evaluation, while TLINK:Type:CONTAINS would only include TLINK annotations with a Type " +
                             "property that has the value CONTAINS.")
    parser.add_argument("-e", "--exclude", metavar="EXPR", nargs="+", type=split_tuple_on_colons,
                        help="An expression identifying types of annotations to be excluded from the evaluation. " +
                             "The expression takes the form type[:property[:value] (see --include).")
    parser.add_argument("-x", "--xml-name-regex", metavar="REGEX", default="[.]xml$",
                        help="A regular expression for matching XML files in the subdirectories, typically used to " +
                             "restrict the evaluation to a subset of the available files (default: %(default)r)")
    parser.add_argument("--temporal-closure", action="store_const", const=TemporalClosureScores, dest="scores_type",
                        help="Apply temporal closure on the reference annotations when calculating precision, and " +
                             "apply temporal closure on the predicted annotations when calculating recall. " +
                             "This must be combined with --include to restrict the evaluation to a Type:Property " +
                             "whose values are valid temporal relations (BEFORE, AFTER, INCLUDES, etc.)")
    parser.add_argument("--per-document", action="store_true",
                        help="Print out scores for each document, rather than overall scores")
    parser.add_argument("--verbose", action="store_const", const=DebuggingScores, dest="scores_type",
                        help="Include more information in the output, such as the reference expressions that were " +
                             "and the predicted expressions that were not in the reference.")
    parser.add_argument("--overlap", dest="spans_type", action="store_const", const=_OverlappingSpans,
                        help="Count predicted annotation spans as correct if they overlap by one character or more " +
                             "with a reference annotation span. Not intended as a real evaluation method (since what " +
                             "to do with multiple matches is not well defined) but useful for debugging purposes.")
    args = parser.parse_args()
    # --verbose implies debug-level logging so per-error messages are shown
    basic_config_kwargs = {"format": "%(levelname)s:%(message)s"}
    if args.scores_type == DebuggingScores:
        basic_config_kwargs["level"] = logging.DEBUG
    logging.basicConfig(**basic_config_kwargs)
    # with a predicted dir, compare predictions against the reference;
    # without one, compute inter-annotator agreement within the reference dir
    if args.predicted_dir is not None:
        _file_named_scores = score_dirs(
            reference_dir=args.reference_dir,
            predicted_dir=args.predicted_dir,
            xml_name_regex=args.xml_name_regex,
            text_dir=args.text_dir,
            include=args.include,
            exclude=args.exclude,
            scores_type=args.scores_type,
            spans_type=args.spans_type)
    else:
        _file_named_scores = score_annotators(
            anafora_dir=args.reference_dir,
            xml_name_regex=args.xml_name_regex,
            include=args.include,
            exclude=args.exclude,
            scores_type=args.scores_type,
            spans_type=args.spans_type)
    if args.per_document:
        _print_document_scores(_file_named_scores)
    else:
        _print_merged_scores(_file_named_scores, scores_type=args.scores_type)
| {
"repo_name": "bethard/anaforatools",
"path": "anafora/evaluate.py",
"copies": "1",
"size": "35734",
"license": "apache-2.0",
"hash": -7133460854526977000,
"line_mean": 44.8716302953,
"line_max": 120,
"alpha_frac": 0.5931605754,
"autogenerated": false,
"ratio": 4.1282347504621075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221395325862107,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BeyondSky'
# circular linked list + dictionary
class LRUCache(object):
    """LRU cache backed by a dict plus a circular doubly-linked list.

    The dummy head's ``next`` pointer is the most recently used entry and
    its ``prev`` pointer is the least recently used entry.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.hm = {}  # key -> Node
        self.CAPACITY = capacity
        self.head = Node(-1, -1)  # dummy head; the list is circular
        self.head.next = self.head
        self.head.prev = self.head
        self.size = 0

    def get(self, key):
        """
        :rtype: int
        """
        node = self.hm.get(key)
        if node is None:
            return -1
        # move the entry to the front (most recently used)
        self.detach_entry(node)
        self.add_first(node)
        return node.val

    def set(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: nothing
        """
        node = self.hm.get(key)
        if node is not None:
            # existing key: update the value and move it to the front
            self.detach_entry(node)
            node.val = value
            self.add_first(node)
            return
        # bug fix: with capacity <= 0 the original evicted from an empty
        # list, corrupting the dummy head and raising KeyError
        if self.CAPACITY <= 0:
            return
        if self.size == self.CAPACITY:
            self.remove_last()
            self.size -= 1
        node = Node(key, value)
        self.add_first(node)
        self.size += 1
        self.hm[key] = node

    def add_first(self, node):
        """Insert node right after the dummy head (most recently used)."""
        node.prev = self.head
        node.next = self.head.next
        self.head.next = node
        node.next.prev = node

    def remove_last(self):
        """Unlink and forget the least recently used entry (head.prev)."""
        temp = self.head.prev
        # bug fix: in a circular list head.prev is never None; an empty
        # list means head.prev is the head itself
        if temp is self.head:
            return
        temp.prev.next = self.head
        self.head.prev = temp.prev
        temp.prev = None
        temp.next = None
        del self.hm[temp.key]

    def detach_entry(self, node):
        """Unlink node from the list without touching the hash map."""
        node.prev.next = node.next
        node.next.prev = node.prev
class Node:
    """Doubly-linked list node holding one cache entry."""

    def __init__(self, key, val):
        self.key = key
        self.val = val
        # initialize the link fields per instance instead of relying on
        # class-level attribute defaults
        self.prev = None
        self.next = None
if __name__ == "__main__":
    # smoke test: with capacity 1, setting key 3 evicts key 2
    outer = LRUCache(1)
    outer.set(2, 1)
    print(outer.get(2))  # expected: 1
    outer.set(3, 2)
    print(outer.get(2))  # expected: -1 (evicted)
    print(outer.get(3))  # expected: 2
| {
"repo_name": "BeyondSkyCoder/BeyondCoder",
"path": "leetcode/python/LRUcache_design.py",
"copies": "1",
"size": "2005",
"license": "apache-2.0",
"hash": -490887249364260700,
"line_mean": 21.032967033,
"line_max": 48,
"alpha_frac": 0.4872817955,
"autogenerated": false,
"ratio": 3.652094717668488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.958052604990626,
"avg_score": 0.011770092652445593,
"num_lines": 91
} |
__author__ = 'BeyondSky'
from trie import Trie
from trie import TrieNode
class WordsearchI(object):
    """Word Search I: does a word exist as a path of adjacent board cells?"""

    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # an empty word trivially exists
        if not word:
            return True
        # bug fix: check for an empty board before indexing board[0]
        row = len(board)
        if row == 0:
            return False
        col = len(board[0])
        if col == 0:
            return False
        # a word longer than the number of cells can never fit
        if row * col < len(word):
            return False
        visited = [[0] * col for _ in range(row)]
        # try every cell as a starting point for the DFS
        for i in range(row):
            for j in range(col):
                if self.word_search_helper(board, word, visited, i, j, 0):
                    return True
        return False

    def word_search_helper(self, board, w, visited, i, j, pos):
        """DFS from (i, j) matching w[pos:]; visited marks the current path."""
        if board[i][j] != w[pos]:
            return False
        if pos == len(w) - 1:
            return True
        row = len(board)
        col = len(board[0])
        pos += 1
        visited[i][j] = 1
        if i > 0 and visited[i-1][j] == 0 and self.word_search_helper(board, w, visited, i-1, j, pos):
            return True
        if i < row-1 and visited[i+1][j] == 0 and self.word_search_helper(board, w, visited, i+1, j, pos):
            return True
        if j > 0 and visited[i][j-1] == 0 and self.word_search_helper(board, w, visited, i, j-1, pos):
            return True
        if j < col-1 and visited[i][j+1] == 0 and self.word_search_helper(board, w, visited, i, j+1, pos):
            return True
        # backtrack: the cell becomes available for other paths
        visited[i][j] = 0
        return False
class WordsearchII(object):
    """Word Search II: find all query words that exist as board paths.

    A trie over the query words lets the DFS walk the board and the trie
    in lockstep, pruning any path that is not a prefix of some word.
    """

    def __init__(self):
        # bug fix: the found-word set must be per instance; the original
        # class-level set leaked results between instances
        self.hs = set()

    def findWords(self, board, words):
        """
        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        res = []
        # bug fix: check for an empty board before indexing board[0]
        row = len(board)
        if row == 0:
            return res
        col = len(board[0])
        if col == 0:
            return res
        visited = [[0] * col for _ in range(row)]
        tr = Trie()
        for s in words:
            tr.add_word(s)
        root = tr.root
        for i in range(row):
            for j in range(col):
                # prune search earlier if word is not in trie
                idx = board[i][j]
                if root.children.get(idx) is None:
                    continue
                visited[i][j] = 1
                self.word_search_helper2_dfs(board, res, root.children[idx], visited, i, j)
                visited[i][j] = 0
        return sorted(res)

    def word_search_helper2_dfs(self, board, res, trie_node, visited, i, j):
        """DFS walking the board and trie together, collecting complete words."""
        if trie_node.isWord is True:
            s = trie_node.get_word()
            if s not in self.hs:  # avoid dup to the res list
                self.hs.add(s)
                res.append(s)
        row = len(board)
        col = len(board[0])
        # UP
        if i > 0 and visited[i-1][j] == 0:
            idx = board[i-1][j]
            if trie_node.children.get(idx) is not None:
                visited[i-1][j] = 1
                self.word_search_helper2_dfs(board, res, trie_node.children[idx], visited, i-1, j)
                visited[i-1][j] = 0
        # DOWN
        if i < row-1 and visited[i+1][j] == 0:
            idx = board[i+1][j]
            if trie_node.children.get(idx) is not None:
                visited[i+1][j] = 1
                self.word_search_helper2_dfs(board, res, trie_node.children[idx], visited, i+1, j)
                visited[i+1][j] = 0
        # LEFT
        if j > 0 and visited[i][j-1] == 0:
            idx = board[i][j-1]
            if trie_node.children.get(idx) is not None:
                visited[i][j-1] = 1
                self.word_search_helper2_dfs(board, res, trie_node.children[idx], visited, i, j-1)
                visited[i][j-1] = 0
        # RIGHT
        if j < col-1 and visited[i][j+1] == 0:
            idx = board[i][j+1]
            if trie_node.children.get(idx) is not None:
                visited[i][j+1] = 1
                self.word_search_helper2_dfs(board, res, trie_node.children[idx], visited, i, j+1)
                visited[i][j+1] = 0
if __name__ == "__main__":
    outer = WordsearchII()
    matrix1 = [ ['a', 'b', 'd'], ['e', 'c', 'f']]
    # NOTE(review): matrix2 is a list of strings; indexing a string yields
    # single characters, so it behaves like a 2x1 character board
    matrix2 = ["a", "a"]
print(outer.findWords(matrix2, ['a'])) | {
"repo_name": "BeyondSkyCoder/BeyondCoder",
"path": "leetcode/python/word_search_I_II_bt.py",
"copies": "1",
"size": "4215",
"license": "apache-2.0",
"hash": -3195767723217396000,
"line_mean": 29.1142857143,
"line_max": 106,
"alpha_frac": 0.4778173191,
"autogenerated": false,
"ratio": 3.33729216152019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.431510948062019,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BeyondSky'
import re
class Solution:
    """Integer arithmetic expression evaluators.

    calculate_I: shunting-yard -> RPN -> evaluation; supports + - * / and
    parentheses. calculate_II: single-pass evaluation of + - * / without
    parentheses. Division truncates toward zero, like C integer division.
    Unary operators are not supported.
    """

    operators = ['+', '-', '*', '/']

    # @param {string} s
    # @return {integer}
    def calculate_I(self, s):
        return self.evalRPN(self.toRPN(s))

    def toRPN(self, s):
        """Convert an infix expression into a list of RPN tokens (shunting-yard)."""
        output = []
        op_stack = []
        digits = ''
        for ch in s:
            if ch.isdigit():
                digits += ch
                continue
            # a non-digit ends any number currently being collected
            if digits:
                output.append(digits)
                digits = ''
            if ch in self.operators:
                # emit operators of greater or equal precedence first
                while op_stack and self.getPriority(op_stack[-1]) >= self.getPriority(ch):
                    output.append(op_stack.pop())
                op_stack.append(ch)
            elif ch == '(':
                op_stack.append(ch)
            elif ch == ')':
                while op_stack and op_stack[-1] != '(':
                    output.append(op_stack.pop())
                op_stack.pop()  # discard the matching '('
        if digits:
            output.append(digits)
        while op_stack:
            output.append(op_stack.pop())
        return output

    def evalRPN(self, tokens):
        """Evaluate a list of RPN tokens and return the integer result."""
        stack = []
        for token in tokens:
            if token not in self.operators:
                stack.append(int(token))
            else:
                rhs = stack.pop()
                lhs = stack.pop()
                stack.append(self.getVal(lhs, rhs, token))
        return stack[0]

    def getVal(self, x, y, operator):
        """Apply a binary operator; '/' truncates toward zero."""
        if operator == '+':
            return x + y
        elif operator == '-':
            return x - y
        elif operator == '*':
            return x * y
        elif operator == '/':
            return int(float(x) / y)

    def getPriority(self, operator):
        """Precedence for shunting-yard; '(' and unknown tokens bind loosest."""
        if operator == '*' or operator == '/':
            return 2
        elif operator == '+' or operator == '-':
            return 1
        return 0

    # @param {string} s
    # @return {integer}
    def calculate_II(self, s):
        """Evaluate an expression of digits and + - * / without parentheses."""
        # pad each number with spaces so split() produces clean tokens
        tokens = re.sub(r'\d+', r' \g<0> ', s).split()
        total = 0   # sum of completed terms
        sign = 1    # pending +1/-1 applied when the current term completes
        term = 0    # current product/quotient being accumulated
        i = 0
        while i < len(tokens):
            tok = tokens[i]
            if tok == '+' or tok == '-':
                total += sign * term
                sign = 1 if tok == '+' else -1
            elif tok == '*':
                i += 1
                term = term * int(tokens[i])
            elif tok == '/':
                i += 1
                # truncate toward zero, matching calculate_I's division
                term = int(float(term) / int(tokens[i]))
            else:
                term = int(tok)
            i += 1
        return total + sign * term
# smoke test for the single-pass calculator
if __name__ == "__main__":
    outer = Solution()
print(outer.calculate_II("1-1+1*2")) | {
"repo_name": "BeyondSkyCoder/BeyondCoder",
"path": "leetcode/python/basic_calculator.py",
"copies": "1",
"size": "2636",
"license": "apache-2.0",
"hash": -6817870075601150000,
"line_mean": 27.3548387097,
"line_max": 125,
"alpha_frac": 0.4074355083,
"autogenerated": false,
"ratio": 3.893648449039882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9769095765852249,
"avg_score": 0.0063976382975266255,
"num_lines": 93
} |
__author__ = 'BeyondSky'
class Trie:
    """Prefix tree storing words character by character."""

    def __init__(self):
        # dummy root; its value is never part of any stored word
        self.root = TrieNode('z')

    def add_word(self, word):
        """Insert word, creating intermediate nodes as needed."""
        node = self.root
        for c in word:
            child = node.children.get(c)
            if child is None:
                child = TrieNode(c)
                child.father = node
                node.children[c] = child
            node = child
        node.isWord = True

    def delete(self, word):
        """Remove word and prune now-unused nodes; return False if absent."""
        node = self.root
        queue = []
        for c in word:
            queue.append((c, node))
            child = node.children.get(c)
            if child is None:
                return False
            node = child
        if node.isWord is False:
            return False
        if len(node.children):
            # the node is a prefix of other words, so just unmark it
            node.isWord = False
        else:
            # walk back up, deleting childless non-word nodes
            for letter, parent in reversed(queue):
                # bug fix: delete the edge for this level's letter; the
                # original deleted children[c], where c was the loop-leftover
                # last character of the word
                del parent.children[letter]
                if len(parent.children) or parent.isWord:
                    break
        return True
class TrieNode:
    """Single trie node; children maps a character to the child node."""

    def __init__(self, value):
        self.val = value
        self.isWord = False   # True when a stored word ends at this node
        self.children = {}    # char -> TrieNode
        self.father = None    # parent link, used by get_word

    def get_word(self):
        """Rebuild the word ending at this node by walking parent links."""
        letters = []
        node = self
        # the root (the only node without a father) is excluded
        while node.father is not None:
            letters.append(str(node.val))
            node = node.father
        letters.reverse()
        return ''.join(letters)
"repo_name": "BeyondSkyCoder/BeyondCoder",
"path": "leetcode/python/trie.py",
"copies": "1",
"size": "1400",
"license": "apache-2.0",
"hash": -7895977707106036000,
"line_mean": 23.5789473684,
"line_max": 70,
"alpha_frac": 0.4814285714,
"autogenerated": false,
"ratio": 4.117647058823529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014473684210526317,
"num_lines": 57
} |
__author__ = 'bgrace'
class Token(object):
def __init__(self, contents):
self.contents = contents
class DocumentDelimiterToken(Token):
pass
class DocumentTypeToken(Token):
pass
class DocumentAntecedentToken(Token):
pass
class DocumentBody(Token):
pass
class Tokenizer(object):
"""
document = doc_delimiter newline body {newline doc_delimiter whitespace symbol newline body};
doc_delimiter = "---";
newline = "\n";
whitespace = " " | "\t";
symbol = '@', alphanumeric | '!', alphanumeric;
alphanumeric = ? a-aA-Z ?, ? a-aA-Z0-9 ?
body = {line} | delimiter, newline;
line = {printable_character} newline;
"""
delimiter = "---"
def __init__(self, readable):
self.input = readable
self.tokens = []
def tokenize_input(self):
for line in self.input.readline():
self.tokens.append(self.tokenize_line(line))
def tokenize_line(self, line):
if line.startswith("---"):
# TODO I stopped working right here, finishing this block!
pass | {
"repo_name": "bgrace/wagtail-commons",
"path": "wagtail_commons/core/management/commands/hd_parser.py",
"copies": "1",
"size": "1090",
"license": "bsd-3-clause",
"hash": 8281343669131658000,
"line_mean": 19.9807692308,
"line_max": 97,
"alpha_frac": 0.6018348624,
"autogenerated": false,
"ratio": 3.920863309352518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023139063317634745,
"num_lines": 52
} |
__author__ = 'BH4101'
import data
import feature_extraction
# Load the corpus of posts and split it into train/test feature matrices.
# NOTE(review): the `data` module is immediately shadowed by the loaded posts.
data = data.posts()
features_train, features_test, label_train, label_test = feature_extraction.extract_features(data, train_size=0.8, with_stemmer=True, tfidf=True)
print "Training the model"
from sklearn.dummy import DummyClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.svm import LinearSVC
# Two trivial baselines plus a sweep of linear SVMs over a range of C values.
baseline_classifiers = [DummyClassifier(strategy="most_frequent"), DummyClassifier(strategy="stratified")]
C_range = [0.01, 0.1, 0.3, 1, 3, 10, 15, 30, 50, 80, 100, 300, 1000]
def linear_svg(C):
    # Build a linear SVM with regularisation strength C.
    return LinearSVC(C=C)
classifiers = baseline_classifiers
classifiers.extend(map(linear_svg, C_range))
def fit(classifier):
    # Train on the split above and report held-out accuracy and F1.
    classifier.fit(features_train, label_train)
    predicted = classifier.predict(features_test)
    accuracy = accuracy_score(label_test, predicted)
    f1 = f1_score(label_test, predicted)
    print "%s, accuracy=%s, f1_score=%s" % (classifier, accuracy, f1)
for classifier in classifiers:
    fit(classifier)
| {
"repo_name": "rux-pizza/discourse-analysis",
"path": "like_prediction.py",
"copies": "1",
"size": "1030",
"license": "mit",
"hash": 7020325440131694000,
"line_mean": 27.6111111111,
"line_max": 145,
"alpha_frac": 0.7368932039,
"autogenerated": false,
"ratio": 3.311897106109325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4548790310009325,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bharathramh'
from Vertex import *
from Edges import *
from Graph import *
from MinHeap import *
import sys
class Dijkstra:
def __init__(self):
pass
def dijkstra(self):
self.initializeSingleSource()
S = [] #a set of vertices whose final shortest path weights have already been determined
Q = list(self.graph.vertexMap.values()) #list of vertices
self.heap = MinHeap(Q, key = lambda vertex : vertex.d) #heap implementation using key as vertex.
while len(self.heap) > 0:
# while Q != None and len(Q) > 0:
# minDVertex = min(Q, key= lambda v: v.d) #If using basic list, which uses O(n) as the running time.
# Q.remove(minDVertex)
minDVertex = self.heap.extractMin()
if not minDVertex.status :
continue #skipping the vertices which are down.
S.append(minDVertex)
for edge in minDVertex.adj: #retrieving edges
# print("adjcnt item is ", edge.destination.name)
if not edge.status or not edge.destination.status:
continue #skipping the edges which are down.
nextVertex = edge.destination #retrieving destination vertex from edge data
transit_time = edge.transit_time
self.Relax(minDVertex, nextVertex, transit_time)
return self.source, self.destination
def Relax(self, minDVertex, nextVertex, transit_time):
newValue = minDVertex.d + transit_time
if nextVertex.d > newValue:
# nextVertex.d = minDVertex.d + transit_time
self.heap.heapDecreaseKey(nextVertex, newValue, nextVertex.setKeyForHeap)
nextVertex.pi = minDVertex
def initializeSingleSource(self): #O(V)
for vertex in self.graph.vertexMap.values():
vertex.d = sys.maxsize
vertex.pi = None
self.source.d = 0
def minPath(self, graph, source, destination):
self.graph = graph
self.destination = graph.vertexMap[destination]
self.source = graph.vertexMap[source] #O(1)
return self.dijkstra() | {
"repo_name": "bharathramh92/dijkstra_test",
"path": "Dijkstra.py",
"copies": "1",
"size": "2431",
"license": "mit",
"hash": 4974293569293766000,
"line_mean": 37.6031746032,
"line_max": 139,
"alpha_frac": 0.550802139,
"autogenerated": false,
"ratio": 4.287477954144621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5338280093144621,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bharathramh'
import sys
class Vertex:
""" Vertex class to store vertex details
"""
def __init__(self, name):
self.name = name
self.adj = []
self.status = True
self.d = [sys.maxsize]
self.pi = None
self.reset()
def setKeyForHeap(self, d):
self.d = d
return True
def addEdge(self, edge):
self.adj.append(edge)
def getEdgeFromVertex(self, dest):
for temp in self.adj:
if dest == temp.destination:
return temp
return None
def deleteEdge(self, dest):
try:
self.adj.remove(self.getEdgeFromVertex(dest))
except ValueError as e:
print("Edge not found ")
def reset(self):
self.dist = sys.maxsize
self.prev = None
def __repr__(self):
return "\n\nVertex data \nname : %s adj : %s dist : %f status : %s prev : %s d: %s pi : %s"\
%(self.name, self.adj, self.dist, self.status, self.prev, self.d[0], self.pi)
| {
"repo_name": "bharathramh92/dijkstra_test",
"path": "Vertex.py",
"copies": "1",
"size": "1054",
"license": "mit",
"hash": -1299653222844346000,
"line_mean": 21.4255319149,
"line_max": 100,
"alpha_frac": 0.5322580645,
"autogenerated": false,
"ratio": 3.6095890410958904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46418471055958904,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bharathramh'
import sys
WHITE = 255
GREY = 100
BLACK = 0
class BFS:
def __init__(self, graph):
self.graph = graph
def BFS(self, graph, source): #Running of BFS will be O(V+E)
if source.status == False:
return
# Initialization
for vertex in graph.vertexMap.values():
vertex.color = WHITE
vertex.d = sys.maxsize
vertex.pi = None
source.color = GREY
source.d = 0
source.pi = None
Q = []
Q.append(source)
while Q != None and len(Q) > 0:
u = Q.pop(0)
if u.status != True:
continue
for edge in graph.vertexMap[u.name].adj:
if edge.status == True and edge.destination.color == WHITE:
edge.destination.color = GREY
edge.destination.d = u.d + 1
edge.destination.pi = u
Q.append(edge.destination)
u.color = BLACK
def updataGraph(self, graph):
self.graph = graph
| {
"repo_name": "bharathramh92/dijkstra_test",
"path": "BFS.py",
"copies": "1",
"size": "1124",
"license": "mit",
"hash": 3718190244089621000,
"line_mean": 23.4347826087,
"line_max": 102,
"alpha_frac": 0.4822064057,
"autogenerated": false,
"ratio": 4.043165467625899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9894137208081492,
"avg_score": 0.02624693304888146,
"num_lines": 46
} |
__author__ = 'bharathramh'
class MinHeap:
"""This Method is for Object and key for which the heap has to built should be passed while initializing.
Build heap method will be called once the object is instantiated.
updateData will also call the build heap method with the new data."""
def __init__(self, data, key):
self.data = data
self.key = key
self.build_heap()
def updateData(self, data, key):
self.data = data
self.key = key
self.build_heap(self.data)
def parent(self, i):
return (i-1)//2
def left(self, i):
return 2*i + 1
def right(self, i):
return 2*i+2
def min_heapify(self,i):
l=self.left(i)
r=self.right(i)
smallest=i
if l<self.aHeapsize and self.key(self.data[l]) < self.key(self.data[i]):
smallest=l
if r<self.aHeapsize and self.key(self.data[r]) < self.key(self.data[smallest]):
smallest=r
if smallest != i:
if smallest == l:
if l<self.aHeapsize: self.data[l].position = i
self.data[i].position = l
if r<self.aHeapsize: self.data[r].position = r
elif smallest == r:
if r<self.aHeapsize: self.data[r].position = i
self.data[i].position = r
if l<self.aHeapsize: self.data[l].position = l
self.data[smallest],self.data[i]=self.data[i],self.data[smallest]
self.min_heapify(smallest)
else:
if l<self.aHeapsize: self.data[l].position = l
self.data[i].position = i
if r<self.aHeapsize: self.data[r].position = r
def build_heap(self): #O(n)
self.aHeapsize = len(self.data)
loc_i = self.parent(self.aHeapsize -1)
while loc_i>=0:
self.min_heapify(loc_i)
loc_i-=1
return self.data
def extractMin(self): #O(log(n))
if self.aHeapsize < 1:
return None
min = self.data[0]
self.data[0] = self.data[self.aHeapsize-1]
self.aHeapsize -= 1
if self.aHeapsize > 0:
self.min_heapify(0)
return min
def heapDecreaseKey(self, node, newValue, setKeyFunction):
"""
:param node: The object whose priority has to decreased
:param newValue: The new value of the Key in the object
:setKeyFunction sk: Key setter function in object
:return: Boolean
"""
if newValue > self.key(node): #new value should be smaller than the old one
return False
i = node.position #index
setKeyFunction(newValue) #setting the key
while i > 0 and self.key(self.data[self.parent(i)]) > self.key(self.data[i]):
self.data[self.parent(i)].position, self.data[i].position = self.data[i].position, self.data[self.parent(i)].position
self.data[self.parent(i)], self.data[i] = self.data[i], self.data[self.parent(i)]
i = self.parent(i)
return True
def __len__(self):
return self.aHeapsize | {
"repo_name": "bharathramh92/dijkstra_test",
"path": "MinHeap.py",
"copies": "1",
"size": "3372",
"license": "mit",
"hash": 8277617937476494000,
"line_mean": 32.73,
"line_max": 129,
"alpha_frac": 0.524911032,
"autogenerated": false,
"ratio": 3.742508324084351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4767419356084351,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bharathramh'
from Vertex import *
from Edges import *
from Graph import *
from Dijkstra import *
from MinHeap import *
from BFS import *
import sys
class main:
""" Initial graph can be populated by providing a text file with source, destination, and transit_time.
eg: Belk Grigg 1.2"""
def __init__(self):
graph = Graph()
self.outFile = open("output", 'w')
try:
filename = sys.argv[1]
f = open(filename, 'r')
for line in f:
split = line.split()
graph.addEdge(split[0], split[1], split[2])
f.close()
except IndexError:
print("Input file was not passed while calling the function")
while(True):
userInput = input().split()
if userInput == None or len(userInput) == 0:
continue
self.writeInFile(" ".join(userInput))
command = userInput.pop(0)
if command == "print":
if len(userInput) != 0:
print("print wont take argument.\nUsage :print")
continue
for key,value in sorted(graph.vertexMap.items()):
if not graph.vertexMap[key].status:
key += " DOWN"
print(key)
self.writeInFile(key)
orderedAdj = sorted(value.adj, key = lambda x: x.destination.name)
for edge in orderedAdj :
s = ""
if not edge.status:
s = "DOWN"
self.writeInFile((" %s %s %s" %(edge.destination.name, edge.transit_time, s)))
print(" %s %s %s" %(edge.destination.name, edge.transit_time, s))
self.writeInFile("")
elif command=="addedge":
if len(userInput) != 3:
print("addedge takes exactly 3 arguments.\nUsage :addedge <source Vertex> "
"<destination Vertex> <transit time>")
continue
tailvertex, headvertex, transit_time = userInput[0],userInput[1], userInput[2]
graph.updateEdge(tailvertex, headvertex, transit_time)
elif command == "deleteedge":
if len(userInput) != 2:
print("addedge takes exactly 2 arguments.\nUsage :deleteedge <source Vertex> "
"<destination Vertex>")
continue
tailvertex, headvertex = userInput[0],userInput[1]
graph.deleteEdge(tailvertex, headvertex)
elif command == "edgeup":
if len(userInput) != 2:
print("edgeup takes exactly 2 arguments.\nUsage :edgeup <source Vertex> "
"<destination Vertex>")
continue
tailvertex, headvertex = userInput[0],userInput[1]
graph.upEdgeStatus(tailvertex, headvertex)
elif command == "edgedown":
if len(userInput) != 2:
print("edgedown takes exactly 2 arguments.\nUsage :edgedown <source Vertex> "
"<destination Vertex>")
continue
tailvertex, headvertex = userInput[0],userInput[1]
graph.downEdgeStatus(tailvertex, headvertex)
elif command == "vertexup":
if len(userInput) != 1:
print("vertexup takes exactly 1 argument.\nUsage :vertexup <Vertex name>")
continue
vertex = userInput[0]
graph.upVertexStatus(vertex)
elif command == "vertexdown" :
if len(userInput) != 1:
print("vertexdown takes exactly 1 argument.\nUsage :vertexdown <Vertex name>")
continue
vertex = userInput[0]
graph.downVertexStatus(vertex)
elif command == "reachable":
if len(userInput) != 0:
print("reachable wont take argument.\nUsage :reachable")
continue
# BFS(graph).printReachableVerticesFromAllSource()
self.graph = graph
bfs = BFS(self.graph)
self.printReachableVerticesFromAllSource(bfs)
elif command == "path":
if len(userInput) != 2:
print("vertexdown takes exactly 2 argument.\nUsage :path <source Vertex>"
" <destination Vertex>")
continue
source, destination = userInput[0],userInput[1]
dijkstra = Dijkstra()
source, destination = dijkstra.minPath(graph, source, destination)
self.str = ""
if self.printPath(source, destination) == "":
self.str = "No path found from "+ source.name + " to "+ destination.name
else:
self.str += (" %.2f" % round(destination.d, 2))
print(self.str)
self.writeInFile(self.str)
elif command == "quit":
sys.exit(0)
else:
print("Incorrect command. The following commands are available"
"\nprint\naddedge\nedgedown\nedgeup\nvertexdown\nvertexup\nreachable\npath")
pass
self.outFile.close()
def printPath(self, source, destination):
if destination == source:
self.str += destination.name + " "
elif destination.pi == None:
return ""
else:
self.printPath(source , destination.pi)
self.str += destination.name + " "
return self.str
def writeInFile(self, st):
self.outFile.write(st + "\n")
def printReachableVerticesFromAllSource(self, bfs):
# for current in sorted(self.graph.vertexMap.values()): #use this line if sorting while printing is not required
for current in sorted(self.graph.vertexMap.values(), key = lambda x : x.name): #use this line if sorting while printing is required
bfs.BFS(self.graph, current) #Running time will be O(V(V+E))
if current.status != True:
continue
self.writeInFile(current.name)
print(current.name)
# for x in sorted(self.graph.vertexMap.values(), key = lambda x : x.name): #use this line if sorting while printing is not required
for x in sorted(self.graph.vertexMap.values(), key = lambda x : x.name): #use this line if sorting while printing is required
if x.color == BLACK and x.status == True and x.name != current.name:
out = " " + x.name
print(out)
self.writeInFile(out)
# Script entry point: start the interactive graph shell.
if __name__ == '__main__':
    main() | {
"repo_name": "bharathramh92/dijkstra_test",
"path": "GraphMainClass.py",
"copies": "1",
"size": "7081",
"license": "mit",
"hash": -7524306168783180000,
"line_mean": 41.9212121212,
"line_max": 160,
"alpha_frac": 0.5092501059,
"autogenerated": false,
"ratio": 4.559562137797811,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5568812243697812,
"avg_score": null,
"num_lines": null
} |
__author__ = "bhargavchava97(github), Andrew Jewett"
try:
from ..nbody_graph_search import Ugraph
except:
# not installed as a module
from nbody_graph_search import Ugraph
# This file defines how improper interactions are generated in AMBER (GAFF).
# To use it, add "(gaff_imp.py)" to the name of the "Data Impropers By Type"
# section, and make sure this file is located in the "common" directory.
# For example:
# write_once("Data Impropers By Type (gaff_imp.py)") {
# ...
# }
# To find 4-body "improper" interactions,
# (by default, most of the time), we would use this subgraph:
# 0
# * 1st bond connects atoms 2 and 0
# | => 2nd bond connects atoms 2 and 1
# _.*._ 3rd bond connects atoms 2 and 3
# *' 2 `*
# 1 3
#
# In AMBER/GAFF, the central atom is the third atom ("2").
# http://archive.ambermd.org/201307/0519.html
# This differs from other force-fields.
# We take this detail into account in the line below:
bond_pattern = Ugraph([(2,0), (2,1), (2,3)])
# As with other force-fields, the improper-angle is the angle between the planes
# defined by the first three atoms (0,1,2) and last three atoms (1,2,3).
# (This is implemented in LAMMPS using an improper_style which requires
# that the atoms in the interaction will be listed in this order: 0,1,2,3.)
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 4 atoms has already been created
(perhaps listed in a different, but equivalent order).
If we don't check for this this, we will create many unnecessary redundant
interactions (which can slow down he simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
the energy of an improper interactions is a function of the improper angle.
The "improper angle" is often defined as the angle between planes formed
by atoms 0,1,2 & 1,2,3. (Alternately, it is sometimes defined as the
angle between the 0,1,2 plane and atom 3.)
This angle does not change when swapping the OUTER pair of atoms (0 and 3)
(except for a change of sign, which does not matter since the energy functions
used are typically sign invariant. Furthermore, neither of OUTER pair of atoms
are the central atom. There are 3!=6 ways of ordering the remaining 3 atoms.)
Consequently it does not make sense to define a separate 4-body improper-
interaction between atoms 0,1,2,3 AS WELL AS between 3,1,2,0.
So we sort the atoms and bonds so that the first atom has a always has
a lower atomID than the last atom. (Later we will check to see if we
have already defined an interaction between these 4 atoms. If not then
we create a new one.)
"""
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
atom3 = match[0][3]
# match[1][0:2] contains the ID numbers for the 3 bonds
bond0 = match[1][0]
bond1 = match[1][1]
bond2 = match[1][2]
if atom0 <= atom3:
#return ((atom0,atom1,atom2,atom3), (bond0, bond1, bond2))
# But this is the same thing as:
if atom0 <= atom1:
if atom1 <= atom3:
return match
else:
return ((atom0,atom3,atom2,atom1), (bond0,bond2,bond1))
else:
return ((atom1,atom0,atom2,atom3), (bond1,bond0,bond2))
elif atom3 <= atom1:
if atom0 <= atom1:
return ((atom3,atom0,atom2,atom1), (bond2,bond0,bond1))
else:
return ((atom3,atom1,atom2,atom0), (bond2,bond1,bond0))
else:
return ((atom1,atom3,atom2,atom0), (bond1,bond2,bond0))
| {
"repo_name": "smsaladi/moltemplate",
"path": "moltemplate/nbody_alt_symmetry/gaff_imp.py",
"copies": "1",
"size": "4075",
"license": "bsd-3-clause",
"hash": -7498646425575640000,
"line_mean": 41.8947368421,
"line_max": 84,
"alpha_frac": 0.6549693252,
"autogenerated": false,
"ratio": 3.4978540772532187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9593564867979478,
"avg_score": 0.011851706894747946,
"num_lines": 95
} |
from PIL import Image
i = Image.open("input.png")
#store pixels of input image
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
print "Filter size should be an odd positive number"
filtersize=input("Choose the Size of Filter: ")
print "Type (True) or (False) without braces"
applyred=input ("Choose to apply red : ")
applygreen=input("Choose to apply green : ")
applyblue=input ("Choose to apply blue : ")
print "Choose Shift Type"
print "1 None "
print "2 Shift Left"
print "3 Shift Right"
shifttype=input()
filterwidth=filtersize
filterheight=filtersize
filterOffset = (filterwidth-1)/2;
offsety=filterOffset
offsetx=filterOffset
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
for y in range(height+(2*offsety)):
if(x<offsetx):
j.putpixel((x,y),(0,0,0))
if(y<offsety):
j.putpixel((x,y),(0,0,0))
if(x>=width):
j.putpixel((x,y),(0,0,0))
if(y>=height):
j.putpixel((x,y),(0,0,0))
if(x>=offsetx and y>=offsety and x <=width and y <=height):
cpixel=pixels[x-offsetx,y-offsety]
j.putpixel((x,y),cpixel)
for y in range(height+(2*offsety)):
for x in range(width+(2*offsetx)):
if(x>=offsetx and y>=offsety and x <=width and y <=height):
blue=0.0
green=0.0
red=0.0
#filterx=x-offsetx
#filtery=y-offsety
for fy in range(filterheight):
for fx in range(filterwidth):
cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
red+=float(cpixel[0])
green+=float(cpixel[1])
blue+=float(cpixel[2])
red=red/filtersize
green=green/filtersize
blue=blue/filtersize
cpixel=j.getpixel((x,y))
if(applyred==False):
red=cpixel[0]
if(applygreen==False):
green=cpixel[1]
if(applyblue==False):
blue=cpixel[2]
if(red<0):
red=0
if(red>255):
red=255
if(green<0):
green=0
if(green>255):
green=255
if(blue<0):
blue=0
if(blue>255):
blue=255
if(shifttype==1):
k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
if(shifttype==2):
k.putpixel((x-offsetx,y-offsety),(int(blue),int(red),int(green)))
if(shifttype==3):
k.putpixel((x-offsetx,y-offsety),(int(green),int(blue),int(red)))
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "AverageColoursFilter.py",
"copies": "1",
"size": "2806",
"license": "mit",
"hash": 5284153036884624000,
"line_mean": 29.1720430108,
"line_max": 81,
"alpha_frac": 0.5374198147,
"autogenerated": false,
"ratio": 3.240184757505774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42776045722057743,
"avg_score": null,
"num_lines": null
} |
from PIL import Image
i = Image.open("input1.png")
j = Image.open("input2.png")
#store pixels of first image
pixels_first = i.load()
width_first, height_first = i.size
k=Image.new(i.mode,i.size)
#store pixels of second image
pixels_second = j.load()
width_second,height_second = j.size
blue=0
green=0
red=0
print "Choose Bitwise blendtypes as follows"
print "1 AND \n2 OR \n3 XOR\n"
redblend=input("Choose red blend type: ")
greenblend=input("Choose green blend type: ")
blueblend=input("Choose blue blend type: ")
#cpixel[0] contains red value cpixel[1] contains green value
#cpixel[2] contains blue value cpixel[3] contains alpha value
for image_width_iterator in range(width_first):
for image_height_iterator in range(height_first):
cpixel = pixels_first[image_width_iterator, image_height_iterator]
dpixel = pixels_second[image_width_iterator,image_height_iterator]
if(redblend == 1):
red=cpixel[0]&dpixel[0]
elif(redblend == 2):
red=cpixel[0]|dpixel[0]
elif(redblend==3):
red=cpixel[0]^dpixel[0]
else:
red=0
if(greenblend == 1):
green=cpixel[1]&dpixel[1]
elif(greenblend == 2):
green=cpixel[1]|dpixel[1]
elif(greenblend==3):
green=cpixel[1]^dpixel[1]
else:
blue=0
if(blueblend == 1):
blue=cpixel[2]&dpixel[2]
elif(blueblend == 2):
blue=cpixel[2]|dpixel[2]
elif(blueblend==3):
blue=cpixel[2]^dpixel[2]
else:
blue=0
if(red<0):
red=0
if(red>255):
red=255
if(green<0):
green=0
if(green>255):
green=255
if(blue<0):
blue=0
if(blue>255):
blue=255
k.putpixel((image_width_iterator,image_height_iterator),(red,green,blue))
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "Bitwise Blending.py",
"copies": "1",
"size": "2167",
"license": "mit",
"hash": 2117137718516641300,
"line_mean": 27.1428571429,
"line_max": 81,
"alpha_frac": 0.6022150438,
"autogenerated": false,
"ratio": 3.1542940320232895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42565090758232893,
"avg_score": null,
"num_lines": null
} |
from PIL import Image
i = Image.open("input.png")
#store pixels of input image
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
print "1 Blur3x3Filter"
print "2 Blur5x5Filter"
print "3 Gaussian3x3BlurFilter"
print "4 Gaussian5x5BlurFilter"
print "5 SoftenFilter"
print "6 MotionBlurFilter"
print "7 MotionBlurLeftToRightFilter"
print "8 MotionBlurRightToLeftFilter"
print "9 HighPass3x3Filter"
print "10 EdgeDetectionFilter"
print "11 HorizontalEdgeDetectionFilter"
print "12 VerticalEdgeDetectionFilter"
print "13 EdgeDetection45DegreeFilter"
print "14 EdgeDetectionTopLeftBottomRight"
print "15 SharpenFilter"
print "16 Sharpen3x3Filter"
print "17 Sharpen3x3FactorFilter"
print "18 Sharpen5x5Filter"
print "19 IntenseSharpenFilter"
print "20 EmbossFilter"
print "21 Emboss45DegreeFilter"
print "22 EmbossTopLeftBottomRight"
print "23 IntenseEmbossFilter"
userin=input("Choose the Filter: ")
if(userin == 1):
factor=1.0
bias=0.0
filtermatrix=[[0.0,0.2,0.0],[0.2,0.2,0.2],[0.0,0.2,0.2]]
elif(userin == 2):
factor=1.0/13.0
bias=0.0
filtermatrix=[[0,0,1,0,0],[0,1,1,1,0],[1,1,1,1,1],[0,1,1,1,0],[0,0,1,0,0]]
elif(userin == 3):
factor=1.0/16.0
bias=0.0
filtermatrix=[[1,2,1],[2,4,2],[1,2,1]]
elif(userin == 4):
factor=1.0/159.0
bias=0.0
filtermatrix=[[2,4,5,4,2],[4,9,12,9,4],[5,12,15,12,5],[4,9,12,9,4],[2,4,5,4,2]]
elif(userin == 5):
factor=1.0/8.0
bias=0.0
filtermatrix=[[1,1,1],[1,1,1],[1,1,1]]
elif(userin == 6):
factor=1.0/18.0
bias=0.0
filtermatrix=[[1, 0, 0, 0, 0, 0, 0, 0, 1],[0, 1, 0, 0, 0, 0, 0, 1, 0],[0, 0, 1, 0, 0, 0, 1, 0, 0],[0, 0, 0, 1, 0, 1, 0, 0, 0],[0, 0, 0, 0, 1, 0, 0, 0, 0],[0, 0, 0, 1, 0, 1, 0, 0, 0],[0, 0, 1, 0, 0, 0, 1, 0, 0],[0, 1, 0, 0, 0, 0, 0, 1, 0],[1, 0, 0, 0, 0, 0, 0, 0, 1]]
elif(userin == 7):
factor=1.0/9.0
bias=0.0
filtermatrix=[[1, 0, 0, 0, 0, 0, 0, 0, 0],[0, 1, 0, 0, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 0, 0, 0, 0],[0, 0, 0, 1, 0, 0, 0, 0, 0],[0, 0, 0, 0, 1, 0, 0, 0, 0],[0, 0, 0, 0, 0, 1, 0, 0, 0],[0, 0, 0, 0, 0, 0, 1, 0, 0],[0, 0, 0, 0, 0, 0, 0, 1, 0],[0, 0, 0, 0, 0, 0, 0, 0, 1]]
elif(userin == 8):
factor=1.0/9.0
bias=0.0
filtermatrix=[[0, 0, 0, 0, 0, 0, 0, 0, 1],[0, 0, 0, 0, 0, 0, 0, 1, 0],[0, 0, 0, 0, 0, 0, 1, 0, 0],[0, 0, 0, 0, 0, 1, 0, 0, 0],[0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 0, 0, 0, 0],[0, 1, 0, 0, 0, 0, 0, 0, 0],[1, 0, 0, 0, 0, 0, 0, 0, 0]]
elif(userin == 9):
factor=1.0/16.0
bias=128.0
filtermatrix=[[-1,-2,-1],[-2,12,-2],[-1,-2,-1]]
elif(userin == 10):
factor=1.0
bias=0.0
filtermatrix=[[-1,-1,-1],[-1,8,-1],[-1,-1,-1]]
elif(userin == 11):
factor=1.0
bias=0.0
filtermatrix[[0,0,0,0,0],[0,0,0,0,0],[-1,-1,2,0,0],[0,0,0,0,0],[0,0,0,0,0]]
elif(userin == 12):
factor=1.0
bias=0.0
filtermatrix=[[0,0,-1,0,0],[0,0,-1,0,0],[0,0,4,0,0],[0,0,-1,0,0],[0,0,-1,0,0]]
elif(userin == 13):
factor=1.0
bias=0.0
filtermatrix=[[-1,0,0,0,0],[0,-2,0,0,0],[0,0,6,0,0],[0,0,0,-2,0],[0,0,0,0,-1]]
elif(userin == 14):
factor=1.0
bias=0.0
filtermatrix=[[-5,0,0],[0,0,0],[0,0,5]]
elif(userin == 15):
factor=1.0
bias=0.0
filtermatrix=[[-1,-1,-1],[-1,9,-1],[-1,-1,-1]]
elif(userin == 16):
factor=1.0
bias=0.0
filtermatrix=[[0,-1,0],[-1,5,-1],[0,-1,0]]
elif(userin == 17):
factor=1.0
bias=0.0
filtermatrix=[[0,-2,0],[-2,-11,-2],[0,-2,0]]
elif(userin == 18):
factor=1.0/8.0
bias=0.0
filtermatrix=[[-1,-1,-1,-1,-1],[-1,2,2,2,-1],[-1,2,8,2,1],[-1,2,2,2-1],[-1,-1,-1,-1,-1]]
elif(userin == 19):
factor=1.0
bias=0.0
filtermatrix=[[1,1,1],[1,-7,1],[1,1,1]]
elif(userin == 20):
factor=1.0
bias=128.0
filtermatrix=[[2,0,0],[0,-1,0],[0,0,-1]]
elif(userin == 21):
factor=1.0
bias=128.0
filtermatrix=[[-1,-1,0],[-1,0,1],[0,1,1]]
elif(userin == 22):
factor=1.0
bias=128.0
filtermatrix=[[-1,0,0],[0,0,0],[0,0,1]]
elif(userin == 23):
factor=1.0
bias=128.0
filtermatrix=[[-1,-1,-1,-1,0],[-1,-1,-1,0,1],[-1,-1,0,1,1],[-1,0,1,1,1],[0,1,1,1,1]]
else:
print "invalid input"
filterwidth=len(filtermatrix)
filterheight=len(filtermatrix[0])
filterOffset = (filterwidth-1)/2;
offsety=filterOffset
offsetx=filterOffset
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
for y in range(height+(2*offsety)):
if(x<offsetx):
j.putpixel((x,y),(0,0,0))
if(y<offsety):
j.putpixel((x,y),(0,0,0))
if(x>=width):
j.putpixel((x,y),(0,0,0))
if(y>=height):
j.putpixel((x,y),(0,0,0))
if(x>=offsetx and y>=offsety and x <=width and y <=height):
cpixel=pixels[x-offsetx,y-offsety]
j.putpixel((x,y),cpixel)
for y in range(height+(2*offsety)):
for x in range(width+(2*offsetx)):
if(x>=offsetx and y>=offsety and x <=width and y <=height):
blue=0.0
green=0.0
red=0.0
#filterx=x-offsetx
#filtery=y-offsety
for fy in range(filterheight):
for fx in range(filterwidth):
cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
red+=float(cpixel[0])*filtermatrix[fx][fy]
green+=float(cpixel[1])*filtermatrix[fx][fy]
blue+=float(cpixel[2])*filtermatrix[fx][fy]
red=(factor*red)+bias
green=(factor*green)+bias
blue=(factor*blue)+bias
if(red<0):
red=0
if(red>255):
red=255
if(green<0):
green=0
if(green>255):
green=255
if(blue<0):
blue=0
if(blue>255):
blue=255
k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "Image Convolution.py",
"copies": "1",
"size": "5988",
"license": "mit",
"hash": -8870577213870692000,
"line_mean": 30.1875,
"line_max": 271,
"alpha_frac": 0.5288911156,
"autogenerated": false,
"ratio": 2.281142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33100339727428574,
"avg_score": null,
"num_lines": null
} |
from PIL import Image
i = Image.open("input.jpg")
#pixel data is stored in pixels in form of two dimensional array
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
filtersize=input('Enter the size of the filter: ')
filterOffset=(filtersize-1)/2
filterheight=filtersize
filterwidth=filtersize
offsety=filterOffset
offsetx=filterOffset
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
for y in range(height+(2*offsety)):
if(x<offsetx):
j.putpixel((x,y),(0,0,0))
if(y<offsety):
j.putpixel((x,y),(0,0,0))
if(x>=width):
j.putpixel((x,y),(0,0,0))
if(y>=height):
j.putpixel((x,y),(0,0,0))
if(x>=offsetx and y>=offsety and x < width and y <height):
cpixel=pixels[x-offsetx,y-offsety]
j.putpixel((x,y),cpixel)
for y in range(height+(2*offsety)):
for x in range(width+(2*offsetx)):
if(x>=offsetx and y>=offsety and x < width and y <height):
blue=0.0
green=0.0
red=0.0
blues=list()
reds=list()
greens=list()
filterx=x-offsetx
filtery=y-offsety
for fy in range(filterheight):
for fx in range(filterwidth):
cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
reds.append(float(cpixel[0]))
greens.append(float(cpixel[1]))
blues.append(float(cpixel[2]))
reds.sort()
blues.sort()
greens.sort()
index=len(reds)
medianindex=index/2
if(index%2!=0):
red=reds[medianindex]
blue=blues[medianindex]
green=greens[medianindex]
else:
red=(reds[medianindex]+reds[medianindex+1])/2
blue=(blues[medianindex]+blues[medianindex+1])/2
green=(greens[medianindex]+greens[medianindex+1])/2
k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
del reds[:]
del blues[:]
del greens[:]
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "Median Filter.py",
"copies": "1",
"size": "2275",
"license": "mit",
"hash": -5120743367206795000,
"line_mean": 31.5,
"line_max": 77,
"alpha_frac": 0.5441758242,
"autogenerated": false,
"ratio": 3.240740740740741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9155672980599173,
"avg_score": 0.025848716868313425,
"num_lines": 70
} |
# Median filter followed by a min-max filter (per-channel max minus min over
# the window, i.e. a morphological-gradient style result) over input.jpg.
# NOTE(review): Python 2 script -- input() evaluates the typed expression and
# "/" is integer division for ints; under Python 3 filtersize would be a str.
from PIL import Image
i = Image.open("input.jpg")
#pixel data is stored in pixels in form of two dimensional array
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
sol=Image.new(i.mode,i.size)
filtersize=input('Enter the size of the median filter: ')
# half-width of the (odd-sized) square filter window
filterOffset=(filtersize-1)/2
filterheight=filtersize
filterwidth=filtersize
offsety=filterOffset
offsetx=filterOffset
# j is a zero-padded copy of the input, enlarged by the filter offset on
# every side so the window never reads outside the image.
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
    for y in range(height+(2*offsety)):
        if(x<offsetx):
            j.putpixel((x,y),(0,0,0))
        if(y<offsety):
            j.putpixel((x,y),(0,0,0))
        # NOTE(review): the copy bound below ("x < width") looks off by
        # `offsetx`: padded positions [offsetx, width) receive source columns
        # [0, width-offsetx), so the last offsetx source columns/rows are
        # never copied and stay black -- confirm whether the bound should be
        # width+offsetx.
        if(x>=width):
            j.putpixel((x,y),(0,0,0))
        if(y>=height):
            j.putpixel((x,y),(0,0,0))
        if(x>=offsetx and y>=offsety and x < width and y <height):
            cpixel=pixels[x-offsetx,y-offsety]
            j.putpixel((x,y),cpixel)
# Median filtering: for every covered pixel, take the per-channel median of
# the filtersize x filtersize neighbourhood of the padded image j into k.
for y in range(height+(2*offsety)):
    for x in range(width+(2*offsetx)):
        if(x>=offsetx and y>=offsety and x < width and y <height):
            blue=0.0
            green=0.0
            red=0.0
            blues=list()
            reds=list()
            greens=list()
            filterx=x-offsetx
            filtery=y-offsety
            for fy in range(filterheight):
                for fx in range(filterwidth):
                    # NOTE(review): x-offsety is presumably meant to be
                    # x-offsetx; harmless only because offsetx == offsety.
                    cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
                    reds.append(float(cpixel[0]))
                    greens.append(float(cpixel[1]))
                    blues.append(float(cpixel[2]))
            reds.sort()
            blues.sort()
            greens.sort()
            index=len(reds)
            medianindex=index/2
            if(index%2!=0):
                red=reds[medianindex]
                blue=blues[medianindex]
                green=greens[medianindex]
            else:
                # NOTE(review): unreachable for an odd filtersize (the window
                # holds filtersize**2 samples, an odd count), and it averages
                # elements medianindex and medianindex+1 instead of
                # medianindex-1 and medianindex -- confirm before reuse.
                red=(reds[medianindex]+reds[medianindex+1])/2
                blue=(blues[medianindex]+blues[medianindex+1])/2
                green=(greens[medianindex]+greens[medianindex+1])/2
            k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
            del reds[:]
            del blues[:]
            del greens[:]
# Second pass: min-max filter over the median-filtered image k. The output
# pixel is (max - min) per channel within each window.
minmaxfiltersize=input('Enter the size of min-max filter: ')
minmaxfilteroffset=(minmaxfiltersize-1)/2
filterheight=minmaxfiltersize
filterwidth=minmaxfiltersize
minmaxoffsety=minmaxfilteroffset
minmaxoffsetx=minmaxfilteroffset
# z is the zero-padded copy of k, built the same way as j above (and with
# the same edge-copy bound caveat).
z = Image.new(i.mode,(width+(2*minmaxoffsetx),height+(2*minmaxoffsety)))
for x in range(width+(2*minmaxoffsetx)):
    for y in range(height+(2*minmaxoffsety)):
        if(x<minmaxoffsetx):
            z.putpixel((x,y),(0,0,0))
        if(y<minmaxoffsety):
            z.putpixel((x,y),(0,0,0))
        if(x>=width):
            z.putpixel((x,y),(0,0,0))
        if(y>=height):
            z.putpixel((x,y),(0,0,0))
        if(x>=minmaxoffsetx and y>=minmaxoffsety and x < width and y <height):
            cpixel=k.getpixel((x-minmaxoffsetx,y-minmaxoffsety))
            z.putpixel((x,y),cpixel)
for y in range(height+(2*minmaxoffsety)):
    for x in range(width+(2*minmaxoffsetx)):
        if(x>=minmaxoffsetx and y>=minmaxoffsety and x < width and y <height):
            # running per-channel extremes over the window
            minblue=255
            mingreen=255
            minred=255
            maxblue=0
            maxgreen=0
            maxred=0
            filterx=x-minmaxoffsetx
            filtery=y-minmaxoffsety
            for fy in range(filterheight):
                for fx in range(filterwidth):
                    # NOTE(review): x-minmaxoffsety is presumably meant to be
                    # x-minmaxoffsetx; harmless only because the two are equal.
                    cpixel=z.getpixel((x-minmaxoffsety+fx,y-minmaxoffsety+fy))
                    minblue=min(minblue,cpixel[2])
                    mingreen=min(mingreen,cpixel[1])
                    minred=min(minred,cpixel[0])
                    maxblue=max(maxblue,cpixel[2])
                    maxgreen=max(maxgreen,cpixel[1])
                    maxred=max(maxred,cpixel[0])
            red=maxred-minred
            green=maxgreen-mingreen
            blue=maxblue-minblue
            sol.putpixel((x-minmaxoffsetx,y-minmaxoffsety),(int(red),int(green),int(blue)))
sol.save('minmax.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "Min-Max Filter.py",
"copies": "1",
"size": "4183",
"license": "mit",
"hash": -5987652014637668000,
"line_mean": 34.1512605042,
"line_max": 91,
"alpha_frac": 0.5639493187,
"autogenerated": false,
"ratio": 3.1737481031866466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42376974218866464,
"avg_score": null,
"num_lines": null
} |
# Boolean edge-detection / sharpening filter (Python 2 script).
# Each pixel's neighbourhood is thresholded against the neighbourhood mean to
# form a 9-character binary pattern, which is matched against a table of known
# edge patterns; matches are painted white (edge detect) or scaled (sharpen).
import sys
from PIL import Image
i = Image.open("input.png")
#store pixels of input image
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
print "Choose BooleanFilterType"
print "1 None"
print "2 EdgeDetect"
print "3 Sharpen"
filtertype=input()
filtersize=input("Choose the Size of Filter (Usually 3) : ")
print "Usually 1.0"
redfactor=input("Enter red factor value : ")
greenfactor=input("Enter green factor value : ")
bluefactor=input("Enter blue factor value : ")
threshold=input("Enter Threshold value (0.0 to 200.0):")
# Row-major 3x3 binary patterns regarded as edges.
edgemasks=[]
edgemasks.append("011011011")
edgemasks.append("000111111")
edgemasks.append("110110110")
edgemasks.append("111111000")
edgemasks.append("011011001")
edgemasks.append("100110110")
edgemasks.append("111011000")
edgemasks.append("111110000")
edgemasks.append("111011001")
edgemasks.append("100110111")
edgemasks.append("001011111")
edgemasks.append("111110100")
edgemasks.append("000011111")
edgemasks.append("000110111")
edgemasks.append("001011011")
# NOTE(review): duplicate of the previous entry -- one of the two
# "001011011" masks was probably meant to be a different pattern.
edgemasks.append("001011011")
edgemasks.append("110110100")
filterwidth=filtersize
filterheight=filtersize
filterOffset = (filterwidth-1)/2;
offsety=filterOffset
offsetx=filterOffset
# j is a zero-padded copy of the input image.
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
    for y in range(height+(2*offsety)):
        if(x<offsetx):
            j.putpixel((x,y),(0,0,0))
        if(y<offsety):
            j.putpixel((x,y),(0,0,0))
        if(x>=width):
            j.putpixel((x,y),(0,0,0))
        if(y>=height):
            j.putpixel((x,y),(0,0,0))
        if(x>=offsetx and y>=offsety and x <=width and y <=height):
            cpixel=pixels[x-offsetx,y-offsety]
            j.putpixel((x,y),cpixel)
for y in range(height+(2*offsety)):
    for x in range(width+(2*offsetx)):
        if(x>=offsetx+1 and y>=offsety+1 and x <=width and y <=height):
            matrixmean=0
            matrixtotal=0
            matrixvariance=0.0
            matrixpattern=''
            filterx=x-offsetx
            filtery=y-offsety
            # First pass: mean intensity (sum of R+G+B) over the window.
            for fy in range(filterheight):
                for fx in range(filterwidth):
                    # NOTE(review): x-offsety is presumably meant to be
                    # x-offsetx; harmless only because the two are equal.
                    cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
                    matrixmean+=(cpixel[0])
                    matrixmean+=(cpixel[1])
                    matrixmean+=(cpixel[2])
            # NOTE(review): the divisor 9 hard-codes a 3x3 window even though
            # filtersize is user-supplied; integer division under Python 2.
            matrixmean=matrixmean/9
            # Second pass: per-pixel brightness vs. mean builds the binary
            # pattern; squared deviations accumulate into the variance.
            for fy in range(filterheight):
                for fx in range(filterwidth):
                    cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
                    matrixtotal=(cpixel[0])
                    matrixtotal+=(cpixel[1])
                    matrixtotal+=(cpixel[2])
                    if(matrixtotal>matrixmean):
                        matrixpattern+='1'
                    else:
                        matrixpattern+='0'
                    matrixvariance+=pow(matrixmean-(cpixel[0]+cpixel[1]+cpixel[2]),2)
            matrixvariance=matrixvariance/9.0
            cpixel=j.getpixel((x,y))
            if(filtertype==3):
                # Sharpen: scale matched edge pixels by the user factors.
                red=cpixel[0]
                green=cpixel[1]
                blue=cpixel[2]
                if(matrixvariance>threshold):
                    if(matrixpattern in edgemasks):
                        red=int(red*redfactor)
                        green=int(green*greenfactor)
                        blue=int(blue*bluefactor)
                # clamp each channel to the valid 0..255 range
                if(red<0):
                    red=0
                if(red>255):
                    red=255
                if(green<0):
                    green=0
                if(green>255):
                    green=255
                if(blue<0):
                    blue=0
                if(blue>255):
                    blue=255
            # NOTE(review): filter types 1 (None) and 2 (EdgeDetect) take the
            # same branches below and therefore behave identically.
            elif(matrixvariance>threshold and (matrixpattern in edgemasks)):
                red=255
                green=255
                blue=255
            else:
                red=0
                green=0
                blue=0
            del matrixpattern
            k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "BooleanEdgeDetectionFilter.py",
"copies": "1",
"size": "4276",
"license": "mit",
"hash": 8428933371966168000,
"line_mean": 31.8923076923,
"line_max": 85,
"alpha_frac": 0.5388213283,
"autogenerated": false,
"ratio": 3.5812395309882747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9510329242251135,
"avg_score": 0.021946323407427845,
"num_lines": 130
} |
# Morphological dilate / erode filter with optional edge extraction
# (Python 2 script: input() evaluates the typed expression, so the colour
# toggles must be typed literally as True or False).
from PIL import Image
i = Image.open("input.png")
#store pixels of input image
pixels = i.load()
width, height = i.size
k=Image.new(i.mode,i.size)
print "Filter size should be an odd positive number"
filtersize=input("Choose the Size of Filter: ")
print "Type (True) or (False) without braces"
applyred=input("Choose to apply red : ")
applygreen=input("Choose to apply green : ")
applyblue=input("Choose to apply blue : ")
print "Choose Morphology Type"
print "1 Dilation"
print "2 Erosion"
morphtype=input()
# Dilation scans for the window maximum, so channels start at 0;
# erosion scans for the minimum, so they start at 255.
morphresetvalue=0.0
if(morphtype==2):
    morphresetvalue=255.0
print "Choose Morphology Edge Type"
print "1 EdgeDetection"
print "2 SharpenEdgeDetection"
edgetype=input()
filterwidth=filtersize
filterheight=filtersize
filterOffset = (filterwidth-1)/2;
offsety=filterOffset
offsetx=filterOffset
# j is a zero-padded copy of the input image.
j = Image.new(i.mode,(width+(2*offsetx),height+(2*offsety)))
for x in range(width+(2*offsetx)):
    for y in range(height+(2*offsety)):
        if(x<offsetx):
            j.putpixel((x,y),(0,0,0))
        if(y<offsety):
            j.putpixel((x,y),(0,0,0))
        if(x>=width):
            j.putpixel((x,y),(0,0,0))
        if(y>=height):
            j.putpixel((x,y),(0,0,0))
        if(x>=offsetx and y>=offsety and x <=width and y <=height):
            cpixel=pixels[x-offsetx,y-offsety]
            j.putpixel((x,y),cpixel)
for y in range(height+(2*offsety)):
    for x in range(width+(2*offsetx)):
        if(x>=offsetx and y>=offsety and x <=width and y <=height):
            blue=morphresetvalue
            green=morphresetvalue
            red=morphresetvalue
            #filterx=x-offsetx
            #filtery=y-offsety
            # Dilation: per-channel maximum over the window.
            if(morphtype==1):
                for fy in range(filterheight):
                    for fx in range(filterwidth):
                        # NOTE(review): x-offsety is presumably meant to be
                        # x-offsetx; harmless only because the two are equal.
                        cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
                        if(float(cpixel[0]) > red):
                            red=float(cpixel[0])
                        if(float(cpixel[1]) > green):
                            green=float(cpixel[1])
                        if(float(cpixel[2]) > blue):
                            blue=float(cpixel[2])
            # Erosion: per-channel minimum over the window.
            if(morphtype==2):
                for fy in range(filterheight):
                    for fx in range(filterwidth):
                        cpixel=j.getpixel((x-offsety+fx,y-offsety+fy))
                        if(float(cpixel[0]) < red):
                            red=float(cpixel[0])
                        if(float(cpixel[1]) < green):
                            green=float(cpixel[1])
                        if(float(cpixel[2]) < blue):
                            blue=float(cpixel[2])
            cpixel=j.getpixel((x,y))
            # Channels the user opted out of keep the (padded) source value.
            if(applyred==False):
                red=cpixel[0]
            if(applygreen==False):
                green=cpixel[1]
            if(applyblue==False):
                blue=cpixel[2]
            # Edge extraction: difference between the morphology result and
            # the source pixel; SharpenEdgeDetection adds the source back.
            if(edgetype==1 or edgetype==2):
                if(morphtype==1):
                    red=red-cpixel[0]
                    green=green-cpixel[1]
                    blue=blue-cpixel[2]
                if(morphtype==2):
                    red=cpixel[0]-red
                    green=cpixel[1]-green
                    blue=cpixel[2]-blue
                if(edgetype==2):
                    red=red+cpixel[0]
                    green=green+cpixel[1]
                    blue=blue+cpixel[2]
            # clamp each channel to the valid 0..255 range
            if(red<0):
                red=0
            if(red>255):
                red=255
            if(green<0):
                green=0
            if(green>255):
                green=255
            if(blue<0):
                blue=0
            if(blue>255):
                blue=255
            k.putpixel((x-offsetx,y-offsety),(int(red),int(green),int(blue)))
k.save('output.png')
| {
"repo_name": "BhargavGamit/ImageManipulationAlgorithms",
"path": "DilateAndErodeFilter.py",
"copies": "1",
"size": "3901",
"license": "mit",
"hash": 6930523914477182000,
"line_mean": 30.208,
"line_max": 77,
"alpha_frac": 0.5037169956,
"autogenerated": false,
"ratio": 3.4522123893805308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9330394769026462,
"avg_score": 0.02510692319081374,
"num_lines": 125
} |
__author__="Bhaskar Kalia"
__date__="Mon Sep 14"
__description__="Find and Replace Tool/gui interface"
#!/usr/bin/env
from Tkinter import *
import tkMessageBox
import subprocess
from Replacer import *
"""
###testing without gui###
filename="/home/bhaskar/Documents/test.txt"
replacer=Replacer(filename,"kalia","bhaskar")
replacer.find_and_replace()
print("done")
subprocess.call("gnome-open "+filename,shell=True)
"""
class Application(Frame):
    """Tkinter frame implementing a simple find-and-replace GUI.

    The user enters a file path, the text to find and the replacement text;
    "Fire" runs the substitution through Replacer, after which the processed
    file can be opened with gnome-open.
    """
    #method to start find and replace
    def fire(self):
        """Read the three text boxes and run find-and-replace on the file."""
        filepath=self.filepath.get("1.0","end-1c")
        find_text=self.find_text.get("1.0","end-1c")
        replace_text=self.replace_text.get("1.0","end-1c")
        #create replacer object and call find_and_replace method
        replacer=Replacer(filepath,find_text,replace_text)
        replacer.find_and_replace()
        #enable the open output file button
        self.show.config(state="normal")
        print("done")
    #method to open output file
    def open_output(self):
        """Open the processed file with gnome-open."""
        filepath=self.filepath.get("1.0","end-1c")
        # SECURITY FIX: pass an argument vector instead of interpolating the
        # user-typed path into a shell command line -- the previous
        # 'subprocess.call("gnome-open "+filepath, shell=True)' let any shell
        # metacharacters typed into the path box execute arbitrary commands.
        subprocess.call(["gnome-open", filepath])
    #method to create widgets
    def create_widgets(self):
        """Build and pack all labels, text boxes and buttons of the form."""
        #label1 filepath
        self.label1=Label(self,text="Enter Absolute Path of File")
        self.label1.pack()
        #creating a textBox for reading the filepath value
        self.filepath=Text(self)
        self.filepath["height"]=1
        self.filepath["width"]=60
        self.filepath.insert(END,"/home")
        self.filepath.pack(padx=20)
        #label2 find text
        self.label2=Label(self,text="Enter Text to Find")
        self.label2.pack()
        #creating a textBox for reading the find text
        self.find_text=Text(self)
        self.find_text["height"]=1
        self.find_text["width"]=60
        self.find_text.insert(END,"Text to find")
        self.find_text.pack(padx=20)
        #label3 replace text
        self.label3=Label(self,text="Enter Text to Replace")
        self.label3.pack()
        #creating a textBox for reading the replacement text
        self.replace_text=Text(self)
        self.replace_text["height"]=1
        self.replace_text["width"]=60
        self.replace_text.insert(END,"Text to replace")
        self.replace_text.pack(padx=20)
        #Fire button here
        self.firebutton=Button(self)
        self.firebutton["text"]="Fire"
        self.firebutton["fg"]="black"
        self.firebutton["command"]=self.fire
        self.firebutton.pack(padx=20,pady=10)
        #show output file button here
        self.show=Button(self)
        self.show["fg"]="black"
        self.show["width"]=21
        self.show["text"]="Open Output File"
        self.show["command"]=self.open_output
        self.show.config(state="disabled") #disabled until the replacement has completed
        self.show.pack()
        #exit button here
        self.exit=Button(self)
        self.exit["fg"]="red"
        self.exit["text"]="Exit"
        self.exit["command"]=self.quit
        self.exit.pack(padx=20,pady=10)
        #created by and all details
        self.ref=Label(self)
        self.ref["bg"]="black"
        self.ref["fg"]="white"
        self.ref["text"]="Created By : Bhaskar Kalia\nSoftware Engineer I\nSnapdeal\n"
        self.ref.pack(side="bottom")
    #constructor
    def __init__(self,master=None):
        """Create the frame inside *master*, title the window, build widgets."""
        #set title for windows (find and replace)
        master.title("Find and Replace")
        # Define frame size and position in the screen :
        """
        ScreenSizeX = master.winfo_screenwidth() # Get screen width [pixels]
        ScreenSizeY = master.winfo_screenheight() # Get screen height [pixels]
        ScreenRatio = 0.5 # Set the screen ratio for width and height
        FrameSizeX = int(ScreenSizeX * ScreenRatio)
        FrameSizeY = int(ScreenSizeY * ScreenRatio)
        FramePosX = (ScreenSizeX - FrameSizeX)/2 # Find left and up border of window
        FramePosY = (ScreenSizeY - FrameSizeY)/2
        #position frame in the center
        master.geometry("%sx%s+%s+%s"%(FrameSizeX,300,50,200))
        """
        Frame.__init__(self,master)
        self.pack(padx=20, pady=20)
        self.create_widgets()
#start gui
# Build the Tk root window, mount the Application frame on it, run the Tk
# event loop, and destroy the root once mainloop returns (Exit button).
root=Tk()
app=Application(master=root)
app.mainloop()
root.destroy()
| {
"repo_name": "BHASKARSDEN277GITHUB/python-find-replace-gui",
"path": "main.py",
"copies": "1",
"size": "4102",
"license": "mit",
"hash": 7337751425700187000,
"line_mean": 27.8873239437,
"line_max": 99,
"alpha_frac": 0.6535836177,
"autogenerated": false,
"ratio": 3.1626831148804935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4316266732580493,
"avg_score": null,
"num_lines": null
} |
# numItemsPurchased = int(input("How many items? "))
# totalCostItems = 0
# for numItemsPurchased in range(numItemsPurchased):
# itemCost = float(input("Enter the cost of the item: $"))
# totalCostItems = totalCostItems + itemCost
# print("Total cost of items is $" + str(totalCostItems))
# totalCostItemsAverage = totalCostItems/numItemsPurchased
# print("Average cost of items is $" + str(totalCostItemsAverage))
#----------------------------------------------------------------
# Next write the code that simulates tossing a coin 100 times.
# Determine and count the number of heads and number of tails tossed.
# Display these amounts.
import random
# Simulate tossing a fair coin 100 times: randint(1, 2) yields 1 for heads
# and 2 for tails; count each outcome and report the totals.
heads = 0
tails = 0
for _ in range(100):
    toss = random.randint(1, 2)
    if toss == 1:
        heads += 1
    else:
        tails += 1
print("Heads: " + str(heads))
print("Tails: " + str(tails))
| {
"repo_name": "sentientredstripe/sentientredstripe.github.io",
"path": "py/Chapter2_Program4_ForLoops3.py",
"copies": "1",
"size": "1484",
"license": "mit",
"hash": -7909935077823801000,
"line_mean": 31.2826086957,
"line_max": 129,
"alpha_frac": 0.6563342318,
"autogenerated": false,
"ratio": 3.4511627906976745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607497022497674,
"avg_score": null,
"num_lines": null
} |
"""Base Model configurations"""
import os
import json
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
'''
KEEP_PROB # Probability to keep a node in dropout
BATCH_SIZE # batch size
PROB_THRESH # Only keep boxes with probability higher than this threshold
PLOT_PROB_THRESH # Only plot boxes with probability higher than this threshold
NMS_THRESH # Bounding boxes with IOU larger than this are going to be removed
LOSS_COEF_CONF # loss coefficient for confidence regression
LOSS_COEF_CLASS # loss coefficient for classification regression
LOSS_COEF_BBOX # loss coefficient for bounding box regression
DECAY_STEPS # reduce step size after this many steps
LR_DECAY_FACTOR # multiply the learning rate by this factor
cfg.LEARNING_RATE # learning rate
MOMENTUM # momentum
WEIGHT_DECAY # weight decay
MAX_GRAD_NORM # gradients with norm larger than this is going to be clipped.
EPSILON = 1e-16 # a small value used to prevent numerical instability
EXP_THRESH=1.0 # threshold for safe exponential operation
BATCH_NORM_EPSILON=1e-5 # small value used in batch normalization to prevent dividing by 0. The
# default value here is the same with caffe's default value.
DATA_AUGMENTATION # Whether to do data augmentation
DRIFT_X # The range to randomly shift the image widht
DRIFT_Y # The range to randomly shift the image height
MAX_STEPS : Maximum number of batches to run
SUMMARY_STEP : Number of steps to save summary
CHECKPOINT_STEP : Number of steps to save checkpoint
TRAIN_DIR : Directory for saving checkpoints and log results
# Pixel mean values (BGR order) as a (1, 1, 3) array. Below is the BGR mean
# of VGG16
BGR_MEANS = np.array([[[103.939, 116.779, 123.68]]])
'''
def get_model_config(cfg_file):
  """Load a JSON model configuration and return it as an attribute dict.

  Ensures initialization.LOAD_PRETRAINED_MODEL exists (default False),
  derives dataset.N_CLASSES from the class-name list, and sets the number
  of anchor boxes per grid cell to 9.
  """
  with open(cfg_file) as fp:
    mconfig = edict(json.load(fp))
  init_cfg = mconfig.initialization
  if 'LOAD_PRETRAINED_MODEL' not in init_cfg:
    init_cfg.LOAD_PRETRAINED_MODEL = False
  # number of categories to classify
  mconfig.dataset.N_CLASSES = len(mconfig.dataset.CLASS_NAMES)
  mconfig.anchor_boxes.ANCHOR_PER_GRID = 9
  return mconfig
| {
"repo_name": "getnexar/squeezeDet",
"path": "src/config/config.py",
"copies": "1",
"size": "2388",
"license": "bsd-2-clause",
"hash": -8043827318102169000,
"line_mean": 33.1142857143,
"line_max": 97,
"alpha_frac": 0.6959798995,
"autogenerated": false,
"ratio": 3.713841368584759,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9625413001576195,
"avg_score": 0.056881653301712694,
"num_lines": 70
} |
"""Base Model configurations"""
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
def base_model_config(dataset='PASCAL_VOC'):
  """Return an edict of default model hyper-parameters for *dataset*.

  Supported datasets: PASCAL_VOC, VID, KITTI, ILSVRC2013; the choice only
  selects the class-name list. Every other entry is a default that callers
  are expected to override as needed.
  """
  assert dataset.upper() in ['PASCAL_VOC', 'VID', 'KITTI', 'ILSVRC2013'], \
      'Either PASCAL_VOC / VID / KITTI / ILSVRC2013'
  cfg = edict()
  # Dataset used to train/val/test model. Now support PASCAL_VOC or KITTI
  cfg.DATASET = dataset.upper()
  if cfg.DATASET == 'PASCAL_VOC':
    # object categories to classify
    cfg.CLASS_NAMES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
                       'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                       'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
                       'sofa', 'train', 'tvmonitor')
  elif cfg.DATASET == 'VID':
    cfg.CLASS_NAMES = ('airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus',
                       'car', 'cattle', 'dog', 'domestic_cat', 'elephant',
                       'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard',
                       'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep',
                       'snake', 'squirrel', 'tiger', 'train', 'turtle', 'watercraft',
                       'whale', 'zebra')
  elif cfg.DATASET == 'KITTI':
    cfg.CLASS_NAMES = ('car', 'pedestrian', 'cyclist')
  elif cfg.DATASET == 'ILSVRC2013':
    cfg.CLASS_NAMES = tuple(map(str, range(1, 1001)))
  # number of categories to classify
  cfg.CLASSES = len(cfg.CLASS_NAMES)
  # type of the loss, either YOLO / SQT
  cfg.LOSS_TYPE = 'SQT'
  # Use batch normalization
  cfg.BN = False
  cfg.LOAD_BN = False
  # ROI pooling output width
  cfg.GRID_POOL_WIDTH = 7
  # ROI pooling output height
  cfg.GRID_POOL_HEIGHT = 7
  # parameter used in leaky ReLU
  cfg.LEAKY_COEF = 0.1
  # Probability to keep a node in dropout
  cfg.KEEP_PROB = 0.5
  # image width
  cfg.IMAGE_WIDTH = 224
  # image height
  cfg.IMAGE_HEIGHT = 224
  # anchor box, array of [cx, cy, w, h]. To be defined later
  cfg.ANCHOR_BOX = []
  # number of anchor boxes
  # NOTE(review): evaluated while ANCHOR_BOX is still empty, so this is
  # always 0 here; presumably recomputed once ANCHOR_BOX is filled in.
  cfg.ANCHORS = len(cfg.ANCHOR_BOX)
  # number of anchor boxes per grid
  cfg.ANCHOR_PER_GRID = -1
  # batch size
  cfg.BATCH_SIZE = 20
  # Only keep boxes with probability higher than this threshold
  cfg.PROB_THRESH = 0.005
  # Only plot boxes with probability higher than this threshold
  cfg.PLOT_PROB_THRESH = 0.5
  # Bounding boxes with IOU larger than this are going to be removed
  cfg.NMS_THRESH = 0.2
  # Pixel mean values (BGR order) as a (1, 1, 3) array. Below is the BGR mean
  # of VGG16
  cfg.BGR_MEANS = np.array([[[103.939, 116.779, 123.68]]])
  cfg.SUB_BGR_MEANS = True
  # loss coefficient for confidence regression
  cfg.LOSS_COEF_CONF = 1.0
  # loss coefficient for classification regression
  cfg.LOSS_COEF_CLASS = 1.0
  # loss coefficient for bounding box regression
  cfg.LOSS_COEF_BBOX = 10.0
  # learning rate decay policy
  cfg.LR_DECAY_POLICY = 'exponential'
  # reduce step size after this many steps
  cfg.DECAY_STEPS = 10000
  # multiply the learning rate by this factor
  cfg.LR_DECAY_FACTOR = 0.1
  # learning rate
  cfg.LEARNING_RATE = 0.005
  # learning rate step config
  cfg.LR_STEP_BOUNDRY = None
  cfg.LR_STEP_VALUE = None
  # momentum
  cfg.MOMENTUM = 0.9
  # weight decay
  cfg.WEIGHT_DECAY = 0.0005
  # whether to load pre-trained model
  cfg.LOAD_PRETRAINED_MODEL = True
  # path to load the pre-trained model
  cfg.PRETRAINED_MODEL_PATH = ''
  # print log to console in debug mode
  cfg.DEBUG_MODE = False
  # a small value used to prevent numerical instability
  cfg.EPSILON = 1e-16
  # threshold for safe exponential operation
  cfg.EXP_THRESH=1.0
  # gradients with norm larger than this is going to be clipped.
  cfg.MAX_GRAD_NORM = 10.0
  # Whether to do data augmentation
  cfg.DATA_AUGMENTATION = False
  # Data augmentation type (YOLO / SQT)
  cfg.DATA_AUG_TYPE = 'SQT'
  # The range to randomly shift the image width
  cfg.DRIFT_X = 0
  # The range to randomly shift the image height
  cfg.DRIFT_Y = 0
  # Whether to exclude images harder than hard-category. Only useful for KITTI
  # dataset.
  cfg.EXCLUDE_HARD_EXAMPLES = True
  # small value used in batch normalization to prevent dividing by 0. The
  # default value here is the same with caffe's default value.
  cfg.BATCH_NORM_EPSILON = 1e-5
  return cfg
| {
"repo_name": "goan15910/ConvDet",
"path": "src/config/config.py",
"copies": "1",
"size": "4449",
"license": "bsd-2-clause",
"hash": -159872735802428540,
"line_mean": 26.80625,
"line_max": 85,
"alpha_frac": 0.645088784,
"autogenerated": false,
"ratio": 3.2285921625544267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9256373320616893,
"avg_score": 0.023461525187506806,
"num_lines": 160
} |
"""Base Model configurations"""
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
def base_model_config(dataset='PASCAL_VOC'):
  """Return an edict of default model hyper-parameters for *dataset*.

  Supported datasets: PASCAL_VOC and KITTI; the choice only selects the
  class-name list. Every other entry is a default that callers are
  expected to override as needed.
  """
  assert dataset.upper()=='PASCAL_VOC' or dataset.upper()=='KITTI', \
      'Currently only support PASCAL_VOC or KITTI dataset'
  cfg = edict()
  # Dataset used to train/val/test model. Now support PASCAL_VOC or KITTI
  cfg.DATASET = dataset.upper()
  if cfg.DATASET == 'PASCAL_VOC':
    # object categories to classify
    cfg.CLASS_NAMES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
                       'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                       'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
                       'sofa', 'train', 'tvmonitor')
  elif cfg.DATASET == 'KITTI':
    cfg.CLASS_NAMES = ('car', 'pedestrian', 'cyclist')
  # number of categories to classify
  cfg.CLASSES = len(cfg.CLASS_NAMES)
  # ROI pooling output width
  cfg.GRID_POOL_WIDTH = 7
  # ROI pooling output height
  cfg.GRID_POOL_HEIGHT = 7
  # parameter used in leaky ReLU
  cfg.LEAKY_COEF = 0.1
  # Probability to keep a node in dropout
  cfg.KEEP_PROB = 0.5
  # image width
  cfg.IMAGE_WIDTH = 224
  # image height
  cfg.IMAGE_HEIGHT = 224
  # anchor box, array of [cx, cy, w, h]. To be defined later
  cfg.ANCHOR_BOX = []
  # number of anchor boxes
  # NOTE(review): evaluated while ANCHOR_BOX is still empty, so this is
  # always 0 here; presumably recomputed once ANCHOR_BOX is filled in.
  cfg.ANCHORS = len(cfg.ANCHOR_BOX)
  # number of anchor boxes per grid
  cfg.ANCHOR_PER_GRID = -1
  # batch size
  cfg.BATCH_SIZE = 20
  # Only keep boxes with probability higher than this threshold
  cfg.PROB_THRESH = 0.005
  # Only plot boxes with probability higher than this threshold
  cfg.PLOT_PROB_THRESH = 0.5
  # Bounding boxes with IOU larger than this are going to be removed
  cfg.NMS_THRESH = 0.2
  # Pixel mean values (BGR order) as a (1, 1, 3) array. Below is the BGR mean
  # of VGG16
  cfg.BGR_MEANS = np.array([[[103.939, 116.779, 123.68]]])
  # loss coefficient for confidence regression
  cfg.LOSS_COEF_CONF = 1.0
  # loss coefficient for classification regression
  cfg.LOSS_COEF_CLASS = 1.0
  # loss coefficient for bounding box regression
  cfg.LOSS_COEF_BBOX = 10.0
  # reduce step size after this many steps
  cfg.DECAY_STEPS = 10000
  # multiply the learning rate by this factor
  cfg.LR_DECAY_FACTOR = 0.1
  # learning rate
  cfg.LEARNING_RATE = 0.005
  # momentum
  cfg.MOMENTUM = 0.9
  # weight decay
  cfg.WEIGHT_DECAY = 0.0005
  # whether to load pre-trained model
  cfg.LOAD_PRETRAINED_MODEL = True
  # path to load the pre-trained model
  cfg.PRETRAINED_MODEL_PATH = ''
  # print log to console in debug mode
  cfg.DEBUG_MODE = False
  # a small value used to prevent numerical instability
  cfg.EPSILON = 1e-16
  # threshold for safe exponential operation
  cfg.EXP_THRESH=1.0
  # gradients with norm larger than this is going to be clipped.
  cfg.MAX_GRAD_NORM = 10.0
  # Whether to do data augmentation
  cfg.DATA_AUGMENTATION = False
  # The range to randomly shift the image width
  cfg.DRIFT_X = 0
  # The range to randomly shift the image height
  cfg.DRIFT_Y = 0
  # Whether to exclude images harder than hard-category. Only useful for KITTI
  # dataset.
  cfg.EXCLUDE_HARD_EXAMPLES = True
  # small value used in batch normalization to prevent dividing by 0. The
  # default value here is the same with caffe's default value.
  cfg.BATCH_NORM_EPSILON = 1e-5
  return cfg
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/config/config.py",
"copies": "1",
"size": "3497",
"license": "mit",
"hash": -140527273754779410,
"line_mean": 25.2932330827,
"line_max": 79,
"alpha_frac": 0.6720045754,
"autogenerated": false,
"ratio": 3.3084200567644277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9372000184921949,
"avg_score": 0.021684889448495913,
"num_lines": 133
} |
"""Evaluation"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from config import *
from dataset import pascal_voc, kitti
from utils.util import bbox_transform, Timer
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently support PASCAL_VOC or KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'test',
"""Only used for VOC data."""
"""Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
"""VOC challenge year. 2007 or 2012"""
"""Only used for VOC data""")
tf.app.flags.DEFINE_string('eval_dir', '/tmp/bichen/logs/squeezeDet/eval',
"""Directory where to write event logs """)
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/bichen/logs/squeezeDet/train',
"""Path to the training checkpoint.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 1,
"""How often to check if new cpt is saved.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
"""Neural net architecture.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
def eval_once(saver, ckpt_path, summary_writer, imdb, model):
  """Evaluate a single checkpoint.

  Restores `ckpt_path` into a fresh session, runs detection over every image
  in `imdb`, accumulates per-class boxes, computes average precisions and
  detection-analysis statistics, and writes all results as summaries.

  Args:
    saver: tf.train.Saver built over the model parameters.
    ckpt_path: path of the checkpoint file to restore.
    summary_writer: tf.summary.FileWriter for the eval directory.
    imdb: image database providing batches and evaluation routines.
    model: detection net exposing det_boxes / det_probs / det_class tensors.
  """
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    # Restores from checkpoint
    saver.restore(sess, ckpt_path)
    # Assuming model_checkpoint_path looks something like:
    #   /ckpt_dir/model.ckpt-0,
    # extract global_step from it.
    global_step = ckpt_path.split('/')[-1].split('-')[-1]
    num_images = len(imdb.image_idx)
    # all_boxes[class][image] is a list of [x1, y1, x2, y2, score] entries
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
    num_detection = 0.0
    for i in xrange(num_images):
      _t['im_read'].tic()
      images, scales = imdb.read_image_batch(shuffle=False)
      _t['im_read'].toc()
      _t['im_detect'].tic()
      det_boxes, det_probs, det_class = sess.run(
          [model.det_boxes, model.det_probs, model.det_class],
          feed_dict={model.image_input:images, model.keep_prob: 1.0})
      _t['im_detect'].toc()
      _t['misc'].tic()
      for j in range(len(det_boxes)): # batch
        # rescale boxes back to the original image size
        det_boxes[j, :, 0::2] /= scales[j][0]
        det_boxes[j, :, 1::2] /= scales[j][1]
        # BUG FIX: keep the filtered per-image results in their own variable
        # (det_cls) instead of rebinding det_class, which previously clobbered
        # the batch array returned by sess.run and corrupted det_class[j] for
        # every subsequent image of a multi-image batch.
        det_bbox, score, det_cls = model.filter_prediction(
            det_boxes[j], det_probs[j], det_class[j])
        num_detection += len(det_bbox)
        for c, b, s in zip(det_cls, det_bbox, score):
          all_boxes[c][i].append(bbox_transform(b) + [s])
      _t['misc'].toc()
      print ('im_detect: {:d}/{:d} im_read: {:.3f}s '
             'detect: {:.3f}s misc: {:.3f}s'.format(
                i+1, num_images, _t['im_read'].average_time,
                _t['im_detect'].average_time, _t['misc'].average_time))
    print ('Evaluating detections...')
    aps, ap_names = imdb.evaluate_detections(
        FLAGS.eval_dir, global_step, all_boxes)
    print ('Evaluation summary:')
    print ('  Average number of detections per image: {}:'.format(
        num_detection/num_images))
    print ('  Timing:')
    print ('    im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
        _t['im_read'].average_time, _t['im_detect'].average_time,
        _t['misc'].average_time))
    print ('  Average precisions:')
    eval_summary_ops = []
    for cls, ap in zip(ap_names, aps):
      eval_summary_ops.append(
          tf.summary.scalar('APs/'+cls, ap)
      )
      print ('  {}: {:.3f}'.format(cls, ap))
    print ('  Mean average precision: {:.3f}'.format(np.mean(aps)))
    eval_summary_ops.append(
        tf.summary.scalar('APs/mAP', np.mean(aps))
    )
    eval_summary_ops.append(
        tf.summary.scalar('timing/image_detect', _t['im_detect'].average_time)
    )
    eval_summary_ops.append(
        tf.summary.scalar('timing/image_read', _t['im_read'].average_time)
    )
    eval_summary_ops.append(
        tf.summary.scalar('timing/post_process', _t['misc'].average_time)
    )
    eval_summary_ops.append(
        tf.summary.scalar('num_detections_per_image', num_detection/num_images)
    )
    print ('Analyzing detections...')
    stats, ims = imdb.do_detection_analysis_in_eval(
        FLAGS.eval_dir, global_step)
    # PY3 COMPAT FIX: dict.iteritems() does not exist on Python 3; items()
    # works on both 2 and 3 (the file already imports print_function).
    for k, v in stats.items():
      eval_summary_ops.append(
          tf.summary.scalar(
              'Detection Analysis/'+k, v)
      )
    eval_summary_str = sess.run(eval_summary_ops)
    for sum_str in eval_summary_str:
      summary_writer.add_summary(sum_str, global_step)
def evaluate():
  """Evaluate.

  Builds the net selected by --net inside a fresh graph, then either
  evaluates the single checkpoint --checkpoint_path (when --run_once) or
  polls that directory forever, evaluating each new checkpoint it finds.
  """
  assert FLAGS.dataset == 'KITTI', \
      'Currently only supports KITTI dataset'
  with tf.Graph().as_default() as g:
    assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
        or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \
        'Selected neural net architecture not supported: {}'.format(FLAGS.net)
    if FLAGS.net == 'vgg16':
      mc = kitti_vgg16_config()
      mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1
      mc.LOAD_PRETRAINED_MODEL = False
      model = VGG16ConvDet(mc, FLAGS.gpu)
    elif FLAGS.net == 'resnet50':
      mc = kitti_res50_config()
      mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1
      mc.LOAD_PRETRAINED_MODEL = False
      model = ResNet50ConvDet(mc, FLAGS.gpu)
    elif FLAGS.net == 'squeezeDet':
      mc = kitti_squeezeDet_config()
      mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1
      mc.LOAD_PRETRAINED_MODEL = False
      model = SqueezeDet(mc, FLAGS.gpu)
    elif FLAGS.net == 'squeezeDet+':
      mc = kitti_squeezeDetPlus_config()
      mc.BATCH_SIZE = 1 # TODO(bichen): allow batch size > 1
      mc.LOAD_PRETRAINED_MODEL = False
      model = SqueezeDetPlus(mc, FLAGS.gpu)
    imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
    # the saver restores only the model parameters (not optimizer state)
    saver = tf.train.Saver(model.model_params)
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
    # remember which checkpoints were already evaluated when polling
    ckpts = set()
    while True:
      if FLAGS.run_once:
        # When run_once is true, checkpoint_path should point to the exact
        # checkpoint file.
        eval_once(saver, FLAGS.checkpoint_path, summary_writer, imdb, model)
        return
      else:
        # When run_once is false, checkpoint_path should point to the directory
        # that stores checkpoint files.
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
        if ckpt and ckpt.model_checkpoint_path:
          if ckpt.model_checkpoint_path in ckpts:
            # Do not evaluate on the same checkpoint
            print ('Wait {:d}s for new checkpoints to be saved ... '
                      .format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs)
          else:
            ckpts.add(ckpt.model_checkpoint_path)
            print ('Evaluating {}...'.format(ckpt.model_checkpoint_path))
            eval_once(saver, ckpt.model_checkpoint_path,
                      summary_writer, imdb, model)
        else:
          print('No checkpoint file found')
          if not FLAGS.run_once:
            print ('Wait {:d}s for new checkpoints to be saved ... '
                      .format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
  """Clear the evaluation directory, recreate it, and run evaluation."""
  out_dir = FLAGS.eval_dir
  # Remove stale logs from a previous run before starting fresh.
  if tf.gfile.Exists(out_dir):
    tf.gfile.DeleteRecursively(out_dir)
  tf.gfile.MakeDirs(out_dir)
  evaluate()
# Standard TF-1.x entry point: tf.app.run() parses command-line FLAGS and
# then invokes main().
if __name__ == '__main__':
  tf.app.run()
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/eval.py",
"copies": "1",
"size": "8100",
"license": "mit",
"hash": -8995617380902348000,
"line_mean": 35.6515837104,
"line_max": 82,
"alpha_frac": 0.5960493827,
"autogenerated": false,
"ratio": 3.403361344537815,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44994107272378153,
"avg_score": null,
"num_lines": null
} |
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
  """imdb subclass for the KITTI detection benchmark.

  Reads images from <data_path>/training/image_2 and labels from
  <data_path>/training/label_2, and shells out to the compiled kitti-eval
  binary for the official AP evaluation.
  """
  def __init__(self, image_set, data_path, mc):
    """Index the split, load annotations, and prepare the batch reader.

    Args:
      image_set: split name; 'ImageSets/<image_set>.txt' must exist.
      data_path: KITTI root directory containing 'training' and 'ImageSets'.
      mc: model config; this class reads mc.CLASS_NAMES and
        mc.EXCLUDE_HARD_EXAMPLES.
    """
    imdb.__init__(self, 'kitti_'+image_set, mc)
    self._image_set = image_set
    self._data_root_path = data_path
    self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
    self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
    self._classes = self.mc.CLASS_NAMES
    self._class_to_idx = dict(zip(self.classes, range(self.num_classes)))
    # a list of string indices of images in the directory
    self._image_idx = self._load_image_set_idx()
    # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
    # the image width and height
    self._rois = self._load_kitti_annotation()
    ## batch reader ##
    self._perm_idx = None
    self._cur_idx = 0
    # TODO(bichen): add a random seed as parameter
    self._shuffle_image_idx()
    # Path to the compiled KITTI C++ evaluation tool, relative to the
    # working directory the training/eval scripts are launched from.
    self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
  def _load_image_set_idx(self):
    """Return the list of image index strings for this split."""
    image_set_file = os.path.join(
        self._data_root_path, 'ImageSets', self._image_set+'.txt')
    assert os.path.exists(image_set_file), \
        'File does not exist: {}'.format(image_set_file)
    with open(image_set_file) as f:
      image_idx = [x.strip() for x in f.readlines()]
    return image_idx
  def _image_path_at(self, idx):
    """Return the absolute path of the .png image for index `idx`."""
    image_path = os.path.join(self._image_path, idx+'.png')
    assert os.path.exists(image_path), \
        'Image does not exist: {}'.format(image_path)
    return image_path
  def _load_kitti_annotation(self):
    """Parse all label files into {image_idx: [[cx, cy, w, h, cls_idx], ...]}."""
    def _get_obj_level(obj):
      # Classify one label row into a difficulty level (1 = easiest,
      # 4 = excluded) from box height, truncation and occlusion.
      # NOTE(review): thresholds appear to mirror the KITTI devkit's
      # easy/moderate/hard definitions — confirm against the devkit.
      height = float(obj[7]) - float(obj[5]) + 1
      trucation = float(obj[1])
      occlusion = float(obj[2])
      if height >= 40 and trucation <= 0.15 and occlusion <= 0:
          return 1
      elif height >= 25 and trucation <= 0.3 and occlusion <= 1:
          return 2
      elif height >= 25 and trucation <= 0.5 and occlusion <= 2:
          return 3
      else:
          return 4
    idx2annotation = {}
    for index in self._image_idx:
      filename = os.path.join(self._label_path, index+'.txt')
      with open(filename, 'r') as f:
        lines = f.readlines()
      # NOTE(review): redundant — the with-statement already closed f.
      f.close()
      bboxes = []
      for line in lines:
        obj = line.strip().split(' ')
        try:
          cls = self._class_to_idx[obj[0].lower().strip()]
        # NOTE(review): bare except silently skips any label class absent
        # from CLASS_NAMES; an explicit `except KeyError` would be clearer.
        except:
          continue
        if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
          continue
        xmin = float(obj[4])
        ymin = float(obj[5])
        xmax = float(obj[6])
        ymax = float(obj[7])
        assert xmin >= 0.0 and xmin <= xmax, \
            'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
                .format(xmin, xmax, index)
        assert ymin >= 0.0 and ymin <= ymax, \
            'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
                .format(ymin, ymax, index)
        # Corner format -> center format [cx, cy, w, h].
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        bboxes.append([x, y, w, h, cls])
      idx2annotation[index] = bboxes
    return idx2annotation
  def evaluate_detections(self, eval_dir, global_step, all_boxes):
    """Evaluate detection results.
    Args:
      eval_dir: directory to write evaluation logs
      global_step: step of the checkpoint
      all_boxes: all_boxes[cls][image] = N x 5 arrays of
        [xmin, ymin, xmax, ymax, score]
    Returns:
      aps: array of average precisions.
      names: class names corresponding to each ap
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
    if not os.path.isdir(det_file_dir):
      os.makedirs(det_file_dir)
    # Write one detection file per image in the KITTI label-file layout;
    # unused fields (alpha, 3D dims, etc.) are filled with placeholders.
    for im_idx, index in enumerate(self._image_idx):
      filename = os.path.join(det_file_dir, index+'.txt')
      with open(filename, 'wt') as f:
        for cls_idx, cls in enumerate(self._classes):
          dets = all_boxes[cls_idx][im_idx]
          for k in xrange(len(dets)):
            f.write(
                '{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
                '0.0 0.0 {:.3f}\n'.format(
                    cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
                    dets[k][4])
            )
    # Run the external C++ evaluator over the files just written.
    cmd = self._eval_tool + ' ' \
          + os.path.join(self._data_root_path, 'training') + ' ' \
          + os.path.join(self._data_root_path, 'ImageSets',
                         self._image_set+'.txt') + ' ' \
          + os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
    print('Running: {}'.format(cmd))
    status = subprocess.call(cmd, shell=True)
    # Collect per-class APs from the stats files the evaluator emits.
    # Each file holds three lines: easy / medium / hard AP; a missing file
    # yields zeros so the returned lists keep a fixed length.
    aps = []
    names = []
    for cls in self._classes:
      det_file_name = os.path.join(
          os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
      if os.path.exists(det_file_name):
        with open(det_file_name, 'r') as f:
          lines = f.readlines()
        assert len(lines) == 3, \
            'Line number of {} should be 3'.format(det_file_name)
        aps.append(float(lines[0].split('=')[1].strip()))
        aps.append(float(lines[1].split('=')[1].strip()))
        aps.append(float(lines[2].split('=')[1].strip()))
      else:
        aps.extend([0.0, 0.0, 0.0])
      names.append(cls+'_easy')
      names.append(cls+'_medium')
      names.append(cls+'_hard')
    return aps, names
  def do_detection_analysis_in_eval(self, eval_dir, global_step):
    """Run error analysis on the detection files for one checkpoint.

    Returns (stats, ims): the summary dict from analyze_detections and the
    visualization images produced by visualize_detections (defined in the
    imdb base class).
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
    det_error_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step),
        'error_analysis')
    if not os.path.exists(det_error_dir):
      os.makedirs(det_error_dir)
    det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
    stats = self.analyze_detections(det_file_dir, det_error_file)
    ims = self.visualize_detections(
        image_dir=self._image_path,
        image_format='.png',
        det_error_file=det_error_file,
        output_image_dir=det_error_dir,
        num_det_per_type=10
    )
    return stats, ims
  def analyze_detections(self, detection_file_dir, det_error_file):
    """Categorize detections against ground truth and summarize error types.

    Each detection is counted as correct, repeated, localization ('loc'),
    classification ('cls') or background ('bg') error; undetected ground
    truths are logged as 'missed'. Error lines are appended to
    det_error_file, a summary is printed, and the stats are returned as a
    dict.
    """
    def _save_detection(f, idx, error_type, det, score):
      # Write one error record; det is center-format [cx, cy, w, h, cls],
      # converted back to corner coordinates here.
      f.write(
          '{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
              idx, error_type,
              det[0]-det[2]/2., det[1]-det[3]/2.,
              det[0]+det[2]/2., det[1]+det[3]/2.,
              self._classes[int(det[4])],
              score
          )
      )
    # load detections
    self._det_rois = {}
    for idx in self._image_idx:
      det_file_name = os.path.join(detection_file_dir, idx+'.txt')
      with open(det_file_name) as f:
        lines = f.readlines()
      # NOTE(review): redundant — the with-statement already closed f.
      f.close()
      bboxes = []
      for line in lines:
        obj = line.strip().split(' ')
        cls = self._class_to_idx[obj[0].lower().strip()]
        xmin = float(obj[4])
        ymin = float(obj[5])
        xmax = float(obj[6])
        ymax = float(obj[7])
        score = float(obj[-1])
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        bboxes.append([x, y, w, h, cls, score])
      # Highest-scoring detections first, so they get first claim on a
      # ground-truth box during matching below.
      bboxes.sort(key=lambda x: x[-1], reverse=True)
      self._det_rois[idx] = bboxes
    # do error analysis
    num_objs = 0.
    num_dets = 0.
    num_correct = 0.
    num_loc_error = 0.
    num_cls_error = 0.
    num_bg_error = 0.
    num_repeated_error = 0.
    num_detected_obj = 0.
    with open(det_error_file, 'w') as f:
      for idx in self._image_idx:
        gt_bboxes = np.array(self._rois[idx])
        num_objs += len(gt_bboxes)
        detected = [False]*len(gt_bboxes)
        det_bboxes = self._det_rois[idx]
        # Only the top len(gt_bboxes) detections per image are counted in
        # the statistics (the `i < len(gt_bboxes)` guards below).
        for i, det in enumerate(det_bboxes):
          if i < len(gt_bboxes):
            num_dets += 1
          ious = batch_iou(gt_bboxes[:, :4], det[:4])
          max_iou = np.max(ious)
          gt_idx = np.argmax(ious)
          if max_iou > 0.1:
            if gt_bboxes[gt_idx, 4] == det[4]:
              if max_iou >= 0.5:
                if i < len(gt_bboxes):
                  if not detected[gt_idx]:
                    num_correct += 1
                    detected[gt_idx] = True
                  else:
                    # Same ground truth matched twice -> duplicate.
                    num_repeated_error += 1
              else:
                # Right class, IoU in (0.1, 0.5) -> localization error.
                if i < len(gt_bboxes):
                  num_loc_error += 1
                  _save_detection(f, idx, 'loc', det, det[5])
            else:
              # Overlaps a ground truth of a different class.
              if i < len(gt_bboxes):
                num_cls_error += 1
                _save_detection(f, idx, 'cls', det, det[5])
          else:
            # IoU <= 0.1 with every ground truth -> background error.
            if i < len(gt_bboxes):
              num_bg_error += 1
              _save_detection(f, idx, 'bg', det, det[5])
        for i, gt in enumerate(gt_bboxes):
          if not detected[i]:
            _save_detection(f, idx, 'missed', gt, -1.0)
        num_detected_obj += sum(detected)
    # NOTE(review): redundant close (with-statement), and the ratios below
    # divide by num_dets/num_objs — zero if there are no detections/objects.
    f.close()
    print ('Detection Analysis:')
    print ('  Number of detections: {}'.format(num_dets))
    print ('  Number of objects: {}'.format(num_objs))
    print ('  Percentage of correct detections: {}'.format(
      num_correct/num_dets))
    print ('  Percentage of localization error: {}'.format(
      num_loc_error/num_dets))
    print ('  Percentage of classification error: {}'.format(
      num_cls_error/num_dets))
    print ('  Percentage of background error: {}'.format(
      num_bg_error/num_dets))
    print ('  Percentage of repeated detections: {}'.format(
      num_repeated_error/num_dets))
    print ('  Recall: {}'.format(
      num_detected_obj/num_objs))
    out = {}
    out['num of detections'] = num_dets
    out['num of objects'] = num_objs
    out['% correct detections'] = num_correct/num_dets
    out['% localization error'] = num_loc_error/num_dets
    out['% classification error'] = num_cls_error/num_dets
    out['% background error'] = num_bg_error/num_dets
    out['% repeated error'] = num_repeated_error/num_dets
    out['% recall'] = num_detected_obj/num_objs
    return out
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/dataset/kitti.py",
"copies": "1",
"size": "11098",
"license": "mit",
"hash": 3404431137493731300,
"line_mean": 34.2317460317,
"line_max": 82,
"alpha_frac": 0.5352315733,
"autogenerated": false,
"ratio": 3.2289787605469886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9212154227108084,
"avg_score": 0.010411221347781056,
"num_lines": 315
} |
"""Image data base class for kitti"""
import cv2
import os
import numpy as np
import subprocess
from dataset.imdb import imdb
from utils.util import bbox_transform_inv, batch_iou
class kitti(imdb):
  """imdb subclass for the KITTI detection benchmark.

  Reads images from <data_path>/training/image_2 and labels from
  <data_path>/training/label_2, and shells out to the compiled kitti-eval
  binary for the official AP evaluation.
  """
  def __init__(self, image_set, data_path, mc):
    """Index the split, load annotations, and prepare the batch reader.

    Args:
      image_set: split name; 'ImageSets/<image_set>.txt' must exist.
      data_path: KITTI root directory containing 'training' and 'ImageSets'.
      mc: model config; this class reads mc.CLASS_NAMES and
        mc.EXCLUDE_HARD_EXAMPLES.
    """
    imdb.__init__(self, 'kitti_'+image_set, mc)
    self._image_set = image_set
    self._data_root_path = data_path
    self._image_path = os.path.join(self._data_root_path, 'training', 'image_2')
    self._label_path = os.path.join(self._data_root_path, 'training', 'label_2')
    self._classes = self.mc.CLASS_NAMES
    self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
    # a list of string indices of images in the directory
    self._image_idx = self._load_image_set_idx()
    # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
    # the image width and height
    self._rois = self._load_kitti_annotation()
    ## batch reader ##
    self._perm_idx = None
    self._cur_idx = 0
    # TODO(bichen): add a random seed as parameter
    self._shuffle_image_idx()
    # Path to the compiled KITTI C++ evaluation tool, relative to the
    # working directory the training/eval scripts are launched from.
    self._eval_tool = './src/dataset/kitti-eval/cpp/evaluate_object'
  def _load_image_set_idx(self):
    """Return the list of image index strings for this split."""
    image_set_file = os.path.join(
        self._data_root_path, 'ImageSets', self._image_set+'.txt')
    assert os.path.exists(image_set_file), \
        'File does not exist: {}'.format(image_set_file)
    with open(image_set_file) as f:
      image_idx = [x.strip() for x in f.readlines()]
    return image_idx
  def _image_path_at(self, idx):
    """Return the absolute path of the .png image for index `idx`."""
    image_path = os.path.join(self._image_path, idx+'.png')
    assert os.path.exists(image_path), \
        'Image does not exist: {}'.format(image_path)
    return image_path
  def _load_kitti_annotation(self):
    """Parse all label files into {image_idx: [[cx, cy, w, h, cls_idx], ...]}."""
    def _get_obj_level(obj):
      # Classify one label row into a difficulty level (1 = easiest,
      # 4 = excluded) from box height, truncation and occlusion.
      # NOTE(review): thresholds appear to mirror the KITTI devkit's
      # easy/moderate/hard definitions — confirm against the devkit.
      height = float(obj[7]) - float(obj[5]) + 1
      trucation = float(obj[1])
      occlusion = float(obj[2])
      if height >= 40 and trucation <= 0.15 and occlusion <= 0:
          return 1
      elif height >= 25 and trucation <= 0.3 and occlusion <= 1:
          return 2
      elif height >= 25 and trucation <= 0.5 and occlusion <= 2:
          return 3
      else:
          return 4
    idx2annotation = {}
    for index in self._image_idx:
      filename = os.path.join(self._label_path, index+'.txt')
      with open(filename, 'r') as f:
        lines = f.readlines()
      # NOTE(review): redundant — the with-statement already closed f.
      f.close()
      bboxes = []
      for line in lines:
        obj = line.strip().split(' ')
        try:
          cls = self._class_to_idx[obj[0].lower().strip()]
        # NOTE(review): bare except silently skips any label class absent
        # from CLASS_NAMES; an explicit `except KeyError` would be clearer.
        except:
          continue
        if self.mc.EXCLUDE_HARD_EXAMPLES and _get_obj_level(obj) > 3:
          continue
        xmin = float(obj[4])
        ymin = float(obj[5])
        xmax = float(obj[6])
        ymax = float(obj[7])
        assert xmin >= 0.0 and xmin <= xmax, \
            'Invalid bounding box x-coord xmin {} or xmax {} at {}.txt' \
                .format(xmin, xmax, index)
        assert ymin >= 0.0 and ymin <= ymax, \
            'Invalid bounding box y-coord ymin {} or ymax {} at {}.txt' \
                .format(ymin, ymax, index)
        # Corner format -> center format [cx, cy, w, h].
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        bboxes.append([x, y, w, h, cls])
      idx2annotation[index] = bboxes
    return idx2annotation
  def evaluate_detections(self, eval_dir, global_step, all_boxes):
    """Evaluate detection results.
    Args:
      eval_dir: directory to write evaluation logs
      global_step: step of the checkpoint
      all_boxes: all_boxes[cls][image] = N x 5 arrays of
        [xmin, ymin, xmax, ymax, score]
    Returns:
      aps: array of average precisions.
      names: class names corresponding to each ap
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
    if not os.path.isdir(det_file_dir):
      os.makedirs(det_file_dir)
    # Write one detection file per image in the KITTI label-file layout;
    # unused fields (alpha, 3D dims, etc.) are filled with placeholders.
    for im_idx, index in enumerate(self._image_idx):
      filename = os.path.join(det_file_dir, index+'.txt')
      with open(filename, 'wt') as f:
        for cls_idx, cls in enumerate(self._classes):
          dets = all_boxes[cls_idx][im_idx]
          for k in xrange(len(dets)):
            f.write(
                '{:s} -1 -1 0.0 {:.2f} {:.2f} {:.2f} {:.2f} 0.0 0.0 0.0 0.0 0.0 '
                '0.0 0.0 {:.3f}\n'.format(
                    cls.lower(), dets[k][0], dets[k][1], dets[k][2], dets[k][3],
                    dets[k][4])
            )
    # Run the external C++ evaluator over the files just written.
    cmd = self._eval_tool + ' ' \
          + os.path.join(self._data_root_path, 'training') + ' ' \
          + os.path.join(self._data_root_path, 'ImageSets',
                         self._image_set+'.txt') + ' ' \
          + os.path.dirname(det_file_dir) + ' ' + str(len(self._image_idx))
    print('Running: {}'.format(cmd))
    status = subprocess.call(cmd, shell=True)
    # Collect per-class APs from the stats files the evaluator emits.
    # Each file holds three lines: easy / medium / hard AP; a missing file
    # yields zeros so the returned lists keep a fixed length.
    aps = []
    names = []
    for cls in self._classes:
      det_file_name = os.path.join(
          os.path.dirname(det_file_dir), 'stats_{:s}_ap.txt'.format(cls))
      if os.path.exists(det_file_name):
        with open(det_file_name, 'r') as f:
          lines = f.readlines()
        assert len(lines) == 3, \
            'Line number of {} should be 3'.format(det_file_name)
        aps.append(float(lines[0].split('=')[1].strip()))
        aps.append(float(lines[1].split('=')[1].strip()))
        aps.append(float(lines[2].split('=')[1].strip()))
      else:
        aps.extend([0.0, 0.0, 0.0])
      names.append(cls+'_easy')
      names.append(cls+'_medium')
      names.append(cls+'_hard')
    return aps, names
  def do_detection_analysis_in_eval(self, eval_dir, global_step):
    """Run error analysis on the detection files for one checkpoint.

    Returns (stats, ims): the summary dict from analyze_detections and the
    visualization images produced by visualize_detections (defined in the
    imdb base class).
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step), 'data')
    det_error_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step),
        'error_analysis')
    if not os.path.exists(det_error_dir):
      os.makedirs(det_error_dir)
    det_error_file = os.path.join(det_error_dir, 'det_error_file.txt')
    stats = self.analyze_detections(det_file_dir, det_error_file)
    ims = self.visualize_detections(
        image_dir=self._image_path,
        image_format='.png',
        det_error_file=det_error_file,
        output_image_dir=det_error_dir,
        num_det_per_type=10
    )
    return stats, ims
  def analyze_detections(self, detection_file_dir, det_error_file):
    """Categorize detections against ground truth and summarize error types.

    Each detection is counted as correct, repeated, localization ('loc'),
    classification ('cls') or background ('bg') error; undetected ground
    truths are logged as 'missed'. Error lines are appended to
    det_error_file, a summary is printed, and the stats are returned as a
    dict.
    """
    def _save_detection(f, idx, error_type, det, score):
      # Write one error record; det is center-format [cx, cy, w, h, cls],
      # converted back to corner coordinates here.
      f.write(
          '{:s} {:s} {:.1f} {:.1f} {:.1f} {:.1f} {:s} {:.3f}\n'.format(
              idx, error_type,
              det[0]-det[2]/2., det[1]-det[3]/2.,
              det[0]+det[2]/2., det[1]+det[3]/2.,
              self._classes[int(det[4])],
              score
          )
      )
    # load detections
    self._det_rois = {}
    for idx in self._image_idx:
      det_file_name = os.path.join(detection_file_dir, idx+'.txt')
      with open(det_file_name) as f:
        lines = f.readlines()
      # NOTE(review): redundant — the with-statement already closed f.
      f.close()
      bboxes = []
      for line in lines:
        obj = line.strip().split(' ')
        cls = self._class_to_idx[obj[0].lower().strip()]
        xmin = float(obj[4])
        ymin = float(obj[5])
        xmax = float(obj[6])
        ymax = float(obj[7])
        score = float(obj[-1])
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        bboxes.append([x, y, w, h, cls, score])
      # Highest-scoring detections first, so they get first claim on a
      # ground-truth box during matching below.
      bboxes.sort(key=lambda x: x[-1], reverse=True)
      self._det_rois[idx] = bboxes
    # do error analysis
    num_objs = 0.
    num_dets = 0.
    num_correct = 0.
    num_loc_error = 0.
    num_cls_error = 0.
    num_bg_error = 0.
    num_repeated_error = 0.
    num_detected_obj = 0.
    with open(det_error_file, 'w') as f:
      for idx in self._image_idx:
        gt_bboxes = np.array(self._rois[idx])
        num_objs += len(gt_bboxes)
        detected = [False]*len(gt_bboxes)
        det_bboxes = self._det_rois[idx]
        # Only the top len(gt_bboxes) detections per image are counted in
        # the statistics (the `i < len(gt_bboxes)` guards below).
        for i, det in enumerate(det_bboxes):
          if i < len(gt_bboxes):
            num_dets += 1
          ious = batch_iou(gt_bboxes[:, :4], det[:4])
          max_iou = np.max(ious)
          gt_idx = np.argmax(ious)
          if max_iou > 0.1:
            if gt_bboxes[gt_idx, 4] == det[4]:
              if max_iou >= 0.5:
                if i < len(gt_bboxes):
                  if not detected[gt_idx]:
                    num_correct += 1
                    detected[gt_idx] = True
                  else:
                    # Same ground truth matched twice -> duplicate.
                    num_repeated_error += 1
              else:
                # Right class, IoU in (0.1, 0.5) -> localization error.
                if i < len(gt_bboxes):
                  num_loc_error += 1
                  _save_detection(f, idx, 'loc', det, det[5])
            else:
              # Overlaps a ground truth of a different class.
              if i < len(gt_bboxes):
                num_cls_error += 1
                _save_detection(f, idx, 'cls', det, det[5])
          else:
            # IoU <= 0.1 with every ground truth -> background error.
            if i < len(gt_bboxes):
              num_bg_error += 1
              _save_detection(f, idx, 'bg', det, det[5])
        for i, gt in enumerate(gt_bboxes):
          if not detected[i]:
            _save_detection(f, idx, 'missed', gt, -1.0)
        num_detected_obj += sum(detected)
    # NOTE(review): redundant close (with-statement), and the ratios below
    # divide by num_dets/num_objs — zero if there are no detections/objects.
    f.close()
    print ('Detection Analysis:')
    print ('  Number of detections: {}'.format(num_dets))
    print ('  Number of objects: {}'.format(num_objs))
    print ('  Percentage of correct detections: {}'.format(
      num_correct/num_dets))
    print ('  Percentage of localization error: {}'.format(
      num_loc_error/num_dets))
    print ('  Percentage of classification error: {}'.format(
      num_cls_error/num_dets))
    print ('  Percentage of background error: {}'.format(
      num_bg_error/num_dets))
    print ('  Percentage of repeated detections: {}'.format(
      num_repeated_error/num_dets))
    print ('  Recall: {}'.format(
      num_detected_obj/num_objs))
    out = {}
    out['num of detections'] = num_dets
    out['num of objects'] = num_objs
    out['% correct detections'] = num_correct/num_dets
    out['% localization error'] = num_loc_error/num_dets
    out['% classification error'] = num_cls_error/num_dets
    out['% background error'] = num_bg_error/num_dets
    out['% repeated error'] = num_repeated_error/num_dets
    out['% recall'] = num_detected_obj/num_objs
    return out
| {
"repo_name": "goan15910/ConvDet",
"path": "src/dataset/kitti.py",
"copies": "1",
"size": "11103",
"license": "bsd-2-clause",
"hash": 857754909008375900,
"line_mean": 34.2476190476,
"line_max": 82,
"alpha_frac": 0.5350806088,
"autogenerated": false,
"ratio": 3.2285548124454784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4263635421245478,
"avg_score": null,
"num_lines": null
} |
"""Image data base class for pascal voc"""
import cv2
import os
import numpy as np
import xml.etree.ElementTree as ET
from utils.util import bbox_transform_inv
from dataset.imdb import imdb
from dataset.voc_eval import voc_eval
class pascal_voc(imdb):
  """imdb subclass for PASCAL VOC detection data.

  Loads XML annotations from <data_path>/VOC<year> and evaluates results
  with the voc_eval AP routine.
  """
  def __init__(self, image_set, year, data_path, mc):
    """Index the split, load annotations, and prepare the batch reader.

    Args:
      image_set: split name; 'ImageSets/Main/<image_set>.txt' must exist.
      year: VOC release year as a string, e.g. '2007'.
      data_path: VOCdevkit root containing the 'VOC<year>' directory.
      mc: model config; this class reads mc.CLASS_NAMES.
    """
    imdb.__init__(self, 'voc_'+year+'_'+image_set, mc)
    self._year = year
    self._image_set = image_set
    self._data_root_path = data_path
    self._data_path = os.path.join(self._data_root_path, 'VOC' + self._year)
    self._classes = self.mc.CLASS_NAMES
    self._class_to_idx = dict(zip(self.classes, range(self.num_classes)))
    # a list of string indices of images in the directory
    self._image_idx = self._load_image_set_idx()
    # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
    # the image width and height
    self._rois = self._load_pascal_annotation()
    ## batch reader ##
    self._perm_idx = None
    self._cur_idx = 0
    # TODO(bichen): add a random seed as parameter
    self._shuffle_image_idx()
  def _load_image_set_idx(self):
    """Return the list of image index strings for this split."""
    image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
                                  self._image_set+'.txt')
    assert os.path.exists(image_set_file), \
        'File does not exist: {}'.format(image_set_file)
    with open(image_set_file) as f:
      image_idx = [x.strip() for x in f.readlines()]
    return image_idx
  def _image_path_at(self, idx):
    """Return the absolute path of the .jpg image for index `idx`."""
    image_path = os.path.join(self._data_path, 'JPEGImages', idx+'.jpg')
    assert os.path.exists(image_path), \
        'Image does not exist: {}'.format(image_path)
    return image_path
  def _load_pascal_annotation(self):
    """Parse all XML annotations into {image_idx: [[cx, cy, w, h, cls_idx], ...]}.

    Objects flagged 'difficult' in the annotation are skipped.
    """
    idx2annotation = {}
    for index in self._image_idx:
      filename = os.path.join(self._data_path, 'Annotations', index+'.xml')
      tree = ET.parse(filename)
      objs = tree.findall('object')
      objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
      bboxes = []
      for obj in objs:
        bbox = obj.find('bndbox')
        # Make pixel indexes 0-based
        xmin = float(bbox.find('xmin').text) - 1
        xmax = float(bbox.find('xmax').text) - 1
        ymin = float(bbox.find('ymin').text) - 1
        ymax = float(bbox.find('ymax').text) - 1
        assert xmin >= 0.0 and xmin <= xmax, \
            'Invalid bounding box x-coord xmin {} or xmax {} at {}.xml' \
                .format(xmin, xmax, index)
        assert ymin >= 0.0 and ymin <= ymax, \
            'Invalid bounding box y-coord ymin {} or ymax {} at {}.xml' \
                .format(ymin, ymax, index)
        # Corner format -> center format [cx, cy, w, h].
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        cls = self._class_to_idx[obj.find('name').text.lower().strip()]
        bboxes.append([x, y, w, h, cls])
      idx2annotation[index] = bboxes
    return idx2annotation
  def evaluate_detections(self, eval_dir, global_step, all_boxes):
    """Evaluate detection results.
    Args:
      eval_dir: directory to write evaluation logs
      global_step: step of the checkpoint
      all_boxes: all_boxes[cls][image] = N x 5 arrays of
        [xmin, ymin, xmax, ymax, score]
    Returns:
      aps: array of average precisions.
      names: class names corresponding to each ap
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step))
    if not os.path.isdir(det_file_dir):
      os.mkdir(det_file_dir)
    # One results file per class, in the VOC submission format.
    det_file_path_template = os.path.join(det_file_dir, '{:s}.txt')
    for cls_idx, cls in enumerate(self._classes):
      det_file_name = det_file_path_template.format(cls)
      with open(det_file_name, 'wt') as f:
        for im_idx, index in enumerate(self._image_idx):
          dets = all_boxes[cls_idx][im_idx]
          # VOC expects 1-based indices
          for k in xrange(len(dets)):
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                    format(index, dets[k][-1],
                           dets[k][0]+1, dets[k][1]+1,
                           dets[k][2]+1, dets[k][3]+1)
            )
    # Evaluate detection results
    annopath = os.path.join(
        self._data_root_path,
        'VOC'+self._year,
        'Annotations',
        '{:s}.xml'
    )
    imagesetfile = os.path.join(
        self._data_root_path,
        'VOC'+self._year,
        'ImageSets',
        'Main',
        self._image_set+'.txt'
    )
    cachedir = os.path.join(self._data_root_path, 'annotations_cache')
    aps = []
    # Pre-2010 challenges use the 11-point interpolated AP metric.
    use_07_metric = True if int(self._year) < 2010 else False
    for i, cls in enumerate(self._classes):
      filename = det_file_path_template.format(cls)
      _, _, ap = voc_eval(
          filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
          use_07_metric=use_07_metric)
      aps += [ap]
      print ('{:s}: AP = {:.4f}'.format(cls, ap))
    print ('Mean AP = {:.4f}'.format(np.mean(aps)))
    return aps, self._classes
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/dataset/pascal_voc.py",
"copies": "1",
"size": "5019",
"license": "mit",
"hash": -3438374078338747400,
"line_mean": 35.3695652174,
"line_max": 82,
"alpha_frac": 0.5847778442,
"autogenerated": false,
"ratio": 3.231809401159047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9282618325451881,
"avg_score": 0.006793783981433016,
"num_lines": 138
} |
"""Image data base class for pascal voc"""
import cv2
import os
import numpy as np
import xml.etree.ElementTree as ET
from utils.util import bbox_transform_inv
from dataset.imdb import imdb
from dataset.voc_eval import voc_eval
class pascal_voc(imdb):
  """imdb subclass for PASCAL VOC detection data.

  Loads XML annotations from <data_path>/VOC<year> and evaluates results
  with the voc_eval AP routine.
  """
  def __init__(self, image_set, year, data_path, mc):
    """Index the split, load annotations, and prepare the batch reader.

    Args:
      image_set: split name; 'ImageSets/Main/<image_set>.txt' must exist.
      year: VOC release year as a string, e.g. '2007'.
      data_path: VOCdevkit root containing the 'VOC<year>' directory.
      mc: model config; this class reads mc.CLASS_NAMES.
    """
    imdb.__init__(self, 'voc_'+year+'_'+image_set, mc)
    self._year = year
    self._image_set = image_set
    self._data_root_path = data_path
    self._data_path = os.path.join(self._data_root_path, 'VOC' + self._year)
    self._classes = self.mc.CLASS_NAMES
    self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
    # a list of string indices of images in the directory
    self._image_idx = self._load_image_set_idx()
    # a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
    # the image width and height
    self._rois = self._load_pascal_annotation()
    ## batch reader ##
    self._perm_idx = None
    self._cur_idx = 0
    # TODO(bichen): add a random seed as parameter
    self._shuffle_image_idx()
  def _load_image_set_idx(self):
    """Return the list of image index strings for this split."""
    image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
                                  self._image_set+'.txt')
    assert os.path.exists(image_set_file), \
        'File does not exist: {}'.format(image_set_file)
    with open(image_set_file) as f:
      image_idx = [x.strip() for x in f.readlines()]
    return image_idx
  def _image_path_at(self, idx):
    """Return the absolute path of the .jpg image for index `idx`."""
    image_path = os.path.join(self._data_path, 'JPEGImages', idx+'.jpg')
    assert os.path.exists(image_path), \
        'Image does not exist: {}'.format(image_path)
    return image_path
  def _load_pascal_annotation(self):
    """Parse all XML annotations into {image_idx: [[cx, cy, w, h, cls_idx], ...]}.

    Objects flagged 'difficult' in the annotation are skipped.
    """
    idx2annotation = {}
    for index in self._image_idx:
      filename = os.path.join(self._data_path, 'Annotations', index+'.xml')
      tree = ET.parse(filename)
      objs = tree.findall('object')
      objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
      bboxes = []
      for obj in objs:
        bbox = obj.find('bndbox')
        # Make pixel indexes 0-based
        xmin = float(bbox.find('xmin').text) - 1
        xmax = float(bbox.find('xmax').text) - 1
        ymin = float(bbox.find('ymin').text) - 1
        ymax = float(bbox.find('ymax').text) - 1
        assert xmin >= 0.0 and xmin <= xmax, \
            'Invalid bounding box x-coord xmin {} or xmax {} at {}.xml' \
                .format(xmin, xmax, index)
        assert ymin >= 0.0 and ymin <= ymax, \
            'Invalid bounding box y-coord ymin {} or ymax {} at {}.xml' \
                .format(ymin, ymax, index)
        # Corner format -> center format [cx, cy, w, h].
        x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
        cls = self._class_to_idx[obj.find('name').text.lower().strip()]
        bboxes.append([x, y, w, h, cls])
      idx2annotation[index] = bboxes
    return idx2annotation
  def evaluate_detections(self, eval_dir, global_step, all_boxes):
    """Evaluate detection results.
    Args:
      eval_dir: directory to write evaluation logs
      global_step: step of the checkpoint
      all_boxes: all_boxes[cls][image] = N x 5 arrays of
        [xmin, ymin, xmax, ymax, score]
    Returns:
      aps: array of average precisions.
      names: class names corresponding to each ap
    """
    det_file_dir = os.path.join(
        eval_dir, 'detection_files_{:s}'.format(global_step))
    if not os.path.isdir(det_file_dir):
      os.mkdir(det_file_dir)
    # One results file per class, in the VOC submission format.
    det_file_path_template = os.path.join(det_file_dir, '{:s}.txt')
    for cls_idx, cls in enumerate(self._classes):
      det_file_name = det_file_path_template.format(cls)
      with open(det_file_name, 'wt') as f:
        for im_idx, index in enumerate(self._image_idx):
          dets = all_boxes[cls_idx][im_idx]
          # VOC expects 1-based indices
          for k in xrange(len(dets)):
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                    format(index, dets[k][-1],
                           dets[k][0]+1, dets[k][1]+1,
                           dets[k][2]+1, dets[k][3]+1)
            )
    # Evaluate detection results
    annopath = os.path.join(
        self._data_root_path,
        'VOC'+self._year,
        'Annotations',
        '{:s}.xml'
    )
    imagesetfile = os.path.join(
        self._data_root_path,
        'VOC'+self._year,
        'ImageSets',
        'Main',
        self._image_set+'.txt'
    )
    cachedir = os.path.join(self._data_root_path, 'annotations_cache')
    aps = []
    # Pre-2010 challenges use the 11-point interpolated AP metric.
    use_07_metric = True if int(self._year) < 2010 else False
    for i, cls in enumerate(self._classes):
      filename = det_file_path_template.format(cls)
      _, _, ap = voc_eval(
          filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
          use_07_metric=use_07_metric)
      aps += [ap]
      print ('{:s}: AP = {:.4f}'.format(cls, ap))
    print ('Mean AP = {:.4f}'.format(np.mean(aps)))
    return aps, self._classes
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/dataset/pascal_voc.py",
"copies": "1",
"size": "4989",
"license": "bsd-2-clause",
"hash": 2842205434112637000,
"line_mean": 35.4160583942,
"line_max": 82,
"alpha_frac": 0.5846863099,
"autogenerated": false,
"ratio": 3.2333117303953336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43179980402953333,
"avg_score": null,
"num_lines": null
} |
"""Image data base class for pascal voc"""
import os
import xml.etree.ElementTree as ET
import numpy as np
from dataset.imdb import imdb
from dataset.voc_eval import voc_eval
from utils.util import bbox_transform_inv
class fpascal_voc(imdb):
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'voc_gesture', mc)
self._image_set = image_set
self._year = '2007'
self._data_root_path = data_path
self._data_path = os.path.join(self._data_root_path, 'VOC2007')
self._classes = self.mc.CLASS_NAMES
self._class_to_idx = dict(zip(self.classes, xrange(self.num_classes)))
# a list of string indices of images in the directory
self._image_idx = self._load_image_set_idx()
# a dict of image_idx -> [[cx, cy, w, h, cls_idx]]. x,y,w,h are not divided by
# the image width and height
self._rois = self._load_pascal_annotation()
## batch reader ##
self._perm_idx = None
self._cur_idx = 0
# TODO(bichen): add a random seed as parameter
self._shuffle_image_idx()
def _load_image_set_idx(self):
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set+'.txt')
assert os.path.exists(image_set_file), \
'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
return image_idx
def _image_path_at(self, idx):
image_path = os.path.join(self._data_path, 'JPEGImages', idx+'.jpg')
assert os.path.exists(image_path), \
'Image does not exist: {}'.format(image_path)
return image_path
def _load_pascal_annotation(self):
idx2annotation = {}
for index in self._image_idx:
filename = os.path.join(self._data_path, 'Annotations', index+'.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
bboxes = []
for obj in objs:
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
xmin = float(bbox.find('xmin').text) - 1
xmax = float(bbox.find('xmax').text) - 1
ymin = float(bbox.find('ymin').text) - 1
ymax = float(bbox.find('ymax').text) - 1
assert xmin >= 0.0 and xmin <= xmax, \
'Invalid bounding box x-coord xmin {} or xmax {} at {}.xml' \
.format(xmin, xmax, index)
assert ymin >= 0.0 and ymin <= ymax, \
'Invalid bounding box y-coord ymin {} or ymax {} at {}.xml' \
.format(ymin, ymax, index)
x, y, w, h = bbox_transform_inv([xmin, ymin, xmax, ymax])
cls = self._class_to_idx[obj.find('name').text.lower().strip()]
bboxes.append([x, y, w, h, cls])
idx2annotation[index] = bboxes
return idx2annotation
def evaluate_detections(self, eval_dir, global_step, all_boxes):
"""Evaluate detection results.
Args:
eval_dir: directory to write evaluation logs
global_step: step of the checkpoint
all_boxes: all_boxes[cls][image] = N x 5 arrays of
[xmin, ymin, xmax, ymax, score]
Returns:
aps: array of average precisions.
names: class names corresponding to each ap
"""
det_file_dir = os.path.join(
eval_dir, 'detection_files_{:s}'.format(global_step))
if not os.path.isdir(det_file_dir):
os.mkdir(det_file_dir)
det_file_path_template = os.path.join(det_file_dir, '{:s}.txt')
for cls_idx, cls in enumerate(self._classes):
det_file_name = det_file_path_template.format(cls)
with open(det_file_name, 'wt') as f:
for im_idx, index in enumerate(self._image_idx):
dets = all_boxes[cls_idx][im_idx]
# VOC expects 1-based indices
for k in xrange(len(dets)):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k][-1],
dets[k][0]+1, dets[k][1]+1,
dets[k][2]+1, dets[k][3]+1)
)
# Evaluate detection results
annopath = os.path.join(
self._data_root_path,
'VOC2007',
'Annotations',
'{:s}.xml'
)
imagesetfile = os.path.join(
self._data_root_path,
'VOC2007',
'ImageSets',
'Main',
self._image_set+'.txt'
)
cachedir = os.path.join(self._data_root_path, 'annotations_cache')
aps = []
use_07_metric = True if int(self._year) < 2010 else False
for i, cls in enumerate(self._classes):
filename = det_file_path_template.format(cls)
_, _, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print ('{:s}: AP = {:.4f}'.format(cls, ap))
print ('Mean AP = {:.4f}'.format(np.mean(aps)))
return aps, self._classes | {
"repo_name": "fyhtea/squeezeDet-hand",
"path": "src/dataset/fpascal_voc.py",
"copies": "1",
"size": "4937",
"license": "bsd-2-clause",
"hash": -3214644608159848000,
"line_mean": 34.7826086957,
"line_max": 82,
"alpha_frac": 0.5849706299,
"autogenerated": false,
"ratio": 3.239501312335958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43244719422359584,
"avg_score": null,
"num_lines": null
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config.config import base_model_config
def kitti_res50_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching.
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 20

  # Detection / NMS thresholds.
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MOMENTUM = 0.9
  mc.MAX_GRAD_NORM = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100
  mc.EXCLUDE_HARD_EXAMPLES = False

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 24, 78, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  94.,  49.], [ 225., 161.], [ 170.,  91.],
       [ 390., 181.], [  41.,  32.], [ 128.,  64.],
       [ 298., 164.], [ 232.,  99.], [  65.,  42.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/config/kitti_res50_config.py",
"copies": "1",
"size": "2029",
"license": "mit",
"hash": 1218731503395744800,
"line_mean": 24.6835443038,
"line_max": 77,
"alpha_frac": 0.4751108921,
"autogenerated": false,
"ratio": 2.833798882681564,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3808909774781564,
"avg_score": null,
"num_lines": null
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def kitti_model_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching (full KITTI resolution; half-resolution
  # 621x187 was also experimented with).
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 20

  # Detection / NMS thresholds.
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.MOMENTUM = 0.9
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MAX_GRAD_NORM = 1.0

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  # Architecture flags.
  mc.USE_DECONV = False
  mc.EXCLUDE_HARD_EXAMPLES = False

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 22, 76, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
       [ 162.,  87.], [  38.,  90.], [ 258., 173.],
       [ 224., 108.], [  78., 170.], [  72.,  43.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "goan15910/ConvDet",
"path": "src/config/kitti_model_config.py",
"copies": "1",
"size": "2263",
"license": "bsd-2-clause",
"hash": -5782812523437378000,
"line_mean": 27.6455696203,
"line_max": 77,
"alpha_frac": 0.4803358374,
"autogenerated": false,
"ratio": 2.8828025477707007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3863138385170701,
"avg_score": null,
"num_lines": null
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def kitti_res50_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching.
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 20

  # Detection / NMS thresholds.
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MOMENTUM = 0.9
  mc.MAX_GRAD_NORM = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100
  mc.EXCLUDE_HARD_EXAMPLES = False

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 24, 78, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  94.,  49.], [ 225., 161.], [ 170.,  91.],
       [ 390., 181.], [  41.,  32.], [ 128.,  64.],
       [ 298., 164.], [ 232.,  99.], [  65.,  42.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/config/kitti_res50_config.py",
"copies": "2",
"size": "2023",
"license": "bsd-2-clause",
"hash": -8995766064910847000,
"line_mean": 24.6075949367,
"line_max": 77,
"alpha_frac": 0.4735541275,
"autogenerated": false,
"ratio": 2.8293706293706293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9174860046794538,
"avg_score": 0.025612942015218264,
"num_lines": 79
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def kitti_squeezeDet_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching.
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 20

  # Detection / NMS thresholds.
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MOMENTUM = 0.9
  mc.MAX_GRAD_NORM = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100
  mc.EXCLUDE_HARD_EXAMPLES = False

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 22, 76, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
       [ 162.,  87.], [  38.,  90.], [ 258., 173.],
       [ 224., 108.], [  78., 170.], [  72.,  43.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "goan15910/ConvDet",
"path": "src/config/kitti_squeezeDet_config.py",
"copies": "1",
"size": "2028",
"license": "bsd-2-clause",
"hash": 2602234770618667000,
"line_mean": 24.6708860759,
"line_max": 77,
"alpha_frac": 0.474852071,
"autogenerated": false,
"ratio": 2.8403361344537816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8687516120310943,
"avg_score": 0.025534417028567506,
"num_lines": 79
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def kitti_squeezeDetPlus_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching.
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 20

  # Detection / NMS thresholds.
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MOMENTUM = 0.9
  mc.MAX_GRAD_NORM = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100
  mc.EXCLUDE_HARD_EXAMPLES = False

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 22, 76, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
       [ 162.,  87.], [  38.,  90.], [ 258., 173.],
       [ 224., 108.], [  78., 170.], [  72.,  43.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "goan15910/ConvDet",
"path": "src/config/kitti_squeezeDetPlus_config.py",
"copies": "2",
"size": "2032",
"license": "bsd-2-clause",
"hash": -2631181191224948000,
"line_mean": 24.7215189873,
"line_max": 77,
"alpha_frac": 0.4758858268,
"autogenerated": false,
"ratio": 2.841958041958042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.919040511500411,
"avg_score": 0.025487750750786492,
"num_lines": 79
} |
"""Model configuration for pascal dataset"""
import numpy as np
from config import base_model_config
def kitti_vgg16_config():
  """Specify the parameters to tune below."""
  mc = base_model_config('KITTI')

  # Input geometry and batching (smaller batch: VGG16 is memory-heavy).
  mc.IMAGE_WIDTH = 1242
  mc.IMAGE_HEIGHT = 375
  mc.BATCH_SIZE = 5

  # Detection / NMS thresholds.
  mc.PLOT_PROB_THRESH = 0.4
  mc.NMS_THRESH = 0.4
  mc.PROB_THRESH = 0.005
  mc.TOP_N_DETECTION = 64

  # Loss coefficients.
  mc.LOSS_COEF_BBOX = 5.0
  mc.LOSS_COEF_CONF_POS = 75.0
  mc.LOSS_COEF_CONF_NEG = 100.0
  mc.LOSS_COEF_CLASS = 1.0

  # Optimization schedule.
  mc.WEIGHT_DECAY = 0.0001
  mc.LEARNING_RATE = 0.01
  mc.DECAY_STEPS = 10000
  mc.LR_DECAY_FACTOR = 0.5
  mc.MOMENTUM = 0.9
  mc.MAX_GRAD_NORM = 1.0

  # Data augmentation.
  mc.DATA_AUGMENTATION = True
  mc.DRIFT_X = 150
  mc.DRIFT_Y = 100
  mc.EXCLUDE_HARD_EXAMPLES = False

  # Anchors; set_anchors reads the image size set above.
  mc.ANCHOR_BOX = set_anchors(mc)
  mc.ANCHORS = len(mc.ANCHOR_BOX)
  mc.ANCHOR_PER_GRID = 9

  return mc
def set_anchors(mc):
  """Return the (H*W*B, 4) array of anchor [cx, cy, w, h] priors."""
  H, W, B = 24, 78, 9
  # Prototype anchor sizes (w, h) shared by every grid cell.
  shapes = np.array(
      [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
       [ 162.,  87.], [  38.,  90.], [ 258., 173.],
       [ 224., 108.], [  78., 170.], [  72.,  43.]])
  # Evenly spaced grid-cell centers, excluding the image border.
  cx = np.arange(1, W+1) * float(mc.IMAGE_WIDTH) / (W+1)
  cy = np.arange(1, H+1) * float(mc.IMAGE_HEIGHT) / (H+1)
  # Broadcast centers and shapes to the full (H, W, B, *) grid.
  center_x = np.tile(cx.reshape(1, W, 1, 1), (H, 1, B, 1))
  center_y = np.tile(cy.reshape(H, 1, 1, 1), (1, W, B, 1))
  anchor_shapes = np.tile(shapes.reshape(1, 1, B, 2), (H, W, 1, 1))
  return np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/config/kitti_vgg16_config.py",
"copies": "2",
"size": "2022",
"license": "bsd-2-clause",
"hash": 4172640554345742300,
"line_mean": 24.5949367089,
"line_max": 77,
"alpha_frac": 0.4732937685,
"autogenerated": false,
"ratio": 2.831932773109244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43052265416092433,
"avg_score": null,
"num_lines": null
} |
"""Neural network model base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
def _add_loss_summaries(total_loss):
  """Attach a scalar summary to every collected loss plus the total.

  Args:
    total_loss: Total loss from loss().
  """
  # Summarize each individual loss in the 'losses' collection as well as
  # the combined total loss.
  for loss_op in tf.get_collection('losses') + [total_loss]:
    tf.summary.scalar(loss_op.op.name, loss_op)
def _variable_on_device(name, shape, initializer, trainable=True):
  """Helper to create a Variable.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: an initializer callable, or a concrete value to
        initialize from (in which case shape is inferred from the value)
  Returns:
    Variable Tensor
  """
  # TODO(bichen): fix the hard-coded data type below
  dtype = tf.float32
  if callable(initializer):
    return tf.get_variable(
        name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
  # Non-callable initializer: treat it as the initial value itself.
  return tf.get_variable(name, initializer=initializer, trainable=trainable)
def _variable_with_weight_decay(name, shape, wd, initializer, trainable=True):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified (and the variable is
  trainable).

  Args:
    name: name of the variable
    shape: list of ints
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
  Returns:
    Variable Tensor
  """
  var = _variable_on_device(name, shape, initializer, trainable)
  apply_decay = wd is not None and trainable
  if apply_decay:
    decay_term = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', decay_term)
  return var
class ModelSkeleton:
"""Base class of NN detection models."""
  def __init__(self, mc):
    """Set up input placeholders, the input FIFO queue, and bookkeeping
    counters shared by all detection models.

    Args:
      mc: model configuration (an EasyDict of hyper-parameters).
    """
    self.mc = mc
    # a scalar tensor in range (0, 1]. Usually set to 0.5 in training phase and
    # 1.0 in evaluation phase
    self.keep_prob = 0.5 if mc.IS_TRAINING else 1.0

    # image batch input
    self.ph_image_input = tf.placeholder(
        tf.float32, [mc.BATCH_SIZE, mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
        name='image_input'
    )
    # A tensor where an element is 1 if the corresponding box is "responsible"
    # for detection an object and 0 otherwise.
    self.ph_input_mask = tf.placeholder(
        tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 1], name='box_mask')
    # Tensor used to represent bounding box deltas.
    self.ph_box_delta_input = tf.placeholder(
        tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 4], name='box_delta_input')
    # Tensor used to represent bounding box coordinates.
    self.ph_box_input = tf.placeholder(
        tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, 4], name='box_input')
    # Tensor used to represent labels
    self.ph_labels = tf.placeholder(
        tf.float32, [mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES], name='labels')

    # IOU between predicted anchors with ground-truth boxes
    self.ious = tf.Variable(
        initial_value=np.zeros((mc.BATCH_SIZE, mc.ANCHORS)), trainable=False,
        name='iou', dtype=tf.float32
    )

    # Queue decouples the Python feed loop from graph execution; one entry
    # per example (image, mask, box deltas, boxes, labels).
    self.FIFOQueue = tf.FIFOQueue(
        capacity=mc.QUEUE_CAPACITY,
        dtypes=[tf.float32, tf.float32, tf.float32,
                tf.float32, tf.float32],
        shapes=[[mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
                [mc.ANCHORS, 1],
                [mc.ANCHORS, 4],
                [mc.ANCHORS, 4],
                [mc.ANCHORS, mc.CLASSES]],
    )

    self.enqueue_op = self.FIFOQueue.enqueue_many(
        [self.ph_image_input, self.ph_input_mask,
         self.ph_box_delta_input, self.ph_box_input, self.ph_labels]
    )

    # Rebatch dequeued single examples into BATCH_SIZE-sized model inputs.
    self.image_input, self.input_mask, self.box_delta_input, \
        self.box_input, self.labels = tf.train.batch(
            self.FIFOQueue.dequeue(), batch_size=mc.BATCH_SIZE,
            capacity=mc.QUEUE_CAPACITY)

    # model parameters
    self.model_params = []

    # model size counter
    self.model_size_counter = [] # array of tuple of layer name, parameter size
    # flop counter
    self.flop_counter = [] # array of tuple of layer name, flop number
    # activation counter
    self.activation_counter = [] # array of tuple of layer name, output activations
    self.activation_counter.append(('input', mc.IMAGE_WIDTH*mc.IMAGE_HEIGHT*3))
  def _add_forward_graph(self):
    """NN architecture specification.

    Subclasses must override this to build the backbone network and set
    `self.preds` to the raw convolutional detector output.
    """
    raise NotImplementedError
  def _add_interpretation_graph(self):
    """Interpret NN output.

    Slices the raw detector output `self.preds` into per-anchor class
    probabilities, confidence scores and box deltas, decodes the deltas
    into clipped image-space boxes (`self.det_boxes`), computes IOUs
    against the ground-truth boxes, and produces final per-anchor scores
    (`self.det_probs`) and class indices (`self.det_class`).
    """
    mc = self.mc

    with tf.variable_scope('interpret_output') as scope:
      preds = self.preds

      # probability: the first ANCHOR_PER_GRID*CLASSES channels hold the
      # per-anchor class logits; softmax is applied over the class axis.
      num_class_probs = mc.ANCHOR_PER_GRID*mc.CLASSES
      self.pred_class_probs = tf.reshape(
          tf.nn.softmax(
              tf.reshape(
                  preds[:, :, :, :num_class_probs],
                  [-1, mc.CLASSES]
              )
          ),
          [mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
          name='pred_class_probs'
      )

      # confidence: the next ANCHOR_PER_GRID channels, squashed to (0, 1).
      num_confidence_scores = mc.ANCHOR_PER_GRID+num_class_probs
      self.pred_conf = tf.sigmoid(
          tf.reshape(
              preds[:, :, :, num_class_probs:num_confidence_scores],
              [mc.BATCH_SIZE, mc.ANCHORS]
          ),
          name='pred_confidence_score'
      )

      # bbox_delta: the remaining channels are the 4 box-regression outputs.
      self.pred_box_delta = tf.reshape(
          preds[:, :, :, num_confidence_scores:],
          [mc.BATCH_SIZE, mc.ANCHORS, 4],
          name='bbox_delta'
      )

      # number of object. Used to normalize bbox and classification loss
      self.num_objects = tf.reduce_sum(self.input_mask, name='num_objects')

    with tf.variable_scope('bbox') as scope:
      with tf.variable_scope('stretching'):
        delta_x, delta_y, delta_w, delta_h = tf.unstack(
            self.pred_box_delta, axis=2)

        anchor_x = mc.ANCHOR_BOX[:, 0]
        anchor_y = mc.ANCHOR_BOX[:, 1]
        anchor_w = mc.ANCHOR_BOX[:, 2]
        anchor_h = mc.ANCHOR_BOX[:, 3]

        # Decode deltas relative to the anchor priors; safe_exp bounds the
        # exponent (mc.EXP_THRESH) to keep width/height finite.
        box_center_x = tf.identity(
            anchor_x + delta_x * anchor_w, name='bbox_cx')
        box_center_y = tf.identity(
            anchor_y + delta_y * anchor_h, name='bbox_cy')
        box_width = tf.identity(
            anchor_w * util.safe_exp(delta_w, mc.EXP_THRESH),
            name='bbox_width')
        box_height = tf.identity(
            anchor_h * util.safe_exp(delta_h, mc.EXP_THRESH),
            name='bbox_height')

        self._activation_summary(delta_x, 'delta_x')
        self._activation_summary(delta_y, 'delta_y')
        self._activation_summary(delta_w, 'delta_w')
        self._activation_summary(delta_h, 'delta_h')

        self._activation_summary(box_center_x, 'bbox_cx')
        self._activation_summary(box_center_y, 'bbox_cy')
        self._activation_summary(box_width, 'bbox_width')
        self._activation_summary(box_height, 'bbox_height')

      with tf.variable_scope('trimming'):
        xmins, ymins, xmaxs, ymaxs = util.bbox_transform(
            [box_center_x, box_center_y, box_width, box_height])

        # The max x position is mc.IMAGE_WIDTH - 1 since we use zero-based
        # pixels. Same for y.
        xmins = tf.minimum(
            tf.maximum(0.0, xmins), mc.IMAGE_WIDTH-1.0, name='bbox_xmin')
        self._activation_summary(xmins, 'box_xmin')

        ymins = tf.minimum(
            tf.maximum(0.0, ymins), mc.IMAGE_HEIGHT-1.0, name='bbox_ymin')
        self._activation_summary(ymins, 'box_ymin')

        xmaxs = tf.maximum(
            tf.minimum(mc.IMAGE_WIDTH-1.0, xmaxs), 0.0, name='bbox_xmax')
        self._activation_summary(xmaxs, 'box_xmax')

        ymaxs = tf.maximum(
            tf.minimum(mc.IMAGE_HEIGHT-1.0, ymaxs), 0.0, name='bbox_ymax')
        self._activation_summary(ymaxs, 'box_ymax')

        # Convert the clipped corners back to center format for output.
        self.det_boxes = tf.transpose(
            tf.stack(util.bbox_transform_inv([xmins, ymins, xmaxs, ymaxs])),
            (1, 2, 0), name='bbox'
        )

    with tf.variable_scope('IOU'):
      def _tensor_iou(box1, box2):
        # Element-wise IOU of two corner-format box batches; masked so that
        # only "responsible" anchors contribute.
        with tf.variable_scope('intersection'):
          xmin = tf.maximum(box1[0], box2[0], name='xmin')
          ymin = tf.maximum(box1[1], box2[1], name='ymin')
          xmax = tf.minimum(box1[2], box2[2], name='xmax')
          ymax = tf.minimum(box1[3], box2[3], name='ymax')

          w = tf.maximum(0.0, xmax-xmin, name='inter_w')
          h = tf.maximum(0.0, ymax-ymin, name='inter_h')
          intersection = tf.multiply(w, h, name='intersection')

        with tf.variable_scope('union'):
          w1 = tf.subtract(box1[2], box1[0], name='w1')
          h1 = tf.subtract(box1[3], box1[1], name='h1')
          w2 = tf.subtract(box2[2], box2[0], name='w2')
          h2 = tf.subtract(box2[3], box2[1], name='h2')

          union = w1*h1 + w2*h2 - intersection

        return intersection/(union+mc.EPSILON) \
            * tf.reshape(self.input_mask, [mc.BATCH_SIZE, mc.ANCHORS])

      self.ious = self.ious.assign(
          _tensor_iou(
              util.bbox_transform(tf.unstack(self.det_boxes, axis=2)),
              util.bbox_transform(tf.unstack(self.box_input, axis=2))
          )
      )
      self._activation_summary(self.ious, 'conf_score')

    with tf.variable_scope('probability') as scope:
      self._activation_summary(self.pred_class_probs, 'class_probs')

      # Final score = class probability scaled by objectness confidence.
      probs = tf.multiply(
          self.pred_class_probs,
          tf.reshape(self.pred_conf, [mc.BATCH_SIZE, mc.ANCHORS, 1]),
          name='final_class_prob'
      )

      self._activation_summary(probs, 'final_class_prob')

      self.det_probs = tf.reduce_max(probs, 2, name='score')
      self.det_class = tf.argmax(probs, 2, name='class_idx')
  def _add_loss_graph(self):
    """Define the loss operation.

    Builds three terms collected into the 'losses' collection —
    classification cross-entropy, confidence regression toward IOU, and
    bounding-box delta regression — and sums the collection (which also
    contains the weight-decay terms) into `self.loss`.
    """
    mc = self.mc

    with tf.variable_scope('class_regression') as scope:
      # cross-entropy: q * -log(p) + (1-q) * -log(1-p)
      # add a small value into log to prevent blowing up
      self.class_loss = tf.truediv(
          tf.reduce_sum(
              (self.labels*(-tf.log(self.pred_class_probs+mc.EPSILON))
               + (1-self.labels)*(-tf.log(1-self.pred_class_probs+mc.EPSILON)))
              * self.input_mask * mc.LOSS_COEF_CLASS),
          self.num_objects,
          name='class_loss'
      )
      tf.add_to_collection('losses', self.class_loss)

    with tf.variable_scope('confidence_score_regression') as scope:
      input_mask = tf.reshape(self.input_mask, [mc.BATCH_SIZE, mc.ANCHORS])
      # Positive (responsible) anchors are weighted by LOSS_COEF_CONF_POS /
      # num_objects; the many negatives by LOSS_COEF_CONF_NEG / remainder.
      self.conf_loss = tf.reduce_mean(
          tf.reduce_sum(
              tf.square((self.ious - self.pred_conf))
              * (input_mask*mc.LOSS_COEF_CONF_POS/self.num_objects
                 +(1-input_mask)*mc.LOSS_COEF_CONF_NEG/(mc.ANCHORS-self.num_objects)),
              reduction_indices=[1]
          ),
          name='confidence_loss'
      )
      tf.add_to_collection('losses', self.conf_loss)
      tf.summary.scalar('mean iou', tf.reduce_sum(self.ious)/self.num_objects)

    with tf.variable_scope('bounding_box_regression') as scope:
      # L2 regression of the predicted deltas, masked to responsible anchors.
      self.bbox_loss = tf.truediv(
          tf.reduce_sum(
              mc.LOSS_COEF_BBOX * tf.square(
                  self.input_mask*(self.pred_box_delta-self.box_delta_input))),
          self.num_objects,
          name='bbox_loss'
      )
      tf.add_to_collection('losses', self.bbox_loss)

    # add above losses as well as weight decay losses to form the total loss
    self.loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_train_graph(self):
"""Define the training operation."""
mc = self.mc
self.global_step = tf.Variable(0, name='global_step', trainable=False)
lr = tf.train.exponential_decay(mc.LEARNING_RATE,
self.global_step,
mc.DECAY_STEPS,
mc.LR_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
_add_loss_summaries(self.loss)
opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=mc.MOMENTUM)
grads_vars = opt.compute_gradients(self.loss, tf.trainable_variables())
with tf.variable_scope('clip_gradient') as scope:
for i, (grad, var) in enumerate(grads_vars):
grads_vars[i] = (tf.clip_by_norm(grad, mc.MAX_GRAD_NORM), var)
apply_gradient_op = opt.apply_gradients(grads_vars, global_step=self.global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads_vars:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
with tf.control_dependencies([apply_gradient_op]):
self.train_op = tf.no_op(name='train')
def _add_viz_graph(self):
"""Define the visualization operation."""
mc = self.mc
self.image_to_show = tf.placeholder(
tf.float32, [None, mc.IMAGE_HEIGHT, mc.IMAGE_WIDTH, 3],
name='image_to_show'
)
self.viz_op = tf.summary.image('sample_detection_results',
self.image_to_show, collections='image_summary',
max_outputs=mc.BATCH_SIZE)
  def _conv_bn_layer(
      self, inputs, conv_param_name, bn_param_name, scale_param_name, filters,
      size, stride, padding='SAME', freeze=False, relu=True,
      conv_with_bias=False, stddev=0.001):
    """ Convolution + BatchNorm + [relu] layer. Batch mean and var are treated
    as constant. Weights have to be initialized from a pre-trained model or
    restored from a checkpoint.

    Args:
      inputs: input tensor
      conv_param_name: name of the convolution parameters
      bn_param_name: name of the batch normalization parameters
      scale_param_name: name of the scale parameters
      filters: number of output filters.
      size: kernel size.
      stride: stride
      padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
      freeze: if true, then do not train the parameters in this layer.
      relu: whether to use relu or not.
      conv_with_bias: whether or not add bias term to the convolution output.
      stddev: standard deviation used for random weight initializer.
    Returns:
      A convolutional layer operation.
    """
    mc = self.mc

    with tf.variable_scope(conv_param_name) as scope:
      channels = inputs.get_shape()[3]

      if mc.LOAD_PRETRAINED_MODEL:
        cw = self.caffemodel_weight
        # Caffe stores kernels as [out, in, h, w]; TF wants [h, w, in, out].
        kernel_val = np.transpose(cw[conv_param_name][0], [2,3,1,0])
        if conv_with_bias:
          bias_val = cw[conv_param_name][1]
        mean_val = cw[bn_param_name][0]
        var_val = cw[bn_param_name][1]
        gamma_val = cw[scale_param_name][0]
        beta_val = cw[scale_param_name][1]
      else:
        # No pretrained weights: random kernel, identity batch-norm params.
        kernel_val = tf.truncated_normal_initializer(
            stddev=stddev, dtype=tf.float32)
        if conv_with_bias:
          bias_val = tf.constant_initializer(0.0)
        mean_val = tf.constant_initializer(0.0)
        var_val = tf.constant_initializer(1.0)
        gamma_val = tf.constant_initializer(1.0)
        beta_val = tf.constant_initializer(0.0)

      # re-order the caffe kernel with shape [out, in, h, w] -> tf kernel with
      # shape [h, w, in, out]
      kernel = _variable_with_weight_decay(
          'kernels', shape=[size, size, int(channels), filters],
          wd=mc.WEIGHT_DECAY, initializer=kernel_val, trainable=(not freeze))
      self.model_params += [kernel]
      if conv_with_bias:
        biases = _variable_on_device('biases', [filters], bias_val,
                                     trainable=(not freeze))
        self.model_params += [biases]
      # Batch-norm statistics (mean/var) are frozen; only gamma/beta can train.
      gamma = _variable_on_device('gamma', [filters], gamma_val,
                                  trainable=(not freeze))
      beta = _variable_on_device('beta', [filters], beta_val,
                                 trainable=(not freeze))
      mean = _variable_on_device('mean', [filters], mean_val, trainable=False)
      var = _variable_on_device('var', [filters], var_val, trainable=False)
      self.model_params += [gamma, beta, mean, var]

      conv = tf.nn.conv2d(
          inputs, kernel, [1, stride, stride, 1], padding=padding,
          name='convolution')
      if conv_with_bias:
        conv = tf.nn.bias_add(conv, biases, name='bias_add')

      conv = tf.nn.batch_normalization(
          conv, mean=mean, variance=var, offset=beta, scale=gamma,
          variance_epsilon=mc.BATCH_NORM_EPSILON, name='batch_norm')

      # Bookkeeping: parameter count, FLOPs and activation sizes per layer.
      self.model_size_counter.append(
          (conv_param_name, (1+size*size*int(channels))*filters)
      )
      out_shape = conv.get_shape().as_list()
      num_flops = \
        (1+2*int(channels)*size*size)*filters*out_shape[1]*out_shape[2]
      if relu:
        num_flops += 2*filters*out_shape[1]*out_shape[2]
      self.flop_counter.append((conv_param_name, num_flops))

      self.activation_counter.append(
          (conv_param_name, out_shape[1]*out_shape[2]*out_shape[3])
      )

      if relu:
        return tf.nn.relu(conv)
      else:
        return conv
  def _conv_layer(
      self, layer_name, inputs, filters, size, stride, padding='SAME',
      freeze=False, xavier=False, relu=True, stddev=0.001):
    """Convolutional layer operation constructor.

    Args:
      layer_name: layer name.
      inputs: input tensor
      filters: number of output filters.
      size: kernel size.
      stride: stride
      padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
      freeze: if true, then do not train the parameters in this layer.
      xavier: whether to use xavier weight initializer or not.
      relu: whether to use relu or not.
      stddev: standard deviation used for random weight initializer.
    Returns:
      A convolutional layer operation.
    """
    mc = self.mc

    # Try to fetch pretrained caffe weights for this layer; fall back to a
    # random initializer when absent or when the stored shape mismatches.
    use_pretrained_param = False
    if mc.LOAD_PRETRAINED_MODEL:
      cw = self.caffemodel_weight
      if layer_name in cw:
        kernel_val = np.transpose(cw[layer_name][0], [2,3,1,0])
        bias_val = cw[layer_name][1]
        # check the shape
        if (kernel_val.shape ==
              (size, size, inputs.get_shape().as_list()[-1], filters)) \
           and (bias_val.shape == (filters, )):
          use_pretrained_param = True
        else:
          print ('Shape of the pretrained parameter of {} does not match, '
              'use randomly initialized parameter'.format(layer_name))
      else:
        print ('Cannot find {} in the pretrained model. Use randomly initialized '
               'parameters'.format(layer_name))

    if mc.DEBUG_MODE:
      print('Input tensor shape to {}: {}'.format(layer_name, inputs.get_shape()))

    with tf.variable_scope(layer_name) as scope:
      channels = inputs.get_shape()[3]

      # re-order the caffe kernel with shape [out, in, h, w] -> tf kernel with
      # shape [h, w, in, out]
      if use_pretrained_param:
        if mc.DEBUG_MODE:
          print ('Using pretrained model for {}'.format(layer_name))
        kernel_init = tf.constant(kernel_val , dtype=tf.float32)
        bias_init = tf.constant(bias_val, dtype=tf.float32)
      elif xavier:
        kernel_init = tf.contrib.layers.xavier_initializer_conv2d()
        bias_init = tf.constant_initializer(0.0)
      else:
        kernel_init = tf.truncated_normal_initializer(
            stddev=stddev, dtype=tf.float32)
        bias_init = tf.constant_initializer(0.0)

      kernel = _variable_with_weight_decay(
          'kernels', shape=[size, size, int(channels), filters],
          wd=mc.WEIGHT_DECAY, initializer=kernel_init, trainable=(not freeze))

      biases = _variable_on_device('biases', [filters], bias_init,
                                trainable=(not freeze))
      self.model_params += [kernel, biases]

      conv = tf.nn.conv2d(
          inputs, kernel, [1, stride, stride, 1], padding=padding,
          name='convolution')
      conv_bias = tf.nn.bias_add(conv, biases, name='bias_add')

      if relu:
        out = tf.nn.relu(conv_bias, 'relu')
      else:
        out = conv_bias

      # Bookkeeping: parameter count, FLOPs and activation sizes per layer.
      self.model_size_counter.append(
          (layer_name, (1+size*size*int(channels))*filters)
      )
      out_shape = out.get_shape().as_list()
      num_flops = \
        (1+2*int(channels)*size*size)*filters*out_shape[1]*out_shape[2]
      if relu:
        num_flops += 2*filters*out_shape[1]*out_shape[2]
      self.flop_counter.append((layer_name, num_flops))

      self.activation_counter.append(
          (layer_name, out_shape[1]*out_shape[2]*out_shape[3])
      )

      return out
def _pooling_layer(
self, layer_name, inputs, size, stride, padding='SAME'):
"""Pooling layer operation constructor.
Args:
layer_name: layer name.
inputs: input tensor
size: kernel size.
stride: stride
padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
Returns:
A pooling layer operation.
"""
with tf.variable_scope(layer_name) as scope:
out = tf.nn.max_pool(inputs,
ksize=[1, size, size, 1],
strides=[1, stride, stride, 1],
padding=padding)
activation_size = np.prod(out.get_shape().as_list()[1:])
self.activation_counter.append((layer_name, activation_size))
return out
def _fc_layer(
self, layer_name, inputs, hiddens, flatten=False, relu=True,
xavier=False, stddev=0.001):
"""Fully connected layer operation constructor.
Args:
layer_name: layer name.
inputs: input tensor
hiddens: number of (hidden) neurons in this layer.
flatten: if true, reshape the input 4D tensor of shape
(batch, height, weight, channel) into a 2D tensor with shape
(batch, -1). This is used when the input to the fully connected layer
is output of a convolutional layer.
relu: whether to use relu or not.
xavier: whether to use xavier weight initializer or not.
stddev: standard deviation used for random weight initializer.
Returns:
A fully connected layer operation.
"""
mc = self.mc
use_pretrained_param = False
if mc.LOAD_PRETRAINED_MODEL:
cw = self.caffemodel_weight
if layer_name in cw:
use_pretrained_param = True
kernel_val = cw[layer_name][0]
bias_val = cw[layer_name][1]
if mc.DEBUG_MODE:
print('Input tensor shape to {}: {}'.format(layer_name, inputs.get_shape()))
with tf.variable_scope(layer_name) as scope:
input_shape = inputs.get_shape().as_list()
if flatten:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs = tf.reshape(inputs, [-1, dim])
if use_pretrained_param:
try:
# check the size before layout transform
assert kernel_val.shape == (hiddens, dim), \
'kernel shape error at {}'.format(layer_name)
kernel_val = np.reshape(
np.transpose(
np.reshape(
kernel_val, # O x (C*H*W)
(hiddens, input_shape[3], input_shape[1], input_shape[2])
), # O x C x H x W
(2, 3, 1, 0)
), # H x W x C x O
(dim, -1)
) # (H*W*C) x O
# check the size after layout transform
assert kernel_val.shape == (dim, hiddens), \
'kernel shape error at {}'.format(layer_name)
except:
# Do not use pretrained parameter if shape doesn't match
use_pretrained_param = False
print ('Shape of the pretrained parameter of {} does not match, '
'use randomly initialized parameter'.format(layer_name))
else:
dim = input_shape[1]
if use_pretrained_param:
try:
kernel_val = np.transpose(kernel_val, (1,0))
assert kernel_val.shape == (dim, hiddens), \
'kernel shape error at {}'.format(layer_name)
except:
use_pretrained_param = False
print ('Shape of the pretrained parameter of {} does not match, '
'use randomly initialized parameter'.format(layer_name))
if use_pretrained_param:
if mc.DEBUG_MODE:
print ('Using pretrained model for {}'.format(layer_name))
kernel_init = tf.constant(kernel_val, dtype=tf.float32)
bias_init = tf.constant(bias_val, dtype=tf.float32)
elif xavier:
kernel_init = tf.contrib.layers.xavier_initializer()
bias_init = tf.constant_initializer(0.0)
else:
kernel_init = tf.truncated_normal_initializer(
stddev=stddev, dtype=tf.float32)
bias_init = tf.constant_initializer(0.0)
weights = _variable_with_weight_decay(
'weights', shape=[dim, hiddens], wd=mc.WEIGHT_DECAY,
initializer=kernel_init)
biases = _variable_on_device('biases', [hiddens], bias_init)
self.model_params += [weights, biases]
outputs = tf.nn.bias_add(tf.matmul(inputs, weights), biases)
if relu:
outputs = tf.nn.relu(outputs, 'relu')
# count layer stats
self.model_size_counter.append((layer_name, (dim+1)*hiddens))
num_flops = 2 * dim * hiddens + hiddens
if relu:
num_flops += 2*hiddens
self.flop_counter.append((layer_name, num_flops))
self.activation_counter.append((layer_name, hiddens))
return outputs
def filter_prediction(self, boxes, probs, cls_idx):
"""Filter bounding box predictions with probability threshold and
non-maximum supression.
Args:
boxes: array of [cx, cy, w, h].
probs: array of probabilities
cls_idx: array of class indices
Returns:
final_boxes: array of filtered bounding boxes.
final_probs: array of filtered probabilities
final_cls_idx: array of filtered class indices
"""
mc = self.mc
if mc.TOP_N_DETECTION < len(probs) and mc.TOP_N_DETECTION > 0:
order = probs.argsort()[:-mc.TOP_N_DETECTION-1:-1]
probs = probs[order]
boxes = boxes[order]
cls_idx = cls_idx[order]
else:
filtered_idx = np.nonzero(probs>mc.PROB_THRESH)[0]
probs = probs[filtered_idx]
boxes = boxes[filtered_idx]
cls_idx = cls_idx[filtered_idx]
final_boxes = []
final_probs = []
final_cls_idx = []
for c in range(mc.CLASSES):
idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]
keep = util.nms(boxes[idx_per_class], probs[idx_per_class], mc.NMS_THRESH)
for i in range(len(keep)):
if keep[i]:
final_boxes.append(boxes[idx_per_class[i]])
final_probs.append(probs[idx_per_class[i]])
final_cls_idx.append(c)
return final_boxes, final_probs, final_cls_idx
def _activation_summary(self, x, layer_name):
"""Helper to create summaries for activations.
Args:
x: layer output tensor
layer_name: name of the layer
Returns:
nothing
"""
with tf.variable_scope('activation_summary') as scope:
tf.summary.histogram(
'activation_summary/'+layer_name, x)
tf.summary.scalar(
'activation_summary/'+layer_name+'/sparsity', tf.nn.zero_fraction(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/average', tf.reduce_mean(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/max', tf.reduce_max(x))
tf.summary.scalar(
'activation_summary/'+layer_name+'/min', tf.reduce_min(x))
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/nn_skeleton.py",
"copies": "1",
"size": "27924",
"license": "bsd-2-clause",
"hash": -5864975339788389000,
"line_mean": 35.9854304636,
"line_max": 86,
"alpha_frac": 0.6036026357,
"autogenerated": false,
"ratio": 3.5571974522292993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.96040858397594,
"avg_score": 0.011342849633979985,
"num_lines": 755
} |
# Original license text is below
# BSD 2-Clause License
#
# Copyright (c) 2016, Bichen Wu
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""SqueezeDet Demo.
In image detection mode, for a given image, detect objects and draw bounding
boxes around them. In video detection mode, perform real-time detection on the
video stream.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from threading import Thread
import cv2
import time
import sys
import os
import glob
import numpy as np
import tensorflow as tf
from config import *
from nets import *
from utils.util import bbox_transform
# Command-line configuration flags.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'checkpoint', './data/model_checkpoints/squeezeDet/model.ckpt-87000',
    """Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
    'out_dir', './data/out/', """Directory to dump output image or video.""")
tf.app.flags.DEFINE_string(
    'image_dir', './', """Directory with images""")
tf.app.flags.DEFINE_string(
    'label_dir', './', """Directory with image labels""")
tf.app.flags.DEFINE_float(
    'iou_threshold', 0.7, """Threshold for IoU metric to determine false positives""")
tf.app.flags.DEFINE_integer(
    'gpu', 0, """GPU ID""")
tf.app.flags.DEFINE_string(
    'demo_net', 'squeezeDet', """Neural net architecture.""")
tf.app.flags.DEFINE_string(
    'finisher_file', '', """Finisher file. If present, the app stops. Useful to interrupt the continuous mode.""")
tf.app.flags.DEFINE_integer(
    'input_device', -1, """Input device (like webcam) ID. If specified, images are taken from this device instead of image dir.""")
tf.app.flags.DEFINE_integer(
    "webcam_max_image_count", 10000, "Maximum image count generated in the webcam mode.");
tf.app.flags.DEFINE_string(
    "skip_files_including", "", "Skip files from the beginning to the given one (inclusive)");
# Difficulty labels used by Box/eval_boxes. UNASSIGNED marks a detection not
# yet matched to any ground-truth difficulty bucket.
UNASSIGNED = -2
UNKNOWN = -1
EASY = 0
MODERATE = 1
HARD = 2
# KITTI-style evaluation cutoffs, indexed by difficulty (EASY, MODERATE, HARD).
MIN_HEIGHT = [40, 25, 25] # minimum height for evaluated groundtruth/detections
MAX_OCCLUSION = [0, 1, 2] # maximum occlusion level of the groundtruth used for evaluation
MAX_TRUNCATION = [0.15, 0.3, 0.5] # maximum truncation level of the groundtruth used for evaluation
def bb_intersection_over_union(boxA, boxB):
    """Return the intersection-over-union of two boxes.

    Args:
        boxA: [xmin, ymin, xmax, ymax] in inclusive pixel coordinates.
        boxB: [xmin, ymin, xmax, ymax] in inclusive pixel coordinates.
    Returns:
        IoU in [0, 1]; 0.0 when the boxes do not overlap.
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of intersection rectangle.
    # BUGFIX: clamp each side at 0 — for disjoint boxes (xB - xA + 1) and/or
    # (yB - yA + 1) go negative, and the product of two negatives used to
    # produce a bogus positive "intersection" area.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth
    # rectangles
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the interesection area
    iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection over union value
    return iou
def my_draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, label_placement='bottom'):
    """Draw labelled bounding boxes onto *im* in place.

    *cdict* optionally maps a class name (the text before ':' in the label)
    to a BGR color; unmatched classes fall back to *color*.
    """
    assert label_placement == 'bottom' or label_placement == 'top', \
        'label_placement format not accepted: {}.'.format(label_placement)
    font = cv2.FONT_HERSHEY_SIMPLEX
    for bbox, label in zip(box_list, label_list):
        xmin, ymin, xmax, ymax = [int(b) for b in bbox]
        klass = label.split(':')[0] # text before "CLASS: (PROB)"
        box_color = cdict[klass] if cdict and klass in cdict else color
        # draw box
        cv2.rectangle(im, (xmin, ymin), (xmax, ymax), box_color, 1)
        # draw label at the requested corner of the box
        anchor = (xmin, ymax) if label_placement == 'bottom' else (xmin, ymin)
        cv2.putText(im, label, anchor, font, 0.3, box_color, 1)
box_counter = 0
class Box:
    """A labelled bounding box [xmin, ymin, xmax, ymax] with match state.

    Relies on the module-level difficulty constants (EASY/MODERATE/HARD/
    UNKNOWN/UNASSIGNED) and KITTI thresholds (MIN_HEIGHT, MAX_OCCLUSION,
    MAX_TRUNCATION).
    """
    def __init__(self, klass, bbox, occlusion=0, truncation=0, prob=0):
        global box_counter
        # Sequential id; detect_image() resets the counter per image.
        self.id = box_counter
        box_counter += 1
        self.klass = klass
        self.bbox = bbox
        self.occlusion = occlusion
        self.truncation = truncation
        self.prob = prob
        self.assigned_difficulty = UNASSIGNED
    def height(self):
        return self.bbox[3] - self.bbox[1]
    def should_ignore(self, difficulty):
        # Outside the bucket when too occluded, too truncated, or too short.
        return (self.occlusion > MAX_OCCLUSION[difficulty]
                or self.truncation > MAX_TRUNCATION[difficulty]
                or self.height() < MIN_HEIGHT[difficulty])
    def difficulty(self):
        # Easiest bucket the box qualifies for, else UNKNOWN.
        for level in (EASY, MODERATE, HARD):
            if not self.should_ignore(level):
                return level
        return UNKNOWN
def care(r_box, dontcare):
    """Return False when *r_box* overlaps any don't-care region at or above
    the class IoU threshold, True otherwise."""
    # Cars use the stricter 0.7 IoU threshold; all other classes use 0.5.
    threshold = 0.7 if 'car' == r_box.klass else 0.5
    return all(
        bb_intersection_over_union(r_box.bbox, dc_box.bbox) < threshold
        for dc_box in dontcare)
def eval_boxes(expected, recognized, klass, difficulty):
    """Greedily match recognized boxes of *klass* against ground truth at
    *difficulty*, returning (true_positive_count, ground_truth_count).

    Matching mutates r_box.assigned_difficulty on each matched detection, so
    repeated calls for the same class are order-sensitive; detect_image calls
    this with UNKNOWN first, then EASY, MODERATE, HARD.
    """
    # Ground truth restricted to this class and difficulty bucket.
    gt = [b for b in expected if b.klass == klass and difficulty == b.difficulty()]
    rec = []
    if UNKNOWN == difficulty:
        rec = [b for b in recognized if b.klass == klass]
    else:
        # Only detections not yet matched at an earlier difficulty and tall
        # enough to be evaluated in this bucket.
        rec = [b for b in recognized if b.klass == klass and UNASSIGNED == b.assigned_difficulty and b.height() >= MIN_HEIGHT[difficulty]]
    assigned_rec = [False for b in rec]
    assigned_gt = [False for b in gt]
    tp = 0
    fn = 0
    for r_index, r_box in enumerate(rec):
        # Cars require the stricter KITTI IoU threshold.
        threshold = 0.7 if 'car' == r_box.klass else 0.5
        for gt_index, gt_box in enumerate(gt):
            if assigned_gt[gt_index]:
                continue
            iou = bb_intersection_over_union(r_box.bbox, gt_box.bbox)
            # print('+ r_id {}, gt_id {}, iou {}'.format(r_box.id, gt_box.id, iou))
            if iou >= threshold:
                # First sufficiently-overlapping unassigned ground truth wins.
                assigned_rec[r_index] = True
                assigned_gt[gt_index] = True
                r_box.assigned_difficulty = difficulty
                break
        if assigned_rec[r_index]:
            tp += 1
        else:
            # NOTE(review): despite the name, this counts unmatched
            # detections (false positives); the value is never used.
            fn += 1
    return (tp, len(gt))
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when *a* and *b* are approximately equal (PEP 485 formula)."""
    # Tolerance is the larger of the relative and absolute bounds.
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
class Stat:
    """Running mean over an incrementally observed stream of values."""
    def __init__(self):
        self.avg = 0.0
        self.count = 0
    def add(self, v):
        # Incremental mean: (old_avg * old_count + v) / new_count.
        updated = self.count + 1
        self.avg = (self.avg * self.count + v) / float(updated)
        self.count = updated
    def addIf(self, v, condition):
        # Zero-valued samples are only recorded when *condition* holds.
        if condition or not isclose(v, 0):
            self.add(v)
def calc_mAP(avg_precision):
    """Mean of the per-class, per-difficulty average precisions.

    Only Stat entries that have seen at least one sample contribute;
    returns 0 when none have.
    """
    populated = [
        stat for stats in avg_precision.values() for stat in stats
        if 0 < stat.count
    ]
    if not populated:
        return 0
    total = 0.0
    for stat in populated:
        total += stat.avg
    return total / float(len(populated))
def rescale(x, orig_scale, target_scale):
    """Map coordinate *x* from a [0, orig_scale] axis to a [0, target_scale] axis."""
    fraction = float(x) / float(orig_scale)
    return float(target_scale) * fraction
def rescale_boxes(boxes, boxes_shape, target_shape):
    """Return copies of *boxes* mapped from *boxes_shape* to *target_shape*.

    Shapes are (height, width, ...) image shapes; box metadata is preserved.
    """
    source_h, source_w = boxes_shape[:2]
    target_h, target_w = target_shape[:2]
    rescaled = []
    for box in boxes:
        old = box.bbox
        # x coordinates scale by width, y coordinates by height.
        new_bbox = [
            rescale(old[0], source_w, target_w),
            rescale(old[1], source_h, target_h),
            rescale(old[2], source_w, target_w),
            rescale(old[3], source_h, target_h),
        ]
        rescaled.append(Box(box.klass, new_bbox, occlusion=box.occlusion,
                            truncation=box.truncation, prob=box.prob))
    return rescaled
def safe_div(all_rec, tp):
    """Return tp / all_rec as a float, or 0 when the denominator is zero."""
    if 0 == all_rec:
        return 0
    return float(tp) / float(all_rec)
def detect_image(mc, sess, model, class_names, avg_precision, orig_im, file_name, original_file_path):
    """Run detection on one image, write annotated outputs, print metrics.

    Args:
        mc: model configuration.
        sess: open TF session with the restored model.
        model: detection model (provides det_boxes/det_probs/det_class).
        class_names: lower-cased class names, indexed by class id.
        avg_precision: dict class_name -> [Stat, Stat, Stat], mutated in place
            with rolling per-difficulty precision.
        orig_im: original BGR image (written to out_dir unmodified).
        file_name: base name used for output and label lookup.
        original_file_path: source path, printed when non-empty.
    """
    global box_counter
    # Box ids restart at 0 for every image.
    box_counter = 0
    boxed_img = orig_im.copy()
    im = orig_im.astype(np.float32, copy=True)
    im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
    input_image = im - mc.BGR_MEANS
    start_clock = time.time()
    # Detect
    det_boxes, det_probs, det_class = sess.run(
        [model.det_boxes, model.det_probs, model.det_class],
        feed_dict={model.image_input:[input_image]})
    # Filter
    final_boxes, final_probs, final_class = model.filter_prediction(det_boxes[0], det_probs[0], det_class[0])
    duration = time.time() - start_clock
    # Keep only detections above the plotting threshold.
    keep_idx = [idx for idx in range(len(final_probs)) \
        if final_probs[idx] > mc.PLOT_PROB_THRESH]
    final_boxes = [final_boxes[idx] for idx in keep_idx]
    final_probs = [final_probs[idx] for idx in keep_idx]
    final_class = [final_class[idx] for idx in keep_idx]
    # Detections as Box objects in the resized-image coordinate space.
    recognized = [Box(class_names[k], bbox_transform(bbox), prob=p) for k, bbox, p in zip(final_class, final_boxes, final_probs)]
    # TODO(bichen): move this color dict to configuration file
    cls2clr = {
        'car': (255, 191, 0),
        'cyclist': (0, 191, 255),
        'pedestrian':(255, 0, 191)
    }
    expected = []
    dontcare = []
    class_count = dict((k, 0) for k in class_names)
    # Load KITTI-format ground-truth labels when a label file exists.
    if FLAGS.label_dir:
        label_file_name = os.path.join(FLAGS.label_dir, file_name)
        label_file_name = os.path.splitext(label_file_name)[0] + '.txt'
        if os.path.isfile(label_file_name):
            with open(label_file_name) as lf:
                label_lines = [x.strip() for x in lf.readlines()]
                for l in label_lines:
                    # KITTI label columns: class, truncation, occlusion, ...,
                    # bbox at columns 4-7.
                    parts = l.strip().lower().split(' ')
                    klass = parts[0]
                    bbox = [float(parts[i]) for i in [4, 5, 6, 7]]
                    if klass in class_count.keys():
                        class_count[klass] += 1
                        b = Box(klass, bbox, truncation=float(parts[1]), occlusion=float(parts[2]))
                        expected.append(b)
                    elif klass == 'dontcare':
                        dontcare.append(Box(klass, bbox))
    expected_class_count = class_count
    # Map detections back to the original image resolution for drawing.
    rescaled_recognized = rescale_boxes(recognized, im.shape, orig_im.shape)
    # Draw dontcare boxes
    my_draw_box(
        boxed_img, [b.bbox for b in dontcare],
        ['dontcare' for b in dontcare],
        label_placement='top', color=(255,255,255)
    )
    # Draw original boxes
    my_draw_box(
        boxed_img, [b.bbox for b in expected],
        [box.klass + ': (TRUE)' for box in expected],
        label_placement='top', color=(200,200,200)
    )
    # Draw recognized boxes
    my_draw_box(
        boxed_img, [b.bbox for b in rescaled_recognized],
        [b.klass + ': (%.2f)' % b.prob for b in rescaled_recognized],
        cdict=cls2clr,
    )
    out_file_name = os.path.join(FLAGS.out_dir, file_name)
    cv2.imwrite(out_file_name, orig_im)
    boxed_out_file_name = os.path.join(FLAGS.out_dir, 'boxed_' + file_name)
    cv2.imwrite(boxed_out_file_name, boxed_img)
    print('File: {}'.format(out_file_name))
    if '' != original_file_path:
        print('Original file: {}'.format(original_file_path))
    print('Duration: {} sec'.format(duration))
    # Per-class recognized counts for this image.
    class_count = dict((k, 0) for k in class_names)
    for k in final_class:
        class_count[class_names[k]] += 1
    for k, v in class_count.items():
        print('Recognized {}: {}'.format(k, v))
    for k, v in expected_class_count.items():
        print('Expected {}: {}'.format(k, v))
    for box in rescaled_recognized:
        b = box.bbox
        print('Detection {}: {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}'.format(box.klass, b[0], b[1], b[2], b[3], box.prob))
    for box in expected:
        b = box.bbox
        print('Ground truth {}: {:.3f} {:.3f} {:.3f} {:.3f} 1'.format(box.klass, b[0], b[1], b[2], b[3]))
    # Drop boxes that fall inside don't-care regions before scoring.
    expected = [b for b in expected if care(b, dontcare)]
    recognized = [b for b in recognized if care(b, dontcare)]
    for k in class_names:
        all_rec = len([b for b in recognized if b.klass == k])
        all_gt = len([b for b in expected if b.klass == k])
        report = 0 != all_rec or 0 != all_gt # don't report not found and actually unexpected labels, but still count them for mAP
        # eval_boxes mutates assigned_difficulty; UNKNOWN pass runs first,
        # then each difficulty bucket in order (see eval_boxes).
        eval_boxes(expected, recognized, k, UNKNOWN)
        tp_easy, all_gt_easy = eval_boxes(expected, recognized, k, EASY)
        tp_mod, all_gt_mod = eval_boxes(expected, recognized, k, MODERATE)
        tp_hard, all_gt_hard = eval_boxes(expected, recognized, k, HARD)
        tp = tp_easy + tp_mod + tp_hard
        fp = all_rec - tp
        if report:
            print('True positive {}: {} easy, {} moderate, {} hard'.format(k, tp_easy, tp_mod, tp_hard))
            print('False positive {}: {}'.format(k, fp))
        # Per-difficulty precision: tp / (tp + fp), via safe_div(denom, num).
        precision = [
            safe_div(tp_easy + fp, tp_easy),
            safe_div(tp_mod + fp, tp_mod),
            safe_div(tp_hard + fp, tp_hard)
        ]
        recall = 0.0
        if 0 == all_gt:
            recall = 1.0 if 0 == all_rec else 0.0
        else:
            recall = float(tp) / float(all_gt)
        if report:
            print('Precision {}: {:.2f} easy, {:.2f} moderate, {:.2f} hard'.format(k, precision[EASY], precision[MODERATE], precision[HARD]))
            print('Recall {}: {:.2f}'.format(k, recall))
        ap = avg_precision[k]
        ap[EASY].addIf(precision[EASY], 0 < all_gt_easy)
        ap[MODERATE].addIf(precision[MODERATE], 0 < all_gt_mod)
        ap[HARD].addIf(precision[HARD], 0 < all_gt_hard)
        if report:
            print('Rolling AP {}: {:.2f} easy, {:.2f} moderate, {:.2f} hard'.format(k, ap[EASY].avg, ap[MODERATE].avg, ap[HARD].avg))
            print('Rolling mAP: {:.4f}'.format(calc_mAP(avg_precision)))
    print('')
    sys.stdout.flush()
def should_finish():
    """Return True when a finisher file was configured and now exists on disk."""
    if not FLAGS.finisher_file:
        return False
    return os.path.isfile(FLAGS.finisher_file)
class WebcamVideoStream:
    """This class is a modified version of the class taken from the 'imutils' library
    The MIT License (MIT)
    Copyright (c) 2015-2016 Adrian Rosebrock, http://www.pyimagesearch.com
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE.
    """
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
    def start(self):
        # start the thread to read frames from the video stream
        # (daemon so it never blocks interpreter shutdown); returns self
        # so callers can chain: WebcamVideoStream(id).start()
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                self.stream.release()
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
    def read(self):
        # return the frame most recently read (may return the same frame
        # multiple times if the caller polls faster than the camera)
        return (self.grabbed, self.frame)
    def stop(self):
        # indicate that the thread should be stopped; the background
        # thread releases the capture device on its next iteration
        self.stopped = True
def detect_webcam(fn, device_id):
    """Grab frames from capture device *device_id* and run *fn* on each
    until the finisher file appears or the stream ends."""
    stream = WebcamVideoStream(device_id).start()
    frame_idx = 0
    while not should_finish():
        grabbed, frame = stream.read()
        if not grabbed:
            break
        fn(frame, 'webcam_%06d.jpg' % frame_idx, '')
        # Wrap the counter so generated file names stay bounded.
        frame_idx = (frame_idx + 1) % FLAGS.webcam_max_image_count
    stream.stop()
def detect_dir(fn, d):
    """Run *fn* over every file in directory *d* in sorted order,
    optionally skipping everything up to and including a marker file."""
    entries = [os.path.join(d, name) for name in os.listdir(d)]
    image_list = sorted(p for p in entries if os.path.isfile(p))
    if '' != FLAGS.skip_files_including:
        try:
            marker = image_list.index(FLAGS.skip_files_including)
            image_list = image_list[marker+1:]
        except ValueError:
            # Marker not present in this directory: process everything.
            pass
    for path in image_list:
        if should_finish():
            break
        im = cv2.imread(path)
        fn(im, os.path.split(path)[1], path)
def image_demo():
    """Detect image.

    Builds the network selected by --demo_net, restores its checkpoint, and
    runs detection either on a webcam device or on a directory of images.
    """
    # demo_net name -> (model-config constructor, model class). Replaces four
    # near-identical if/elif branches that duplicated the setup code.
    builders = {
        'squeezeDet': (kitti_squeezeDet_config, SqueezeDet),
        'squeezeDet+': (kitti_squeezeDetPlus_config, SqueezeDetPlus),
        'resnet50': (kitti_res50_config, ResNet50ConvDet),
        'vgg16': (kitti_vgg16_config, VGG16ConvDet),
    }
    assert FLAGS.demo_net in builders, \
        'Selected nueral net architecture not supported: {}'.format(FLAGS.demo_net)
    with tf.Graph().as_default():
        # Load model
        config_fn, model_class = builders[FLAGS.demo_net]
        mc = config_fn()
        mc.BATCH_SIZE = 1
        # model parameters will be restored from checkpoint
        mc.LOAD_PRETRAINED_MODEL = False
        model = model_class(mc, FLAGS.gpu)
        saver = tf.train.Saver(model.model_params)
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            saver.restore(sess, FLAGS.checkpoint)
            class_names = [k.lower() for k in mc.CLASS_NAMES]
            # One Stat per (class, difficulty): [EASY, MODERATE, HARD].
            avg_precision = dict((k, [Stat(), Stat(), Stat()]) for k in class_names)
            fn = partial(detect_image, mc, sess, model, class_names, avg_precision)
            if 0 <= FLAGS.input_device:
                detect_webcam(fn, FLAGS.input_device)
            else:
                detect_dir(fn, FLAGS.image_dir)
def main(argv=None):
    """Entry point: ensure the output directory exists, then run detection."""
    if not tf.gfile.Exists(FLAGS.out_dir):
        tf.gfile.MakeDirs(FLAGS.out_dir)
    image_demo()
if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "dsavenko/ck-tensorflow",
"path": "program/squeezedet/continuous.py",
"copies": "1",
"size": "19890",
"license": "bsd-3-clause",
"hash": 8111823255399025000,
"line_mean": 35.6298342541,
"line_max": 147,
"alpha_frac": 0.6179487179,
"autogenerated": false,
"ratio": 3.4358265676282604,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9516943050332025,
"avg_score": 0.007366447039247262,
"num_lines": 543
} |
"""ResNet50+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class ResNet50ConvDet(ModelSkeleton):
  """ResNet-50 backbone with a ConvDet detection head (SqueezeDet framework)."""
  def __init__(self, mc, gpu_id=0):
    # Build the full graph (forward, interpretation, loss, train, viz) on
    # the selected GPU.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)
      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()
  def _add_forward_graph(self):
    """NN architecture."""
    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)
    # Stem: 7x7/2 conv + 3x3/2 max pool. Early stages are frozen
    # (freeze=True) so pretrained weights are not updated.
    conv1 = self._conv_bn_layer(
        self.image_input, 'conv1', 'bn_conv1', 'scale_conv1', filters=64,
        size=7, stride=2, freeze=True, conv_with_bias=True)
    pool1 = self._pooling_layer(
        'pool1', conv1, size=3, stride=2, padding='VALID')
    # conv2_x: three residual blocks; res2a has a projection shortcut
    # (branch1) to match the 256-channel output.
    with tf.variable_scope('conv2_x') as scope:
      with tf.variable_scope('res2a'):
        branch1 = self._conv_bn_layer(
            pool1, 'res2a_branch1', 'bn2a_branch1', 'scale2a_branch1',
            filters=256, size=1, stride=1, freeze=True, relu=False)
        branch2 = self._res_branch(
            pool1, layer_name='2a', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res2b'):
        branch2 = self._res_branch(
            res2a, layer_name='2b', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2b = tf.nn.relu(res2a+branch2, 'relu')
      with tf.variable_scope('res2c'):
        branch2 = self._res_branch(
            res2b, layer_name='2c', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2c = tf.nn.relu(res2b+branch2, 'relu')
    # conv3_x: four residual blocks; res3a downsamples (stride 2).
    with tf.variable_scope('conv3_x') as scope:
      with tf.variable_scope('res3a'):
        branch1 = self._conv_bn_layer(
            res2c, 'res3a_branch1', 'bn3a_branch1', 'scale3a_branch1',
            filters=512, size=1, stride=2, freeze=True, relu=False)
        branch2 = self._res_branch(
            res2c, layer_name='3a', in_filters=128, out_filters=512,
            down_sample=True, freeze=True)
        res3a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res3b'):
        branch2 = self._res_branch(
            res3a, layer_name='3b', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3b = tf.nn.relu(res3a+branch2, 'relu')
      with tf.variable_scope('res3c'):
        branch2 = self._res_branch(
            res3b, layer_name='3c', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3c = tf.nn.relu(res3b+branch2, 'relu')
      with tf.variable_scope('res3d'):
        branch2 = self._res_branch(
            res3c, layer_name='3d', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3d = tf.nn.relu(res3c+branch2, 'relu')
    # conv4_x: six residual blocks, trainable (no freeze); res4a downsamples.
    with tf.variable_scope('conv4_x') as scope:
      with tf.variable_scope('res4a'):
        branch1 = self._conv_bn_layer(
            res3d, 'res4a_branch1', 'bn4a_branch1', 'scale4a_branch1',
            filters=1024, size=1, stride=2, relu=False)
        branch2 = self._res_branch(
            res3d, layer_name='4a', in_filters=256, out_filters=1024,
            down_sample=True)
        res4a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res4b'):
        branch2 = self._res_branch(
            res4a, layer_name='4b', in_filters=256, out_filters=1024,
            down_sample=False)
        res4b = tf.nn.relu(res4a+branch2, 'relu')
      with tf.variable_scope('res4c'):
        branch2 = self._res_branch(
            res4b, layer_name='4c', in_filters=256, out_filters=1024,
            down_sample=False)
        res4c = tf.nn.relu(res4b+branch2, 'relu')
      with tf.variable_scope('res4d'):
        branch2 = self._res_branch(
            res4c, layer_name='4d', in_filters=256, out_filters=1024,
            down_sample=False)
        res4d = tf.nn.relu(res4c+branch2, 'relu')
      with tf.variable_scope('res4e'):
        branch2 = self._res_branch(
            res4d, layer_name='4e', in_filters=256, out_filters=1024,
            down_sample=False)
        res4e = tf.nn.relu(res4d+branch2, 'relu')
      with tf.variable_scope('res4f'):
        branch2 = self._res_branch(
            res4e, layer_name='4f', in_filters=256, out_filters=1024,
            down_sample=False)
        res4f = tf.nn.relu(res4e+branch2, 'relu')
    dropout4 = tf.nn.dropout(res4f, self.keep_prob, name='drop4')
    # ConvDet head: one 3x3 conv predicting, per anchor, class scores (+1
    # confidence) and 4 box deltas.
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    self.preds = self._conv_layer(
        'conv5', dropout4, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)
  def _res_branch(
      self, inputs, layer_name, in_filters, out_filters, down_sample=False,
      freeze=False):
    """Residual branch constructor.
    Builds the bottleneck path (1x1 reduce -> 3x3 -> 1x1 expand) of a
    ResNet block; the caller adds the shortcut and applies ReLU.
    Args:
      inputs: input tensor
      layer_name: layer name
      in_filters: number of filters in XX_branch2a and XX_branch2b layers.
      out_filters: number of filters in XX_branch2clayers.
      donw_sample: if true, down sample the input feature map
      freeze: if true, do not change parameters in this layer
    Returns:
      A residual branch output operation.
    """
    with tf.variable_scope('res'+layer_name+'_branch2'):
      stride = 2 if down_sample else 1
      output = self._conv_bn_layer(
          inputs,
          conv_param_name='res'+layer_name+'_branch2a',
          bn_param_name='bn'+layer_name+'_branch2a',
          scale_param_name='scale'+layer_name+'_branch2a',
          filters=in_filters, size=1, stride=stride, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2b',
          bn_param_name='bn'+layer_name+'_branch2b',
          scale_param_name='scale'+layer_name+'_branch2b',
          filters=in_filters, size=3, stride=1, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2c',
          bn_param_name='bn'+layer_name+'_branch2c',
          scale_param_name='scale'+layer_name+'_branch2c',
          filters=out_filters, size=1, stride=1, freeze=freeze, relu=False)
      return output
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/nets/resnet50_convDet.py",
"copies": "1",
"size": "6837",
"license": "bsd-2-clause",
"hash": -3284377319621639700,
"line_mean": 39.4556213018,
"line_max": 76,
"alpha_frac": 0.6081614743,
"autogenerated": false,
"ratio": 3.1711502782931356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9253135206027443,
"avg_score": 0.00523530931313843,
"num_lines": 169
} |
"""SqueezeDet Demo.
In image detection mode, for a given image, detect objects and draw bounding
boxes around them. In video detection mode, perform real-time detection on the
video stream.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import time
import sys
import os
import glob
import numpy as np
import tensorflow as tf
from config import *
from train import _draw_box
from nets import *
# Command-line configuration flags for the demo.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    'mode', 'image', """'image' or 'video'.""")
tf.app.flags.DEFINE_string(
    'checkpoint', './data/model_checkpoints/squeezeDet/model.ckpt-87000',
    """Path to the model parameter file.""")
tf.app.flags.DEFINE_string(
    'input_path', './data/sample.png',
    """Input image or video to be detected. Can process glob input such as """
    """./data/00000*.png.""")
tf.app.flags.DEFINE_string(
    'out_dir', './data/out/', """Directory to dump output image or video.""")
def video_demo():
  """Detect videos.

  Reads frames from FLAGS.input_path, runs SqueezeDet on each cropped
  frame, draws detections, and writes numbered JPEGs to FLAGS.out_dir.
  """
  cap = cv2.VideoCapture(FLAGS.input_path)
  # Define the codec and create VideoWriter object
  # fourcc = cv2.cv.CV_FOURCC(*'XVID')
  # fourcc = cv2.cv.CV_FOURCC(*'MJPG')
  # in_file_name = os.path.split(FLAGS.input_path)[1]
  # out_file_name = os.path.join(FLAGS.out_dir, 'out_'+in_file_name)
  # out = cv2.VideoWriter(out_file_name, fourcc, 30.0, (375,1242), True)
  # out = VideoWriter(out_file_name, frameSize=(1242, 375))
  # out.open()
  with tf.Graph().as_default():
    # Load model
    mc = kitti_squeezeDet_config()
    mc.BATCH_SIZE = 1
    # model parameters will be restored from checkpoint
    mc.LOAD_PRETRAINED_MODEL = False
    model = SqueezeDet(mc, FLAGS.gpu)
    saver = tf.train.Saver(model.model_params)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      saver.restore(sess, FLAGS.checkpoint)
      times = {}
      count = 0
      while cap.isOpened():
        t_start = time.time()
        count += 1
        out_im_name = os.path.join(FLAGS.out_dir, str(count).zfill(6)+'.jpg')
        # Load images from video and crop
        ret, frame = cap.read()
        if ret==True:
          # crop frames to the region/size the model expects
          frame = frame[500:-205, 239:-439, :]
          im_input = frame.astype(np.float32) - mc.BGR_MEANS
        else:
          break
        t_reshape = time.time()
        times['reshape']= t_reshape - t_start
        # Detect
        det_boxes, det_probs, det_class = sess.run(
            [model.det_boxes, model.det_probs, model.det_class],
            feed_dict={model.image_input:[im_input], model.keep_prob: 1.0})
        t_detect = time.time()
        times['detect']= t_detect - t_reshape
        # Filter
        final_boxes, final_probs, final_class = model.filter_prediction(
            det_boxes[0], det_probs[0], det_class[0])
        # Keep only detections above the plotting threshold.
        keep_idx = [idx for idx in range(len(final_probs)) \
                        if final_probs[idx] > mc.PLOT_PROB_THRESH]
        final_boxes = [final_boxes[idx] for idx in keep_idx]
        final_probs = [final_probs[idx] for idx in keep_idx]
        final_class = [final_class[idx] for idx in keep_idx]
        t_filter = time.time()
        times['filter']= t_filter - t_detect
        # Draw boxes
        # TODO(bichen): move this color dict to configuration file
        cls2clr = {
            'car': (255, 191, 0),
            'cyclist': (0, 191, 255),
            'pedestrian':(255, 0, 191)
        }
        _draw_box(
            frame, final_boxes,
            [mc.CLASS_NAMES[idx]+': (%.2f)'% prob \
                for idx, prob in zip(final_class, final_probs)],
            cdict=cls2clr
        )
        t_draw = time.time()
        times['draw']= t_draw - t_filter
        cv2.imwrite(out_im_name, frame)
        # out.write(frame)
        times['total']= time.time() - t_start
        # time_str = ''
        # for t in times:
        #   time_str += '{} time: {:.4f} '.format(t[0], t[1])
        # time_str += '\n'
        time_str = 'Total time: {:.4f}, detection time: {:.4f}, filter time: '\
                   '{:.4f}'. \
            format(times['total'], times['detect'], times['filter'])
        print (time_str)
        if cv2.waitKey(1) & 0xFF == ord('q'):
          break
      # Release everything if job is finished
      cap.release()
      # out.release()
      cv2.destroyAllWindows()
def image_demo():
  """Detect image."""
  with tf.Graph().as_default():
    # Build the model with batch size 1; weights come from the checkpoint,
    # so no pretrained caffemodel is loaded.
    mc = kitti_squeezeDet_config()
    mc.BATCH_SIZE = 1
    # model parameters will be restored from checkpoint
    mc.LOAD_PRETRAINED_MODEL = False
    model = SqueezeDet(mc, FLAGS.gpu)

    saver = tf.train.Saver(model.model_params)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      saver.restore(sess, FLAGS.checkpoint)

      for image_path in glob.iglob(FLAGS.input_path):
        frame = cv2.imread(image_path)
        frame = frame.astype(np.float32, copy=False)
        frame = cv2.resize(frame, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
        # The network input is mean-subtracted; the drawn/saved image is not.
        input_image = frame - mc.BGR_MEANS

        # Detect
        det_boxes, det_probs, det_class = sess.run(
            [model.det_boxes, model.det_probs, model.det_class],
            feed_dict={model.image_input: [input_image], model.keep_prob: 1.0})

        # Filter: top-k / NMS, then keep detections above the plot threshold.
        final_boxes, final_probs, final_class = model.filter_prediction(
            det_boxes[0], det_probs[0], det_class[0])
        keep = [i for i in range(len(final_probs))
                if final_probs[i] > mc.PLOT_PROB_THRESH]
        final_boxes = [final_boxes[i] for i in keep]
        final_probs = [final_probs[i] for i in keep]
        final_class = [final_class[i] for i in keep]

        # TODO(bichen): move this color dict to configuration file
        cls2clr = {
            'car': (255, 191, 0),
            'cyclist': (0, 191, 255),
            'pedestrian': (255, 0, 191)
        }

        # Draw boxes
        labels = [mc.CLASS_NAMES[idx]+': (%.2f)'% prob
                  for idx, prob in zip(final_class, final_probs)]
        _draw_box(frame, final_boxes, labels, cdict=cls2clr)

        file_name = os.path.split(image_path)[1]
        out_file_name = os.path.join(FLAGS.out_dir, 'out_'+file_name)
        cv2.imwrite(out_file_name, frame)
        print ('Image detection output saved to {}'.format(out_file_name))
def main(argv=None):
  """Entry point: ensure the output directory exists, then dispatch on --mode."""
  if not tf.gfile.Exists(FLAGS.out_dir):
    tf.gfile.MakeDirs(FLAGS.out_dir)
  # 'image' runs the still-image demo; any other mode runs the video demo.
  demo = image_demo if FLAGS.mode == 'image' else video_demo
  demo()
# Standard TensorFlow 1.x app entry: tf.app.run() parses the command-line
# FLAGS and then invokes main().
if __name__ == '__main__':
  tf.app.run()
| {
"repo_name": "Walter1218/self_driving_car_ND",
"path": "squeezeDet/src/demo.py",
"copies": "2",
"size": "6683",
"license": "mit",
"hash": 4769277836595081000,
"line_mean": 29.797235023,
"line_max": 79,
"alpha_frac": 0.5816250187,
"autogenerated": false,
"ratio": 3.3018774703557314,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48835024890557316,
"avg_score": null,
"num_lines": null
} |
"""SqueezeDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class SqueezeDet(ModelSkeleton):
  """SqueezeDet detection model (SqueezeNet-style backbone + ConvDet head)."""

  def __init__(self, mc, gpu_id=0):
    # Pin every op of this model to the selected GPU.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)

      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """NN architecture: conv1 -> fire2..fire11 (with pools) -> dropout -> conv12."""

    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stem: strided conv (frozen during training) + max pool.
    net = self._conv_layer(
        'conv1', self.image_input, filters=64, size=3, stride=2,
        padding='SAME', freeze=True)
    net = self._pooling_layer('pool1', net, size=3, stride=2, padding='SAME')

    net = self._fire_layer('fire2', net, s1x1=16, e1x1=64, e3x3=64, freeze=False)
    net = self._fire_layer('fire3', net, s1x1=16, e1x1=64, e3x3=64, freeze=False)
    net = self._pooling_layer('pool3', net, size=3, stride=2, padding='SAME')

    net = self._fire_layer('fire4', net, s1x1=32, e1x1=128, e3x3=128, freeze=False)
    net = self._fire_layer('fire5', net, s1x1=32, e1x1=128, e3x3=128, freeze=False)
    net = self._pooling_layer('pool5', net, size=3, stride=2, padding='SAME')

    net = self._fire_layer('fire6', net, s1x1=48, e1x1=192, e3x3=192, freeze=False)
    net = self._fire_layer('fire7', net, s1x1=48, e1x1=192, e3x3=192, freeze=False)
    net = self._fire_layer('fire8', net, s1x1=64, e1x1=256, e3x3=256, freeze=False)
    net = self._fire_layer('fire9', net, s1x1=64, e1x1=256, e3x3=256, freeze=False)

    # Two extra fire modules that are not trained before
    net = self._fire_layer('fire10', net, s1x1=96, e1x1=384, e3x3=384, freeze=False)
    net = self._fire_layer('fire11', net, s1x1=96, e1x1=384, e3x3=384, freeze=False)
    net = tf.nn.dropout(net, self.keep_prob, name='drop11')

    # Detection head: per anchor, class scores + 1 confidence + 4 box deltas.
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    self.preds = self._conv_layer(
        'conv12', net, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)

  def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, stddev=0.01,
                  freeze=False):
    """Fire layer constructor.

    Args:
      layer_name: layer name
      inputs: input tensor
      s1x1: number of 1x1 filters in squeeze layer.
      e1x1: number of 1x1 filters in expand layer.
      e3x3: number of 3x3 filters in expand layer.
      stddev: standard deviation used for weight initialization.
      freeze: if true, do not train parameters in this layer.
    Returns:
      fire layer operation.
    """
    squeezed = self._conv_layer(
        layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    expand_1x1 = self._conv_layer(
        layer_name+'/expand1x1', squeezed, filters=e1x1, size=1, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    expand_3x3 = self._conv_layer(
        layer_name+'/expand3x3', squeezed, filters=e3x3, size=3, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    # Concatenate the two expand branches along the channel axis.
    return tf.concat([expand_1x1, expand_3x3], 3, name=layer_name+'/concat')
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/nets/squeezeDet.py",
"copies": "1",
"size": "3765",
"license": "bsd-2-clause",
"hash": 1974240793766151700,
"line_mean": 34.5188679245,
"line_max": 74,
"alpha_frac": 0.6379814077,
"autogenerated": false,
"ratio": 2.8674790555978675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40054604632978674,
"avg_score": null,
"num_lines": null
} |
"""SqueezeDet+ model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class SqueezeDetPlus(ModelSkeleton):
  """SqueezeDet+ detection model (wider SqueezeNet-style backbone + ConvDet head)."""

  def __init__(self, mc, gpu_id=0):
    # Pin every op of this model to the selected GPU.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)

      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """NN architecture: conv1 -> fire2..fire11 (with pools) -> dropout -> conv12."""

    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stem: large strided conv (frozen during training) + max pool.
    net = self._conv_layer(
        'conv1', self.image_input, filters=96, size=7, stride=2,
        padding='VALID', freeze=True)
    net = self._pooling_layer('pool1', net, size=3, stride=2, padding='VALID')

    net = self._fire_layer('fire2', net, s1x1=96, e1x1=64, e3x3=64, freeze=False)
    net = self._fire_layer('fire3', net, s1x1=96, e1x1=64, e3x3=64, freeze=False)
    net = self._fire_layer('fire4', net, s1x1=192, e1x1=128, e3x3=128, freeze=False)
    net = self._pooling_layer('pool4', net, size=3, stride=2, padding='VALID')

    net = self._fire_layer('fire5', net, s1x1=192, e1x1=128, e3x3=128, freeze=False)
    net = self._fire_layer('fire6', net, s1x1=288, e1x1=192, e3x3=192, freeze=False)
    net = self._fire_layer('fire7', net, s1x1=288, e1x1=192, e3x3=192, freeze=False)
    net = self._fire_layer('fire8', net, s1x1=384, e1x1=256, e3x3=256, freeze=False)
    net = self._pooling_layer('pool8', net, size=3, stride=2, padding='VALID')

    net = self._fire_layer('fire9', net, s1x1=384, e1x1=256, e3x3=256, freeze=False)

    # Two extra fire modules that are not trained before
    net = self._fire_layer('fire10', net, s1x1=384, e1x1=256, e3x3=256, freeze=False)
    net = self._fire_layer('fire11', net, s1x1=384, e1x1=256, e3x3=256, freeze=False)
    net = tf.nn.dropout(net, self.keep_prob, name='drop11')

    # Detection head: per anchor, class scores + 1 confidence + 4 box deltas.
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    self.preds = self._conv_layer(
        'conv12', net, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)

  def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, stddev=0.01,
                  freeze=False):
    """Fire layer constructor.

    Args:
      layer_name: layer name
      inputs: input tensor
      s1x1: number of 1x1 filters in squeeze layer.
      e1x1: number of 1x1 filters in expand layer.
      e3x3: number of 3x3 filters in expand layer.
      stddev: standard deviation used for weight initialization.
      freeze: if true, do not train parameters in this layer.
    Returns:
      fire layer operation.
    """
    squeezed = self._conv_layer(
        layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    expand_1x1 = self._conv_layer(
        layer_name+'/expand1x1', squeezed, filters=e1x1, size=1, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    expand_3x3 = self._conv_layer(
        layer_name+'/expand3x3', squeezed, filters=e3x3, size=3, stride=1,
        padding='SAME', stddev=stddev, freeze=freeze)
    # Concatenate the two expand branches along the channel axis.
    return tf.concat([expand_1x1, expand_3x3], 3, name=layer_name+'/concat')
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/nets/squeezeDetPlus.py",
"copies": "1",
"size": "3782",
"license": "bsd-2-clause",
"hash": -1726244074204250000,
"line_mean": 34.679245283,
"line_max": 74,
"alpha_frac": 0.6393442623,
"autogenerated": false,
"ratio": 2.876045627376426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40153898896764256,
"avg_score": null,
"num_lines": null
} |
"""The data base wrapper class"""
import os
import random
import shutil
from PIL import Image, ImageFont, ImageDraw
import cv2
import numpy as np
from utils.util import iou, batch_iou, drift_dist, recolor, scale_trans, rand_flip
class imdb(object):
  """Image database.

  Base class for dataset wrappers.  Subclasses are expected to populate
  self._image_idx and self._rois (image idx -> list of annotations, where
  b[0:4] is a [cx, cy, w, h] box and b[4] a class label) and to implement
  _image_path_at().  -- assumed subclass contract; TODO confirm against the
  concrete dataset classes.
  """

  def __init__(self, name, mc):
    self._name = name
    self._classes = []
    self._image_set = []
    self._image_idx = []
    self._data_root_path = []
    self._rois = {}
    self.mc = mc

    # batch reader state: shuffled permutation of _image_idx and a cursor
    self._perm_idx = None
    self._cur_idx = 0

  @property
  def name(self):
    return self._name

  @property
  def classes(self):
    return self._classes

  @property
  def num_classes(self):
    return len(self._classes)

  @property
  def image_idx(self):
    return self._image_idx

  @property
  def image_set(self):
    return self._image_set

  @property
  def data_root_path(self):
    return self._data_root_path

  @property
  def year(self):
    # NOTE(review): self._year is only ever set by subclasses; accessing this
    # on the base class raises AttributeError.
    return self._year

  def _shuffle_image_idx(self):
    """Draw a fresh random permutation of the image index and reset the cursor."""
    self._perm_idx = [self._image_idx[i] for i in
        np.random.permutation(np.arange(len(self._image_idx)))]
    self._cur_idx = 0

  def read_image_batch(self, shuffle=True):
    """Only Read a batch of images
    Args:
      shuffle: whether or not to shuffle the dataset
    Returns:
      images: length batch_size list of arrays [height, width, 3]
      scales: length batch_size list of (x_scale, y_scale) resize factors
    """
    mc = self.mc
    if shuffle:
      # Reshuffle whenever the remaining epoch cannot fill a batch.
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        self._shuffle_image_idx()
      batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
      self._cur_idx += mc.BATCH_SIZE
    else:
      # Sequential read wraps around to the start of the index list.
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        batch_idx = self._image_idx[self._cur_idx:] \
            + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]
        self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
      else:
        batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
        self._cur_idx += mc.BATCH_SIZE

    images, scales = [], []
    for i in batch_idx:
      im = cv2.imread(self._image_path_at(i))
      if mc.SUB_BGR_MEANS:
        im = im.astype(np.float32, copy=False)
        im -= mc.BGR_MEANS
      orig_h, orig_w, _ = [float(v) for v in im.shape]
      im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
      x_scale = mc.IMAGE_WIDTH/orig_w
      y_scale = mc.IMAGE_HEIGHT/orig_h
      images.append(im)
      scales.append((x_scale, y_scale))

    return images, scales

  def read_batch(self, shuffle=True):
    """Read a batch of image and bounding box annotations.
    Args:
      shuffle: whether or not to shuffle the dataset
    Returns:
      image_per_batch: images. Shape: batch_size x width x height x [b, g, r]
      label_per_batch: labels. Shape: batch_size x object_num
      delta_per_batch: bounding box deltas. Shape: batch_size x object_num x
          [dx ,dy, dw, dh]
      aidx_per_batch: index of anchors that are responsible for prediction.
          Shape: batch_size x object_num
      bbox_per_batch: scaled bounding boxes. Shape: batch_size x object_num x
          [cx, cy, w, h]
    """
    mc = self.mc

    if shuffle:
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        self._shuffle_image_idx()
      batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
      self._cur_idx += mc.BATCH_SIZE
    else:
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        batch_idx = self._image_idx[self._cur_idx:] \
            + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]
        self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
      else:
        batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
        self._cur_idx += mc.BATCH_SIZE

    image_per_batch = []
    label_per_batch = []
    bbox_per_batch = []
    delta_per_batch = []
    aidx_per_batch = []

    if mc.DEBUG_MODE:
      avg_ious = 0.
      num_objects = 0.
      max_iou = 0.0
      min_iou = 1.0
      num_zero_iou_obj = 0

    for idx in batch_idx:
      # load the image
      im = cv2.imread(self._image_path_at(idx))
      orig_h, orig_w, _ = [float(v) for v in im.shape]

      # load annotations
      label_this_batch = np.array([b[4] for b in self._rois[idx][:]])
      gt_bbox = np.array([[b[0], b[1], b[2], b[3]] for b in self._rois[idx][:]])

      if mc.DATA_AUGMENTATION:
        assert mc.DATA_AUG_TYPE in ['SQT', 'YOLO'], \
            'Invalid augmentation type: {}'.format(mc.DATA_AUG_TYPE)
        if mc.DATA_AUG_TYPE == 'SQT':
          im, gt_bbox = drift_dist(im, gt_bbox, mc, orig_h, orig_w)
        elif mc.DATA_AUG_TYPE == 'YOLO':
          # NOTE(review): recolor/flip are applied to every YOLO-augmented
          # sample while scale_trans fires with probability 1/2 — confirm
          # this matches the intended augmentation pipeline.
          if np.random.randint(2) > 0.5:
            im, gt_bbox, label_this_batch = scale_trans(im, gt_bbox, label_this_batch)
          im = recolor(im)
          im, gt_bbox = rand_flip(im, gt_bbox, orig_w)

      # Remove BGR bias
      if mc.SUB_BGR_MEANS:
        im = im.astype(np.float32, copy=False)
        im -= mc.BGR_MEANS
        #im = im.astype(np.uint8, copy=False)

      label_per_batch.append(label_this_batch.tolist())

      # scale image
      im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
      image_per_batch.append(im)

      # scale annotation
      x_scale = mc.IMAGE_WIDTH/orig_w
      y_scale = mc.IMAGE_HEIGHT/orig_h
      gt_bbox[:, 0::2] = gt_bbox[:, 0::2]*x_scale
      gt_bbox[:, 1::2] = gt_bbox[:, 1::2]*y_scale
      bbox_per_batch.append(gt_bbox)

      # Greedily assign each ground-truth box to the unclaimed anchor with
      # the highest IOU.
      aidx_per_image, delta_per_image = [], []
      aidx_set = set()
      for i in range(len(gt_bbox)):
        overlaps = batch_iou(mc.ANCHOR_BOX, gt_bbox[i])

        aidx = len(mc.ANCHOR_BOX)
        for ov_idx in np.argsort(overlaps)[::-1]:
          if overlaps[ov_idx] <= 0:
            if mc.DEBUG_MODE:
              min_iou = min(overlaps[ov_idx], min_iou)
              num_objects += 1
              num_zero_iou_obj += 1
            break
          if ov_idx not in aidx_set:
            aidx_set.add(ov_idx)
            aidx = ov_idx
            if mc.DEBUG_MODE:
              max_iou = max(overlaps[ov_idx], max_iou)
              min_iou = min(overlaps[ov_idx], min_iou)
              avg_ious += overlaps[ov_idx]
              num_objects += 1
            break

        if aidx == len(mc.ANCHOR_BOX):
          # even the largest available overlap is 0, thus, choose the anchor
          # with the smallest square distance instead
          dist = np.sum(np.square(gt_bbox[i] - mc.ANCHOR_BOX), axis=1)
          for dist_idx in np.argsort(dist):
            if dist_idx not in aidx_set:
              aidx_set.add(dist_idx)
              aidx = dist_idx
              break

        # Regression targets: center offsets normalized by the gt box size,
        # log-ratios for width/height.
        box_cx, box_cy, box_w, box_h = gt_bbox[i]
        delta = [0]*4
        delta[0] = (box_cx - mc.ANCHOR_BOX[aidx][0])/box_w
        delta[1] = (box_cy - mc.ANCHOR_BOX[aidx][1])/box_h
        delta[2] = np.log(box_w/mc.ANCHOR_BOX[aidx][2])
        delta[3] = np.log(box_h/mc.ANCHOR_BOX[aidx][3])

        aidx_per_image.append(aidx)
        delta_per_image.append(delta)

      delta_per_batch.append(delta_per_image)
      aidx_per_batch.append(aidx_per_image)

    if mc.DEBUG_MODE:
      print ('max iou: {}'.format(max_iou))
      print ('min iou: {}'.format(min_iou))
      print ('avg iou: {}'.format(avg_ious/num_objects))
      print ('number of objects: {}'.format(num_objects))
      print ('number of objects with 0 iou: {}'.format(num_zero_iou_obj))

    return image_per_batch, label_per_batch, delta_per_batch, \
        aidx_per_batch, bbox_per_batch

  def evaluate_detections(self):
    """Subclass hook: score detections against ground truth."""
    raise NotImplementedError

  def visualize_detections(
      self, image_dir, image_format, det_error_file, output_image_dir,
      num_det_per_type=10):
    """Plot a random sample of error detections, grouped by error type.

    Reads det_error_file (space-separated: im_idx, error_type, 4 box coords,
    class, score), draws up to num_det_per_type boxes per error type, saves
    them under output_image_dir/<error_type>/ and returns the images (BGR).
    """
    # load detections; the with-statement closes the file, no explicit
    # close() is needed
    with open(det_error_file) as f:
      lines = f.readlines()
      random.shuffle(lines)

    dets_per_type = {}
    for line in lines:
      obj = line.strip().split(' ')
      error_type = obj[1]
      if error_type not in dets_per_type:
        dets_per_type[error_type] = [{
            'im_idx':obj[0],
            'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
            'class':obj[6],
            'score': float(obj[7])
        }]
      else:
        dets_per_type[error_type].append({
            'im_idx':obj[0],
            'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
            'class':obj[6],
            'score': float(obj[7])
        })

    out_ims = []
    # Randomly select some detections and plot them
    COLOR = (200, 200, 0)
    # .items() instead of the Python-2-only .iteritems() so this also runs
    # on Python 3.
    for error_type, dets in dets_per_type.items():
      det_im_dir = os.path.join(output_image_dir, error_type)
      if os.path.exists(det_im_dir):
        shutil.rmtree(det_im_dir)
      os.makedirs(det_im_dir)

      for i in range(min(num_det_per_type, len(dets))):
        det = dets[i]
        im = Image.open(
            os.path.join(image_dir, det['im_idx']+image_format))
        draw = ImageDraw.Draw(im)
        draw.rectangle(det['bbox'], outline=COLOR)
        draw.text((det['bbox'][0], det['bbox'][1]),
                  '{:s} ({:.2f})'.format(det['class'], det['score']),
                  fill=COLOR)
        out_im_path = os.path.join(det_im_dir, str(i)+image_format)
        im.save(out_im_path)
        im = np.array(im)
        out_ims.append(im[:,:,::-1]) # RGB to BGR

    return out_ims
| {
"repo_name": "goan15910/ConvDet",
"path": "src/dataset/imdb.py",
"copies": "1",
"size": "9449",
"license": "bsd-2-clause",
"hash": -200703213310001280,
"line_mean": 31.3595890411,
"line_max": 86,
"alpha_frac": 0.5669383003,
"autogenerated": false,
"ratio": 3.0889179470415167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9066240226102634,
"avg_score": 0.017923204247776457,
"num_lines": 292
} |
"""The data base wrapper class"""
import os
import random
import shutil
from PIL import Image, ImageFont, ImageDraw
import cv2
import numpy as np
from utils.util import iou, batch_iou
class imdb(object):
  """Image database.

  Base class for dataset wrappers.  Subclasses are expected to populate
  self._image_idx and self._rois (image idx -> list of annotations, where
  b[0:4] is a [cx, cy, w, h] box and b[4] a class label) and to implement
  _image_path_at().  -- assumed subclass contract; TODO confirm against the
  concrete dataset classes.
  """

  def __init__(self, name, mc):
    self._name = name
    self._classes = []
    self._image_set = []
    self._image_idx = []
    self._data_root_path = []
    self._rois = {}
    self.mc = mc

    # batch reader state: shuffled permutation of _image_idx and a cursor
    self._perm_idx = None
    self._cur_idx = 0

  @property
  def name(self):
    return self._name

  @property
  def classes(self):
    return self._classes

  @property
  def num_classes(self):
    return len(self._classes)

  @property
  def image_idx(self):
    return self._image_idx

  @property
  def image_set(self):
    return self._image_set

  @property
  def data_root_path(self):
    return self._data_root_path

  @property
  def year(self):
    # NOTE(review): self._year is only ever set by subclasses; accessing this
    # on the base class raises AttributeError.
    return self._year

  def _shuffle_image_idx(self):
    """Draw a fresh random permutation of the image index and reset the cursor."""
    self._perm_idx = [self._image_idx[i] for i in
        np.random.permutation(np.arange(len(self._image_idx)))]
    self._cur_idx = 0

  def read_image_batch(self, shuffle=True):
    """Only Read a batch of images
    Args:
      shuffle: whether or not to shuffle the dataset
    Returns:
      images: length batch_size list of arrays [height, width, 3]
      scales: length batch_size list of (x_scale, y_scale) resize factors
    """
    mc = self.mc
    if shuffle:
      # Reshuffle whenever the remaining epoch cannot fill a batch.
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        self._shuffle_image_idx()
      batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
      self._cur_idx += mc.BATCH_SIZE
    else:
      # Sequential read wraps around to the start of the index list.
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        batch_idx = self._image_idx[self._cur_idx:] \
            + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]
        self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
      else:
        batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
        self._cur_idx += mc.BATCH_SIZE

    images, scales = [], []
    for i in batch_idx:
      im = cv2.imread(self._image_path_at(i))
      im = im.astype(np.float32, copy=False)
      im -= mc.BGR_MEANS
      orig_h, orig_w, _ = [float(v) for v in im.shape]
      im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
      x_scale = mc.IMAGE_WIDTH/orig_w
      y_scale = mc.IMAGE_HEIGHT/orig_h
      images.append(im)
      scales.append((x_scale, y_scale))

    return images, scales

  def read_batch(self, shuffle=True):
    """Read a batch of image and bounding box annotations.
    Args:
      shuffle: whether or not to shuffle the dataset
    Returns:
      image_per_batch: images. Shape: batch_size x width x height x [b, g, r]
      label_per_batch: labels. Shape: batch_size x object_num
      delta_per_batch: bounding box deltas. Shape: batch_size x object_num x
          [dx ,dy, dw, dh]
      aidx_per_batch: index of anchors that are responsible for prediction.
          Shape: batch_size x object_num
      bbox_per_batch: scaled bounding boxes. Shape: batch_size x object_num x
          [cx, cy, w, h]
    """
    mc = self.mc

    if shuffle:
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        self._shuffle_image_idx()
      batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
      self._cur_idx += mc.BATCH_SIZE
    else:
      if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
        batch_idx = self._image_idx[self._cur_idx:] \
            + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]
        self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
      else:
        batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]
        self._cur_idx += mc.BATCH_SIZE

    image_per_batch = []
    label_per_batch = []
    bbox_per_batch = []
    delta_per_batch = []
    aidx_per_batch = []

    if mc.DEBUG_MODE:
      avg_ious = 0.
      num_objects = 0.
      max_iou = 0.0
      min_iou = 1.0
      num_zero_iou_obj = 0

    for idx in batch_idx:
      # load the image
      im = cv2.imread(self._image_path_at(idx)).astype(np.float32, copy=False)
      im -= mc.BGR_MEANS
      orig_h, orig_w, _ = [float(v) for v in im.shape]

      # load annotations
      label_per_batch.append([b[4] for b in self._rois[idx][:]])
      gt_bbox = np.array([[b[0], b[1], b[2], b[3]] for b in self._rois[idx][:]])

      if mc.DATA_AUGMENTATION:
        # Bug fix: the original condition required DRIFT_Y > 0, which
        # contradicted its own error message and rejected the valid
        # DRIFT_Y == 0 configuration.
        assert mc.DRIFT_X >= 0 and mc.DRIFT_Y >= 0, \
            'mc.DRIFT_X and mc.DRIFT_Y must be >= 0'

        if mc.DRIFT_X > 0 or mc.DRIFT_Y > 0:
          # Ensure that the ground-truth bounding box is not cut out of
          # the image
          max_drift_x = min(gt_bbox[:, 0] - gt_bbox[:, 2]/2.0+1)
          max_drift_y = min(gt_bbox[:, 1] - gt_bbox[:, 3]/2.0+1)
          assert max_drift_x >= 0 and max_drift_y >= 0, 'bbox out of image'

          dy = np.random.randint(-mc.DRIFT_Y, min(mc.DRIFT_Y+1, max_drift_y))
          dx = np.random.randint(-mc.DRIFT_X, min(mc.DRIFT_X+1, max_drift_x))

          # shift bbox
          gt_bbox[:, 0] = gt_bbox[:, 0] - dx
          gt_bbox[:, 1] = gt_bbox[:, 1] - dy

          # distort image: crop/pad so the content shifts by (dx, dy)
          orig_h -= dy
          orig_w -= dx
          orig_x, dist_x = max(dx, 0), max(-dx, 0)
          orig_y, dist_y = max(dy, 0), max(-dy, 0)

          distorted_im = np.zeros(
              (int(orig_h), int(orig_w), 3)).astype(np.float32)
          distorted_im[dist_y:, dist_x:, :] = im[orig_y:, orig_x:, :]
          im = distorted_im

        # Flip image with 50% probability
        if np.random.randint(2) > 0.5:
          im = im[:, ::-1, :]
          gt_bbox[:, 0] = orig_w - 1 - gt_bbox[:, 0]

      # scale image
      im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
      image_per_batch.append(im)

      # scale annotation
      x_scale = mc.IMAGE_WIDTH/orig_w
      y_scale = mc.IMAGE_HEIGHT/orig_h
      gt_bbox[:, 0::2] = gt_bbox[:, 0::2]*x_scale
      gt_bbox[:, 1::2] = gt_bbox[:, 1::2]*y_scale
      bbox_per_batch.append(gt_bbox)

      # Greedily assign each ground-truth box to the unclaimed anchor with
      # the highest IOU.
      aidx_per_image, delta_per_image = [], []
      aidx_set = set()
      for i in range(len(gt_bbox)):
        overlaps = batch_iou(mc.ANCHOR_BOX, gt_bbox[i])

        aidx = len(mc.ANCHOR_BOX)
        for ov_idx in np.argsort(overlaps)[::-1]:
          if overlaps[ov_idx] <= 0:
            if mc.DEBUG_MODE:
              min_iou = min(overlaps[ov_idx], min_iou)
              num_objects += 1
              num_zero_iou_obj += 1
            break
          if ov_idx not in aidx_set:
            aidx_set.add(ov_idx)
            aidx = ov_idx
            if mc.DEBUG_MODE:
              max_iou = max(overlaps[ov_idx], max_iou)
              min_iou = min(overlaps[ov_idx], min_iou)
              avg_ious += overlaps[ov_idx]
              num_objects += 1
            break

        if aidx == len(mc.ANCHOR_BOX):
          # even the largest available overlap is 0, thus, choose the anchor
          # with the smallest square distance instead
          dist = np.sum(np.square(gt_bbox[i] - mc.ANCHOR_BOX), axis=1)
          for dist_idx in np.argsort(dist):
            if dist_idx not in aidx_set:
              aidx_set.add(dist_idx)
              aidx = dist_idx
              break

        # Regression targets: center offsets normalized by the anchor size,
        # log-ratios for width/height.
        box_cx, box_cy, box_w, box_h = gt_bbox[i]
        delta = [0]*4
        delta[0] = (box_cx - mc.ANCHOR_BOX[aidx][0])/mc.ANCHOR_BOX[aidx][2]
        delta[1] = (box_cy - mc.ANCHOR_BOX[aidx][1])/mc.ANCHOR_BOX[aidx][3]
        delta[2] = np.log(box_w/mc.ANCHOR_BOX[aidx][2])
        delta[3] = np.log(box_h/mc.ANCHOR_BOX[aidx][3])

        aidx_per_image.append(aidx)
        delta_per_image.append(delta)

      delta_per_batch.append(delta_per_image)
      aidx_per_batch.append(aidx_per_image)

    if mc.DEBUG_MODE:
      print ('max iou: {}'.format(max_iou))
      print ('min iou: {}'.format(min_iou))
      print ('avg iou: {}'.format(avg_ious/num_objects))
      print ('number of objects: {}'.format(num_objects))
      print ('number of objects with 0 iou: {}'.format(num_zero_iou_obj))

    return image_per_batch, label_per_batch, delta_per_batch, \
        aidx_per_batch, bbox_per_batch

  def evaluate_detections(self):
    """Subclass hook: score detections against ground truth."""
    raise NotImplementedError

  def visualize_detections(
      self, image_dir, image_format, det_error_file, output_image_dir,
      num_det_per_type=10):
    """Plot a random sample of error detections, grouped by error type.

    Reads det_error_file (space-separated: im_idx, error_type, 4 box coords,
    class, score), draws up to num_det_per_type boxes per error type, saves
    them under output_image_dir/<error_type>/ and returns the images (BGR).
    """
    # load detections; the with-statement closes the file, no explicit
    # close() is needed
    with open(det_error_file) as f:
      lines = f.readlines()
      random.shuffle(lines)

    dets_per_type = {}
    for line in lines:
      obj = line.strip().split(' ')
      error_type = obj[1]
      if error_type not in dets_per_type:
        dets_per_type[error_type] = [{
            'im_idx':obj[0],
            'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
            'class':obj[6],
            'score': float(obj[7])
        }]
      else:
        dets_per_type[error_type].append({
            'im_idx':obj[0],
            'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
            'class':obj[6],
            'score': float(obj[7])
        })

    out_ims = []
    # Randomly select some detections and plot them
    COLOR = (200, 200, 0)
    # .items() instead of the Python-2-only .iteritems() so this also runs
    # on Python 3.
    for error_type, dets in dets_per_type.items():
      det_im_dir = os.path.join(output_image_dir, error_type)
      if os.path.exists(det_im_dir):
        shutil.rmtree(det_im_dir)
      os.makedirs(det_im_dir)

      for i in range(min(num_det_per_type, len(dets))):
        det = dets[i]
        im = Image.open(
            os.path.join(image_dir, det['im_idx']+image_format))
        draw = ImageDraw.Draw(im)
        draw.rectangle(det['bbox'], outline=COLOR)
        draw.text((det['bbox'][0], det['bbox'][1]),
                  '{:s} ({:.2f})'.format(det['class'], det['score']),
                  fill=COLOR)
        out_im_path = os.path.join(det_im_dir, str(i)+image_format)
        im.save(out_im_path)
        im = np.array(im)
        out_ims.append(im[:,:,::-1]) # RGB to BGR

    return out_ims
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/dataset/imdb.py",
"copies": "1",
"size": "9980",
"license": "bsd-2-clause",
"hash": 6584210963973892000,
"line_mean": 31.614379085,
"line_max": 82,
"alpha_frac": 0.5578156313,
"autogenerated": false,
"ratio": 3.0566615620214397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41144771933214397,
"avg_score": null,
"num_lines": null
} |
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import shutil
import sys
import time
from datetime import datetime
import tempfile
import json
import cv2
import numpy as np
import tensorflow as tf
from config import *
from dataset import kitti, nexarear
from nets import *
from six.moves import xrange
from utils.util import sparse_to_dense, bgr_to_rgb, bbox_transform
JSON_PREFIX = '.json'
import nx_commons.model_evaulation.iou_engine as iou_engine
# Command-line configuration for training/evaluation.  Flag registration is
# order- and string-sensitive, so the definitions below are left untouched.
FLAGS = tf.app.flags.FLAGS

# Dataset selection and input locations.
tf.app.flags.DEFINE_string('dataset', 'NEXAREAR',
                           """Currently support KITTI and NEXAREAR datasets.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
                           """ Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
                           """VOC challenge year. 2007 or 2012"""
                           """Only used for Pascal VOC dataset""")
# Output directories for TensorBoard event logs and checkpoints.
tf.app.flags.DEFINE_string('train_dir', '/opt/data/logs/NEXAREAR/squeezeDet/train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_string('test_dir', '/opt/data/logs/NEXAREAR/squeezeDet/test',
                           """Directory where to write event logs """
                           """and checkpoint.""")
# Training schedule and model selection.
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
                           """Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
                           """Path to the pretrained model.""")
# NOTE(review): the help text of 'checkpoint_step' duplicates 'summary_step';
# it actually controls how often checkpoints are written.
tf.app.flags.DEFINE_integer('summary_step', 10,
                            """Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
                            """Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
# Evaluation parameters.
tf.app.flags.DEFINE_float('iou_threshold', 0.75,
                          """IOU threshold""")
tf.app.flags.DEFINE_integer('max_model_to_keep', 100,
                            """Max Number of models checkpoints to keep.""")
tf.app.flags.DEFINE_integer('num_of_test_iterations', 2,
                            """Number of test iterations with batch size.""")
def _draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, form='center'):
  """Draw labeled bounding boxes onto `im` in place.

  Args:
    im: image array to draw on (modified in place).
    box_list: boxes, either [cx, cy, w, h] (form='center') or
        [xmin, ymin, xmax, ymax] (form='diagonal').
    label_list: one "CLASS: (PROB)" string per box.
    color: default BGR color for boxes and text.
    cdict: optional per-class color dict keyed by class name.
    form: 'center' or 'diagonal' box format.
  """
  assert form in ('center', 'diagonal'), \
      'bounding box format not accepted: {}.'.format(form)

  for bbox, label in zip(box_list, label_list):
    # Convert [cx, cy, w, h] -> corner coordinates when needed.
    coords = bbox_transform(bbox) if form == 'center' else bbox
    xmin, ymin, xmax, ymax = (int(v) for v in coords)

    # Per-class color lookup keyed on the text before "CLASS: (PROB)".
    cls_name = label.split(':')[0]
    c = cdict[cls_name] if cdict and cls_name in cdict else color

    # box, then label just below its bottom edge
    cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
    cv2.putText(im, label, (xmin, ymax), cv2.FONT_HERSHEY_SIMPLEX, 0.3, c, 1)
def _viz_prediction_result(model, images, bboxes, labels, batch_det_bbox,
                           batch_det_class, batch_det_prob):
  """Overlay ground truth (green) and filtered detections (red) on each image."""
  mc = model.mc

  for i in range(len(images)):
    # draw ground truth in green
    _draw_box(
        images[i], bboxes[i],
        [mc.CLASS_NAMES[idx] for idx in labels[i]],
        (0, 255, 0))

    # filter raw network output, keep detections above the plot threshold
    det_bbox, det_prob, det_class = model.filter_prediction(
        batch_det_bbox[i], batch_det_prob[i], batch_det_class[i])
    keep = [j for j in range(len(det_prob))
            if det_prob[j] > mc.PLOT_PROB_THRESH]
    det_bbox = [det_bbox[j] for j in keep]
    det_prob = [det_prob[j] for j in keep]
    det_class = [det_class[j] for j in keep]

    # draw predictions in red
    _draw_box(
        images[i], det_bbox,
        [mc.CLASS_NAMES[idx]+': (%.2f)'% prob
         for idx, prob in zip(det_class, det_prob)],
        (0, 0, 255))
def train():
"""Train SqueezeDet model"""
assert FLAGS.dataset == 'KITTI' or FLAGS.dataset == 'NEXAREAR', \
'Currently only support KITTI and NEXAREAR datasets'
with tf.Graph().as_default():
assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.dataset == 'KITTI':
if FLAGS.net == 'vgg16':
mc = kitti_vgg16_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = VGG16ConvDet(mc, FLAGS.gpu)
elif FLAGS.net == 'resnet50':
mc = kitti_res50_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = ResNet50ConvDet(mc, FLAGS.gpu)
elif FLAGS.net == 'squeezeDet':
mc = kitti_squeezeDet_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDet(mc, FLAGS.gpu)
elif FLAGS.net == 'squeezeDet+':
mc = kitti_squeezeDetPlus_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDetPlus(mc, FLAGS.gpu)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
elif FLAGS.dataset == 'NEXAREAR':
assert FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+' or FLAGS.net == 'resnet50' or FLAGS.net == 'vgg16', \
'Currently only the squeezeDet model is supported for the NEXAREAR dataset'
if FLAGS.net == 'squeezeDet':
mc = nexarear_squeezeDet_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDet(mc, FLAGS.gpu)
elif FLAGS.net == 'squeezeDet+':
mc = nexarear_squeezeDetPlus_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDetPlus(mc, FLAGS.gpu)
elif FLAGS.net == 'resnet50':
mc = nexarear_res50_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = ResNet50ConvDet(mc, FLAGS.gpu)
elif FLAGS.net == 'vgg16':
mc = nexarear_vgg16_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = VGG16ConvDet(mc, FLAGS.gpu)
imdb = nexarear(FLAGS.image_set, FLAGS.data_path, mc)
if not os.path.isdir(FLAGS.train_dir):
print(os.makedirs(FLAGS.train_dir))
model_metric_fname = os.path.join(FLAGS.train_dir, 'model_metrics.txt')
print('Model metric filename {}'.format(model_metric_fname))
# dump configuration
#with open(os.path.join(FLAGS.train_dir, 'model_training_configuration.txt'), 'w') as conf_dump_file:
# json.dump(mc, conf_dump_file)
# save model size, flops, activations by layers
with open(model_metric_fname, 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
print ('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.max_model_to_keep)
summary_op = tf.summary.merge_all()
init = tf.global_variables_initializer()
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
test_summary_writer = tf.summary.FileWriter(FLAGS.test_dir, sess.graph)
with tf.variable_scope('Test_Model') as scope:
precision = tf.placeholder(tf.float32, name='precision')
precision_op = tf.summary.scalar('precision_summary', precision)
num_of_detections = tf.placeholder(tf.int32, name='num_of_detections')
num_of_detections_op = tf.summary.scalar('num_of_detections_summary', num_of_detections)
localization_error_precentage = tf.placeholder(tf.float32, name='localization_error_precentage')
localization_error_precentage_op = tf.summary.scalar('localization_error_precentage_summary', localization_error_precentage)
classification_error_precentage = tf.placeholder(tf.float32, name='classification_error_precentage')
classification_error_precentage_op = tf.summary.scalar('classification_error_precentage_summary', classification_error_precentage)
background_error_precentage = tf.placeholder(tf.float32, name='background_error_precentage')
background_error_precentage_op = tf.summary.scalar('background_error_precentage_summary', background_error_precentage)
repeated_error_precentage = tf.placeholder(tf.float32, name='repeated_error_precentage')
repeated_error_precentage_op = tf.summary.scalar('repeated_error_precentage_summary', repeated_error_precentage)
recall = tf.placeholder(tf.float32, name='recall')
recall_op = tf.summary.scalar('recall_summary', recall)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# read batch input
image_per_batch, label_per_batch, box_delta_per_batch, aidx_per_batch, \
bbox_per_batch = imdb.read_batch()
label_indices, bbox_indices, box_delta_values, mask_indices, box_values, \
= [], [], [], [], []
aidx_set = set()
num_discarded_labels = 0
num_labels = 0
for i in range(len(label_per_batch)): # batch_size
for j in range(len(label_per_batch[i])): # number of annotations
num_labels += 1
if (i, aidx_per_batch[i][j]) not in aidx_set:
aidx_set.add((i, aidx_per_batch[i][j]))
label_indices.append(
[i, aidx_per_batch[i][j], label_per_batch[i][j]])
mask_indices.append([i, aidx_per_batch[i][j]])
bbox_indices.extend(
[[i, aidx_per_batch[i][j], k] for k in range(4)])
box_delta_values.extend(box_delta_per_batch[i][j])
box_values.extend(bbox_per_batch[i][j])
else:
num_discarded_labels += 1
if mc.DEBUG_MODE:
print ('Warning: Discarded {}/({}) labels that are assigned to the same'
'anchor'.format(num_discarded_labels, num_labels))
feed_dict = {
model.image_input: image_per_batch,
model.keep_prob: mc.KEEP_PROB,
model.input_mask: np.reshape(
sparse_to_dense(
mask_indices, [mc.BATCH_SIZE, mc.ANCHORS],
[1.0]*len(mask_indices)),
[mc.BATCH_SIZE, mc.ANCHORS, 1]),
model.box_delta_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_delta_values),
model.box_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_values),
model.labels: sparse_to_dense(
label_indices,
[mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
[1.0]*len(label_indices)),
}
if step % FLAGS.summary_step == 0:
pred_json_folder = tempfile.mkdtemp()
# test model
for ii_test in range(FLAGS.num_of_test_iterations):
test_img_batch, scales_batch, img_fnames_batch = imdb.read_test_image_batch()
ground_truth_boxes_directory = imdb.get_label_path()
test_feed_dict = {
model.image_input: test_img_batch,
model.keep_prob: mc.KEEP_PROB,
model.input_mask: np.reshape(
sparse_to_dense(
mask_indices, [mc.BATCH_SIZE, mc.ANCHORS],
[1.0] * len(mask_indices)),
[mc.BATCH_SIZE, mc.ANCHORS, 1]),
model.box_delta_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_delta_values),
model.box_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_values),
model.labels: sparse_to_dense(
label_indices,
[mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
[1.0] * len(label_indices)),
}
compute_time = infer_bounding_boxes_on_image_batch(model, sess, test_feed_dict, img_fnames_batch, pred_json_folder)
print('Processing time batch {} {}'.format(ii_test, compute_time))
# extract score
results = iou_engine.get_bbox_average_iou_evaulation(ground_truth_boxes_directory, pred_json_folder, imdb.classes, FLAGS.iou_threshold , in_images_dir=None, out_images_and_boxes_dir=None)
print('Model Eval Score {}'.format(results))
clean_folders([pred_json_folder])
model_eval_summary_feed_dict = {num_of_detections:results['num_of_detections'],precision : results['precision'],localization_error_precentage:results['localization_error_precentage'],
classification_error_precentage:results['classification_error_precentage'], background_error_precentage:results['background_error_precentage'],
repeated_error_precentage: results['repeated_error_precentage'],recall:results['recall']}
model_validation_summary = sess.run(
[num_of_detections_op, precision_op, localization_error_precentage_op, classification_error_precentage_op,
background_error_precentage_op, repeated_error_precentage_op, recall_op],feed_dict=model_eval_summary_feed_dict)
for val_stats in model_validation_summary :
test_summary_writer.add_summary(val_stats, step)
op_list = [
model.train_op, model.loss, summary_op, model.det_boxes,
model.det_probs, model.det_class, model.conf_loss,
model.bbox_loss, model.class_loss
]
_, loss_value, summary_str, det_boxes, det_probs, det_class, conf_loss, \
bbox_loss, class_loss = sess.run(op_list, feed_dict=feed_dict)
_viz_prediction_result(
model, image_per_batch, bbox_per_batch, label_per_batch, det_boxes,
det_class, det_probs)
image_per_batch = bgr_to_rgb(image_per_batch)
viz_summary = sess.run(
model.viz_op, feed_dict={model.image_to_show: image_per_batch})
num_discarded_labels_op = tf.summary.scalar(
'counter/num_discarded_labels', num_discarded_labels)
num_labels_op = tf.summary.scalar(
'counter/num_labels', num_labels)
counter_summary_str = sess.run([num_discarded_labels_op, num_labels_op])
summary_writer.add_summary(summary_str, step)
summary_writer.add_summary(viz_summary, step)
for sum_str in counter_summary_str:
summary_writer.add_summary(sum_str, step)
print ('conf_loss: {}, bbox_loss: {}, class_loss: {}'.
format(conf_loss, bbox_loss, class_loss))
else:
_, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
[model.train_op, model.loss, model.conf_loss, model.bbox_loss,
model.class_loss], feed_dict=feed_dict)
duration = time.time() - start_time
assert not np.isnan(loss_value), \
'Model diverged. Total loss: {}, conf_loss: {}, bbox_loss: {}, ' \
'class_loss: {}'.format(loss_value, conf_loss, bbox_loss, class_loss)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def clean_folders(folders_lst):
  """Delete every directory in *folders_lst* that exists; silently skip the rest."""
  existing = (d for d in folders_lst if os.path.isdir(d))
  for d in existing:
    shutil.rmtree(d)
def infer_bounding_boxes_on_image_batch(model, sess, test_feed_dict, img_fnames_batch, pred_json_folder):
  """Run detection on one image batch and write one prediction JSON per image.

  Args:
    model: detection model exposing det_boxes/det_probs/det_class tensors,
      a filter_prediction() method and an mc config object.
    sess: active tf.Session used to evaluate the detection tensors.
    test_feed_dict: feed dict for sess.run; must contain model.image_input.
    img_fnames_batch: image file names aligned with the batch order.
    pred_json_folder: directory receiving one JSON file per image.

  Returns:
    Elapsed seconds from the start of the call up to (and including) the
    prediction filtering of the LAST image in the batch.
  """
  t_start = time.time()
  det_boxes, det_probs, det_class = sess.run(
      [model.det_boxes, model.det_probs, model.det_class],
      feed_dict=test_feed_dict)
  # The input images are recovered from the feed dict itself.
  test_img_batch = test_feed_dict[model.image_input]
  for ii_img, img in enumerate(test_img_batch):
    width = img.shape[1]
    height = img.shape[0]
    # Scale factors from the network's input resolution back to this image.
    DW = float(float(width) / float(model.mc.IMAGE_WIDTH))
    DH = float(float(height) / float(model.mc.IMAGE_HEIGHT))
    final_boxes, final_probs, final_class = model.filter_prediction(
        det_boxes[ii_img], det_probs[ii_img], det_class[ii_img])
    # NOTE(review): overwritten on every iteration — the value returned below
    # is the time up to the last image's filtering step, not a per-image time.
    compute_time = time.time() - t_start
    # Keep only detections above the plotting probability threshold.
    keep_idx = [idx for idx in range(len(final_probs)) \
        if final_probs[idx] > model.mc.PLOT_PROB_THRESH]
    final_boxes = [final_boxes[idx] for idx in keep_idx]
    final_probs = [final_probs[idx] for idx in keep_idx]
    final_class = [final_class[idx] for idx in keep_idx]
    box_list = final_boxes
    # Labels have the form "CLASS: (PROB)"; only CLASS is used further down.
    label_list = [model.mc.CLASS_NAMES[idx] + ': (%.2f)' % prob \
        for idx, prob in zip(final_class, final_probs)]
    all_out_boxes = []
    for bbox, label in zip(box_list, label_list):
      cx, cy, w, h = bbox
      # Center format -> corner format, still in network coordinates.
      xmin_sdet = cx - w / 2
      ymin_sdet = cy - h / 2
      xmax_sdet = cx + w / 2
      ymax_sdet = cy + h / 2
      # Rescale corners to original-image pixel coordinates.
      xmin = float(xmin_sdet * DW)
      ymin = float(ymin_sdet * DH)
      xmax = float(xmax_sdet * DW)
      ymax = float(ymax_sdet * DH)
      class_label = label.split(':')[0] # text before "CLASS: (PROB)"
      out_box = { "type": "RECT",
      "label": class_label,
      "position": "UNDEFINED",
      "bounding_box_with_pose": { "p0": { 'x' : xmin, 'y' : ymin},
      "p1": { 'x' : xmax, 'y' : ymax},
      "width": float(w*DW),
      "height": float(h*DH),
      "aspect_ratio": float(w*DW)/float(h*DH),
      "pose": "REAR"
      }
      }
      all_out_boxes.append(out_box)
    img_basename = os.path.basename(img_fnames_batch[ii_img])
    json_dict_res = {'img_filename': img_basename, 'bounding_box_object_annotation': all_out_boxes}
    # JSON_PREFIX is a module-level constant defined elsewhere in this file;
    # despite its name it is appended as a SUFFIX to the image basename.
    json_fname = os.path.join(pred_json_folder, img_basename + JSON_PREFIX)
    json_file = open(json_fname, 'w')
    json.dump(json_dict_res, json_file, indent=4, sort_keys=True)
    json_file.close()
  return compute_time
def main(argv=None):  # pylint: disable=unused-argument
  """Script entry point: start training from a clean FLAGS.train_dir."""
  train_dir = FLAGS.train_dir
  # Wipe any state left over from a previous run before training starts.
  if tf.gfile.Exists(train_dir):
    tf.gfile.DeleteRecursively(train_dir)
  tf.gfile.MakeDirs(train_dir)
  train()
if __name__ == '__main__':
  # Hand control to tf.app.run(), which invokes main() — the standard TF1
  # entry point (presumably after flag parsing; confirm against TF version).
  tf.app.run()
| {
"repo_name": "getnexar/squeezeDet",
"path": "src/train.py",
"copies": "1",
"size": "20175",
"license": "bsd-2-clause",
"hash": 8071640660885272000,
"line_mean": 42.7635574837,
"line_max": 195,
"alpha_frac": 0.5872614622,
"autogenerated": false,
"ratio": 3.4742552092302392,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9525192394336011,
"avg_score": 0.007264855418845858,
"num_lines": 461
} |
"""Utility functions."""
import numpy as np
import time
import tensorflow as tf
import cv2
def iou(box1, box2):
  """Intersection-over-union of two center-format boxes.

  Args:
    box1: array of 4 elements [cx, cy, width, height].
    box2: same as above.
  Returns:
    A float in [0, 1]; 0 when the boxes do not overlap.
  """
  overlap_w = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) \
      - max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
  if overlap_w <= 0:
    return 0
  overlap_h = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) \
      - max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
  if overlap_h <= 0:
    return 0
  inter = overlap_w * overlap_h
  union = box1[2] * box1[3] + box2[2] * box2[3] - inter
  return inter / union
def batch_iou(boxes, box):
  """Vectorized IoU of each row of *boxes* against a single *box*.

  Args:
    boxes: 2D array of [cx, cy, width, height] rows.
    box: a single [cx, cy, width, height] box.
  Returns:
    1D array of floats in [0, 1], one IoU per row of *boxes*.
  """
  half_w = 0.5 * boxes[:, 2]
  half_h = 0.5 * boxes[:, 3]
  lr = np.minimum(boxes[:, 0] + half_w, box[0] + 0.5 * box[2]) - \
       np.maximum(boxes[:, 0] - half_w, box[0] - 0.5 * box[2])
  lr = np.maximum(lr, 0)
  tb = np.minimum(boxes[:, 1] + half_h, box[1] + 0.5 * box[3]) - \
       np.maximum(boxes[:, 1] - half_h, box[1] - 0.5 * box[3])
  tb = np.maximum(tb, 0)
  inter = lr * tb
  return inter / (boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter)
def nms(boxes, probs, threshold):
  """Greedy non-maximum suppression over center-format boxes.

  Args:
    boxes: array of [cx, cy, w, h] (center format) boxes.
    probs: array of probabilities, one per box.
    threshold: two boxes are considered overlapping if their IoU is larger
      than this threshold.
  Returns:
    keep: list of True/False, one per box.
  """
  order = probs.argsort()[::-1]
  keep = [True] * len(order)
  for rank, top in enumerate(order[:-1]):
    rest = order[rank + 1:]
    for offset, ov in enumerate(batch_iou(boxes[rest], boxes[top])):
      if ov > threshold:
        keep[rest[offset]] = False
  return keep
# TODO(bichen): this is not equivalent with full NMS. Need to improve it.
def recursive_nms(boxes, probs, threshold, form='center'):
  """Recursive (divide-and-conquer) non-maximum suppression.

  Args:
    boxes: array of [cx, cy, w, h] (center format) or
      [xmin, ymin, xmax, ymax] (diagonal format) boxes.
    probs: array of probabilities, one per box.
    threshold: two boxes are considered overlapping if their IoU is larger
      than this threshold.
    form: 'center' or 'diagonal'.
  Returns:
    keep: list of True/False, one per box.
  """
  assert form == 'center' or form == 'diagonal', \
      'bounding box format not accepted: {}.'.format(form)
  if form == 'center':
    # convert to diagonal format
    boxes = np.array([bbox_transform(b) for b in boxes])
  areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
  # Boxes are sorted by xmin so the inner loop can break early once no
  # further x-overlap is possible.
  hidx = boxes[:, 0].argsort()
  keep = [True] * len(hidx)

  def _nms(hidx):
    # Plain NMS over the sub-list of box indices *hidx*.
    order = probs[hidx].argsort()[::-1]
    for idx in range(len(order)):
      if not keep[hidx[order[idx]]]:
        continue
      xx2 = boxes[hidx[order[idx]], 2]
      for jdx in range(idx + 1, len(order)):
        if not keep[hidx[order[jdx]]]:
          continue
        xx1 = boxes[hidx[order[jdx]], 0]
        if xx2 < xx1:
          break
        w = xx2 - xx1
        yy1 = max(boxes[hidx[order[idx]], 1], boxes[hidx[order[jdx]], 1])
        yy2 = min(boxes[hidx[order[idx]], 3], boxes[hidx[order[jdx]], 3])
        if yy2 <= yy1:
          continue
        h = yy2 - yy1
        inter = w * h
        iou = inter / (areas[hidx[order[idx]]] + areas[hidx[order[jdx]]] - inter)
        if iou > threshold:
          keep[hidx[order[jdx]]] = False

  def _recur(hidx):
    if len(hidx) <= 20:
      _nms(hidx)
    else:
      # BUG FIX: was `len(hidx)/2`, which yields a float under Python 3 and
      # makes the slices below raise TypeError. Floor division is identical
      # under Python 2 and correct under Python 3.
      mid = len(hidx) // 2
      _recur(hidx[:mid])
      _recur(hidx[mid:])
      _nms([idx for idx in hidx if keep[idx]])

  _recur(hidx)
  return keep
def sparse_to_dense(sp_indices, output_shape, values, default_value=0):
  """Build a dense float array from a sparse representation.

  Args:
    sp_indices: sequence of index tuples/lists, one per value to place.
    output_shape: shape of the dense array.
    values: values to write, aligned with sp_indices.
    default_value: fill value for unspecified positions.
  Returns:
    A dense numpy float64 array with shape output_shape.
  """
  assert len(sp_indices) == len(values), \
      'Length of sp_indices is not equal to length of values'
  dense = np.full(output_shape, default_value, dtype=float)
  for pos, val in zip(sp_indices, values):
    dense[tuple(pos)] = val
  return dense
def bgr_to_rgb(ims):
  """Convert a list of images from BGR to RGB by reversing the channel axis."""
  return [im[:, :, ::-1] for im in ims]
def bbox_transform(bbox):
  """Convert [cx, cy, w, h] boxes to [xmin, ymin, xmax, ymax] along the last axis."""
  with tf.variable_scope('bbox_transform') as scope:
    cx, cy = bbox[..., 0], bbox[..., 1]
    half_w = bbox[..., 2] / 2
    half_h = bbox[..., 3] / 2
    return np.stack(
        [cx - half_w, cy - half_h, cx + half_w, cy + half_h],
        axis=-1
    )
def bbox_transform_inv(bbox):
  """Convert [xmin, ymin, xmax, ymax] boxes to [cx, cy, w, h] along the last axis."""
  with tf.variable_scope('bbox_transform_inv') as scope:
    xmin, ymin = bbox[..., 0], bbox[..., 1]
    xmax, ymax = bbox[..., 2], bbox[..., 3]
    # +1 treats the box as inclusive pixel coordinates.
    w = xmax - xmin + 1.0
    h = ymax - ymin + 1.0
    return np.stack(
        [xmin + 0.5 * w, ymin + 0.5 * h, w, h],
        axis=-1
    )
def recolor(im, a = .1):
  """Randomly amplify each color channel, then apply a random gamma curve.

  Args:
    im: image array of shape (H, W, C).
    a: maximum fractional per-channel amplification.
  Returns:
    A uint8 image array with the same shape as *im*.
  """
  # Three independent draws (one per channel), mapped from [0, 1) to [-1, 1).
  t = np.array([np.random.uniform() for _ in range(3)]) * 2. - 1.
  im = im.astype(float) * (1 + t * a)
  mx = 255. * (1 + a)
  # Random exponent in [0.5, 1.5] applied to the normalized image.
  up = np.random.uniform() * 2 - 1
  im = np.power(im / mx, 1. + up * .5)
  return np.array(im * 255., np.uint8)
def scale_trans(im, gt_bbox, labels):
  """Randomly zoom the image (1.0x-1.2x) and crop back to the original size.

  Ground-truth boxes are scaled and shifted accordingly; boxes whose scaled
  center falls outside the original frame are dropped together with their
  labels.
  """
  h, w, c = im.shape
  # Scale in [1.0, 1.2); the crop offsets cover the extra margin.
  scale = np.random.uniform(0, 2) / 10. + 1.
  max_offx = (scale - 1.) * w
  max_offy = (scale - 1.) * h
  offx = int(np.random.uniform() * max_offx)
  offy = int(np.random.uniform() * max_offy)
  # Scale up, then crop an (h, w) window at the random offset.
  scaled = cv2.resize(im, (int(w * scale), int(h * scale)))
  im = scaled[offy : (offy + h), offx : (offx + w)]
  # Scale boxes; keep only those whose center is still inside the frame.
  gt_bbox = gt_bbox * scale
  valid_idx = (gt_bbox[:, 0] >= 0) * (gt_bbox[:, 1] >= 0) * \
      (gt_bbox[:, 0] <= w) * (gt_bbox[:, 1] <= h)
  gt_bbox[:, 0] = np.clip(gt_bbox[:, 0] - offx, 0, w)
  gt_bbox[:, 1] = np.clip(gt_bbox[:, 1] - offy, 0, h)
  return im, gt_bbox[valid_idx], labels[valid_idx]
def drift_dist(im, gt_bbox, mc, orig_h, orig_w):
  """Randomly drift (translate) the image and its ground-truth boxes.

  Args:
    im: image array of shape (H, W, 3).
    gt_bbox: array of [cx, cy, w, h] ground-truth boxes (modified in place).
    mc: model config providing non-negative DRIFT_X / DRIFT_Y bounds.
    orig_h, orig_w: original image height and width.
  Returns:
    (im, gt_bbox): drifted image and correspondingly shifted boxes; returned
    unchanged when both drift bounds are zero.
  """
  # BUG FIX: the original asserted `mc.DRIFT_Y > 0`, contradicting both the
  # error message and the DRIFT_X check; zero drift is a legal configuration
  # (the if-branch below already handles it by doing nothing).
  assert mc.DRIFT_X >= 0 and mc.DRIFT_Y >= 0, \
      'mc.DRIFT_X and mc.DRIFT_Y must be >= 0'
  if mc.DRIFT_X > 0 or mc.DRIFT_Y > 0:
    # Ensure the gt bounding boxes are not drifted out of the image.
    max_drift_x = min(gt_bbox[:, 0] - gt_bbox[:, 2]/2.0+1)
    max_drift_y = min(gt_bbox[:, 1] - gt_bbox[:, 3]/2.0+1)
    assert max_drift_x >= 0 and max_drift_y >= 0, 'bbox out of image'
    # NOTE(review): np.random.randint requires low < high; assumes the drift
    # bounds are small relative to box positions — confirm for edge configs.
    dy = np.random.randint(-mc.DRIFT_Y, min(mc.DRIFT_Y+1, max_drift_y))
    dx = np.random.randint(-mc.DRIFT_X, min(mc.DRIFT_X+1, max_drift_x))
    # Shift bbox centers opposite to the crop direction.
    gt_bbox[:, 0] = gt_bbox[:, 0] - dx
    gt_bbox[:, 1] = gt_bbox[:, 1] - dy
    # Distort image: paste the shifted crop onto a zero canvas.
    orig_h -= dy
    orig_w -= dx
    orig_x, dist_x = max(dx, 0), max(-dx, 0)
    orig_y, dist_y = max(dy, 0), max(-dy, 0)
    distorted_im = np.zeros(
        (int(orig_h), int(orig_w), 3)).astype(np.float32)
    distorted_im[dist_y:, dist_x:, :] = im[orig_y:, orig_x:, :]
    im = distorted_im
  return im, gt_bbox
def rand_flip(im, gt_bbox, orig_w):
  """Horizontally mirror the image and boxes with 50% probability."""
  # randint(2) yields 0 or 1; truthy means flip (same draw as the original
  # `> 0.5` comparison).
  if np.random.randint(2):
    im = im[:, ::-1, :]
    gt_bbox[:, 0] = orig_w - 1 - gt_bbox[:, 0]
  return im, gt_bbox
class Timer(object):
  """Accumulating stopwatch: tic() starts a lap, toc() closes it."""

  def __init__(self):
    # Public counters; callers read these attributes directly.
    self.total_time = 0.0
    self.calls = 0
    self.start_time = 0.0
    self.duration = 0.0
    self.average_time = 0.0

  def tic(self):
    """Start (or restart) the current lap."""
    self.start_time = time.time()

  def toc(self, average=True):
    """Close the lap; return the running average, or the lap time if average is False."""
    self.duration = time.time() - self.start_time
    self.total_time += self.duration
    self.calls += 1
    self.average_time = self.total_time / self.calls
    return self.average_time if average else self.duration
def safe_exp(w, thresh):
  """Safe exponential for tensors: linear extension above *thresh* avoids overflow."""
  slope = np.exp(thresh)
  with tf.variable_scope('safe_exponential'):
    is_linear = tf.to_float(w > thresh)
    linear_part = slope * (w - thresh + 1.)
    exp_part = tf.exp(w)
    out = is_linear * linear_part + (1. - is_linear) * exp_part
  return out
| {
"repo_name": "goan15910/ConvDet",
"path": "src/utils/util.py",
"copies": "1",
"size": "8776",
"license": "bsd-2-clause",
"hash": -3360748288548132400,
"line_mean": 27.9636963696,
"line_max": 80,
"alpha_frac": 0.5754329991,
"autogenerated": false,
"ratio": 2.859563375692408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8816135792432725,
"avg_score": 0.023772116471936482,
"num_lines": 303
} |
"""Utility functions."""
import numpy as np
import time
import tensorflow as tf
def iou(box1, box2):
  """Intersection-over-union of two center-format boxes.

  Args:
    box1: array of 4 elements [cx, cy, width, height].
    box2: same as above.
  Returns:
    A float in [0, 1]; 0 if the boxes are disjoint.
  """
  right = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2])
  left = max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
  if right - left > 0:
    bottom = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3])
    top = max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
    if bottom - top > 0:
      inter = (right - left) * (bottom - top)
      union = box1[2] * box1[3] + box2[2] * box2[3] - inter
      return inter / union
  return 0
def batch_iou(boxes, box):
  """Vectorized IoU of every row in *boxes* against one *box* (center format).

  Args:
    boxes: 2D array of [cx, cy, width, height] rows.
    box: a single [cx, cy, width, height] box.
  Returns:
    1D array of floats in [0, 1], one IoU per row of *boxes*.
  """
  inter_w = np.maximum(
      np.minimum(boxes[:, 0] + 0.5 * boxes[:, 2], box[0] + 0.5 * box[2]) -
      np.maximum(boxes[:, 0] - 0.5 * boxes[:, 2], box[0] - 0.5 * box[2]),
      0)
  inter_h = np.maximum(
      np.minimum(boxes[:, 1] + 0.5 * boxes[:, 3], box[1] + 0.5 * box[3]) -
      np.maximum(boxes[:, 1] - 0.5 * boxes[:, 3], box[1] - 0.5 * box[3]),
      0)
  inter = inter_w * inter_h
  union = boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter
  return inter / union
def nms(boxes, probs, threshold):
  """Greedy non-maximum suppression.

  Args:
    boxes: array of [cx, cy, w, h] (center format) boxes.
    probs: array of probabilities, one per box.
    threshold: two boxes are considered overlapping if their IoU is larger
      than this threshold.
  Returns:
    keep: list of True/False, one per box.
  """
  order = probs.argsort()[::-1]
  keep = [True] * len(order)
  for i in range(len(order) - 1):
    rest = order[i + 1:]
    for j, ov in enumerate(batch_iou(boxes[rest], boxes[order[i]])):
      if ov > threshold:
        keep[rest[j]] = False
  return keep
# TODO(bichen): this is not equivalent with full NMS. Need to improve it.
def recursive_nms(boxes, probs, threshold, form='center'):
  """Recursive (divide-and-conquer) non-maximum suppression.

  Args:
    boxes: array of [cx, cy, w, h] (center format) or
      [xmin, ymin, xmax, ymax] (diagonal format) boxes.
    probs: array of probabilities, one per box.
    threshold: two boxes are considered overlapping if their IoU is larger
      than this threshold.
    form: 'center' or 'diagonal'.
  Returns:
    keep: list of True/False, one per box.
  """
  assert form == 'center' or form == 'diagonal', \
      'bounding box format not accepted: {}.'.format(form)
  if form == 'center':
    # convert to diagonal format
    boxes = np.array([bbox_transform(b) for b in boxes])
  areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
  # x-sorted index order lets the inner loop break once boxes stop
  # overlapping in x.
  hidx = boxes[:, 0].argsort()
  keep = [True] * len(hidx)

  def _nms(hidx):
    # Plain NMS over the sub-list of box indices *hidx*.
    order = probs[hidx].argsort()[::-1]
    for idx in range(len(order)):
      if not keep[hidx[order[idx]]]:
        continue
      xx2 = boxes[hidx[order[idx]], 2]
      for jdx in range(idx + 1, len(order)):
        if not keep[hidx[order[jdx]]]:
          continue
        xx1 = boxes[hidx[order[jdx]], 0]
        if xx2 < xx1:
          break
        w = xx2 - xx1
        yy1 = max(boxes[hidx[order[idx]], 1], boxes[hidx[order[jdx]], 1])
        yy2 = min(boxes[hidx[order[idx]], 3], boxes[hidx[order[jdx]], 3])
        if yy2 <= yy1:
          continue
        h = yy2 - yy1
        inter = w * h
        iou = inter / (areas[hidx[order[idx]]] + areas[hidx[order[jdx]]] - inter)
        if iou > threshold:
          keep[hidx[order[jdx]]] = False

  def _recur(hidx):
    if len(hidx) <= 20:
      _nms(hidx)
    else:
      # BUG FIX: was `len(hidx)/2`, a float under Python 3, which makes the
      # slices below raise TypeError. `//` behaves identically in Python 2.
      mid = len(hidx) // 2
      _recur(hidx[:mid])
      _recur(hidx[mid:])
      _nms([idx for idx in hidx if keep[idx]])

  _recur(hidx)
  return keep
def sparse_to_dense(sp_indices, output_shape, values, default_value=0):
  """Build a dense float array from a sparse representation.

  Args:
    sp_indices: sequence of index tuples/lists, one per value to place.
    output_shape: shape of the dense array.
    values: values to write, aligned with sp_indices.
    default_value: fill value for unspecified positions.
  Returns:
    A dense numpy float64 array with shape output_shape.
  """
  assert len(sp_indices) == len(values), \
      'Length of sp_indices is not equal to length of values'
  dense = np.ones(output_shape) * default_value
  for pos, val in zip(sp_indices, values):
    dense[tuple(pos)] = val
  return dense
def bgr_to_rgb(ims):
  """Convert a list of BGR images to RGB (reverse the channel axis)."""
  return [image[:, :, ::-1] for image in ims]
def bbox_transform(bbox):
  """Convert a [cx, cy, w, h] box to [xmin, ymin, xmax, ymax].

  Works for numpy arrays or lists of tensors; returns a 4-element list.
  """
  with tf.variable_scope('bbox_transform') as scope:
    cx, cy, w, h = bbox
    return [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]
def bbox_transform_inv(bbox):
  """Convert a [xmin, ymin, xmax, ymax] box to [cx, cy, w, h].

  Works for numpy arrays or lists of tensors; returns a 4-element list.
  """
  with tf.variable_scope('bbox_transform_inv') as scope:
    xmin, ymin, xmax, ymax = bbox
    # +1 treats the box as inclusive pixel coordinates.
    width = xmax - xmin + 1.0
    height = ymax - ymin + 1.0
    return [xmin + 0.5 * width, ymin + 0.5 * height, width, height]
class Timer(object):
  """Simple accumulating stopwatch (tic/toc)."""

  def __init__(self):
    # Public counters; external code reads these attributes directly.
    self.total_time = 0.0
    self.calls = 0
    self.start_time = 0.0
    self.duration = 0.0
    self.average_time = 0.0

  def tic(self):
    """Record the current wall-clock time as the lap start."""
    self.start_time = time.time()

  def toc(self, average=True):
    """Close the current lap; return the running average, or the lap time."""
    elapsed = time.time() - self.start_time
    self.duration = elapsed
    self.total_time += elapsed
    self.calls += 1
    self.average_time = self.total_time / self.calls
    if average:
      return self.average_time
    return self.duration
def safe_exp(w, thresh):
  """Exponential that switches to a linear extension above *thresh* to avoid overflow."""
  slope = np.exp(thresh)
  with tf.variable_scope('safe_exponential'):
    above = tf.to_float(w > thresh)
    out = above * (slope * (w - thresh + 1.)) + (1. - above) * tf.exp(w)
  return out
| {
"repo_name": "fyhtea/squeezeDet-hand",
"path": "src/utils/util.py",
"copies": "3",
"size": "6444",
"license": "bsd-2-clause",
"hash": 626075610747182000,
"line_mean": 26.775862069,
"line_max": 80,
"alpha_frac": 0.6000931099,
"autogenerated": false,
"ratio": 3.0126227208976157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112715830797615,
"avg_score": null,
"num_lines": null
} |
"""VGG16+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class VGG16ConvDet(ModelSkeleton):
  """VGG-16 backbone topped with a ConvDet detection layer."""

  def __init__(self, mc, gpu_id=0):
    # mc: model configuration; gpu_id: device the whole graph is placed on.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)
      # Sub-graphs are assembled in skeleton order: forward pass, output
      # interpretation, loss, training ops, visualization.
      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """Build the VGG-16 model."""
    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      # Pretrained weights stored as a joblib pickle (caffe-converted,
      # judging by the attribute name).
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stage 1: two 64-filter convs; freeze=True keeps pretrained weights fixed.
    with tf.variable_scope('conv1') as scope:
      conv1_1 = self._conv_layer(
          'conv1_1', self.image_input, filters=64, size=3, stride=1, freeze=True)
      conv1_2 = self._conv_layer(
          'conv1_2', conv1_1, filters=64, size=3, stride=1, freeze=True)
      pool1 = self._pooling_layer(
          'pool1', conv1_2, size=2, stride=2)

    # Stage 2: two 128-filter convs, also frozen.
    with tf.variable_scope('conv2') as scope:
      conv2_1 = self._conv_layer(
          'conv2_1', pool1, filters=128, size=3, stride=1, freeze=True)
      conv2_2 = self._conv_layer(
          'conv2_2', conv2_1, filters=128, size=3, stride=1, freeze=True)
      pool2 = self._pooling_layer(
          'pool2', conv2_2, size=2, stride=2)

    # Stages 3-5 are trainable (no freeze flag).
    with tf.variable_scope('conv3') as scope:
      conv3_1 = self._conv_layer(
          'conv3_1', pool2, filters=256, size=3, stride=1)
      conv3_2 = self._conv_layer(
          'conv3_2', conv3_1, filters=256, size=3, stride=1)
      conv3_3 = self._conv_layer(
          'conv3_3', conv3_2, filters=256, size=3, stride=1)
      pool3 = self._pooling_layer(
          'pool3', conv3_3, size=2, stride=2)

    with tf.variable_scope('conv4') as scope:
      conv4_1 = self._conv_layer(
          'conv4_1', pool3, filters=512, size=3, stride=1)
      conv4_2 = self._conv_layer(
          'conv4_2', conv4_1, filters=512, size=3, stride=1)
      conv4_3 = self._conv_layer(
          'conv4_3', conv4_2, filters=512, size=3, stride=1)
      pool4 = self._pooling_layer(
          'pool4', conv4_3, size=2, stride=2)

    # Stage 5 has no pooling layer — the detection head needs the larger
    # feature map.
    with tf.variable_scope('conv5') as scope:
      conv5_1 = self._conv_layer(
          'conv5_1', pool4, filters=512, size=3, stride=1)
      conv5_2 = self._conv_layer(
          'conv5_2', conv5_1, filters=512, size=3, stride=1)
      conv5_3 = self._conv_layer(
          'conv5_3', conv5_2, filters=512, size=3, stride=1)

    dropout5 = tf.nn.dropout(conv5_3, self.keep_prob, name='drop6')

    # Per-anchor outputs: mc.CLASSES class scores + 1 + 4 further values
    # (presumably one confidence score and four box parameters — the
    # ConvDet head layout; confirm against nn_skeleton).
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    # Linear (relu=False) prediction layer.
    self.preds = self._conv_layer(
        'conv6', dropout5, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)
| {
"repo_name": "BichenWuUCB/squeezeDet",
"path": "src/nets/vgg16_convDet.py",
"copies": "1",
"size": "3184",
"license": "bsd-2-clause",
"hash": -5984719655860090000,
"line_mean": 34.3777777778,
"line_max": 81,
"alpha_frac": 0.6140075377,
"autogenerated": false,
"ratio": 2.926470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4040478125935294,
"avg_score": null,
"num_lines": null
} |
"""VGG16-ConvDet-v2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class VGG16ConvDetV2(ModelSkeleton):
  """VGG-16 backbone (extended with a sixth conv stage) plus a ConvDet head."""

  def __init__(self, mc, gpu_id):
    # mc: model configuration; gpu_id: device the whole graph is placed on.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)
      self._add_forward_graph()
      self._add_interpretation_graph()
      # The loss graph is selected by config; only these two are wired up.
      assert mc.LOSS_TYPE in ['SQT', 'YOLO'], \
          'Loss type {0} not defined'.format(mc.LOSS_TYPE)
      if mc.LOSS_TYPE == 'SQT':
        self._add_sqt_loss_graph()
      elif mc.LOSS_TYPE == 'YOLO':
        self._add_yolo_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """Build the VGG-16 model."""
    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      # Pretrained weights stored as a joblib pickle.
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stages 1-2 are frozen at their pretrained values.
    with tf.variable_scope('conv1') as scope:
      conv1_1 = self._conv_layer(
          'conv1_1', self.image_input, filters=64, size=3, stride=1, freeze=True)
      conv1_2 = self._conv_layer(
          'conv1_2', conv1_1, filters=64, size=3, stride=1, freeze=True)
      pool1 = self._pooling_layer(
          'pool1', conv1_2, size=2, stride=2)

    with tf.variable_scope('conv2') as scope:
      conv2_1 = self._conv_layer(
          'conv2_1', pool1, filters=128, size=3, stride=1, freeze=True)
      conv2_2 = self._conv_layer(
          'conv2_2', conv2_1, filters=128, size=3, stride=1, freeze=True)
      pool2 = self._pooling_layer(
          'pool2', conv2_2, size=2, stride=2)

    # Stages 3-6 are trainable.
    with tf.variable_scope('conv3') as scope:
      conv3_1 = self._conv_layer(
          'conv3_1', pool2, filters=256, size=3, stride=1)
      conv3_2 = self._conv_layer(
          'conv3_2', conv3_1, filters=256, size=3, stride=1)
      conv3_3 = self._conv_layer(
          'conv3_3', conv3_2, filters=256, size=3, stride=1)
      pool3 = self._pooling_layer(
          'pool3', conv3_3, size=2, stride=2)

    with tf.variable_scope('conv4') as scope:
      conv4_1 = self._conv_layer(
          'conv4_1', pool3, filters=512, size=3, stride=1)
      conv4_2 = self._conv_layer(
          'conv4_2', conv4_1, filters=512, size=3, stride=1)
      conv4_3 = self._conv_layer(
          'conv4_3', conv4_2, filters=512, size=3, stride=1)
      pool4 = self._pooling_layer(
          'pool4', conv4_3, size=2, stride=2)

    with tf.variable_scope('conv5') as scope:
      conv5_1 = self._conv_layer(
          'conv5_1', pool4, filters=512, size=3, stride=1)
      conv5_2 = self._conv_layer(
          'conv5_2', conv5_1, filters=512, size=3, stride=1)
      conv5_3 = self._conv_layer(
          'conv5_3', conv5_2, filters=512, size=3, stride=1)
      pool5 = self._pooling_layer(
          'pool5', conv5_3, size=2, stride=2)

    # Extra stage vs. plain VGG-16: two 1024-filter convs (the "v2" change).
    with tf.variable_scope('conv6') as scope:
      conv6_1 = self._conv_layer(
          'conv6_1', pool5, filters=1024, size=3, stride=1)
      conv6_2 = self._conv_layer(
          'conv6_2', conv6_1, filters=1024, size=3, stride=1)

    dropout6 = tf.nn.dropout(conv6_2, self.keep_prob, name='drop6')

    # Per-anchor outputs: mc.CLASSES class scores + 1 + 4 further values
    # (presumably one confidence score and four box parameters — confirm
    # against nn_skeleton).
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    # Linear (relu=False) prediction layer.
    self.preds = self._conv_layer(
        'conv7', dropout6, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)
| {
"repo_name": "goan15910/ConvDet",
"path": "src/nets/vgg16_convDet_v2.py",
"copies": "1",
"size": "3721",
"license": "bsd-2-clause",
"hash": -3732553369005036500,
"line_mean": 35.1262135922,
"line_max": 81,
"alpha_frac": 0.6062886321,
"autogenerated": false,
"ratio": 2.9093041438623923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8970890879590041,
"avg_score": 0.008940379274470346,
"num_lines": 103
} |
__author__ = 'Bieliaievskyi Sergey'
__credits__ = ["Bieliaievskyi Sergey"]
__license__ = "Apache License"
__version__ = "1.0.0"
__maintainer__ = "Bieliaievskyi Sergey"
__email__ = "magelan09@gmail.com"
__status__ = "Release"
import urllib.parse
import mimetypes
import base64
import pycurl
import json
import io
class PushBullet:
    """Minimal PushBullet v2 REST client built on pycurl.

    Authenticates with HTTP Basic, using the API token as the user name
    and an empty password.
    """
    def __init__(self, api_id):
        # api_id: the PushBullet API access token used for Basic auth.
        self.auth_id = api_id
        # Optional JSON body; when non-empty, pb_request POSTs it.
        self.post_data = {}
        self.main_url = 'https://api.pushbullet.com/v2/'
    def pb_request(self, url, req_method=None):
        """Perform one HTTP request against the PushBullet API.

        :param url: fully-qualified request URL
        :param req_method: optional HTTP verb override (e.g. 'DELETE')
        :return: (http_status, effective_url, decoded_json_body)
        """
        curl_obj = pycurl.Curl()
        response = io.BytesIO()
        # "<token>:" base64-encoded; [:-1] strips the trailing newline
        # that base64.encodebytes appends.
        encoded_string = base64.encodebytes(('%s:' % self.auth_id).encode())[:-1]
        auth = "Basic %s" % encoded_string.decode('utf-8')
        if self.post_data:
            req_data = json.dumps(self.post_data)
            curl_obj.setopt(curl_obj.POSTFIELDS, req_data)
        if req_method:
            curl_obj.setopt(pycurl.CUSTOMREQUEST, req_method)
        curl_obj.setopt(pycurl.URL, url)
        curl_obj.setopt(curl_obj.WRITEFUNCTION, response.write)
        curl_obj.setopt(curl_obj.HTTPHEADER, ["Authorization: %s" % auth,
                                              "Accept: application/json",
                                              "Content-Type: application/json",
                                              "User-Agent: PushBullet-agent"])
        curl_obj.perform()
        return curl_obj.getinfo(pycurl.HTTP_CODE), \
            curl_obj.getinfo(pycurl.EFFECTIVE_URL), \
            json.loads(response.getvalue().decode())
class PBFileUpload(PushBullet):
    """Two-step PushBullet file upload: request an upload slot from the
    API, then POST the file to the granted upload URL."""
    def __init__(self, api_key_id):
        PushBullet.__init__(self, api_key_id)
        # Filled by upload_request(); carries 'upload_url' and form 'data'.
        self.file_upload_param = {}
    def upload_request(self, file_name):
        """Ask the API for upload parameters for file_name (stored on self)."""
        url_param = urllib.parse.urlencode({'file_name': file_name, 'file_type': mimetypes.guess_type(file_name)[0]})
        self.file_upload_param = self.pb_request('%supload-request?%s' % (self.main_url, url_param))[2]
    def pb_upload(self, file_path):
        """POST the file as multipart form data to the granted upload URL.

        :return: (http_status, effective_url)
        """
        buffer = io.BytesIO()
        # The API-provided form fields must accompany the file part.
        data = [(elm, str(val)) for (elm, val) in self.file_upload_param['data'].items()]
        data.append(('file', (pycurl.FORM_FILE, file_path)))
        my_curl = pycurl.Curl()
        my_curl.setopt(pycurl.URL, self.file_upload_param['upload_url'])
        my_curl.setopt(pycurl.HTTPPOST, data)
        my_curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
        my_curl.perform()
        return my_curl.getinfo(pycurl.HTTP_CODE), \
            my_curl.getinfo(pycurl.EFFECTIVE_URL)
class PBUsers(PushBullet):
    """Read-only access to the authenticated user's profile."""
    def __init__(self, api_key_id):
        PushBullet.__init__(self, api_key_id)
    def get_me(self):
        """Fetch the current user's account details."""
        endpoint = '%susers/me' % self.main_url
        return self.pb_request(endpoint)
class PBContacts(PushBullet):
    """List and delete PushBullet contacts."""
    def __init__(self, api_key_id):
        PushBullet.__init__(self, api_key_id)
    def get_contacts(self):
        """Return all contacts for this account."""
        endpoint = '%scontacts' % self.main_url
        return self.pb_request(endpoint)
    def del_contact(self, contact_iden):
        """Delete the contact identified by contact_iden."""
        endpoint = '%scontacts/%s' % (self.main_url, contact_iden)
        return self.pb_request(endpoint, req_method='DELETE')
class PBDevices(PushBullet):
    """List and delete devices linked to the account."""
    def __init__(self, api_key_id):
        PushBullet.__init__(self, api_key_id)
    def get_connected_devices(self):
        """Return every device registered with this account."""
        endpoint = '%sdevices' % self.main_url
        return self.pb_request(endpoint)
    def del_connected_device(self, dev_id):
        """Unregister the device identified by dev_id."""
        endpoint = '%sdevices/%s' % (self.main_url, dev_id)
        return self.pb_request(endpoint, req_method='DELETE')
class PBPushes(PushBullet):
    """Query and delete pushes."""
    def __init__(self, api_key_id):
        PushBullet.__init__(self, api_key_id)
    def get_push_history(self):
        """Return every push modified since the epoch."""
        endpoint = '%spushes?modified_after=0' % self.main_url
        return self.pb_request(endpoint)
    def del_pushes(self, push_id):
        """Delete the push identified by push_id."""
        endpoint = '%spushes/%s' % (self.main_url, push_id)
        return self.pb_request(endpoint, req_method='DELETE')
def pushes(self, **pbargs):
self.post_data = pbargs
return self.pb_request('%spushes' % self.main_url) | {
"repo_name": "pymag09/pushbullet",
"path": "pushbullet.py",
"copies": "1",
"size": "3902",
"license": "apache-2.0",
"hash": -9188806091319095000,
"line_mean": 34.8073394495,
"line_max": 117,
"alpha_frac": 0.6022552537,
"autogenerated": false,
"ratio": 3.238174273858921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4340429527558921,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bigyan'
import logging
from multiFileLogging import class2
def setup_logger(loggerName, logFile, level=logging.DEBUG):
    """Configure the named logger to write to logFile.

    The file is opened in 'w' mode, so every run truncates the log.
    """
    handler = logging.FileHandler(logFile, mode='w')
    handler.setFormatter(logging.Formatter(
        '[%(asctime)s] {%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d} %(levelname)s | %(message)s'))
    target = logging.getLogger(loggerName)
    target.setLevel(level)
    target.addHandler(handler)
    #logger.propagate = False
class class1:
    """Thin wrapper that logs INFO messages through a named logger."""
    __logger = None

    def __init__(self, log):
        # Look up (or lazily create) the logger registered under this name.
        self.__logger = logging.getLogger(log)

    def log(self, message):
        """Emit message at INFO level."""
        self.__logger.info(message)
def main():
    """Configure three file loggers and exercise logging from two classes."""
    for logger_name, log_file in (("class1_1Log", "class1_1.log"),
                                  ("class1_2Log", "class1_2.log"),
                                  ("class2Log", "class2.log")):
        setup_logger(logger_name, log_file)
    logging.getLogger("class1_1Log").warn("some warning")
    class1(log="class1_1Log").log("c1_1.log called")
    class1(log="class1_2Log").log("c1_2.log called")
    class2().log("c2.log called")
if __name__ == "__main__":
main() | {
"repo_name": "bigyanbhar/single-file-code",
"path": "multiFileLogging2.py",
"copies": "1",
"size": "1233",
"license": "apache-2.0",
"hash": 8533825570394326000,
"line_mean": 23.1960784314,
"line_max": 100,
"alpha_frac": 0.600973236,
"autogenerated": false,
"ratio": 3.261904761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43628779979047616,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bigyan'
import logging
import os
class class2:
    """Logs INFO messages through the pre-configured 'class2Log' logger."""
    __logger = None

    def __init__(self):
        self.__logger = logging.getLogger("class2Log")

    def log(self, message):
        """Emit message at INFO level via the class2 logger."""
        self.__logger.info(message)
#logging.basicConfig(
# filename=self.__expId + ".log",
# format='[%(asctime)s]' + ' ' +
# '{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
# '%(levelname)s | %(message)s',
# filemode='w',
# level=logging.DEBUG)
#def setup_logger(logger_name, log_file, level=logging.INFO):
# logging.basicConfig(
# format='[%(asctime)s]' + ' ' +
# '{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
# '%(levelname)s | %(message)s',
# filemode='w',
# level=logging.DEBUG)
# logger = logging.getLogger(logger_name)
#
# #formatter = logging.Formatter('%(asctime)s : %(message)s')
# fileHandler = logging.FileHandler(log_file, mode='w')
# #fileHandler.setFormatter(formatter)
# #streamHandler = logging.StreamHandler()
# #streamHandler.setFormatter(formatter)
#
# #logger.setLevel(level)
# logger.addHandler(fileHandler)
# #logger.addHandler(streamHandler)
# logger.propagate = False
def setup_logger(loggerName, logFile, level=logging.DEBUG):
    """Attach a truncating FileHandler with the project log format to the
    logger registered under loggerName, and set its level."""
    file_handler = logging.FileHandler(logFile, mode='w')
    file_handler.setFormatter(logging.Formatter(
        '[%(asctime)s] {%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d} %(levelname)s | %(message)s'))
    configured = logging.getLogger(loggerName)
    configured.setLevel(level)
    configured.addHandler(file_handler)
def main():
    """Demonstrate two independent file loggers writing into separate
    directories, plus a fallback message on the root logger."""
    # Robustness fix: os.mkdir raises OSError when the directory already
    # exists, so a second run of this script used to crash here.
    if not os.path.isdir("log1"):
        os.mkdir("log1")
    setup_logger('log1', r'log1/log1.log')
    log1 = logging.getLogger('log1')
    log1.info('Info for log 1!')
    #log1.handlers[0].stream.close()
    if not os.path.isdir("log2"):
        os.mkdir("log2")
    setup_logger('log2', r'log2/log2.log')
    log2 = logging.getLogger('log2')
    log2.info('Info for log 2!')
    log1.error('Oh, no! Something went wrong!')
    # Goes to the root logger, which has no handler configured here.
    logging.info("Wow")
if __name__ == "__main__":
main() | {
"repo_name": "bigyanbhar/single-file-code",
"path": "multiFileLogging.py",
"copies": "1",
"size": "2184",
"license": "apache-2.0",
"hash": -7488991883897003000,
"line_mean": 27.3766233766,
"line_max": 100,
"alpha_frac": 0.5934065934,
"autogenerated": false,
"ratio": 3.299093655589124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43925002489891235,
"avg_score": null,
"num_lines": null
} |
__author__ = "Biju Joseph"
import logging
import os
import json
logger = logging.getLogger('repo')
class Repository:
    """
    Provides a unified interface for repositories

    This base class only logs each operation; subclasses supply real
    storage behaviour.
    """

    def __init__(self, name):
        """
        :param name: name of the data store
        """
        self.name = name

    def persist(self, obj):
        """Persist the given object.
        :param obj: the object to persist
        """
        logging.debug("Persisting : %s", obj)

    def load(self, selector):
        """Retrieve objects from the data store.
        :param selector: filter criteria
        """
        logging.debug("Loading object by selector: %s", selector)

    def delete(self, selector):
        """Delete the given object.
        :param selector: the object to delete
        """
        logging.debug("Deleting : %s", selector)
class JSONFSRepository(Repository):
    """
    A repository that persists every object as a "<id>.json" file inside
    the directory supplied at construction time.
    """

    def __init__(self, directory):
        """
        :param directory: The directory where the objects needs to be written
        """
        super(JSONFSRepository, self).__init__(directory)

    def _file_path(self, file_id):
        # All objects live directly under self.name as "<id>.json".
        return os.path.join(self.name, file_id + ".json")

    def delete(self, selector):
        """
        Remove the object's JSON file from the file system.
        :param selector: the file ID
        """
        full_path = self._file_path(selector)
        logging.debug("Deleting %s", full_path)
        os.remove(full_path)

    def persist(self, obj):
        """Write obj (which must carry an 'id' key) to disk as JSON.
        :param obj: the object to persist
        """
        full_path = self._file_path(obj['id'])
        with open(full_path, 'w', encoding='utf8') as f:
            json.dump(obj, f, ensure_ascii=False)
        logging.debug("Persisting : %s", full_path)

    def load(self, selector):
        """
        Read and return the JSON object identified by selector.
        :param selector: file ID
        """
        full_path = self._file_path(selector)
        logging.debug("Loading object by selector: %s", selector)
        with open(full_path) as f:
            return json.load(f)
class BatchRepository(Repository, object):
    """
    A repository that decorates another one with batching support,
    useful for bulk loading of large data sets.
    """

    def __init__(self, batch_size, repo):
        """
        :param batch_size: The batch size needed (clamped to at least 1)
        :param repo: the repository to decorate
        """
        self.repo = repo
        self.batch = []
        self.batch_size = max(1, batch_size)
        super(BatchRepository, self).__init__(repo.name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Guarantee a final flush when used as a context manager.
        self.flush()

    def flush(self):
        """
        Persist any buffered objects through the decorated repository.
        Call explicitly when no more objects will be added.
        """
        logging.info("Flushing")
        if not self.batch:
            return None
        result = self.repo.persist(self.batch)
        del self.batch[:]
        return result

    def persist(self, obj):
        """Buffer obj; flush automatically once the batch is full.
        :param obj: object to be persisted
        :return: None if merely buffered, else the output of flush
        """
        self.batch.append(obj)
        if len(self.batch) >= self.batch_size:
            logging.debug("Batch full, going to flush : %d", len(self.batch))
            return self.flush()

    def load(self, selector):
        """
        Delegate loads straight to the decorated repository.
        """
        return self.repo.load(selector)
| {
"repo_name": "semanticbits/owh-ds",
"path": "software/owh/backoffice/obsolete/repositories.py",
"copies": "1",
"size": "3928",
"license": "apache-2.0",
"hash": 5486056497046489000,
"line_mean": 29.9291338583,
"line_max": 94,
"alpha_frac": 0.5814663951,
"autogenerated": false,
"ratio": 4.325991189427313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5407457584527313,
"avg_score": null,
"num_lines": null
} |
__author__ = "Biju Joseph"
import logging
import elasticsearch
import elasticsearch.helpers
from repositories import Repository
logger = logging.getLogger('elastic')
INDEX_SETTINGS = {
"settings": {
"refresh_interval" : "60s"
}
}
class ElasticSearchRepository(Repository, object):
    """ A facade that provides functionality to index objects in ElasticSearch
        Attributes:
            host_name: the host name
            port: the port
            doctype: the name of the document type
    """
    def __init__(self, host_name, port, index_name, doctype):
        """ Initializes the repo """
        self.es = elasticsearch.Elasticsearch([{'host': host_name, 'port': port}])
        self.index_name = index_name
        super(ElasticSearchRepository, self).__init__(doctype)
    def create_mappings(self, mapping):
        """
        Will create the mapping ONLY if the type is not present in the index.
        :param mapping:
        """
        if not self.es.indices.exists_type(index=self.index_name, doc_type=self.name):
            self.es.indices.put_mapping(index=self.index_name, doc_type=self.name, body=mapping)
    def create_index(self, mapping):
        """
        Will create the index (no-op if it already exists).
        Args:
            mapping: the type mapping to install after creation
        """
        if self.es.indices.exists(self.index_name):
            logging.info('Index %s already exists', self.index_name)
            return
        try:
            # Create the index; ignore=400 tolerates "already exists" races.
            self.es.indices.create(index= self.index_name, body=INDEX_SETTINGS, ignore=400)
            #specify the mapping
            self.create_mappings(mapping)
            self.es.indices.clear_cache(index=self.index_name)
        except Exception as e:
            logging.fatal('Error creating index [%s] for doctype [%s]', self.index_name, self.name)
            logging.error(e)
            raise
    def delete_index(self):
        """
        Will delete the index
        """
        try:
            self.es.indices.delete(index=self.index_name)
            logging.info('Deleted index %s', self.index_name)
        except elasticsearch.ElasticsearchException:
            # Best-effort: a missing index is logged, not raised.
            logging.info('Unable to delete index %s', self.index_name)
    def recreate_index(self, mappings):
        """Drop and re-create the index with the given mappings."""
        self.delete_index()
        self.create_index(mappings)
    def load(self, selector):
        """
        Will return the document identified by the ID in selector
        """
        return self.es.get(index=self.index_name, doc_type=self.name, id=selector)
    def persist(self, obj):
        """
        Will persist objects into the index
        Args:
            obj: an array of json objects that supports es bulk format
        Returns:
            The elastic search response object
        """
        res = self.es.bulk(index= self.index_name, body= obj, refresh=True, request_timeout = 30)
        # Fix: removed stray Python 2 `print "Flush"` debug statement; it
        # polluted stdout and made this module unimportable under Python 3.
        return res
def search(self, criteria, sort, pagination):
return self.es.search(index=self.index_name, body=criteria) | {
"repo_name": "semanticbits/owh-ds",
"path": "software/owh/backoffice/obsolete/elasticsearch_repository.py",
"copies": "1",
"size": "3022",
"license": "apache-2.0",
"hash": 3535152110464983600,
"line_mean": 30.1649484536,
"line_max": 100,
"alpha_frac": 0.6082064858,
"autogenerated": false,
"ratio": 4.11156462585034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521977111165034,
"avg_score": null,
"num_lines": null
} |
__author__ = "Biju Joseph"
import logging
import elasticsearch
import time
from elasticsearch.helpers import bulk, scan
from repositories import Repository
logger = logging.getLogger('elastic')
logging.getLogger('elasticsearch').setLevel("WARN")
INDEX_SETTINGS = {
"settings": {
"refresh_interval" : "-1"
}
}
class ElasticSearchRepository(Repository, object):
    """ A facade that provides functionality to index objects in ElasticSearch
        Attributes:
            host_name: the host name
            port: the port
            doctype: the name of the document type
    """
    def __init__(self, esConfig):
        """ Initializes the repo.

        :param esConfig: dict that must contain 'host', 'port', 'index'
            and 'type' keys; raises ValueError otherwise.
        """
        if esConfig and 'host' in esConfig and 'port' in esConfig and 'index' in esConfig and 'type' in esConfig :
            self.es = elasticsearch.Elasticsearch([{'host': esConfig['host'], 'port': esConfig['port']}])
            self.index_name = esConfig['index']
            Repository.__init__(self,esConfig['type'])
        else:
            logger.error ("One or more elastic search configuration not specified, unable to initialize elastic search")
            raise ValueError("One or more elastic search configuration not specified")
    def create_mappings(self, mapping):
        """
        Will create the mapping ONLY if the type is not present in the index.
        :param mapping:
        """
        if not self.es.indices.exists_type(index=self.index_name, doc_type=self.name):
            self.es.indices.put_mapping(index=self.index_name, doc_type=self.name, body=mapping)
    def create_index(self, mapping):
        """
        Will create the index (no-op if it already exists).
        Args:
            mapping: the type mapping installed after creation
        """
        if self.es.indices.exists(self.index_name):
            logging.info('Index %s already exists', self.index_name)
            return
        try:
            # Create the index; INDEX_SETTINGS disables automatic refresh
            # ("-1"), so callers must refresh_index() to see new docs.
            logging.info('Creating index %s', self.index_name)
            self.es.indices.create(index= self.index_name, body=INDEX_SETTINGS, ignore=400)
            logging.info('Index created successfully. Waiting for shard allocations..')
            time.sleep(10)
            #specify the mapping
            self.create_mappings(mapping)
            self.es.indices.clear_cache(index=self.index_name)
        except Exception as e:
            logging.fatal('Error creating index [%s] for doctype [%s]', self.index_name, self.name)
            logging.error(e)
            raise
    def delete_index(self):
        """
        Will delete the index (best effort; failure is logged, not raised)
        """
        try:
            self.es.indices.delete(index=self.index_name)
            logging.info('Deleted index %s', self.index_name)
        except elasticsearch.ElasticsearchException:
            logging.info('Unable to delete index %s', self.index_name)
    def recreate_index(self, mappings):
        # Drop and re-create the index with the given mappings.
        self.delete_index()
        self.create_index(mappings)
    def load(self, selector):
        """
        Will return the document identified by the ID in selector
        """
        return self.es.get(index=self.index_name, doc_type=self.name, id=selector)
    def persist(self, obj):
        """
        Will persist objects into the index
        Args:
            obj: an array of json objects that supports es bulk format
        Returns:
            The elastic search response object
        """
        res = self.es.bulk(index= self.index_name, body= obj, request_timeout = 30)
        return res
    def refresh_index(self):
        """Refresh the index"""
        return self.es.indices.refresh(index=self.index_name)
    def search(self, criteria, sort, pagination):
        # NOTE(review): sort and pagination are accepted but not forwarded
        # to Elasticsearch — confirm whether that is intentional.
        return self.es.search(index=self.index_name, body=criteria)
    def count_records(self):
        # Total document count for this doc type.
        return self.es.count(index=self.index_name, doc_type=self.name)['count']
    def get_record_by_id(self, id):
        return self.es.get(index=self.index_name, doc_type=self.name, id=id)
    def delete_records_by_query(self, q):
        """Delete records in the index matching the given query"""
        # FIXME: The delete function is not working as expected, the results vary by ES version
        # in 1.5.x, the delete action doesn't work at all, in 2.4.x it delete more records than specified
        # will need to use a newer version of ES
        # set the elasticsearh log to WARN to avoid too many logs from the scan
        logging.getLogger('elasticsearch').setLevel("WARN")
        bulk_deletes = []
        # Collect every matching hit, then turn each into a bulk 'delete' op.
        for result in scan(self.es,
                           query=q,
                           index=self.index_name,
                           doc_type=self.name,
                           _source=True,
                           track_scores=False,
                           scroll='5m'):
            logger.info(result)
            result['_op_type'] = 'delete'
            bulk_deletes.append(result)
        result = bulk(self.es, bulk_deletes)
        logging.getLogger('elasticsearch').setLevel("INFO")
        # Make the deletions visible (auto-refresh is disabled for this index).
        self.refresh_index()
        return result
    def delete_records_for_year(self, year):
        """Delete records with attribute current_year = year"""
        logger.warn("Deleting data for year %d", year)
        query={"query":{"match": {"current_year": str(year)}}}
        return self.delete_records_by_query(query)
| {
"repo_name": "semanticbits/owh-ds",
"path": "software/owh/backoffice/owh/etl/common/elasticsearch_repository.py",
"copies": "1",
"size": "5204",
"license": "apache-2.0",
"hash": -6509440737295635000,
"line_mean": 35.9078014184,
"line_max": 120,
"alpha_frac": 0.6154880861,
"autogenerated": false,
"ratio": 4.117088607594937,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004281059945419323,
"num_lines": 141
} |
__author__ = 'Bill French'
import argparse
from mi.idk.da_server import DirectAccessServer
from mi.idk.exceptions import ParameterRequired
def run():
    """Launch the IDK direct access server in telnet or VPS mode.

    Exactly one of -t / -v must be given; -m additionally launches the
    data file monitor.

    :raises ParameterRequired: when both or neither of -t/-v are passed
    """
    opts = parseArgs()
    app = DirectAccessServer(opts.launch_monitor)

    # BUG FIX: the original constructed these exceptions without raising
    # them, so invalid flag combinations were silently ignored.
    if opts.telnet and opts.vps:
        raise ParameterRequired("-t and -v are mutually exclusive")

    if opts.telnet:
        app.start_telnet_server()
    elif opts.vps:
        app.start_vps_server()
    else:
        raise ParameterRequired("-t or -v required.")
def launch_logger_window():
    # Placeholder: logger-window launching is not implemented yet.
    pass
def launch_stream_window():
    # Placeholder: stream-window launching is not implemented yet.
    pass
def parseArgs():
    """Parse the IDK direct-access command line.

    :return: argparse.Namespace with telnet, vps and launch_monitor flags
    """
    parser = argparse.ArgumentParser(description="IDK Start Direct Access")
    for flag, dest, description in (
            ("-t", 'telnet', "run telnet direct access"),
            ("-v", 'vps', "run virtual serial port access"),
            ("-m", 'launch_monitor', "Launch data file monitor")):
        parser.add_argument(flag, dest=dest, action="store_true",
                            help=description)
    return parser.parse_args()
if __name__ == '__main__':
run()
| {
"repo_name": "danmergens/mi-instrument",
"path": "mi/idk/scripts/da_server.py",
"copies": "11",
"size": "1122",
"license": "bsd-2-clause",
"hash": -1626588792605931000,
"line_mean": 25.0930232558,
"line_max": 75,
"alpha_frac": 0.61942959,
"autogenerated": false,
"ratio": 3.8958333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bill French'
import argparse
from mi.idk.platform.nose_test import NoseTest
from mi.idk.platform.metadata import Metadata
from mi.core.log import get_logger ; log = get_logger()
import yaml
import time
import os
import re
from glob import glob
from mi.idk.config import Config
DEFAULT_DIR='/tmp/dsa_ingest'
def run():
    """
    Run tests for one or more platform drivers. If -b is passed then
    we build all drivers, otherwise we use the current IDK driver.

    @return: the cumulative failure count across all drivers (0 means
        every test passed).
    NOTE(review): despite the original summary, this returns `failure`
    (an int), not a boolean, and the local `failed` flag below is set
    but never used — confirm which behavior callers expect.
    """
    opts = parseArgs()
    failed = False
    count = 0
    success = 0
    failure = 0
    error = 0
    start_time = time.time()
    for metadata in get_metadata(opts):
        count += 1
        app = NoseTest(metadata,
                       testname=opts.testname,
                       suppress_stdout=opts.suppress_stdout,
                       noseargs=opts.noseargs)
        app.report_header()

        # Mutually exclusive run modes selected by command-line flags.
        if( opts.unit ):
            result = app.run_unit()
        elif( opts.integration ):
            result = app.run_integration()
        elif( opts.qualification ):
            result = app.run_qualification()
        elif( opts.ingest ):
            result = app.run_ingestion(opts.directory, opts.exit_time)
        else:
            result = app.run()

        if(not result): failed = True

        # Accumulate per-driver counters for the cumulative report.
        success += app.success_count
        error += app.error_count
        failure += app.failure_count

    # Only print the cumulative summary when more than one driver ran.
    if(count > 1):
        driver_report(count, time.time() - start_time, success, error, failure)

    return failure
def get_metadata(opts):
    """
    Return the list of metadata objects to run tests for.

    With -b (buildall) the working directory tree is searched for
    drivers; otherwise the current IDK driver's metadata is used.
    @param opts: command line options dictionary.
    @return: list of metadata objects to test.
    """
    if not opts.buildall:
        return [Metadata()]
    metadata_list = []
    for path in get_driver_paths():
        log.debug("Adding driver path: %s", path)
        metadata_list.append(Metadata(path))
    return metadata_list
def parseArgs():
    """Build and evaluate the IDK driver-test command line."""
    parser = argparse.ArgumentParser(description="IDK Start Driver")
    option_specs = [
        ("-s", dict(dest='suppress_stdout', action="store_true",
                    help="hide stdout")),
        ("-g", dict(dest='ingest', action="store_true",
                    help="run ingestion test from directory")),
        ("-d", dict(dest='directory', default=DEFAULT_DIR,
                    help="ingestion directory for -g (DEFAULT: %s)" % DEFAULT_DIR)),
        ("-x", dict(dest='exit_time',
                    help="ingestion runtime in seconds for -g (DEFAULT: None)")),
        ("-u", dict(dest='unit', action="store_true",
                    help="only run unit tests")),
        ("-i", dict(dest='integration', action="store_true",
                    help="only run integration tests")),
        ("-q", dict(dest='qualification', action="store_true",
                    help="only run qualification tests")),
        ("-b", dict(dest='buildall', action="store_true",
                    help="run all tests for all drivers")),
        ("-t", dict(dest='testname',
                    help="test function name to run (all if not set)")),
        ("-n", dict(dest='noseargs',
                    help="extra nosetest args, use '+' for '-'")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    #parser.add_argument("-m", dest='launch_monitor', action="store_true",
    #                    help="Launch data file monitor" )
    return parser.parse_args()
def get_driver_paths():
    """
    @brief Collect the relative paths of all platform drivers in the
    working directory (every directory containing a metadata.yml).
    """
    driver_dir = os.path.join(Config().get("working_repo"), 'mi', 'platform')
    log.debug("Driver Dir: %s", driver_dir)

    metadata_files = []
    for dirname, _, _ in os.walk(driver_dir):
        metadata_files.extend(glob(os.path.join(dirname, "metadata.yml")))
    log.debug("Files: %s", metadata_files)

    # Strip the driver_dir prefix and the metadata.yml suffix.
    matcher = re.compile("%s/(.*)/metadata.yml" % driver_dir)
    return [matcher.match(f).group(1) for f in metadata_files]
if __name__ == '__main__':
run()
def driver_report(count, duration, success, error, failure):
    """Log a cumulative nosetest summary across all tested drivers."""
    total = success + error + failure
    parts = [
        "\n------------------------ Cumulative Nosetest Result ------------------------\n",
        "Drivers tested: %d\n" % count,
        "Ran %d tests in %.3fs\n\n" % (total, duration),
    ]
    if failure or error:
        details = []
        if error:
            details.append("errors=%d" % error)
        if failure:
            details.append("failure=%d" % failure)
        parts.append("FAILED (" + ", ".join(details) + ")")
    else:
        parts.append("OK")
    parts.append("\n")
    log.warn("".join(parts))
| {
"repo_name": "danmergens/mi-instrument",
"path": "mi/idk/scripts/platform/test_driver.py",
"copies": "11",
"size": "5044",
"license": "bsd-2-clause",
"hash": 4986639524029693000,
"line_mean": 31.7532467532,
"line_max": 91,
"alpha_frac": 0.5729579699,
"autogenerated": false,
"ratio": 3.9810576164167326,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012579277292174226,
"num_lines": 154
} |
__author__ = "Bill Riehl (briehl@gmail.com)"
__version__ = "0.0.1"
__date__ = "$Date: 2014/07/09 $"
from cell import Cell
class Playground(object):
    """
    An abstract Cell playground.
    """
    # NOTE: uses Python 2 print statements — this module is Python 2 only.
    def __init__(self, n):
        """
        This default initializer inits with n random cells.
        In general, initializing a Playground should involve adding
        some number of Cells, or at least initing the structure in which
        Cells will exist.
        """
        self.cells = []
        # Hard-coded coordinate ranges for the randomly placed cells.
        for i in range(0, n):
            self.cells.append(Cell(type="random", xlim=[-10, 10], ylim=[-100, 100], zlim=[-1000, 1000]))
    def play(self):
        """
        Does one step of playing for the Cells in this Playground.
        Should be implemented by subclass to do whatever that calls for.
        """
        print "Playground's open!"
        # Dump every cell's coordinates, one per line.
        for c in self.cells:
            print "%d %d %d" % ( c.x, c.y, c.z )
def neighborhood(self, cell):
"""
Returns a structure representing the nearest neighborhood
around the given cell (excluding that cell)
"""
pass | {
"repo_name": "briehl/cell-playground",
"path": "cellplayground/playground/playground.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": -5507456094921245000,
"line_mean": 30.2222222222,
"line_max": 104,
"alpha_frac": 0.5734639359,
"autogenerated": false,
"ratio": 3.8197278911564627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4893191827056462,
"avg_score": null,
"num_lines": null
} |
__author__ = "Bill Riehl (briehl@gmail.com)"
__version__ = "0.0.1"
__date__ = "$Date: 2014/07/09 $"
from cellplayground.playground.cell import Cell
import unittest
class CellTestCase(unittest.TestCase):
    """Unit tests for the base Cell class: construction by type and by
    explicit location, input validation, and the play() hook."""
    def setUp(self):
        pass
    def test_cell_1d(self):
        # Every supported init type should yield a valid Cell.
        types = ["random", "min", "max"]
        for t in types:
            c = Cell(t, xlim=[-10, 10], ylim=[0,0], zlim=[0,0])
            self.assertIsInstance(c, Cell)
    def test_cell_1d_badtype(self):
        # Unknown init types must raise ValueError.
        with self.assertRaises(ValueError) as err:
            c = Cell("wut", xlim=[-10,10], ylim=[0,0], zlim=[0,0])
    def test_cell_bad_loc(self):
        # Locations must have between 1 and 3 dimensions.
        with self.assertRaises(ValueError) as err:
            c = Cell(loc=[])
        with self.assertRaises(ValueError) as err:
            c = Cell(loc=[1,2,3,4])
    def test_cell_2d(self):
        # A 2-D location leaves z at its default of 0.
        c = Cell(loc=[1, 1])
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 1)
        self.assertEqual(c.z, 0)
    def test_cell_3d(self):
        c = Cell(loc=[1, 1, 1])
        self.assertEqual(c.x, 1)
        self.assertEqual(c.y, 1)
        self.assertEqual(c.z, 1)
    def test_cell_can_play(self):
        # Cell must expose a callable play() hook for subclasses.
        c = Cell()
        self.assertTrue(getattr(c, 'play') and callable(c.play))
| {
"repo_name": "briehl/cell-playground",
"path": "cellplayground/test/test_basecell.py",
"copies": "1",
"size": "1232",
"license": "mit",
"hash": 5401789747439984000,
"line_mean": 28.3333333333,
"line_max": 66,
"alpha_frac": 0.5568181818,
"autogenerated": false,
"ratio": 3.027027027027027,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4083845208827027,
"avg_score": null,
"num_lines": null
} |
__author__ = "Bill Riehl (briehl@gmail.com)"
__version__ = "0.0.1"
__date__ = "$Date: 2014/07/09 $"
import random
class Cell(object):
    """
    A generic (abstract?) Cell class

    A Cell should be initialized with a location, at least.
    Subclasses of Cell should implement the play() function,
    which does an action for a single step.
    """
    # Fix: the default values for loc/xlim/ylim/zlim were mutable lists
    # shared across all calls; tuples remove that hazard while remaining
    # fully backward-compatible (they are only read, never mutated).
    def __init__(self, type=None, loc=(0, 0, 0), xlim=(0, 0), ylim=(0, 0), zlim=(0, 0)):
        """Create a cell at an explicit location, or positioned by type.

        :param type: optional "random" / "min" / "max"; when given it
            overrides loc and positions each axis within its limits
        :param loc: sequence of 1-3 coordinates (missing axes default to 0)
        :param xlim, ylim, zlim: (low, high) pairs used with type
        :raises ValueError: for an empty or >3-dimensional loc, or an
            unknown type
        """
        (self.x, self.y, self.z) = [0, 0, 0]
        if len(loc) < 1:
            raise ValueError("Location must have at least one value")
        if len(loc) > 3:
            raise ValueError("Location can have up to 3 dimensions")
        self.x = loc[0]
        if len(loc) > 1:
            self.y = loc[1]
        if len(loc) == 3:
            self.z = loc[2]
        if type is not None:
            # An explicit init type overrides any position given via loc.
            self.x = self.init_pos(type, xlim)
            self.y = self.init_pos(type, ylim)
            self.z = self.init_pos(type, zlim)

    def init_pos(self, type, lim):
        """Return one coordinate chosen within lim according to type."""
        if type == "random":
            return random.randint(lim[0], lim[1])
        elif type == "min":
            return min(lim)
        elif type == "max":
            return max(lim)
        else:
            raise ValueError("Unknown cell type '{}'".format(type))

    def play(self):
        # implement in subclass!
        pass
| {
"repo_name": "briehl/cell-playground",
"path": "cellplayground/playground/cell.py",
"copies": "1",
"size": "1358",
"license": "mit",
"hash": 3838257006086835700,
"line_mean": 30.5813953488,
"line_max": 83,
"alpha_frac": 0.5257731959,
"autogenerated": false,
"ratio": 3.4035087719298245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9415162366500921,
"avg_score": 0.002823920265780731,
"num_lines": 43
} |
__author__ = 'billryan'
from feedgen.feed import FeedGenerator
class Atom:
    """Builds an Atom feed of recent commits for a GitHub repository
    using the feedgen library."""
    def __init__(self):
        self.atom = True
    def init_fg(self, repo_info):
        """Create a FeedGenerator seeded with repository metadata.

        repo_info must carry 'full_name', 'html_url', 'updated_at'
        and 'author' keys.
        """
        fg = FeedGenerator()
        title = 'Recent commits to ' + repo_info['full_name']
        fg.title(title)
        fg.link(href=repo_info['html_url'])
        fg.updated(repo_info['updated_at'])
        fg.id(repo_info['html_url'])
        fg.author(repo_info['author'])
        return fg
    def add_entry(self, fg, commit_info):
        """Append one commit as a feed entry; returns the feed."""
        fe = fg.add_entry()
        fe.title(commit_info['message'])
        fe.link(href=commit_info['html_url'])
        # Mirrors GitHub's own Atom entry-id scheme for commits.
        id_prefix = 'tag:github.com,2008:Grit::Commit/'
        entry_id = id_prefix + commit_info['sha']
        fe.id(entry_id)
        fe.author(commit_info['author'])
        fe.published(commit_info['updated'])
        fe.updated(commit_info['updated'])
        fe.content(commit_info['diff'])
        return fg
    def gen_atom(self, repo_info, commits_info, atom_fn):
        """Write an Atom file (atom_fn) covering all commits_info entries."""
        fg_repo = self.init_fg(repo_info)
        for commit_info in commits_info:
            self.add_entry(fg_repo, commit_info)
        fg_repo.atom_file(atom_fn)
| {
"repo_name": "billryan/github-rss",
"path": "rss_gen/rss_gen.py",
"copies": "1",
"size": "1185",
"license": "mit",
"hash": -8694095825909693000,
"line_mean": 31.027027027,
"line_max": 61,
"alpha_frac": 0.576371308,
"autogenerated": false,
"ratio": 3.356940509915014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4433311817915014,
"avg_score": null,
"num_lines": null
} |
from flask import Flask
from flask import request, redirect
import requests
app = Flask(__name__)
# CAS OAuth client configuration used by the routes below.
# NOTE(review): security — the client secret is hard-coded in source;
# move it to configuration/environment before any real deployment.
cas = {
    'name': 'demo',
    'secret': '977beed4-ab6f-4e1f-b60c-9d84c60e1d5a',
    'identify': '24a03e6e-d1ad-4f11-bd02-566b06b39481',
};
@app.route('/')
def hello_world():
    """Redirect visitors to the CAS OAuth authorize endpoint."""
    authorize_url = 'http://example.com/public/oauth/authorize?name={name}'.format(name=cas['name'])
    return redirect(authorize_url)
@app.route('/cas/oauth/callback')
def callback():
    """Exchange the OAuth code for user info and greet the user."""
    code = request.args.get('code', '')
    headers = {'authorization': 'oauth {secret}'.format(secret=cas['secret'])}
    user_url = 'http://example.com/oauth/users/self?code={code}'.format(code=code)
    r = requests.get(user_url, headers=headers)
    res = r.json()
    # A non-zero code means the exchange failed; echo the raw value back.
    if res['code'] != 0:
        return res['data']['value']
    return 'hello, big brother: {username}'.format(username=res['data']['value']['username'])
if __name__ == '__main__':
app.run(port=3000, debug=True)
| {
"repo_name": "detailyang/cas-server",
"path": "examples/python/index.py",
"copies": "2",
"size": "1133",
"license": "mit",
"hash": -8316867894907096000,
"line_mean": 28.8157894737,
"line_max": 102,
"alpha_frac": 0.6443071492,
"autogenerated": false,
"ratio": 2.8903061224489797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9453307032496542,
"avg_score": 0.016261247830487647,
"num_lines": 38
} |
__author__ = 'bingxinfan'
# Best Time to Buy and Sell Stocks III
# Best Time to Buy and Sell Stocks IV
# NOTE(review): the block below is a C++ reference solution kept as a
# module-level string literal for documentation only; nothing in this
# module executes it.  The algorithm collects valley/peak pairs of the
# price series on a stack, pushes per-transaction profits into a
# priority queue, then sums the k largest profits.
'''
class Solution {
public:
int maxProfit(int k, vector<int> &prices) {
int n = (int)prices.size(), ret = 0, v, p = 0;
priority_queue<int> profits;
stack<pair<int, int> > vp_pairs;
while (p < n) {
// find next valley/peak pair
for (v = p; v < n - 1 && prices[v] >= prices[v+1]; v++);
for (p = v + 1; p < n && prices[p] >= prices[p-1]; p++);
// save profit of 1 transaction at last v/p pair, if current v is lower than last v
while (!vp_pairs.empty() && prices[v] < prices[vp_pairs.top().first]) {
profits.push(prices[vp_pairs.top().second-1] - prices[vp_pairs.top().first]);
vp_pairs.pop();
}
// save profit difference between 1 transaction (last v and current p) and 2 transactions (last v/p + current v/p),
// if current v is higher than last v and current p is higher than last p
while (!vp_pairs.empty() && prices[p-1] >= prices[vp_pairs.top().second-1]) {
profits.push(prices[vp_pairs.top().second-1] - prices[v]);
v = vp_pairs.top().first;
vp_pairs.pop();
}
vp_pairs.push(pair<int, int>(v, p));
}
// save profits of the rest v/p pairs
while (!vp_pairs.empty()) {
profits.push(prices[vp_pairs.top().second-1] - prices[vp_pairs.top().first]);
vp_pairs.pop();
}
// sum up first k highest profits
for (int i = 0; i < k && !profits.empty(); i++) {
ret += profits.top();
profits.pop();
}
return ret;
}
};
'''
| {
"repo_name": "misscindy/Interview",
"path": "DP_Backtrack_Recursion/LC12x_Stocks.py",
"copies": "1",
"size": "1783",
"license": "cc0-1.0",
"hash": 1286495685343913000,
"line_mean": 36.1458333333,
"line_max": 127,
"alpha_frac": 0.5098149187,
"autogenerated": false,
"ratio": 3.283609576427256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4293424495127256,
"avg_score": null,
"num_lines": null
} |
import re
import sys
def printlist(list):
    """Print each (count, word) pair as 'word : count', one pair per line."""
    for count, word in list:
        print(str(word) + " : " + str(count))
# Count word frequencies in the file named by argv[1] and print them
# grouped by frequency, most frequent first.
# NOTE(review): this shadows the builtins `file` (py2), `dict`, `sum` —
# works, but rename before extending.
with open(sys.argv[1],'r') as file :
    data = file.read()
words = re.compile('[a-zA-Z0-9]+')  # a "word" is a run of alphanumerics
dict = {}
for x in words.findall(data) :
    if x not in dict :
        dict[x] = 1
    else :
        dict[x] += 1
# Re-pack as (count, word) so sorting orders by frequency first.
list1 = dict.items()
list2 = []
for key,value in list1 :
    list2.append((value,key))
list2.sort(reverse = True)
# Walk the sorted list and emit each run of equal counts as one group.
# temp = count of the current run, sum = its length, sum2 = elements
# already printed, flag = "run just ended".
temp = list2[0][0]
sum = 0
flag = 0
sum2 = sum
length = len(list2)
i = 0
while i < length :
    if temp == list2[i][0]:
        sum += 1
    else:
        flag = 1
        temp = list2[i][0]
        i -= 1  # re-examine this element as the start of the next run
    if flag == 1:
        # Print the finished run in reverse slice order.
        if sum2 == 0:
            printlist(list2[sum2 + sum - 1: : -1])
        else:
            printlist(list2[sum2 + sum - 1: sum2 - 1 : -1])
        sum2 += sum
        sum = 0
        flag = 0
    i += 1
# Flush the final run, which the loop above never prints.
printlist(list2[sum2 + sum - 1: sum2 - 1 : -1])
| {
"repo_name": "CADTS-Bachelor/playbook",
"path": "grade-2015/WangPeng/count.py",
"copies": "1",
"size": "1163",
"license": "mit",
"hash": -8695332359487664000,
"line_mean": 22.3125,
"line_max": 63,
"alpha_frac": 0.4530831099,
"autogenerated": false,
"ratio": 2.9140625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38671456098999996,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BisharaKorkor'
import numpy as np
from math import exp, pow, sqrt, pi, fmod
def movingaverage(a, w):
    """Return the moving average of `a` over windows of width `w`.

    Element i of the result is the mean of a[i:i+w]; the output has
    len(a) - w entries.
    """
    averages = []
    for start in range(len(a) - w):
        averages.append(np.mean(a[start:start + w]))
    return averages
def gaussiankernel(sigma, width):
    """Generate a normalized Gaussian kernel with `width` taps.

    The kernel is evaluated at offsets (width/2 - i)/sigma and scaled
    so that its elements sum to one.
    """
    scale = sigma * sqrt(2 * pi)
    raw = []
    for i in range(width):
        z = (width/2 - i) / sigma
        raw.append(exp(-pow(z, 2)/2) / scale)
    # Normalize so the taps sum to one.
    total = np.sum(raw)
    return [value / total for value in raw]
def movingbaseline(array, width):
    """Divide each element by the moving average of the `width` elements
    preceding it (inclusive); the result has len(array) - width entries.
    """
    averages = movingaverage(array, width)
    baselined = []
    for idx, avg in enumerate(averages):
        baselined.append(array[idx + width] / avg)
    return baselined
def exponentialsmoothing(array, alpha):
    """Exponentially smooth `array` with factor `alpha`, seeded with array[0]."""
    smoothed = [array[0]]  # seed value, dropped before returning
    for idx, value in enumerate(array):
        smoothed.append(alpha * value + (1 - alpha) * smoothed[idx])
    return smoothed[1:]
def histogramfrom2Darray(array, nbins):
    """
    Creates histogram of elements from 2 dimensional array
    :param array: input 2 dimensional array
    :param nbins: number of bins so that bin size = (maximum value in array - minimum value in array) / nbins
    the motivation for returning this array is for the purpose of easily plotting with matplotlib
    :return: list of three elements:
        list[0] = length nbins list of integers, a histogram of the array elements
        list[1] = length nbins list of values of array element types, values of the lower end of the bins
        list[2] = [minimum in list, maximum in list]

    Bug fix: the original computed the bin index as int(ab/binsize)-1,
    which placed every value one bin too low and wrapped the minimum
    value into the last bin (index -1).  The index is now the direct
    floor offset, with the maximum clamped into the last bin.

    NOTE(review): binsize is 0 when all elements are equal, which
    raises ZeroDivisionError — same as the original; confirm callers
    never pass constant arrays.
    """
    minimum = np.min(array)
    maximum = np.max(array)
    binsize = (maximum - minimum) / nbins
    # lower edge of each bin
    bins = [minimum + binsize * i for i in range(nbins)]
    histo = [0] * nbins
    for row in array:
        for y in row:
            # Bin containing y; the maximum lands exactly on the upper
            # edge of the last bin, so clamp it in.
            idx = min(int((y - minimum) / binsize), nbins - 1)
            histo[idx] += 1
    return [histo, bins, [minimum, maximum]]
def sum_of_subset(array, x, y, dx, dy):
    """Sum the dx-by-dy rectangle of `array` whose top-left corner is (x, y)."""
    total = 0  # 'sum' shadows the builtin, so use a different local name
    for row in range(x, x + dx):
        for col in range(y, y + dy):
            total += array[row][col]
    return total
def subset(array, x, y, dx, dy):
    """Return the dx-by-dy rectangle of `array` at (x, y) as a flat list,
    in row-major order.

    Bug fix: the original called the non-existent list method ``appen``,
    which raised AttributeError on the first element.
    """
    ss = []
    for ix in range(x, x + dx):
        for iy in range(y, y + dy):
            ss.append(array[ix][iy])
    return ss
| {
"repo_name": "BishKor/pyboon",
"path": "arrayoperations.py",
"copies": "1",
"size": "2659",
"license": "mit",
"hash": 1430280231814263000,
"line_mean": 30.2823529412,
"line_max": 111,
"alpha_frac": 0.6126363294,
"autogenerated": false,
"ratio": 3.408974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4521610688374359,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bitvis AS'
__copyright__ = "Copyright 2017, Bitvis AS"
__version__ = "1.0.0"
__email__ = "support@bitvis.no"
import os
import glob
import fileinput
def print_help():
    """Print the instructions for preparing the input directories."""
    messages = (
        "\rPlease place the VVC which is to be modified into the \"vvc_to_be_modified\" directory",
        "- Place the source files into the \"src\" directory",
        "- Place compile scripts (if applicable) into the \"scripts\" directory\n",
    )
    for message in messages:
        print(message)
def is_input_vhdl_legal(requested_vvc_name):
    """Return True when the name is usable as a VHDL identifier, else False.

    Prints a diagnostic for each rejection.  Names longer than 14
    characters are still accepted but trigger a scope-width warning.
    """
    illegal_chars = set("<->!¤%&/()=?`\´}][{€$£@ ^¨~'*;:.,|§\" ")
    for character in requested_vvc_name:
        if character in illegal_chars:
            print("Input contains illegal VHDL characters. Please try again.")
            return False
    if len(requested_vvc_name) < 1:
        print("Input too short. Please try again.")
        return False
    if len(requested_vvc_name) > 14:
        print("WARNING: Name exceeds default maximum name length, defined in UVVM Utility Library constant C_LOG_SCOPE_WIDTH")
        print("         - Please increase C_LOG_SCOPE_WIDTH in the adaptations_pkg.vhd")
    if requested_vvc_name[0].isdigit() or (requested_vvc_name[0] == '_'):
        print("Input must start with a letter")
        return False
    return True
# Get vvc name and check if it is valid. Repeat until valid name is received.
def get_vvc_name():
    """Prompt until the user supplies a legal VVC name; return it."""
    candidate = input("\rPlease enter the original VVC name: ")
    while is_input_vhdl_legal(candidate.lower()) is False:
        candidate = input("\rPlease enter the original VVC name: ")
    return candidate
# Get vvc name extension and check if it is valid. Repeat until valid extension is received.
def get_name_extension():
    """Prompt until the user supplies a legal VVC name extension; return it."""
    candidate = input("\rPlease enter the new VVC name extension: ")
    while is_input_vhdl_legal(candidate.lower()) is False:
        candidate = input("\rPlease enter the new VVC name extension: ")
    return candidate
# Check if the expected base files in the VVC exists in the vvc_to_be_modified/src/ folder
def expected_base_vvc_files_exists(vvc_name):
    """Return True when both required source files are present, else False.

    Looks for <vvc_name>_vvc.vhd and vvc_methods_pkg.vhd under
    vvc_to_be_modified/src/ relative to the current directory.
    """
    vvc_file = "vvc_to_be_modified/src/"+vvc_name.lower()+"_vvc.vhd"
    if not os.path.isfile(vvc_file):
        print("File "+vvc_name.lower()+"_vvc.vhd was not found in the src directory")
        return False
    if not os.path.isfile("vvc_to_be_modified/src/vvc_methods_pkg.vhd"):
        print("File vvc_methods_pkg.vhd was not found in the src directory")
        return False
    return True
# Check if this is a multi-channel VVC by looking for expected leaf-vvc file names
def is_multi_channel_vvc(vvc_name):
    """Return True when more than one *_vvc.vhd file matches the VVC name."""
    matches = glob.glob("vvc_to_be_modified/src/"+vvc_name.lower()+"*_vvc.vhd")
    return len(matches) > 1
# Get a list of all leaf VVCs
def get_multi_channel_vvcs_as_list(vvc_name):
    """Return the leaf (per-channel) VVC file names for a multi-channel VVC.

    Collects every ``<vvc_name>*_vvc.vhd`` in the source directory as a
    bare file name, with the top-level wrapper VVC removed.

    Bug fix: the original stripped the directory with the hard-coded
    Windows prefix ``'vvc_to_be_modified/src\\\\'``; on POSIX the prefix
    stayed in place, so the following ``remove()`` raised ValueError.
    ``remove()`` also crashed when the wrapper file was absent.
    """
    vvcs_in_src = glob.glob("vvc_to_be_modified/src/"+vvc_name.lower()+"*_vvc.vhd")
    # Reduce full paths to bare file names, portably across OSes.
    vvcs_in_src = [os.path.basename(path) for path in vvcs_in_src]
    # Remove the wrapper VVC, if present at all.
    wrapper_name = vvc_name.lower()+"_vvc.vhd"
    if wrapper_name in vvcs_in_src:
        vvcs_in_src.remove(wrapper_name)
    return vvcs_in_src
# Replace the necessary names in the vvc_methods_pkg
def replace_vvc_methods_pkg(vvc_name, new_name_extension):
    """Rewrite vvc_methods_pkg.vhd in place, appending the new name
    extension to the VVC target names and shared variables.

    Each FileInput pass leaves a '.bak' backup beside the file
    (cleaned up later by remove_backup_files()).
    """
    # Replace targets
    with fileinput.FileInput("vvc_to_be_modified/src/vvc_methods_pkg.vhd", inplace=True, backup='.bak') as file:
        for line in file:
            print(line.replace(vvc_name.upper()+"_VVC", vvc_name.upper()+"_"+new_name_extension.upper()+"_VVC"),
                  end='')
    # Replace shared variables
    with fileinput.FileInput("vvc_to_be_modified/src/vvc_methods_pkg.vhd", inplace=True, backup='.bak') as file:
        for line in file:
            print(line.replace("shared_"+vvc_name.lower(), "shared_"+vvc_name.lower()+"_"+new_name_extension.lower()),
                  end='')
# Replace all names in the VVC
def replace_vvc(file_name, vvc_name, new_name_extension):
    """Rewrite one leaf VVC source file in place, appending the new name
    extension to its entity name, architecture name and shared variables.

    ``file_name`` selects the file ("<file_name>_vvc.vhd"); ``vvc_name``
    is the original base name being replaced.  Each pass leaves a '.bak'
    backup beside the file.
    """
    # Replace entity name
    with fileinput.FileInput("vvc_to_be_modified/src/"+file_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
            as file:
        for line in file:
            print(line.replace("entity "+vvc_name.lower(), "entity "+vvc_name.lower()+"_"+new_name_extension.lower()),
                  end='')
    # Replace architecture name
    with fileinput.FileInput("vvc_to_be_modified/src/"+file_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
            as file:
        for line in file:
            print(line.replace("architecture behave of "+vvc_name.lower(),
                               "architecture behave of "+vvc_name.lower()+"_"+new_name_extension.lower()), end='')
    # Replace shared variables
    with fileinput.FileInput("vvc_to_be_modified/src/"+file_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
            as file:
        for line in file:
            print(line.replace("shared_"+vvc_name.lower(), "shared_"+vvc_name.lower()+"_"+new_name_extension.lower()),
                  end='')
# Get VVC name and channel from the VVC file name
def parse_vvc_name_and_channel_from_file_name(vvc_file_name):
    """Strip the trailing '_vvc.vhd' from a VVC file name."""
    file_suffix = '_vvc.vhd'
    return vvc_file_name.replace(file_suffix, '')
# Get VVC channel from the VVC file name and base name
def parse_channel_from_vvc_name(full_vvc_name, vvc_base_name):
    """Extract the channel part from a leaf VVC file name, given its base name."""
    without_suffix = full_vvc_name.replace('_vvc.vhd', '')
    return without_suffix.replace(vvc_base_name+"_", '')
# Replace all VVC names in the VVC wrapper, if this is a multi-channel VVC
def replace_vvc_wrapper(vvc_name, new_name_extension, list_of_leaf_vvcs):
    """Rewrite the multi-channel wrapper VVC in place: rename its entity,
    its structural architecture, and every leaf-VVC instance declaration
    to carry the new name extension.

    ``list_of_leaf_vvcs`` holds the leaf file names (as returned by
    get_multi_channel_vvcs_as_list); each pass leaves a '.bak' backup.
    """
    # Replace entity name
    with fileinput.FileInput("vvc_to_be_modified/src/"+vvc_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
            as file:
        for line in file:
            print(line.replace("entity "+vvc_name.lower(), "entity "+vvc_name.lower()+"_"+new_name_extension.lower()),
                  end='')
    # Replace architecture name
    with fileinput.FileInput("vvc_to_be_modified/src/"+vvc_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
            as file:
        for line in file:
            print(line.replace("architecture struct of "+vvc_name.lower(),
                               "architecture struct of "+vvc_name.lower()+"_"+new_name_extension.lower()),
                  end='')
    for i in list_of_leaf_vvcs:
        channel = parse_channel_from_vvc_name(i,vvc_name)
        # Replace all instance declarations
        with fileinput.FileInput("vvc_to_be_modified/src/"+vvc_name.lower()+"_vvc.vhd", inplace=True, backup='.bak')\
                as file:
            for line in file:
                print(line.replace("i1_"+vvc_name.lower()+"_"+channel.lower()+": entity work."+vvc_name.lower()+"_"+
                                   channel.lower()+"_vvc",
                                   "i1_"+vvc_name.lower()+"_"+new_name_extension+"_"+channel.lower()+": entity work."+
                                   vvc_name.lower()+"_"+new_name_extension+"_"+channel.lower()+"_vvc"), end='')
# Remove .bak backup files
def remove_backup_files(path):
    """Delete every '.bak' backup file directly inside `path`."""
    for candidate in glob.glob(os.path.join(path, '*')):
        if candidate.endswith(".bak"):
            os.remove(candidate)
# Method which replaces the names in all VVC source files
def replace_vvc_names(vvc_name, new_name_extension, multi_channel_vvc):
    """Apply the name extension to every VVC source file, then clean up
    backups and report success.
    """
    replace_vvc_methods_pkg(vvc_name, new_name_extension)
    if not multi_channel_vvc:
        replace_vvc(vvc_name, vvc_name, new_name_extension)
    else:
        leaf_vvcs = get_multi_channel_vvcs_as_list(vvc_name)
        for leaf in leaf_vvcs:
            replace_vvc(parse_vvc_name_and_channel_from_file_name(leaf), vvc_name, new_name_extension)
        replace_vvc_wrapper(vvc_name, new_name_extension, leaf_vvcs)
    remove_backup_files("vvc_to_be_modified/src/")
    print("Source files successfully modified")
# Replaces the compiled library name in the VVC compile scripts
def replace_script(file_name, vvc_name, new_name_extension):
    """Rewrite one compile script in place, renaming the compiled library
    from bitvis_vip_<vvc_name> to bitvis_vip_<vvc_name>_<extension>.

    Leaves a '.bak' backup beside the script.
    """
    # Replace lib name
    with fileinput.FileInput("vvc_to_be_modified/script/"+file_name, inplace=True, backup='.bak') as file:
        for line in file:
            print(line.replace("quietly set lib_name \"bitvis_vip_"+vvc_name.lower()+"\"",
                               "quietly set lib_name \"bitvis_vip_"+vvc_name.lower()+"_"+new_name_extension.lower()+
                               "\""), end='')
# Checks if the expected scripts exists
def script_exists(script_name):
    """Return True (and report) when the compile script is present."""
    script_path = "vvc_to_be_modified/script/"+script_name
    if os.path.isfile(script_path):
        print("Found script "+ script_name)
        return True
    print("Script "+script_name+" was not found in the script directory. Skipping...")
    return False
# Modify all scripts, if they exist. Will not fail if scripts does not exist.
def replace_script_names(vvc_name, new_name_extension):
    """Update the library name in whichever compile scripts are present."""
    for script in ("compile_bfm.do", "compile_src.do"):
        if script_exists(script):
            replace_script(script, vvc_name, new_name_extension)
    remove_backup_files("vvc_to_be_modified/script/")
# Main entry point for the script
if __name__ == '__main__':
    print_help()
    vvc_name = get_vvc_name()
    multi_channel_vvc = False
    # Exit if the expected files were NOT found
    if not expected_base_vvc_files_exists(vvc_name):
        print("Did not find all necessary files in the src directory")
        exit(1)
    else:
        print("Found the necessary VVC source files")
    new_name_extension = get_name_extension()
    multi_channel_vvc = is_multi_channel_vvc(vvc_name)
    if multi_channel_vvc:
        print("Detected that this is a multi-channel VVC")
    else:
        print("Detected that this is a single channel VVC")
    replace_vvc_names(vvc_name, new_name_extension, multi_channel_vvc)
    replace_script_names(vvc_name, new_name_extension)
    print("\nSUCCESS")
    exit(0)
exit(0) | {
"repo_name": "AndyMcC0/UVVM_All",
"path": "uvvm_vvc_framework/script/vvc_name_modifier/vvc_name_modifier.py",
"copies": "3",
"size": "10061",
"license": "mit",
"hash": 9083887708208410000,
"line_mean": 42.3405172414,
"line_max": 126,
"alpha_frac": 0.6251243286,
"autogenerated": false,
"ratio": 3.3886080215706103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.551373235017061,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bitvis AS'
__copyright__ = "Copyright 2017, Bitvis AS"
__version__ = "1.1.1"
__email__ = "support@bitvis.no"
import os
division_line = "--========================================================================================================================"
class Channel:
    """A named VVC channel and the queues it owns (always at least "cmd")."""

    def __init__(self, name):
        self.name = name              # channel name, e.g. "tx"
        self.queue_names = ["cmd"]    # every channel starts with the cmd queue

    def add_queue(self, name):
        """Register an additional queue on this channel."""
        self.queue_names.append(name)

    def len_of_queue(self):
        """Return the number of queues registered on this channel."""
        return len(self.queue_names)
def print_linefeed(file_handle):
    """Write a single blank line to the generated file."""
    file_handle.write("\n")
def fill_with_n_spaces(used_spaces, required_spaces):
    """Return the padding needed to reach column `required_spaces` from
    column `used_spaces`.

    Returns the empty string when `used_spaces` already exceeds
    `required_spaces` (same as the original's guarded loop).  Idiom fix:
    string multiplication replaces the character-by-character
    concatenation loop.
    """
    return " " * max(0, required_spaces - used_spaces)
# Check if name contains illegal VHDL characters
def is_input_vhdl_legal(requested_vvc_name):
    """Return True when the name is usable as a VHDL identifier, else False.

    Prints a diagnostic for each rejection.  Names longer than 14
    characters are still accepted but trigger a scope-width warning.
    """
    illegal_chars = set("<->!¤%&/()=?`\´}][{€$£@ ^¨~'*;:.,|§\" ")
    for character in requested_vvc_name:
        if character in illegal_chars:
            print("Input contains illegal VHDL characters. Please try again.")
            return False
    if len(requested_vvc_name) < 1:
        print("Input too short. Please try again.")
        return False
    if len(requested_vvc_name) > 14:
        print("WARNING: Name exceeds default maximum name length, defined in UVVM Utility Library constant C_LOG_SCOPE_WIDTH")
        print("         - Please increase C_LOG_SCOPE_WIDTH in the adaptations_pkg.vhd")
    if requested_vvc_name[0].isdigit() or (requested_vvc_name[0] == '_'):
        print("Input must start with a letter")
        return False
    return True
# Ask user if VVC is multi-channel
def is_multi_channel_vvc():
    """Ask whether the VVC uses multiple channels; return 'y' or 'n'."""
    while True:
        choice = input("\rUse multiple, concurrent channels for this VVC? [y/n]: ").lower()
        if choice in ('y', 'n'):
            return choice
        print("Input not accepted. Please use either y or n")
# Get the number of channels in the VVC, if multi-channel VVC.
def get_number_of_channels():
    """Prompt for the channel count; accept only integers in [2, 99]."""
    while True:
        answer = input("\rSet the number of concurrent channels to use [2-99]: ")
        try:
            number_selected = int(answer)
        except ValueError:
            print("Input was not an integer!")
            continue
        if number_selected < 2:
            print("Selected number "+ str(number_selected) + " is too small. Please use a number between 2 and 99")
        elif number_selected > 99:
            print("Selected number "+ str(number_selected) + " is too large. Please use a number between 2 and 99")
        else:
            return number_selected
# Get the channel name and check if it is valid
def get_channel_name():
    """Prompt until the user supplies a legal channel name; return it."""
    candidate = input("\rPlease enter a channel name (e.g. tx or rx): ")
    while is_input_vhdl_legal(candidate.lower()) is False:
        candidate = input("\rPlease enter a channel name (e.g. tx or rx): ")
    return candidate
# Ask user if channel is multi-queue
def is_multi_queue_channel():
    """Ask whether this channel uses several queues; return 'y' or 'n'."""
    while True:
        choice = input("\rUse multiple queues for this channel? [y/n]: ").lower()
        if choice in ('y', 'n'):
            return choice
        print("Input not accepted. Please use either y or n")
# Get the number of queues in the channel, if multi-queue channel.
def get_number_of_queues():
    """Prompt for the queue count (cmd queue included); accept only [2, 99]."""
    while True:
        answer = input("\rSet the number of concurrent queues to use [2-99], included cmd queue: ")
        try:
            number_selected = int(answer)
        except ValueError:
            print("Input was not an integer!")
            continue
        if number_selected < 2:
            print("Selected number "+ str(number_selected) + " is too small. Please use a number between 2 and 99")
        elif number_selected > 99:
            print("Selected number "+ str(number_selected) + " is too large. Please use a number between 2 and 99")
        else:
            return number_selected
# Get the channel name and check if it is valid
def get_queue_name():
    """Prompt until the user supplies a legal queue name; return it."""
    candidate = input("\rPlease enter a queue name (e.g. read): ")
    while is_input_vhdl_legal(candidate.lower()) is False:
        candidate = input("\rPlease enter a queue name (e.g. read): ")
    return candidate
# Get the VVC name and check if it is valid
def get_vvc_name():
    """Prompt until the user supplies a legal VVC name; return it."""
    candidate = input("\rPlease enter the VVC Name (e.g. SBI, UART, axilite): ")
    while is_input_vhdl_legal(candidate.lower()) is False:
        candidate = input("\rPlease enter the VVC Name (e.g. SBI, UART, axilite): ")
    return candidate
# Adds header to the output file
def add_vvc_header(file_handle):
    """Write the generated-file banner at the top of a VVC source file."""
    for text in (division_line,
                 "-- This VVC was generated with Bitvis VVC Generator",
                 division_line):
        file_handle.write(text+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
# Adds included libraries to a leaf VVC
def add_leaf_includes(file_handle, vvc_name):
    """Write the library/context/use clauses required by a leaf VVC,
    including the VVC's own BFM package and the framework support packages.
    """
    file_handle.write("library ieee;\n")
    file_handle.write("use ieee.std_logic_1164.all;\n")
    file_handle.write("use ieee.numeric_std.all;\n")
    print_linefeed(file_handle)
    file_handle.write("library uvvm_util;\n")
    file_handle.write("context uvvm_util.uvvm_util_context;\n")
    print_linefeed(file_handle)
    file_handle.write("library uvvm_vvc_framework;\n")
    file_handle.write("use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;\n")
    print_linefeed(file_handle)
    file_handle.write("use work."+vvc_name.lower()+"_bfm_pkg.all;\n")
    file_handle.write("use work.vvc_methods_pkg.all;\n")
    file_handle.write("use work.vvc_cmd_pkg.all;\n")
    file_handle.write("use work.td_target_support_pkg.all;\n")
    file_handle.write("use work.td_vvc_entity_support_pkg.all;\n")
    file_handle.write("use work.td_cmd_queue_pkg.all;\n")
    file_handle.write("use work.td_result_queue_pkg.all;\n")
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
# Adds included libraries to a wrapper VVC
def add_wrapper_includes(file_handle, vvc_name):
    """Write the library/use clauses required by a multi-channel wrapper VVC
    (only the BFM package — the leaf VVCs pull in the framework packages).
    """
    file_handle.write("library ieee;\n")
    file_handle.write("use ieee.std_logic_1164.all;\n")
    file_handle.write("use ieee.numeric_std.all;\n")
    print_linefeed(file_handle)
    file_handle.write("library uvvm_util;\n")
    file_handle.write("context uvvm_util.uvvm_util_context;\n")
    print_linefeed(file_handle)
    file_handle.write("use work."+vvc_name.lower()+"_bfm_pkg.all;\n")
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
def add_vvc_entity(file_handle, vvc_name, vvc_channel):
    """Write the VHDL entity declaration for a VVC.

    NOTE(review): here ``vvc_channel`` is a plain string ("NA" for the
    single-channel variant), unlike add_architecture_declaration where
    it is a Channel object — confirm call sites when refactoring.
    """
    if vvc_channel != "NA":
        file_handle.write("entity "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc is\n")
    else:
        file_handle.write("entity "+vvc_name.lower()+"_vvc is\n")
    file_handle.write("  generic (\n")
    file_handle.write("    --<USER_INPUT> Insert interface specific generic constants here\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- GC_ADDR_WIDTH : integer range 1 to C_VVC_CMD_ADDR_MAX_LENGTH;\n")
    file_handle.write("    -- GC_DATA_WIDTH : integer range 1 to C_VVC_CMD_DATA_MAX_LENGTH;\n")
    file_handle.write("    GC_INSTANCE_IDX : natural;\n")
    if vvc_channel != "NA":
        file_handle.write("    GC_CHANNEL : t_channel;\n")
    file_handle.write("    GC_"+vvc_name.upper()+"_BFM_CONFIG"+fill_with_n_spaces(vvc_name.__len__(),26)+
                      ": t_"+vvc_name.lower()+"_bfm_config"+fill_with_n_spaces(vvc_name.__len__(),13)+
                      ":= C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT;\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_MAX : natural := 1000;\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD : natural := 950;\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY : t_alert_level := WARNING;\n")
    file_handle.write("    GC_RESULT_QUEUE_COUNT_MAX : natural := 1000;\n")
    file_handle.write("    GC_RESULT_QUEUE_COUNT_THRESHOLD : natural := 950;\n")
    file_handle.write("    GC_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY : t_alert_level := WARNING\n")
    file_handle.write("  );\n")
    file_handle.write("  port (\n")
    file_handle.write("    --<USER_INPUT> Insert BFM interface signals here\n")
    file_handle.write("    -- Example: \n")
    if(vvc_channel == "NA"):
        file_handle.write("    -- "+vvc_name.lower()+"_vvc_if"+fill_with_n_spaces(vvc_name.__len__(),21)+
                          ": inout t_"+vvc_name.lower()+"_if := init_"+vvc_name.lower()+
                          "_if_signals(GC_ADDR_WIDTH, GC_DATA_WIDTH); \n")
    else:
        file_handle.write("    -- "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc_if" +
                          fill_with_n_spaces(vvc_name.__len__()+vvc_channel.__len__(), 20) +
                          ": inout t_"+vvc_name.lower()+"_"+vvc_channel.lower()+"_if := init_"+vvc_name.lower()+
                          "_"+vvc_channel.lower()+"_if_signals(GC_ADDR_WIDTH, GC_DATA_WIDTH); \n")
    file_handle.write("    -- VVC control signals: \n")
    file_handle.write("    -- rst : in std_logic; -- Optional VVC Reset\n")
    file_handle.write("    clk : in std_logic\n")
    file_handle.write("  );\n")
    if vvc_channel != "NA":
        file_handle.write("end entity "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc;\n")
    else:
        file_handle.write("end entity "+vvc_name.lower()+"_vvc;\n")
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
    file_handle.write(division_line+"\n")
def add_architecture_declaration(file_handle, vvc_name, vvc_channel):
    """Write the declarative region of a leaf VVC architecture.

    ``vvc_channel`` is a Channel object; a name of "NA" selects the
    single-channel variant (no GC_CHANNEL in labels and aliases).  Every
    queue beyond "cmd" gets its own busy/increasing signals and shared
    queue variable.
    """
    len_of_queue = vvc_channel.len_of_queue()
    if vvc_channel.name != "NA":
        file_handle.write("architecture behave of "+vvc_name.lower()+"_"+vvc_channel.name.lower()+"_vvc is\n")
    else:
        file_handle.write("architecture behave of "+vvc_name.lower()+"_vvc is\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_SCOPE : string := C_VVC_NAME & \",\" & to_string(GC_INSTANCE_IDX);\n")
    file_handle.write("  constant C_VVC_LABELS : t_vvc_labels := assign_vvc_labels(C_SCOPE, C_VVC_NAME,")
    if vvc_channel.name == "NA":
        file_handle.write(" GC_INSTANCE_IDX, NA);\n")
    else:
        file_handle.write(" GC_INSTANCE_IDX, GC_CHANNEL);\n")
    print_linefeed(file_handle)
    file_handle.write("  signal executor_is_busy : boolean := false;\n")
    file_handle.write("  signal queue_is_increasing : boolean := false;\n")
    file_handle.write("  signal last_cmd_idx_executed : natural := 0;\n")
    if len_of_queue > 1:
        for i in range(1, len_of_queue):
            file_handle.write("  signal "+vvc_channel.queue_names[i]+"_is_busy : boolean := false;\n")
            file_handle.write("  signal "+vvc_channel.queue_names[i]+"_queue_is_increasing : boolean := false;\n")
            file_handle.write("  signal last_"+vvc_channel.queue_names[i]+"_idx_executed : natural := 0;\n")
    file_handle.write("  signal terminate_current_cmd : t_flag_record;\n")
    print_linefeed(file_handle)
    file_handle.write("  -- Instantiation of the element dedicated Queue\n")
    file_handle.write("  shared variable command_queue : work.td_cmd_queue_pkg.t_generic_queue;\n")
    if len_of_queue > 1:
        for i in range(1, len_of_queue):
            file_handle.write("  shared variable "+vvc_channel.queue_names[i]+"_queue : work.td_cmd_queue_pkg.t_generic_queue;\n")
    file_handle.write("  shared variable result_queue : work.td_result_queue_pkg.t_generic_queue;\n")
    print_linefeed(file_handle)
    if vvc_channel.name == "NA":
        file_handle.write("  alias vvc_config : t_vvc_config is shared_"+vvc_name.lower()+"_vvc_config(GC_INSTANCE_IDX);\n")
        file_handle.write("  alias vvc_status : t_vvc_status is shared_"+vvc_name.lower()+"_vvc_status(GC_INSTANCE_IDX);\n")
        file_handle.write("  alias transaction_info : t_transaction_info is "
                          +"shared_"+vvc_name.lower()+"_transaction_info(GC_INSTANCE_IDX);\n")
    else:
        file_handle.write("  alias vvc_config : t_vvc_config is shared_"+vvc_name.lower()+"_vvc_config(GC_CHANNEL, GC_INSTANCE_IDX);\n")
        file_handle.write("  alias vvc_status : t_vvc_status is shared_"+vvc_name.lower()+"_vvc_status(GC_CHANNEL, GC_INSTANCE_IDX);\n")
        file_handle.write("  alias transaction_info : t_transaction_info is "
                          +"shared_"+vvc_name.lower()+"_transaction_info(GC_CHANNEL, GC_INSTANCE_IDX);\n")
    print_linefeed(file_handle)
    file_handle.write("begin\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_wrapper_architecture_declaration(file_handle, vvc_name):
    """Open the structural architecture body of the wrapper VVC."""
    fragments = ["architecture struct of " + vvc_name.lower() + "_vvc is\n",
                 "\n",
                 "begin\n",
                 "\n"]
    for fragment in fragments:
        file_handle.write(fragment)
def add_wrapper_architecture_end(file_handle):
    """Close the structural architecture of the wrapper VVC."""
    file_handle.write("\nend struct;\n\n")
def add_vvc_constructor(file_handle, vvc_name):
    """Write the VVC constructor call that initializes config and queues."""
    file_handle.write(division_line+"\n")
    file_handle.write("-- Constructor\n")
    file_handle.write("-- - Set up the defaults and show constructor if enabled\n")
    file_handle.write(division_line+"\n")
    file_handle.write("  work.td_vvc_entity_support_pkg.vvc_constructor(C_SCOPE, GC_INSTANCE_IDX, vvc_config, command_queue, result_queue, "+
                      "GC_"+vvc_name.upper()+"_BFM_CONFIG,\n")
    file_handle.write("                  GC_CMD_QUEUE_COUNT_MAX, GC_CMD_QUEUE_COUNT_THRESHOLD, "+
                      "GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
    file_handle.write("                  GC_RESULT_QUEUE_COUNT_MAX, GC_RESULT_QUEUE_COUNT_THRESHOLD, "+
                      "GC_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY);\n")
    file_handle.write(division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_vvc_interpreter(file_handle, vvc_channel):
    """Write the VVC command-interpreter process.

    Emits the process that receives commands from the central sequencer,
    queues QUEUED commands for the executor, and services IMMEDIATE
    commands in place.  ``vvc_channel.name == "NA"`` selects the
    single-channel variant; each extra queue in
    ``vvc_channel.queue_names`` gets its own await-completion call.
    """
    len_of_queue = vvc_channel.len_of_queue()
    file_handle.write(division_line+"\n")
    file_handle.write("-- Command interpreter\n")
    file_handle.write("-- - Interpret, decode and acknowledge commands from the central sequencer\n")
    file_handle.write(division_line+"\n")
    file_handle.write("  cmd_interpreter : process\n")
    file_handle.write("     variable v_cmd_has_been_acked : boolean; -- Indicates if acknowledge_cmd() has been called for the current shared_vvc_cmd\n")
    file_handle.write("     variable v_local_vvc_cmd : t_vvc_cmd_record := C_VVC_CMD_DEFAULT;\n")
    file_handle.write("  begin\n")
    print_linefeed(file_handle)
    file_handle.write("  -- 0. Initialize the process prior to first command\n")
    file_handle.write("  work.td_vvc_entity_support_pkg.initialize_interpreter(terminate_current_cmd, global_awaiting_completion);\n")
    file_handle.write("  -- initialise shared_vvc_last_received_cmd_idx for channel and instance\n")
    if vvc_channel.name == "NA":
        file_handle.write("  shared_vvc_last_received_cmd_idx(NA, GC_INSTANCE_IDX) := 0;\n")
    else:
        file_handle.write("  shared_vvc_last_received_cmd_idx(GC_CHANNEL, GC_INSTANCE_IDX) := 0;\n")
    print_linefeed(file_handle)
    file_handle.write("  -- Then for every single command from the sequencer\n")
    file_handle.write("  loop  -- basically as long as new commands are received\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 1. wait until command targeted at this VVC. Must match VVC name, instance and channel"+
                      " (if applicable)\n")
    file_handle.write("      --    releases global semaphore\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      work.td_vvc_entity_support_pkg.await_cmd_from_sequencer(C_VVC_LABELS, vvc_config, THIS_VVCT, "+
                      "VVC_BROADCAST, global_vvc_busy, global_vvc_ack, shared_vvc_cmd, v_local_vvc_cmd);\n")
    file_handle.write("      v_cmd_has_been_acked := false; -- Clear flag\n")
    file_handle.write("      -- update shared_vvc_last_received_cmd_idx with received command index\n")
    if vvc_channel.name == "NA":
        file_handle.write("      shared_vvc_last_received_cmd_idx(NA, GC_INSTANCE_IDX) := v_local_vvc_cmd.cmd_idx;\n")
    else:
        file_handle.write("      shared_vvc_last_received_cmd_idx(GC_CHANNEL, GC_INSTANCE_IDX) := v_local_vvc_cmd.cmd_idx;\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 2a. Put command on the queue if intended for the executor\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      if v_local_vvc_cmd.command_type = QUEUED then\n")
    file_handle.write("        work.td_vvc_entity_support_pkg.put_command_on_queue(v_local_vvc_cmd, command_queue, vvc_status, "+
                      "queue_is_increasing);\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 2b. Otherwise command is intended for immediate response\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      elsif v_local_vvc_cmd.command_type = IMMEDIATE then\n")
    file_handle.write("        case v_local_vvc_cmd.operation is\n")
    print_linefeed(file_handle)
    file_handle.write("          when AWAIT_COMPLETION =>\n")
    file_handle.write("            -- Await completion of all commands in the cmd_executor queue\n")
    file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_completion(v_local_vvc_cmd, command_queue, "
                      "vvc_config, executor_is_busy, C_VVC_LABELS, last_cmd_idx_executed);\n")
    if len_of_queue > 1:
        for i in range(1, len_of_queue):
            queue_name = vvc_channel.queue_names[i]
            file_handle.write("            -- Await completion of all commands in the "+queue_name+" queue\n")
            file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_completion(v_local_vvc_cmd, "+queue_name+"_queue, "
                              "vvc_config, "+queue_name+"_is_busy, C_VVC_LABELS, last_"+queue_name+"_idx_executed);\n")
    print_linefeed(file_handle)
    file_handle.write("          when AWAIT_ANY_COMPLETION =>\n")
    file_handle.write("            if not v_local_vvc_cmd.gen_boolean then\n")
    file_handle.write("              -- Called with lastness = NOT_LAST: Acknowledge immediately to let the sequencer continue\n")
    file_handle.write("              work.td_target_support_pkg.acknowledge_cmd(global_vvc_ack,v_local_vvc_cmd.cmd_idx);\n")
    file_handle.write("              v_cmd_has_been_acked := true;\n")
    file_handle.write("            end if;\n")
    file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_any_completion(v_local_vvc_cmd, command_queue, vvc_config, "+
                      "executor_is_busy, C_VVC_LABELS, last_cmd_idx_executed, global_awaiting_completion);\n")
    print_linefeed(file_handle)
    file_handle.write("          when DISABLE_LOG_MSG =>\n")
    file_handle.write("            uvvm_util.methods_pkg.disable_log_msg(v_local_vvc_cmd.msg_id, vvc_config.msg_id_panel"
                      +", to_string(v_local_vvc_cmd.msg) & format_command_idx(v_local_vvc_cmd), C_SCOPE, v_local_vvc_cmd.quietness);\n")
    print_linefeed(file_handle)
    file_handle.write("          when ENABLE_LOG_MSG =>\n")
    file_handle.write("            uvvm_util.methods_pkg.enable_log_msg(v_local_vvc_cmd.msg_id, vvc_config.msg_id_panel"+
                      ", to_string(v_local_vvc_cmd.msg) & format_command_idx(v_local_vvc_cmd), C_SCOPE, v_local_vvc_cmd.quietness);\n")
    print_linefeed(file_handle)
    file_handle.write("          when FLUSH_COMMAND_QUEUE =>\n")
    file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_flush_command_queue(v_local_vvc_cmd, command_queue"+
                      ", vvc_config, vvc_status, C_VVC_LABELS);\n")
    print_linefeed(file_handle)
    file_handle.write("          when TERMINATE_CURRENT_COMMAND =>\n")
    file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_terminate_current_command(v_local_vvc_cmd, "+
                      "vvc_config, C_VVC_LABELS, terminate_current_cmd);\n")
    print_linefeed(file_handle)
    file_handle.write("          -- when FETCH_RESULT =>\n")
    file_handle.write("          --   work.td_vvc_entity_support_pkg.interpreter_fetch_result(result_queue, v_local_vvc_cmd, "+
                      "vvc_config, C_VVC_LABELS, last_cmd_idx_executed, shared_vvc_response);\n")
    print_linefeed(file_handle)
    file_handle.write("          when others =>\n")
    file_handle.write("            tb_error(\"Unsupported command received for IMMEDIATE execution: '\" & "+
                      "to_string(v_local_vvc_cmd.operation) & \"'\", C_SCOPE);\n")
    print_linefeed(file_handle)
    file_handle.write("        end case;\n")
    print_linefeed(file_handle)
    file_handle.write("      else\n")
    file_handle.write("        tb_error(\"command_type is not IMMEDIATE or QUEUED\", C_SCOPE);\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 3. Acknowledge command after runing or queuing the command\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      if not v_cmd_has_been_acked then\n")
    file_handle.write("        work.td_target_support_pkg.acknowledge_cmd(global_vvc_ack,v_local_vvc_cmd.cmd_idx);\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("  end loop;\n")
    file_handle.write("  end process;\n")
    file_handle.write(division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_vvc_executor(file_handle, vvc_channel):
    """Write the VVC 'cmd_executor' VHDL process to file_handle.

    The emitted process initializes the executor, then loops: fetch a queued
    command, reset/refresh the transaction_info waveview record, optionally
    insert the configured inter-BFM delay, dispatch on v_cmd.operation
    (BFM calls are left as commented <USER_INPUT> examples), and finally
    check for start-to-start inter-BFM delay violations.

    file_handle -- open, writable file object receiving generated VHDL text
    vvc_channel -- channel descriptor; when vvc_channel.len_of_queue() > 1
                   the commented read example uses the pipelined
                   (Avalon-style) variant with a response queue

    NOTE(review): also reads the module-level globals `division_line`,
    `print_linefeed` and `vvc_name` (the latter is NOT a parameter) --
    presumably set up by the surrounding generator script; verify at call
    sites.
    """
    len_of_queue = vvc_channel.len_of_queue()
    file_handle.write(division_line+"\n")
    file_handle.write("-- Command executor\n")
    file_handle.write("-- - Fetch and execute the commands\n")
    file_handle.write(division_line+"\n")
    file_handle.write("  cmd_executor : process\n")
    file_handle.write("    variable v_cmd                                   : t_vvc_cmd_record;\n")
    file_handle.write("    -- variable v_read_data                          : t_vvc_result; -- See vvc_cmd_pkg\n")
    file_handle.write("    variable v_timestamp_start_of_current_bfm_access : time := 0 ns;\n")
    file_handle.write("    variable v_timestamp_start_of_last_bfm_access    : time := 0 ns;\n")
    file_handle.write("    variable v_timestamp_end_of_last_bfm_access      : time := 0 ns;\n")
    file_handle.write("    variable v_command_is_bfm_access                 : boolean;\n")
    file_handle.write("    -- variable v_normalised_addr                    : unsigned(GC_ADDR_WIDTH-1 downto 0) := (others => '0');\n")
    file_handle.write("    -- variable v_normalised_data                    : std_logic_vector(GC_DATA_WIDTH-1 downto 0) := (others => '0');\n")
    file_handle.write("  begin\n")
    print_linefeed(file_handle)
    file_handle.write("    -- 0. Initialize the process prior to first command\n")
    file_handle.write("    -------------------------------------------------------------------------\n")
    file_handle.write("    work.td_vvc_entity_support_pkg.initialize_executor(terminate_current_cmd);\n")
    file_handle.write("    loop\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 1. Set defaults, fetch command and log\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      work.td_vvc_entity_support_pkg.fetch_command_and_prepare_executor(v_cmd, command_queue, vvc_config"+
                      ", vvc_status, queue_is_increasing, executor_is_busy, C_VVC_LABELS);\n")
    print_linefeed(file_handle)
    file_handle.write("      -- Reset the transaction info for waveview\n")
    file_handle.write("      transaction_info := C_TRANSACTION_INFO_DEFAULT;\n")
    file_handle.write("      transaction_info.operation := v_cmd.operation;\n")
    # Adjacent string literals below are implicitly concatenated (no '+').
    file_handle.write("      transaction_info.msg := pad_string(to_string(v_cmd.msg)"
                      ", ' ', transaction_info.msg'length);\n")
    print_linefeed(file_handle)
    file_handle.write("      -- Check if command is a BFM access\n")
    file_handle.write("      --<USER_INPUT> Replace this if statement with a check of the current v_cmd.operation, in "
                      "order to set v_cmd_is_bfm_access to true if this is a BFM access command\n")
    file_handle.write("      -- Example:\n")
    file_handle.write("      -- if v_cmd.operation = WRITE or v_cmd.operation = READ or v_cmd.operation = CHECK or v_cmd.operation = POLL_UNTIL then \n")
    file_handle.write("      if true then -- Replace this line with actual check\n")
    file_handle.write("        v_command_is_bfm_access := true;\n")
    file_handle.write("      else\n")
    file_handle.write("        v_command_is_bfm_access := false;\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("      -- Insert delay if needed\n")
    file_handle.write("      work.td_vvc_entity_support_pkg.insert_inter_bfm_delay_if_requested(vvc_config"
                      "                         => vvc_config,\n")
    file_handle.write("                                                                         "
                      "command_is_bfm_access              => v_command_is_bfm_access,\n")
    file_handle.write("                                                                         "
                      "timestamp_start_of_last_bfm_access => v_timestamp_start_of_last_bfm_access,\n")
    file_handle.write("                                                                         "
                      "timestamp_end_of_last_bfm_access   => v_timestamp_end_of_last_bfm_access,\n")
    file_handle.write("                                                                         "
                      "scope                              => C_SCOPE);\n")
    print_linefeed(file_handle)
    file_handle.write("      if v_command_is_bfm_access then\n")
    file_handle.write("        v_timestamp_start_of_current_bfm_access := now;\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("      -- 2. Execute the fetched command\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      case v_cmd.operation is  -- Only operations in the dedicated record are relevant\n")
    print_linefeed(file_handle)
    file_handle.write("        -- VVC dedicated operations\n")
    file_handle.write("        --===================================\n")
    print_linefeed(file_handle)
    file_handle.write("        --<USER_INPUT>: Insert BFM procedure calls here\n")
    file_handle.write("        -- Example:\n")
    file_handle.write("        -- when WRITE =>\n")
    file_handle.write("        --   v_normalised_addr := normalize_and_check(v_cmd.addr, v_normalised_addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", \""+vvc_name.lower()+"_write() called with to wide address. \" & v_cmd.msg);\n")
    file_handle.write("        --   v_normalised_data := normalize_and_check(v_cmd.data, v_normalised_data, ALLOW_WIDER_NARROWER, \"data\", \"shared_vvc_cmd.data\", \""+vvc_name.lower()+"_write() called with to wide data. \" & v_cmd.msg);\n")
    file_handle.write("        --   -- Add info to the transaction_for_waveview_struct if needed\n")
    file_handle.write("        --   transaction_info.data(GC_DATA_WIDTH - 1 downto 0)  := "
                      "v_normalised_data;\n")
    file_handle.write("        --   transaction_info.addr(GC_ADDR_WIDTH - 1 downto 0)  := "
                      "v_normalised_addr;\n")
    file_handle.write("        --   -- Call the corresponding procedure in the BFM package.\n")
    file_handle.write("        --   "+vvc_name.lower()+"_write(addr_value   => v_normalised_addr,\n")
    file_handle.write("        --             data_value   => v_normalised_data,\n")
    file_handle.write("        --             msg          => format_msg(v_cmd),\n")
    file_handle.write("        --             clk          => clk,\n")
    file_handle.write("        --             "+vvc_name.lower()+"_if       => "+vvc_name.lower()+"_vvc_if,\n")
    file_handle.write("        --             scope        => C_SCOPE,\n")
    file_handle.write("        --             msg_id_panel => vvc_config.msg_id_panel,\n")
    file_handle.write("        --             config       => vvc_config.bfm_config);\n")
    print_linefeed(file_handle)
    # NOTE(review): the emitted example comment below contains the typos
    # "Eksample"/"eg." and "_write() called with to wide address" (in the
    # READ example); preserved verbatim here since this is runtime output.
    if len_of_queue > 1:
        file_handle.write("        -- -- Eksample of pipelined read, eg. Avalon interface.\n")
    else:
        file_handle.write("        -- -- If the result from the BFM call is to be stored, e.g. in a read call, "
                          "use the additional procedure illustrated in this read example\n")
    file_handle.write("        -- when READ =>\n")
    file_handle.write("        --   v_normalised_addr := normalize_and_check(v_cmd.addr, v_normalised_addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", \""+vvc_name.lower()+"_write() called with to wide address. \" & v_cmd.msg);\n")
    file_handle.write("        --   -- Add info to the transaction_for_waveview_struct if needed\n")
    file_handle.write("        --   transaction_info.addr(GC_ADDR_WIDTH - 1 downto 0)  := "
                      "v_normalised_addr;\n")
    file_handle.write("        --   -- Call the corresponding procedure in the BFM package.\n")
    # Pipelined (multi-queue) channels get the request/response split example;
    # single-queue channels get the simple blocking read example.
    if len_of_queue > 1:
        file_handle.write("        --   if vvc_config.use_read_pipeline then\n")
        file_handle.write("        --     -- Stall until response command queue is no longer full\n")
        file_handle.write("        --     while command_response_queue.get_count(VOID) > vvc_config.num_pipeline_stages loop\n")
        file_handle.write("        --       wait for vvc_config.bfm_config.clock_period;\n")
        file_handle.write("        --     end loop;\n")
        file_handle.write("        --     avalon_mm_read_request( addr_value          => v_normalised_addr,\n")
        file_handle.write("        --                             msg                 => format_msg(v_cmd),\n")
        file_handle.write("        --                             clk                 => clk,\n")
        file_handle.write("        --                             avalon_mm_if        => avalon_mm_vvc_master_if,\n")
        file_handle.write("        --                             scope               => C_SCOPE,\n")
        file_handle.write("        --                             msg_id_panel        => vvc_config.msg_id_panel,\n")
        file_handle.write("        --                             config              => vvc_config.bfm_config);\n")
        file_handle.write("        --     work.td_vvc_entity_support_pkg.put_command_on_queue(v_cmd, command_response_queue, vvc_status, response_queue_is_increasing);\n")
        print_linefeed(file_handle)
        file_handle.write("        --   else\n")
        file_handle.write("        --     avalon_mm_read( addr_value          => v_normalised_addr,\n")
        file_handle.write("        --                     data_value          => v_read_data(GC_DATA_WIDTH-1 downto 0),\n")
        file_handle.write("        --                     msg                 => format_msg(v_cmd),\n")
        file_handle.write("        --                     clk                 => clk,\n")
        file_handle.write("        --                     avalon_mm_if        => avalon_mm_vvc_master_if,\n")
        file_handle.write("        --                     scope               => C_SCOPE,\n")
        file_handle.write("        --                     msg_id_panel        => vvc_config.msg_id_panel,\n")
        file_handle.write("        --                     config              => vvc_config.bfm_config);\n")
        file_handle.write("        --     -- Store the result\n")
        file_handle.write("        --     work.td_vvc_entity_support_pkg.store_result(result_queue => result_queue,\n")
        file_handle.write("        --                                                 cmd_idx      => v_cmd.cmd_idx,\n")
        file_handle.write("        --                                                 result       => v_read_data );\n")
        file_handle.write("        --   end if;\n")
    else:
        file_handle.write("        --   "+vvc_name.lower()+"_read(addr_value   => v_normalised_addr,\n")
        file_handle.write("        --            data_value   => v_read_data,\n")
        file_handle.write("        --            msg          => format_msg(v_cmd),\n")
        file_handle.write("        --            clk          => clk,\n")
        file_handle.write("        --            "+vvc_name.lower()+"_if       => "+vvc_name.lower()+"_vvc_if,\n")
        file_handle.write("        --            scope        => C_SCOPE,\n")
        file_handle.write("        --            msg_id_panel => vvc_config.msg_id_panel,\n")
        file_handle.write("        --            config       => vvc_config.bfm_config);\n")
        file_handle.write("        --   -- Store the result\n")
        file_handle.write("        --   work.td_vvc_entity_support_pkg.store_result(instance_idx  => GC_INSTANCE_IDX,\n")
        file_handle.write("        --                                               cmd_idx       => v_cmd.cmd_idx,\n")
        file_handle.write("        --                                               data          => v_read_data);\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("        -- UVVM common operations\n")
    file_handle.write("        --===================================\n")
    file_handle.write("        when INSERT_DELAY =>\n")
    file_handle.write("          log(ID_INSERTED_DELAY, \"Running: \" & to_string(v_cmd.proc_call) & \" \" & "
                      "format_command_idx(v_cmd), C_SCOPE, vvc_config.msg_id_panel);\n")
    file_handle.write("          if v_cmd.gen_integer_array(0) = -1 then\n")
    file_handle.write("            -- Delay specified using time\n")
    file_handle.write("            wait until terminate_current_cmd.is_active = '1' for v_cmd.delay;\n")
    file_handle.write("          else\n")
    file_handle.write("            -- Delay specified using integer\n")
    file_handle.write("            wait until terminate_current_cmd.is_active = '1' for v_cmd.gen_integer_array(0) * vvc_config.bfm_config.clock_period;\n")
    file_handle.write("          end if;\n")
    print_linefeed(file_handle)
    file_handle.write("        when others =>\n")
    file_handle.write("          tb_error(\"Unsupported local command received for execution: '\" & "
                      "to_string(v_cmd.operation) & \"'\", C_SCOPE);\n")
    file_handle.write("      end case;\n")
    print_linefeed(file_handle)
    file_handle.write("      if v_command_is_bfm_access then\n")
    file_handle.write("        v_timestamp_end_of_last_bfm_access := now;\n")
    file_handle.write("        v_timestamp_start_of_last_bfm_access := v_timestamp_start_of_current_bfm_access;\n")
    file_handle.write("        if ((vvc_config.inter_bfm_delay.delay_type = TIME_START2START) and \n")
    file_handle.write("           ((now - v_timestamp_start_of_current_bfm_access) > vvc_config.inter_bfm_delay.delay_in_time)) then\n")
    file_handle.write("          alert(vvc_config.inter_bfm_delay.inter_bfm_delay_violation_severity, \"BFM access exceeded specified "
                      "start-to-start inter-bfm delay, \" & \n")
    file_handle.write("                to_string(vvc_config.inter_bfm_delay.delay_in_time) & \".\", C_SCOPE);\n")
    file_handle.write("        end if;\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("      -- Reset terminate flag if any occurred\n")
    file_handle.write("      if (terminate_current_cmd.is_active = '1') then\n")
    file_handle.write("        log(ID_CMD_EXECUTOR, \"Termination request received\", C_SCOPE, "
                      "vvc_config.msg_id_panel);\n")
    file_handle.write("        uvvm_vvc_framework.ti_vvc_framework_support_pkg.reset_flag(terminate_current_cmd);\n")
    file_handle.write("      end if;\n")
    print_linefeed(file_handle)
    file_handle.write("      last_cmd_idx_executed <= v_cmd.cmd_idx;\n")
    file_handle.write("      -- Reset the transaction info for waveview\n")
    file_handle.write("      transaction_info   := C_TRANSACTION_INFO_DEFAULT;\n")
    print_linefeed(file_handle)
    file_handle.write("    end loop;\n")
    file_handle.write("  end process;\n")
    file_handle.write(division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_vvc_pipeline_step(file_handle, queue_name):
    """Write a pipelined-step VHDL process (e.g. a read-response executor)
    to file_handle.

    The emitted '<queue_name>_executor' process configures its own queue
    with the same settings as the command queue, then loops: fetch a
    command from the <queue_name> queue, dispatch on v_cmd.operation
    (BFM response calls are left as commented <USER_INPUT> examples), and
    update last_read_response_idx_executed.

    file_handle -- open, writable file object receiving generated VHDL text
    queue_name  -- base name of the pipeline queue (e.g. "cmd_response"),
                   also used for the process label and busy/increasing
                   signal names

    NOTE(review): relies on module-level globals `division_line` and
    `print_linefeed`, as the sibling generators do.
    """
    file_handle.write(division_line+"\n")
    file_handle.write("-- Pipelined step\n")
    file_handle.write("-- - Fetch and execute the commands in the "+queue_name+" queue\n")
    file_handle.write(division_line+"\n")
    file_handle.write("  "+queue_name+"_executor : process\n")
    file_handle.write("    variable v_cmd                                   : t_vvc_cmd_record;\n")
    file_handle.write("    -- variable v_read_data                          : t_vvc_result; -- See vvc_cmd_pkg\n")
    file_handle.write("    -- variable v_normalised_addr                    : unsigned(GC_ADDR_WIDTH-1 downto 0) := (others => '0');\n")
    file_handle.write("    -- variable v_normalised_data                    : std_logic_vector(GC_DATA_WIDTH-1 downto 0) := (others => '0');\n")
    file_handle.write("  begin\n")
    file_handle.write("    -- Set the "+queue_name+" queue up with the same settings as the command queue\n")
    file_handle.write("    "+queue_name+"_queue.set_scope(C_SCOPE & \":"+queue_name.upper()+"\");\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_max(vvc_config.cmd_queue_count_max);\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_threshold(vvc_config.cmd_queue_count_threshold);\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_threshold_severity(vvc_config.cmd_queue_count_threshold_severity);\n")
    file_handle.write("    wait for 0 ns;  -- Wait for "+queue_name+" queue to initialize completely\n")
    print_linefeed(file_handle)
    file_handle.write("    loop\n")
    file_handle.write("      -- Fetch commands\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      work.td_vvc_entity_support_pkg.fetch_command_and_prepare_executor(v_cmd, "+queue_name+"_queue, vvc_config"+
                      ", vvc_status, "+queue_name+"_queue_is_increasing, "+queue_name+"_is_busy, C_VVC_LABELS);\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("      -- Execute the fetched command\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      case v_cmd.operation is  -- Only operations in the dedicated record are relevant\n")
    file_handle.write("        --<USER_INPUT>: Insert BFM procedure calls here\n")
    file_handle.write("        -- Example of pipelined step used for read operations on the Avalon interface:\n")
    file_handle.write("        -- when READ =>\n")
    file_handle.write("        --   -- Initiate read response\n")
    file_handle.write("        --   avalon_mm_read_response(addr_value          => v_normalised_addr,\n")
    file_handle.write("        --                           data_value          => v_read_data(GC_DATA_WIDTH-1 downto 0),\n")
    file_handle.write("        --                           msg                 => format_msg(v_cmd),\n")
    file_handle.write("        --                           clk                 => clk,\n")
    file_handle.write("        --                           avalon_mm_if        => avalon_mm_vvc_master_if,\n")
    file_handle.write("        --                           scope               => C_SCOPE,\n")
    file_handle.write("        --                           msg_id_panel        => vvc_config.msg_id_panel,\n")
    file_handle.write("        --                           config              => vvc_config.bfm_config);\n")
    file_handle.write("        --   -- Store the result\n")
    file_handle.write("        --   work.td_vvc_entity_support_pkg.store_result(result_queue  => result_queue,\n")
    file_handle.write("        --                                               cmd_idx       => v_cmd.cmd_idx,\n")
    file_handle.write("        --                                               data          => v_read_data);\n")
    print_linefeed(file_handle)
    file_handle.write("        -- when CHECK =>\n")
    file_handle.write("        --   -- Initiate check response\n")
    file_handle.write("        --   avalon_mm_check_response(addr_value          => v_normalised_addr,\n")
    file_handle.write("        --                            data_exp            => v_normalised_data,\n")
    file_handle.write("        --                            msg                 => format_msg(v_cmd),\n")
    file_handle.write("        --                            clk                 => clk,\n")
    file_handle.write("        --                            avalon_mm_if        => avalon_mm_vvc_master_if,\n")
    file_handle.write("        --                            alert_level         => v_cmd.alert_level,\n")
    file_handle.write("        --                            scope               => C_SCOPE,\n")
    file_handle.write("        --                            msg_id_panel        => vvc_config.msg_id_panel,\n")
    file_handle.write("        --                            config              => vvc_config.bfm_config);\n")
    print_linefeed(file_handle)
    # BUGFIX: the original wrote "when others =>" without a trailing "\n",
    # gluing the following tb_error(...) line onto it in the generated VHDL.
    # Every sibling generator terminates this line (see cmd_executor above).
    file_handle.write("        when others =>\n")
    file_handle.write("          tb_error(\"Unsupported local command received for execution: '\" & to_string(v_cmd.operation) & \"'\", C_SCOPE);\n")
    print_linefeed(file_handle)
    file_handle.write("      end case;\n")
    print_linefeed(file_handle)
    file_handle.write("      last_read_response_idx_executed <= v_cmd.cmd_idx;\n")
    print_linefeed(file_handle)
    file_handle.write("    end loop;\n")
    print_linefeed(file_handle)
    file_handle.write("  end process;\n")
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_vvc_terminator(file_handle):
    """Emit the VHDL command-termination-handler section to file_handle.

    Writes the framed comment header plus the single concurrent
    flag_handler instantiation, then two blank lines.
    """
    section_lines = (
        division_line + "\n",
        "-- Command termination handler\n",
        "-- - Handles the termination request record (sets and resets terminate flag on request)\n",
        division_line + "\n",
        "  cmd_terminator : uvvm_vvc_framework.ti_vvc_framework_support_pkg.flag_handler(terminate_current_cmd);"
        " -- flag: is_active, set, reset\n",
        division_line + "\n",
    )
    for text in section_lines:
        file_handle.write(text)
    for _ in range(2):
        print_linefeed(file_handle)
def add_end_of_architecture(file_handle):
    """Close the generated VHDL architecture body ('end behave;') and
    follow it with two blank lines."""
    file_handle.write("end behave;\n")
    for _ in range(2):
        print_linefeed(file_handle)
def add_leaf_vvc_entity(file_handle, vvc_name, channel):
    """Write the instantiation of one leaf (per-channel) VVC entity to
    file_handle.

    Emits 'i1_<vvc>_<channel>: entity work.<vvc>_<channel>_vvc' with a
    generic map (instance index, channel, BFM config, command-queue
    generics) and a port map stub; interface-specific generics/ports are
    left as commented <USER_INPUT> examples.

    file_handle -- open, writable file object receiving generated VHDL text
    vvc_name    -- VVC base name; lower/upper-cased into identifiers
    channel     -- channel name; lower/upper-cased into identifiers

    NOTE(review): uses module-level helpers `print_linefeed` and
    `fill_with_n_spaces` (the latter pads the BFM-config generic name for
    column alignment).
    """
    print_linefeed(file_handle)
    file_handle.write("  -- " + vvc_name.upper() + " " + channel.upper() + " VVC\n")
    file_handle.write("  i1_"+vvc_name.lower()+"_"+channel.lower()+": entity work."+vvc_name.lower()+"_"+
                      channel.lower()+"_vvc\n")
    file_handle.write("  generic map(\n")
    file_handle.write("    --<USER_INPUT> Insert interface specific generic constants here\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- GC_DATA_WIDTH                         => GC_DATA_WIDTH,\n")
    file_handle.write("    GC_INSTANCE_IDX                          => GC_INSTANCE_IDX,\n")
    file_handle.write("    GC_CHANNEL                               => "+channel.upper()+",\n")
    file_handle.write("    GC_"+vvc_name.upper()+"_BFM_CONFIG"+fill_with_n_spaces(vvc_name.__len__(),28)+
                      "=> GC_"+vvc_name.upper()+"_BFM_CONFIG,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_MAX                   => GC_CMD_QUEUE_COUNT_MAX,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD             => GC_CMD_QUEUE_COUNT_THRESHOLD,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY    => GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY\n")
    file_handle.write("  )\n")
    file_handle.write("  port map(\n")
    file_handle.write("  --<USER_INPUT> Please insert the proper interface needed for this leaf VVC\n")
    file_handle.write("  -- Example:\n")
    file_handle.write("  -- " + vvc_name.lower() + "_vvc_" + channel.lower() + "         => " +
                      vvc_name.lower()+"_vvc_if." + vvc_name.lower()+"_vvc_"+ channel.lower()+",\n")
    file_handle.write("  -- rst                  => rst,  -- Optional VVC Reset\n")
    file_handle.write("    clk                  => clk\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
def add_vvc_cmd_pkg_includes(file_handle):
    """Emit the library/context/use clauses for vvc_cmd_pkg, followed by
    a double division-line separator."""
    clause_groups = (
        ("library ieee;\n",
         "use ieee.std_logic_1164.all;\n",
         "use ieee.numeric_std.all;\n"),
        ("library uvvm_util;\n",
         "context uvvm_util.uvvm_util_context;\n"),
        ("library uvvm_vvc_framework;\n",
         "use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;\n"),
    )
    # One blank line after each clause group, per the generated file layout.
    for group in clause_groups:
        for clause in group:
            file_handle.write(clause)
        print_linefeed(file_handle)
    file_handle.write(division_line + "\n")
    file_handle.write(division_line + "\n")
def add_vvc_cmd_pkg_header(file_handle):
    """Write the complete declarative part of the generated vvc_cmd_pkg
    VHDL package to file_handle.

    Emits, in order: the t_operation enum, size constants, the
    t_vvc_cmd_record type and its C_VVC_CMD_DEFAULT, the shared_vvc_cmd
    variable, the result/response types (t_vvc_result,
    t_vvc_result_queue_element, t_vvc_response, shared_vvc_response), and
    the last-received-command-index type/shared variable, closed by
    'end package vvc_cmd_pkg;'. User-extensible spots are marked with
    --<USER_INPUT> comments in the output.

    file_handle -- open, writable file object receiving generated VHDL text

    NOTE(review): relies on module-level globals `division_line` and
    `print_linefeed`.
    """
    file_handle.write("package vvc_cmd_pkg is\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_operation\n")
    file_handle.write("  -- - VVC and BFM operations\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_operation is (\n")
    file_handle.write("    NO_OPERATION,\n")
    file_handle.write("    AWAIT_COMPLETION,\n")
    file_handle.write("    AWAIT_ANY_COMPLETION,\n")
    file_handle.write("    ENABLE_LOG_MSG,\n")
    file_handle.write("    DISABLE_LOG_MSG,\n")
    file_handle.write("    FLUSH_COMMAND_QUEUE,\n")
    file_handle.write("    FETCH_RESULT,\n")
    file_handle.write("    INSERT_DELAY,\n")
    file_handle.write("    TERMINATE_CURRENT_COMMAND\n")
    file_handle.write("    --<USER_INPUT> Expand this type with enums for BFM procedures.\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- TRANSMIT, RECEIVE, EXPECT\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  --<USER_INPUT> Create constants for the maximum sizes to use in this VVC.\n")
    file_handle.write("  -- You can create VVCs with smaller sizes than these constants, but not larger.\n")
    file_handle.write("  -- For example, given a VVC with parallel data bus and address bus, constraints should be "
                      "added for maximum data length\n")
    file_handle.write("  -- and address length \n")
    file_handle.write("  -- Example:\n")
    file_handle.write("  constant C_VVC_CMD_DATA_MAX_LENGTH   : natural := 8;\n")
    file_handle.write("  -- constant C_VVC_CMD_ADDR_MAX_LENGTH   : natural := 8;\n")
    file_handle.write("  constant C_VVC_CMD_STRING_MAX_LENGTH : natural := 300;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_vvc_cmd_record\n")
    file_handle.write("  -- - Record type used for communication with the VVC\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_vvc_cmd_record is record\n")
    file_handle.write("    -- VVC dedicated fields\n")
    file_handle.write("    --<USER_INPUT> Insert all data types needed to transport data to the BFM here.\n")
    file_handle.write("    -- This includes data field, address field, constraints (e.g. timeout), etc.\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- data                : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
    file_handle.write("    -- max_receptions      : integer;\n")
    file_handle.write("    -- timeout             : time;\n")
    file_handle.write("    -- Common VVC fields\n")
    file_handle.write("    operation             : t_operation;\n")
    file_handle.write("    proc_call             : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
    file_handle.write("    msg                   : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
    file_handle.write("    cmd_idx               : natural;\n")
    file_handle.write("    command_type          : t_immediate_or_queued;\n")
    file_handle.write("    msg_id                : t_msg_id;\n")
    file_handle.write("    gen_integer_array     : t_integer_array(0 to 1); -- Increase array length if needed\n")
    file_handle.write("    gen_boolean           : boolean; -- Generic boolean\n")
    file_handle.write("    timeout               : time;\n")
    file_handle.write("    alert_level           : t_alert_level;\n")
    file_handle.write("    delay                 : time;\n")
    file_handle.write("    quietness             : t_quietness;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_VVC_CMD_DEFAULT : t_vvc_cmd_record := (\n")
    file_handle.write("    --<USER_INPUT> Set the fields you added to the t_vvc_cmd_record above to their default "
                      "value here\n")
    file_handle.write("    -- Example:\n")
    file_handle.write("    -- data                => (others => '0'),\n")
    file_handle.write("    -- max_receptions      => 1,\n")
    file_handle.write("    -- timeout             => 0 ns,\n")
    file_handle.write("    -- Common VVC fields\n")
    file_handle.write("    operation             => NO_OPERATION,\n")
    file_handle.write("    proc_call             => (others => NUL),\n")
    file_handle.write("    msg                   => (others => NUL),\n")
    file_handle.write("    cmd_idx               => 0,\n")
    file_handle.write("    command_type          => NO_COMMAND_TYPE,\n")
    file_handle.write("    msg_id                => NO_ID,\n")
    file_handle.write("    gen_integer_array     => (others => -1),\n")
    file_handle.write("    gen_boolean           => false,\n")
    file_handle.write("    timeout               => 0 ns,\n")
    file_handle.write("    alert_level           => FAILURE,\n")
    file_handle.write("    delay                 => 0 ns,\n")
    file_handle.write("    quietness             => NON_QUIET\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- shared_vvc_cmd\n")
    file_handle.write("  -- - Shared variable used for transmitting VVC commands\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  shared variable shared_vvc_cmd : t_vvc_cmd_record := C_VVC_CMD_DEFAULT;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_vvc_result, t_vvc_result_queue_element, t_vvc_response and shared_vvc_response :\n")
    file_handle.write("  -- \n")
    file_handle.write("  -- - Used for storing the result of a BFM procedure called by the VVC,\n")
    file_handle.write("  --   so that the result can be transported from the VVC to for example a sequencer via\n")
    file_handle.write("  --   fetch_result() as described in VVC_Framework_common_methods_QuickRef\n")
    file_handle.write("  -- \n")
    file_handle.write("  -- - t_vvc_result includes the return value of the procedure in the BFM.\n")
    file_handle.write("  --   It can also be defined as a record if multiple values shall be transported from the BFM\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  subtype  t_vvc_result is std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_result_queue_element is record\n")
    file_handle.write("    cmd_idx       : natural;   -- from UVVM handshake mechanism\n")
    file_handle.write("    result        : t_vvc_result;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_response is record\n")
    file_handle.write("    fetch_is_accepted    : boolean;\n")
    file_handle.write("    transaction_result   : t_transaction_result;\n")
    file_handle.write("    result               : t_vvc_result;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  shared variable shared_vvc_response : t_vvc_response;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_last_received_cmd_idx : \n")
    file_handle.write("  -- - Used to store the last queued cmd in vvc interpreter.\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_last_received_cmd_idx is array (t_channel range <>,natural range <>) of integer;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- shared_vvc_last_received_cmd_idx\n")
    file_handle.write("  --  - Shared variable used to get last queued index from vvc to sequencer\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  shared variable shared_vvc_last_received_cmd_idx : t_last_received_cmd_idx"
                      "(t_channel'left to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM) := (others => (others => -1));\n")
    print_linefeed(file_handle)
    file_handle.write("end package vvc_cmd_pkg;\n")
    print_linefeed(file_handle)
def add_vvc_cmd_pkg_body(file_handle):
    """Emit the (empty) package body for vvc_cmd_pkg, framed by one blank
    line before and after."""
    print_linefeed(file_handle)
    for vhdl_line in ("package body vvc_cmd_pkg is\n",
                      "end package body vvc_cmd_pkg;\n"):
        file_handle.write(vhdl_line)
    print_linefeed(file_handle)
def add_methods_pkg_includes(file_handle, vvc_name):
    """Emit the library/context/use clauses for vvc_methods_pkg (including
    the VVC-specific BFM package), followed by a double division-line
    separator.

    vvc_name -- VVC base name; lower-cased into the BFM package name.
    """
    clause_groups = (
        ("library ieee;\n",
         "use ieee.std_logic_1164.all;\n",
         "use ieee.numeric_std.all;\n"),
        ("library uvvm_util;\n",
         "context uvvm_util.uvvm_util_context;\n"),
        ("library uvvm_vvc_framework;\n",
         "use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;\n"),
        ("use work." + vvc_name.lower() + "_bfm_pkg.all;\n",
         "use work.vvc_cmd_pkg.all;\n",
         "use work.td_target_support_pkg.all;\n"),
    )
    # One blank line after each clause group, per the generated file layout.
    for group in clause_groups:
        for clause in group:
            file_handle.write(clause)
        print_linefeed(file_handle)
    file_handle.write(division_line + "\n")
    file_handle.write(division_line + "\n")
def add_methods_pkg_header(file_handle, vvc_name, vvc_channels):
file_handle.write("package vvc_methods_pkg is\n")
print_linefeed(file_handle)
file_handle.write(" "+division_line+"\n")
file_handle.write(" -- Types and constants for the "+vvc_name.upper()+" VVC \n")
file_handle.write(" "+division_line+"\n")
file_handle.write(" constant C_VVC_NAME : string := \""+vvc_name.upper()+"_VVC\";\n")
print_linefeed(file_handle)
file_handle.write(" signal "+vvc_name.upper()+"_VVCT"+fill_with_n_spaces(vvc_name.__len__(),13)+
": t_vvc_target_record := set_vvc_target_defaults(C_VVC_NAME);\n")
file_handle.write(" alias THIS_VVCT : t_vvc_target_record is "+vvc_name.upper()+"_VVCT;\n")
file_handle.write(" alias t_bfm_config is t_"+vvc_name.lower()+"_bfm_config;\n")
print_linefeed(file_handle)
file_handle.write(" -- Type found in UVVM-Util types_pkg\n")
file_handle.write(" constant C_"+vvc_name.upper()+"_INTER_BFM_DELAY_DEFAULT : t_inter_bfm_delay := (\n")
file_handle.write(" delay_type => NO_DELAY,\n")
file_handle.write(" delay_in_time => 0 ns,\n")
file_handle.write(" inter_bfm_delay_violation_severity => WARNING\n")
file_handle.write(" );\n")
print_linefeed(file_handle)
file_handle.write(" type t_vvc_config is\n")
file_handle.write(" record\n")
file_handle.write(" inter_bfm_delay : t_inter_bfm_delay;-- Minimum delay between BFM "+
"accesses from the VVC. If parameter delay_type is set to NO_DELAY, BFM accesses will be back to back, i.e. no delay.\n")
file_handle.write(" cmd_queue_count_max : natural; -- Maximum pending number in command "+
"queue before queue is full. Adding additional commands will result in an ERROR.\n")
file_handle.write(" cmd_queue_count_threshold : natural; -- An alert with severity 'cmd_queue_count_threshold_severity' "+
"will be issued if command queue exceeds this count. Used for early warning if command queue is almost full. Will be ignored if set to 0.\n")
file_handle.write(" cmd_queue_count_threshold_severity : t_alert_level; -- Severity of alert to be initiated if exceeding cmd_queue_count_threshold\n")
file_handle.write(" result_queue_count_max : natural;\n")
file_handle.write(" result_queue_count_threshold_severity : t_alert_level;\n")
file_handle.write(" result_queue_count_threshold : natural;\n")
file_handle.write(" bfm_config : t_"+vvc_name.lower()+"_bfm_config; -- Configuration for the BFM. See BFM quick reference\n")
file_handle.write(" msg_id_panel : t_msg_id_panel; -- VVC dedicated message ID panel\n")
file_handle.write(" end record;\n")
print_linefeed(file_handle)
if vvc_channels.__len__() == 1:
file_handle.write(" type t_vvc_config_array is array (natural range <>) of t_vvc_config;\n")
else:
file_handle.write(" type t_vvc_config_array is array (t_channel range <>, natural range <>) of t_vvc_config;\n")
print_linefeed(file_handle)
file_handle.write(" constant C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT : t_vvc_config := (\n")
file_handle.write(" inter_bfm_delay => C_"+vvc_name.upper()+"_INTER_BFM_DELAY_DEFAULT,\n")
file_handle.write(" cmd_queue_count_max => C_CMD_QUEUE_COUNT_MAX, -- from adaptation package\n")
file_handle.write(" cmd_queue_count_threshold => C_CMD_QUEUE_COUNT_THRESHOLD,\n")
file_handle.write(" cmd_queue_count_threshold_severity => C_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
file_handle.write(" result_queue_count_max => C_RESULT_QUEUE_COUNT_MAX,\n")
file_handle.write(" result_queue_count_threshold_severity => C_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
file_handle.write(" result_queue_count_threshold => C_RESULT_QUEUE_COUNT_THRESHOLD,\n")
file_handle.write(" bfm_config => C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT,\n")
file_handle.write(" msg_id_panel => C_VVC_MSG_ID_PANEL_DEFAULT\n")
file_handle.write(" );\n")
print_linefeed(file_handle)
file_handle.write(" type t_vvc_status is\n")
file_handle.write(" record\n")
file_handle.write(" current_cmd_idx : natural;\n")
file_handle.write(" previous_cmd_idx : natural;\n")
file_handle.write(" pending_cmd_cnt : natural;\n")
file_handle.write(" end record;\n")
print_linefeed(file_handle)
if vvc_channels.__len__() == 1:
file_handle.write(" type t_vvc_status_array is array (natural range <>) of t_vvc_status;\n")
else:
file_handle.write(" type t_vvc_status_array is array (t_channel range <>, natural range <>) of t_vvc_status;\n")
print_linefeed(file_handle)
file_handle.write(" constant C_VVC_STATUS_DEFAULT : t_vvc_status := (\n")
file_handle.write(" current_cmd_idx => 0,\n")
file_handle.write(" previous_cmd_idx => 0,\n")
file_handle.write(" pending_cmd_cnt => 0\n")
file_handle.write(" );\n")
print_linefeed(file_handle)
file_handle.write(" -- Transaction information to include in the wave view during simulation\n")
file_handle.write(" type t_transaction_info is\n")
file_handle.write(" record\n")
file_handle.write(" operation : t_operation;\n")
file_handle.write(" msg : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
file_handle.write(" --<USER_INPUT> Fields that could be useful to track in the waveview can be placed in this "
"record.\n")
file_handle.write(" -- Example:\n")
file_handle.write(" -- addr : unsigned(C_VVC_CMD_ADDR_MAX_LENGTH-1 downto 0);\n")
file_handle.write(" -- data : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
file_handle.write(" end record;\n")
print_linefeed(file_handle)
if vvc_channels.__len__() == 1:
file_handle.write(" type t_transaction_info_array is array (natural range <>) of "
"t_transaction_info;\n")
else:
file_handle.write(" type t_transaction_info_array is array (t_channel range <>, "
"natural range <>) of t_transaction_info;\n")
print_linefeed(file_handle)
file_handle.write(" constant C_TRANSACTION_INFO_DEFAULT : t_transaction_info := (\n")
file_handle.write(" --<USER_INPUT> Set the data fields added to the t_transaction_info record to \n")
file_handle.write(" -- their default values here.\n")
file_handle.write(" -- Example:\n")
file_handle.write(" -- addr => (others => '0'),\n")
file_handle.write(" -- data => (others => '0'),\n")
file_handle.write(" operation => NO_OPERATION,\n"),
file_handle.write(" msg => (others => ' ')\n")
file_handle.write(" );\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
if vvc_channels.__len__() == 1:
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_vvc_config : t_vvc_config_array(0 to "
"C_MAX_VVC_INSTANCE_NUM-1) := (others => C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT);\n")
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_vvc_status : t_vvc_status_array(0 to "
"C_MAX_VVC_INSTANCE_NUM-1) := (others => C_VVC_STATUS_DEFAULT);\n")
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_transaction_info : "
"t_transaction_info_array(0 to C_MAX_VVC_INSTANCE_NUM-1) := "
"(others => C_TRANSACTION_INFO_DEFAULT);\n")
else:
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_vvc_config : t_vvc_config_array(t_channel'left"
" to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => "
"C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT));\n")
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_vvc_status : t_vvc_status_array(t_channel'left"
" to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => "
"C_VVC_STATUS_DEFAULT));\n")
file_handle.write(" shared variable shared_"+vvc_name.lower()+"_transaction_info : "
"t_transaction_info_array(t_channel'left to t_channel'right, 0 to "
"C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => C_TRANSACTION_INFO_DEFAULT));\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
file_handle.write(" "+division_line+"\n")
file_handle.write(" -- Methods dedicated to this VVC \n")
file_handle.write(" -- - These procedures are called from the testbench in order to queue BFM calls \n")
file_handle.write(" -- in the VVC command queue. The VVC will store and forward these calls to the\n")
file_handle.write(" -- "+vvc_name.upper()+" BFM when the command is at the from of the VVC command queue.\n")
file_handle.write(" "+division_line+"\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
file_handle.write(" --<USER_INPUT> Please insert the VVC procedure declarations here \n")
file_handle.write(" --Example with single VVC channel: \n")
file_handle.write(" -- procedure "+vvc_name.lower()+"_write(\n")
file_handle.write(" -- signal VVCT : inout t_vvc_target_record;\n")
file_handle.write(" -- constant vvc_instance_idx : in integer;\n")
file_handle.write(" -- constant addr : in unsigned;\n")
file_handle.write(" -- constant data : in std_logic_vector;\n")
file_handle.write(" -- constant msg : in string\n")
file_handle.write(" -- );\n")
print_linefeed(file_handle)
file_handle.write(" --Example with multiple VVC channels: \n")
file_handle.write(" -- procedure "+vvc_name.lower()+"_write(\n")
file_handle.write(" -- signal VVCT : inout t_vvc_target_record;\n")
file_handle.write(" -- constant vvc_instance_idx : in integer;\n")
file_handle.write(" -- constant channel : in t_channel;\n")
file_handle.write(" -- constant addr : in unsigned;\n")
file_handle.write(" -- constant data : in std_logic_vector;\n")
file_handle.write(" -- constant msg : in string\n")
file_handle.write(" -- );\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
file_handle.write("end package vvc_methods_pkg;\n")
print_linefeed(file_handle)
def add_methods_pkg_body(file_handle, vvc_name):
    """Write the body of vvc_methods_pkg to *file_handle*.

    The body contains only commented-out example procedures — a
    single-channel ``<vvc_name>_write`` and a multi-channel
    ``<vvc_name>_receive`` — that the user is expected to adapt into
    real VVC command procedures.

    Args:
        file_handle: writable file object receiving the generated VHDL.
        vvc_name: VVC name used to derive the example procedure names.
    """
    print_linefeed(file_handle)
    file_handle.write("package body vvc_methods_pkg is\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write(" "+division_line+"\n")
    file_handle.write(" -- Methods dedicated to this VVC\n")
    file_handle.write(" "+division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write(" --<USER_INPUT> Please insert the VVC procedure implementations here.\n")
    file_handle.write(" -- These procedures will be used to forward commands to the VVC executor, which will\n")
    file_handle.write(" -- call the corresponding BFM procedures. \n")
    file_handle.write(" -- Example using single channel:\n")
    file_handle.write(" -- procedure "+vvc_name.lower()+"_write( \n")
    file_handle.write(" -- signal VVCT : inout t_vvc_target_record;\n")
    file_handle.write(" -- constant vvc_instance_idx : in integer;\n")
    file_handle.write(" -- constant addr : in unsigned;\n")
    file_handle.write(" -- constant data : in std_logic_vector;\n")
    file_handle.write(" -- constant msg : in string\n")
    file_handle.write(" -- ) is\n")
    file_handle.write(" -- constant proc_name : string := \""+vvc_name.lower()+"_write\";\n")
    file_handle.write(" -- constant proc_call : string := proc_name & \"(\" & to_string(VVCT, "
                      "vvc_instance_idx) -- First part common for all\n")
    file_handle.write(" -- & \", \" & to_string(addr, HEX, AS_IS, INCL_RADIX) & \", \" & "
                      "to_string(data, HEX, AS_IS, INCL_RADIX) & \")\";\n")
    # Typo fixed in the generated example text: "to wide" -> "too wide".
    file_handle.write(" -- constant v_normalised_addr : unsigned(C_VVC_CMD_ADDR_MAX_LENGTH-1 downto 0) := \n"
                      " -- normalize_and_check(addr, shared_vvc_cmd.addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", "
                      "proc_call & \" called with too wide addr. \" & msg);\n")
    file_handle.write(" -- constant v_normalised_data : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0) := \n"
                      " -- normalize_and_check(data, shared_vvc_cmd.data, ALLOW_WIDER_NARROWER, \"data\", \"shared_vvc_cmd.data\", "
                      "proc_call & \" called with too wide data. \" & msg);\n")
    file_handle.write(" -- begin\n")
    file_handle.write(" -- -- Create command by setting common global 'VVCT' signal record and dedicated VVC 'shared_vvc_cmd' record\n")
    file_handle.write(" -- -- locking semaphore in set_general_target_and_command_fields to gain exclusive right to VVCT and shared_vvc_cmd\n")
    file_handle.write(" -- -- semaphore gets unlocked in await_cmd_from_sequencer of the targeted VVC\n")
    file_handle.write(" -- set_general_target_and_command_fields(VVCT, vvc_instance_idx, proc_call, msg, "
                      "QUEUED, WRITE);\n")
    file_handle.write(" -- shared_vvc_cmd.addr := v_normalised_addr;\n")
    file_handle.write(" -- shared_vvc_cmd.data := v_normalised_data;\n")
    file_handle.write(" -- send_command_to_vvc(VVCT);\n")
    file_handle.write(" -- end procedure;\n")
    print_linefeed(file_handle)
    file_handle.write(" -- Example using multiple channels:\n")
    file_handle.write(" -- procedure "+vvc_name.lower()+"_receive(\n")
    file_handle.write(" -- signal VVCT : inout t_vvc_target_record;\n")
    file_handle.write(" -- constant vvc_instance_idx : in integer;\n")
    file_handle.write(" -- constant channel : in t_channel;\n")
    file_handle.write(" -- constant msg : in string;\n")
    file_handle.write(" -- constant alert_level : in t_alert_level := ERROR\n")
    file_handle.write(" -- ) is\n")
    file_handle.write(" -- constant proc_name : string := \""+vvc_name.lower()+"_receive\";\n")
    file_handle.write(" -- constant proc_call : string := proc_name & \"(\" & "
                      "to_string(VVCT, vvc_instance_idx, channel) & \")\";\n")
    file_handle.write(" -- begin\n")
    file_handle.write(" -- -- Create command by setting common global 'VVCT' signal record and dedicated VVC 'shared_vvc_cmd' record\n")
    file_handle.write(" -- -- locking semaphore in set_general_target_and_command_fields to gain exclusive right to VVCT and shared_vvc_cmd\n")
    file_handle.write(" -- -- semaphore gets unlocked in await_cmd_from_sequencer of the targeted VVC\n")
    file_handle.write(" -- set_general_target_and_command_fields(VVCT, vvc_instance_idx, channel, proc_call, msg, "
                      "QUEUED, RECEIVE);\n")
    file_handle.write(" -- shared_vvc_cmd.operation := RECEIVE;\n")
    file_handle.write(" -- shared_vvc_cmd.alert_level := alert_level;\n")
    file_handle.write(" -- send_command_to_vvc(VVCT);\n")
    file_handle.write(" -- end procedure;\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("end package body vvc_methods_pkg;\n")
def add_bfm_pkg_includes(file_handle):
    """Write the library/context clauses that precede the BFM package."""
    # Standard IEEE libraries used by the generated BFM code.
    for clause in ("library ieee;\n",
                   "use ieee.std_logic_1164.all;\n",
                   "use ieee.numeric_std.all;\n"):
        file_handle.write(clause)
    print_linefeed(file_handle)
    # UVVM utility library and its context.
    file_handle.write("library uvvm_util;\n")
    file_handle.write("context uvvm_util.uvvm_util_context;\n")
    print_linefeed(file_handle)
    # Double divider separating the include section from the package.
    for _ in range(2):
        file_handle.write(division_line+"\n")
def add_bfm_pkg_header(file_handle, vvc_name):
    """Write the declarative region of <vvc_name>_bfm_pkg.

    Emits the package header with C_SCOPE, a commented-out example
    interface record, the BFM configuration record with its default
    constant, and placeholders for the user's BFM procedure
    declarations.

    Args:
        file_handle: writable file object receiving the generated VHDL.
        vvc_name: VVC name used in identifiers and comments.
    """
    file_handle.write("package "+vvc_name.lower()+"_bfm_pkg is\n")
    print_linefeed(file_handle)
    file_handle.write(" " + division_line+"\n")
    file_handle.write(" -- Types and constants for "+vvc_name.upper()+" BFM \n")
    file_handle.write(" " + division_line+"\n")
    file_handle.write(" constant C_SCOPE : string := \""+vvc_name.upper()+" BFM\";\n")
    print_linefeed(file_handle)
    # Commented-out template: record grouping the DUT-facing signals.
    file_handle.write(" -- Optional interface record for BFM signals\n")
    file_handle.write(" -- type t_"+vvc_name.lower()+"_if is record\n")
    file_handle.write(" --<USER_INPUT> Insert all BFM signals here\n")
    file_handle.write(" -- Example:\n")
    file_handle.write(" -- cs : std_logic; -- to dut\n")
    file_handle.write(" -- addr : unsigned; -- to dut\n")
    file_handle.write(" -- rena : std_logic; -- to dut\n")
    file_handle.write(" -- wena : std_logic; -- to dut\n")
    file_handle.write(" -- wdata : std_logic_vector; -- to dut\n")
    file_handle.write(" -- ready : std_logic; -- from dut\n")
    file_handle.write(" -- rdata : std_logic_vector; -- from dut\n")
    file_handle.write(" -- end record;\n")
    print_linefeed(file_handle)
    # BFM configuration record; only clock_period is generated active,
    # the remaining fields are left as commented examples.
    file_handle.write(" -- Configuration record to be assigned in the test harness.\n")
    file_handle.write(" type t_"+vvc_name.lower()+"_bfm_config is\n")
    file_handle.write(" record\n")
    file_handle.write(" --<USER_INPUT> Insert all BFM config parameters here\n")
    file_handle.write(" -- Example:\n")
    file_handle.write(" -- max_wait_cycles : integer;\n")
    file_handle.write(" -- max_wait_cycles_severity : t_alert_level;\n")
    file_handle.write(" -- id_for_bfm : t_msg_id;\n")
    file_handle.write(" -- id_for_bfm_wait : t_msg_id;\n")
    file_handle.write(" -- id_for_bfm_poll : t_msg_id;\n")
    file_handle.write(" clock_period : time; -- Needed in the VVC\n")
    file_handle.write(" end record;\n")
    print_linefeed(file_handle)
    # Default value for the configuration record above.
    file_handle.write(" -- Define the default value for the BFM config\n")
    file_handle.write(" constant C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT : t_"+vvc_name.lower()+"_bfm_config := (\n")
    file_handle.write(" --<USER_INPUT> Insert defaults for all BFM config parameters here\n")
    file_handle.write(" -- Example:\n")
    file_handle.write(" -- max_wait_cycles => 10,\n")
    file_handle.write(" -- max_wait_cycles_severity => failure,\n")
    file_handle.write(" -- id_for_bfm => ID_BFM,\n")
    file_handle.write(" -- id_for_bfm_wait => ID_BFM_WAIT,\n")
    file_handle.write(" -- id_for_bfm_poll => ID_BFM_POLL,\n")
    file_handle.write(" clock_period => 10 ns\n")
    file_handle.write(" );\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    # Placeholder section for the user's BFM procedure declarations.
    file_handle.write(" " + division_line+"\n")
    file_handle.write(" -- BFM procedures \n")
    file_handle.write(" " + division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write(" --<USER_INPUT> Insert BFM procedure declarations here, e.g. read and write operations\n")
    file_handle.write(" -- It is recommended to also have an init function which sets the BFM signals to their "
                      "default state\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("end package "+vvc_name.lower()+"_bfm_pkg;\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
    file_handle.write(division_line+"\n")
def add_bfm_pkg_body(file_handle, vvc_name):
    """Write the (mostly empty) package body skeleton for the BFM package."""
    pkg_name = vvc_name.lower() + "_bfm_pkg"
    print_linefeed(file_handle)
    file_handle.write("package body " + pkg_name + " is\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    # The body only carries a placeholder for the user's implementation.
    file_handle.write(" --<USER_INPUT> Insert BFM procedure implementation here.\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("end package body " + pkg_name + ";\n")
    print_linefeed(file_handle)
def generate_bfm_skeleton(vvc_name):
    """Generate output/<vvc_name>_bfm_pkg.vhd: header, includes, package and body.

    Args:
        vvc_name: name of the VVC; lower-cased to build the file name.
    """
    # 'with' guarantees the file is closed even if a generator step raises
    # (the original open()/close() pair leaked the handle on error).
    with open("output/"+vvc_name.lower()+"_bfm_pkg.vhd", 'w') as f:
        add_vvc_header(f)
        add_bfm_pkg_includes(f)
        add_bfm_pkg_header(f, vvc_name)
        add_bfm_pkg_body(f, vvc_name)
def generate_vvc_methods_pkg_file(vvc_name, vvc_channels):
    """Generate output/vvc_methods_pkg.vhd for the given VVC.

    Args:
        vvc_name: name of the VVC, used inside the generated package.
        vvc_channels: list of Channel objects describing the VVC channels.
    """
    # 'with' guarantees the file is closed even if a generator step raises.
    with open("output/vvc_methods_pkg.vhd", 'w') as f:
        add_vvc_header(f)
        add_methods_pkg_includes(f, vvc_name)
        add_methods_pkg_header(f, vvc_name, vvc_channels)
        add_methods_pkg_body(f, vvc_name)
def generate_vvc_cmd_pkg_file():
    """Generate output/vvc_cmd_pkg.vhd (VVC-independent command package)."""
    # 'with' guarantees the file is closed even if a generator step raises.
    with open("output/vvc_cmd_pkg.vhd", 'w') as f:
        add_vvc_header(f)
        add_vvc_cmd_pkg_includes(f)
        add_vvc_cmd_pkg_header(f)
        add_vvc_cmd_pkg_body(f)
def generate_vvc_file(vvc_name, vvc_channels):
    """Generate the VVC architecture file(s).

    A single-channel VVC yields one output/<vvc_name>_vvc.vhd. A
    multi-channel VVC yields one leaf file per channel plus a wrapper
    <vvc_name>_vvc.vhd instantiating every leaf.

    Args:
        vvc_name: name of the VVC, used for file and entity names.
        vvc_channels: list of Channel objects; a single channel named
            "NA" denotes the single-channel case.
    """
    # Create main VVC, or leaf VVCs if multiple channels
    for channel in vvc_channels:
        num_of_queues = channel.len_of_queue()
        if channel.name == "NA":
            vvc_file_name = "output/"+vvc_name.lower()+"_vvc.vhd"
        else:
            vvc_file_name = "output/"+vvc_name.lower()+"_"+channel.name.lower()+"_vvc.vhd"
        # 'with' closes the file even if a generator step raises
        # (the original open()/close() pair leaked the handle on error).
        with open(vvc_file_name, 'w') as f:
            add_vvc_header(f)
            add_leaf_includes(f, vvc_name)
            add_vvc_entity(f, vvc_name, channel.name)
            add_architecture_declaration(f, vvc_name, channel)
            add_vvc_constructor(f, vvc_name)
            add_vvc_interpreter(f, channel)
            add_vvc_executor(f, channel)
            # One extra pipeline step per queue beyond the first; range is
            # empty when num_of_queues <= 1, so no guard is needed.
            for i in range(1, num_of_queues):
                add_vvc_pipeline_step(f, channel.queue_names[i])
            add_vvc_terminator(f)
            add_end_of_architecture(f)
    # Create wrapper if multiple channels
    if len(vvc_channels) != 1:
        with open("output/"+vvc_name.lower()+"_vvc.vhd", 'w') as f:
            add_vvc_header(f)
            add_wrapper_includes(f, vvc_name)
            add_vvc_entity(f, vvc_name, "NA")
            add_wrapper_architecture_declaration(f, vvc_name)
            for channel in vvc_channels:
                add_leaf_vvc_entity(f, vvc_name, channel.name)
            add_wrapper_architecture_end(f)
def _read_channel_queues(channel):
    """Ask the user for extra queue names and add them to *channel*.

    The first queue is implicit, so only queues 2..N are prompted for.
    """
    if is_multi_queue_channel() == 'y':
        number_of_queues = get_number_of_queues()
        for _ in range(1, number_of_queues):
            channel.add_queue(get_queue_name())


def main():
    """Interactively collect the VVC description and generate all files."""
    vvc_channels = []
    vvc_name = get_vvc_name()
    if is_multi_channel_vvc() == 'y':
        for _ in range(get_number_of_channels()):
            channel = Channel(get_channel_name())
            _read_channel_queues(channel)
            vvc_channels.append(channel)
    else:
        # Single-channel VVCs use the reserved channel name "NA".
        channel = Channel("NA")
        _read_channel_queues(channel)
        vvc_channels.append(channel)
    if not os.path.exists("output"):
        os.makedirs("output")
    generate_vvc_file(vvc_name, vvc_channels)
    generate_vvc_cmd_pkg_file()
    generate_vvc_methods_pkg_file(vvc_name, vvc_channels)
    generate_bfm_skeleton(vvc_name)
    print("\nThe vvc_generator script is now finished")
    print("The generated VVC can be found in the output folder")


# Entry point for the vvc_generator script
if __name__ == '__main__':
    main()
| {
"repo_name": "AndyMcC0/UVVM_All",
"path": "uvvm_vvc_framework/script/vvc_generator/vvc_generator.py",
"copies": "1",
"size": "80022",
"license": "mit",
"hash": 5375977851871328000,
"line_mean": 58.5349702381,
"line_max": 244,
"alpha_frac": 0.5860151222,
"autogenerated": false,
"ratio": 3.262057156834767,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43480722790347665,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BJHaibo'
import os
import scrapy
# from scrapy.spider import Request
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
class MyImagePipeline(ImagesPipeline):
    """ImagesPipeline that records the absolute on-disk path of each
    downloaded image in the item's 'image_path' field."""

    def __init__(self, store_uri, download_func=None):
        # store_uri is automatically set from settings.py: IMAGES_STORE
        self.store_path = store_uri
        # BUG FIX: forward the caller-supplied download_func to the parent;
        # the original hard-coded download_func=None, silently discarding it.
        super(MyImagePipeline, self).__init__(store_uri, download_func=download_func)

    def get_media_requests(self, item, info):
        """Yield a download request for the item's 'url' field, if present."""
        url = item.get('url', None)
        if url is not None:
            yield scrapy.Request(url)

    # Called once all image requests for the item have finished downloading.
    def item_completed(self, results, item, info):
        """Store the absolute path of the first successfully downloaded
        image in item['image_path'], then return the item unchanged."""
        image_paths = [x['path'] for ok, x in results if ok]
        if image_paths:
            image_path = image_paths[0]
            item['image_path'] = os.path.join(os.path.abspath(self.store_path)
                                              , image_path) if image_path else ''
        return item
| {
"repo_name": "haipersist/webspider",
"path": "spider/jobspider/pipelines/down_image.py",
"copies": "1",
"size": "1075",
"license": "mit",
"hash": 7759712044162594000,
"line_mean": 27.8611111111,
"line_max": 81,
"alpha_frac": 0.6130232558,
"autogenerated": false,
"ratio": 3.923357664233577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5036380920033576,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BJ'
from behave import *
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions
@given('I am browsing "{url}"')
def step_impl(context, url):
    """Open *url* and wait up to 10 s for a Wikipedia title; record the result.

    Sets context.failed so later steps can assert on it: the original only
    assigned it on timeout, making 'The scenario has passed so far' raise
    AttributeError when the page loaded successfully.
    """
    context.failed = False
    context.driver.get(url)
    try:
        WebDriverWait(context.driver, 10).until(expected_conditions.title_contains("Wikipedia"))
    except TimeoutException:
        print("The %s page failed to load! FAIL" % url)
        context.failed = True
@given('The scenario has passed so far')
def step_impl(context):
    """Guard step: fail the scenario if an earlier step flagged a failure.

    Uses getattr so an unset flag counts as "not failed" instead of
    raising AttributeError when no earlier step assigned context.failed.
    """
    assert getattr(context, 'failed', False) is False
@when('I search for "{search_criteria}"')
def step_impl(context, search_criteria):
    """Type *search_criteria* into the Wikipedia search box."""
    search_box = context.driver.find_element_by_name("search")
    search_box.send_keys(search_criteria)
@when('I select language "{language}"')
def step_impl(context, language):
    """Choose *language* in the search-language drop-down."""
    language_dropdown = Select(context.driver.find_element_by_name('language'))
    language_dropdown.select_by_visible_text(language)
@when('Click the search button')
def step_impl(context):
    """Submit the search by clicking the 'go' button."""
    go_button = context.driver.find_element_by_name("go")
    go_button.click()
@when('I select language_option "{language}"')
def step_impl(context, language):
    """Follow the inter-language link for *language* on the results page."""
    lang_panel = context.driver.find_element_by_xpath("//div[@id='p-lang']")
    lang_panel.find_element_by_partial_link_text(language).click()
@then('The first heading of the search results page matches "{search_topic}"')
def step_impl(context, search_topic):
    """Check, ignoring case, that the first <h1> contains *search_topic*."""
    heading_text = context.driver.find_element_by_xpath("/html/body//h1").text
    assert search_topic.lower() in heading_text.lower()
    assert context.failed is False
@then('The search results page is available in "{language}"')
def step_impl(context, language):
    """Check the inter-language panel links to *language* (lookup raises if absent)."""
    lang_panel = context.driver.find_element_by_xpath("//div[@id='p-lang']")
    lang_panel.find_element_by_partial_link_text(language)
    assert context.failed is False
| {
"repo_name": "bjtallguy/FP_u1qJXqn0m31A6v0beo4",
"path": "q2/tests/features/steps/q2.py",
"copies": "1",
"size": "1883",
"license": "bsd-2-clause",
"hash": -4903319061603585000,
"line_mean": 32.0350877193,
"line_max": 115,
"alpha_frac": 0.7227827934,
"autogenerated": false,
"ratio": 3.5935114503816794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4816294243781679,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bj'
import unittest
from timeit import Timer
from q1 import find_longest_inc_subsequence as fls
class TestFindLongestIncrementingSubSequence(unittest.TestCase):
    """Tests for q1.find_longest_inc_subsequence (imported as ``fls``)."""
    def test_example_one(self):
        """Worked example from the exercise: expected result is 4."""
        self.assertEqual(fls([1, 4, 1, 4, 2, 1, 3, 5, 6, 2, 3, 7]), 4)
    def test_example_two(self):
        """Second worked example: expected result is 3."""
        self.assertEqual(fls([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]), 3)
    def test_example_three(self):
        """Third worked example: expected result is 2."""
        self.assertEqual(fls([2, 7, 1, 8, 2, 8, 1]), 2)
    def test_empty_list(self):
        """An empty sequence yields 0."""
        self.assertEqual(fls([]), 0)
    def test_single_value(self):
        """A single element yields 0 — there is no pair to compare."""
        self.assertEqual(fls([7]), 0)
    def test_all_same_value(self):
        """Equal elements never increase, so the result is 0."""
        self.assertEqual(fls([2, 2, 2, 2, 2, 2, 2]), 0)
    def test_a_string(self):
        """A single string element behaves like a single value: 0."""
        self.assertEqual(fls(['abc']), 0)
    def test_list_of_strings(self):
        """Strings compare lexicographically: 'a' < 'b' < 'c' gives 3."""
        self.assertEqual(fls(['a', 'b', 'c', 'b']), 3)
    def test_speed(self):
        """Creates million int list and times search."""
        # NOTE(review): wall-clock bound (< 2 s for 1M elements) makes this
        # test machine-dependent — may flake on slow hardware.
        t = Timer("""fls(seq)""", """
from random import randint
from q1 import find_longest_inc_subsequence as fls
seq = [randint(0, 100) for x in range(1000000)]
""")
        self.assertLess(t.timeit(1), 2)
# Allow the test module to be run directly: ``python tests.py``.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "bjtallguy/FP_u1qJXqn0m31A6v0beo4",
"path": "q1/tests/tests.py",
"copies": "1",
"size": "1220",
"license": "bsd-2-clause",
"hash": -7352055570909805000,
"line_mean": 26.7272727273,
"line_max": 70,
"alpha_frac": 0.5885245902,
"autogenerated": false,
"ratio": 3.0272952853598016,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4115819875559802,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bj'
"""
Q2 Web Front-End Test
Automate the following functional test using Selenium:
1. Navigate to the Wikipedia home page, http://www.wikipedia.org/.
2. Search for a given string in English:
(a) Type in a string given as parameter in the search input field.
(b) Select English as the search language.
(c) Click the search button.
3. Validate that the first heading of the search results page matches the
search string (ignoring case).
4. Verify that the search results page is available in a language given as
parameter.
5. Navigate to the search results page in that language.
6. Validate that the search results page in the new language includes a
link to the version in English.
"""
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions
# Test Data
start_page = "http://www.wikipedia.org/"
search_field_name = "search"
search_string = "London"
search_lang = "Deutsch"
# search_lang = "wiggle"

# Create a new instance of the Firefox driver.
driver = webdriver.Firefox()
try:
    # 1. Navigate to the Wikipedia home page, http://www.wikipedia.org/.
    driver.get(start_page)
    try:
        # Wait up to 10 s for the title; the returned element is unused.
        WebDriverWait(driver, 10).until(expected_conditions.title_contains("Wikipedia"))
    except TimeoutException:
        # Was a bare 'except:', which also swallowed unrelated errors such
        # as KeyboardInterrupt; only the wait timeout is expected here.
        print("The %s page failed to load! FAIL" % start_page)
        raise

    # 2. Search for a given string in English:
    # (a) Type in a string given as parameter in the search input field.
    input_element = driver.find_element_by_name(search_field_name)
    # type in the search
    input_element.send_keys(search_string)

    # (b) Select English as the search language.
    select = Select(driver.find_element_by_name('language'))
    select.select_by_visible_text("English")

    # (c) Click the search button.
    driver.find_element_by_name("go").click()

    # 3. Validate that the first heading of the search results page matches the search string (ignoring case).
    if search_string.lower() not in driver.find_element_by_xpath("/html/body//h1").text.lower():
        raise AssertionError("Search string (%s) not found in first header" % search_string)

    # 4. Verify that the search results page is available in a language given as parameter.
    try:
        new_lang_page = driver.find_element_by_xpath("//div[@id='p-lang']").find_element_by_partial_link_text(search_lang)
    except NoSuchElementException:
        print("Page not available in %s. FAIL" % search_lang)
        raise
    else:
        print("Page is available in %s. PASS" % search_lang)

    # 5. Navigate to the search results page in that language.
    new_lang_page.click()

    # 6. Validate that the search results page in the new language includes a link to the version in English
    en_lang = "English"
    try:
        new_lang_page = driver.find_element_by_xpath("//div[@id='p-lang']").find_element_by_partial_link_text(en_lang)
    except NoSuchElementException:
        print("Page not available in %s. FAIL" % en_lang)
        raise
    else:
        print("Page is available in %s. PASS" % en_lang)
finally:
    # Always shut the browser down, pass or fail.
    driver.quit()
| {
"repo_name": "bjtallguy/FP_u1qJXqn0m31A6v0beo4",
"path": "q2/q2.py",
"copies": "1",
"size": "3270",
"license": "bsd-2-clause",
"hash": 8728535580024423000,
"line_mean": 37.9285714286,
"line_max": 122,
"alpha_frac": 0.7107033639,
"autogenerated": false,
"ratio": 3.762945914844649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9962854086214891,
"avg_score": 0.0021590385059516598,
"num_lines": 84
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.