function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def M_to_D(M):
    """Parabolic eccentric anomaly from mean anomaly.

    Parameters
    ----------
    M : ~astropy.units.Quantity
        Mean anomaly.

    Returns
    -------
    D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.

    """
    # Work in raw radians for the fast kernel, then restore the caller's unit.
    M_rad = M.to_value(u.rad)
    D = M_to_D_fast(M_rad) * u.rad
    return D.to(M.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def E_to_M(E, ecc):
    """Mean anomaly from eccentric anomaly.

    .. versionadded:: 0.4.0

    Parameters
    ----------
    E : ~astropy.units.Quantity
        Eccentric anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity.

    Returns
    -------
    M : ~astropy.units.Quantity
        Mean anomaly.

    """
    # Strip units for the compiled kernel, then express the result in E's unit.
    E_rad = E.to_value(u.rad)
    M = E_to_M_fast(E_rad, ecc.value) * u.rad
    return M.to(E.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def F_to_M(F, ecc):
    """Mean anomaly from hyperbolic eccentric anomaly.

    Parameters
    ----------
    F : ~astropy.units.Quantity
        Hyperbolic eccentric anomaly.
    ecc : ~astropy.units.Quantity
        Eccentricity (>1).

    Returns
    -------
    M : ~astropy.units.Quantity
        Mean anomaly.

    """
    # Docstring fix: the summary wrongly said "from eccentric anomaly";
    # this conversion is for the hyperbolic case (ecc > 1).
    return (F_to_M_fast(F.to_value(u.rad), ecc.value) * u.rad).to(F.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def D_to_M(D):
    """Mean anomaly from parabolic eccentric anomaly.

    Parameters
    ----------
    D : ~astropy.units.Quantity
        Parabolic eccentric anomaly.

    Returns
    -------
    M : ~astropy.units.Quantity
        Mean anomaly.

    """
    # Docstring fix: the summary wrongly said "from eccentric anomaly";
    # this is the inverse of M_to_D (parabolic case).
    return (D_to_M_fast(D.to_value(u.rad)) * u.rad).to(D.unit)
poliastro/poliastro
[ 713, 256, 713, 128, 1372947285 ]
def __init__(self, status, message):
    # Surface the error message, then end the run.
    # NOTE(review): `status` is accepted but never used here — confirm
    # whether callers rely on it being recorded elsewhere.
    display_error(message)
    finish_run()
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def display_error(msg, *args):
    """Call display error msg handler."""
    text = "%s" % msg
    display.display_error(text, *args)
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def display_detail(msg, *args):
    """Call display detail msg handler."""
    text = "%s" % msg
    display.display_detail(text, *args)
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def remove_run_file():
    """Delete the munkireport run touch file, if it exists."""
    touchfile = '/Users/Shared/.com.github.munkireport.run'
    if not os.path.exists(touchfile):
        return
    os.remove(touchfile)
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def get_hardware_info():
    """Uses system profiler to get hardware info for this machine."""
    cmd = ["/usr/sbin/system_profiler", "SPHardwareDataType", "-xml"]
    proc = subprocess.Popen(
        cmd,
        shell=False,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (output, dummy_error) = proc.communicate()
    try:
        plist = FoundationPlist.readPlistFromString(output)
        # system_profiler xml is an array
        sp_dict = plist[0]
        items = sp_dict["_items"]
        sp_hardware_dict = items[0]
        return sp_hardware_dict
    except BaseException:
        # Any parse failure (malformed plist, unexpected structure) yields an
        # empty dict so the caller's report run continues.
        return {}
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def get_uid(username):
    """Return the numeric uid for *username*, or "" if the user is unknown.

    Bug fixed: the original used a bare ``except``, which also swallowed
    SystemExit/KeyboardInterrupt. ``pwd.getpwnam`` raises ``KeyError`` for
    an unknown user, so only that is caught.
    """
    try:
        return pwd.getpwnam(username)[2]
    except KeyError:
        return ""
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def get_cpuinfo():
    """Return the CPU brand string via macOS sysctl."""
    cmd = ["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"]
    proc = subprocess.Popen(
        cmd,
        shell=False,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (output, unused_error) = proc.communicate()
    # sysctl emits bytes; strip trailing newline before decoding.
    output = output.strip()
    return output.decode("utf-8")
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def get_uptime():
    """Return seconds since boot via ``kern.boottime``, or -1 on clock skew.

    Fix: the regex literal used a non-raw string with ``\\d``, which is an
    invalid escape sequence (a DeprecationWarning, and an error in newer
    Pythons); raw strings are used for both pattern and replacement.
    """
    cmd = ["/usr/sbin/sysctl", "-n", "kern.boottime"]
    proc = subprocess.Popen(
        cmd,
        shell=False,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (output, unused_error) = proc.communicate()
    # Output looks like "{ sec = 1600000000, usec = 0 } ..." — extract sec.
    sec = int(re.sub(r".*sec = (\d+),.*", r"\1", output))
    up = int(time.time() - sec)
    return up if up > 0 else -1
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def pref(pref_name):
    """Return a preference. See prefs.py for details"""
    return CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def runExternalScriptWithTimeout( script, allow_insecure=False, script_args=(), timeout=30
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def rundir(scriptdir, runtype, abort=False, submitscript=""):
    """Run scripts in directory scriptdir
    runtype is passed to the script
    if abort is True, a non-zero exit status will abort munki
    submitscript is put at the end of the scriptlist."""
    if os.path.exists(scriptdir):
        from munkilib import utils
        # Get timeout for scripts
        scriptTimeOut = 30
        if pref("scriptTimeOut"):
            scriptTimeOut = int(pref("scriptTimeOut"))
            display_detail("# Set custom script timeout to %s seconds" % scriptTimeOut)
        # Directory containing the scripts
        parentdir = os.path.basename(scriptdir)
        display_detail("# Executing scripts in %s" % parentdir)
        # Get all files in scriptdir
        files = os.listdir(scriptdir)
        # Sort files so execution order is deterministic
        files.sort()
        # Find submit script and stick it on the end of the list
        if submitscript:
            try:
                sub = files.pop(files.index(submitscript))
                files.append(sub)
            except Exception, e:
                display_error("%s not found in %s" % (submitscript, parentdir))
        for script in files:
            # Skip files that start with a period
            if script.startswith("."):
                continue
            # Concatenate dir and filename
            scriptpath = os.path.join(scriptdir, script)
            # Skip directories
            if os.path.isdir(scriptpath):
                continue
            try:
                # Attempt to execute script
                display_detail("Running %s" % script)
                result, stdout, stderr = runExternalScriptWithTimeout(
                    scriptpath,
                    allow_insecure=False,
                    script_args=[runtype],
                    timeout=scriptTimeOut,
                )
                if stdout:
                    display_detail(stdout)
                if stderr:
                    display_detail("%s Error: %s" % (script, stderr))
                if result:
                    # Non-zero exit: abort the whole run or just warn.
                    if abort:
                        display_detail("Aborted by %s" % script)
                        exit(1)
                    else:
                        display_warning("%s return code: %d" % (script, result))
            except utils.ScriptNotFoundError:
                pass  # Script has disappeared - pass.
            except Exception, e:
                display_warning("%s: %s" % (script, str(e)))
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def gethash(filename, hash_function):
    """Calculates the hashvalue of the given file with the given hash_function.

    Args:
      filename: The file name to calculate the hash value of.
      hash_function: The hash function object to use, which was instantiated
          before calling this function, e.g. hashlib.md5().

    Returns:
      The hashvalue of the given file as hex string.
    """
    if not os.path.isfile(filename):
        return "NOT A FILE"
    # Fix: use a context manager so the handle is closed even if read()
    # raises; the original leaked the descriptor on error.
    with open(filename, "rb") as fileref:
        while True:
            chunk = fileref.read(2 ** 16)
            if not chunk:
                break
            hash_function.update(chunk)
    return hash_function.hexdigest()
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def getOsVersion(only_major_minor=True, as_tuple=False):
    """Returns an OS version.

    Args:
      only_major_minor: Boolean. If True, only include major/minor versions.
      as_tuple: Boolean. If True, return a tuple of ints, otherwise a string.

    Fix: ``proc.communicate()`` returns bytes; the original split bytes and
    then joined with a str separator, which fails on Python 3 — decode first.
    """
    # Disable the Big Sur compatibility shim so sw_vers reports the real version.
    os.environ["SYSTEM_VERSION_COMPAT"] = '0'
    cmd = ["/usr/bin/sw_vers -productVersion"]
    proc = subprocess.Popen(
        cmd,
        shell=True,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (output, unused_error) = proc.communicate()
    output = output.decode("utf-8").strip()
    os_version_tuple = output.split(".")
    if only_major_minor:
        os_version_tuple = os_version_tuple[0:2]
    if as_tuple:
        return tuple(map(int, os_version_tuple))
    else:
        return ".".join(os_version_tuple)
munkireport/munkireport-php
[ 377, 133, 377, 145, 1380720923 ]
def test_checksum_len1():
    """Luhn checksum of a single-digit string."""
    observed = luhn.checksum('7')
    assert observed == 7
mmcloughlin/luhn
[ 40, 11, 40, 2, 1434526871 ]
def test_checksum_len3():
    """Luhn checksum of a three-digit string."""
    observed = luhn.checksum('383')
    assert observed == 3
mmcloughlin/luhn
[ 40, 11, 40, 2, 1434526871 ]
def test_checksum_len13():
    """Luhn checksum of a thirteen-digit string."""
    observed = luhn.checksum('4346537657597')
    assert observed == 9
mmcloughlin/luhn
[ 40, 11, 40, 2, 1434526871 ]
def test_valid():
    """A known-valid card number verifies."""
    is_ok = luhn.verify('356938035643809')
    assert is_ok
mmcloughlin/luhn
[ 40, 11, 40, 2, 1434526871 ]
def test_generate():
    """Generated check digit matches the documented example."""
    digit = luhn.generate('7992739871')
    assert digit == 3
mmcloughlin/luhn
[ 40, 11, 40, 2, 1434526871 ]
def _forward_backward(self, shape):
    # Build a throwaway featureflow model whose pipeline is a single
    # mean/std normalization step wrapped in a PreprocessingPipeline.
    @simple_in_memory_settings
    class Model(ff.BaseModel):
        meanstd = ff.PickleFeature(
            MeanStdNormalization,
            store=False)

        pipeline = ff.PickleFeature(
            PreprocessingPipeline,
            needs=(meanstd,),
            store=True)

    # Fit the pipeline on random training data of the requested shape.
    training = np.random.random_sample((100,) + shape)
    _id = Model.process(meanstd=training)
    model = Model(_id)

    # Transform fresh data and verify the inverse transform round-trips
    # both the shape and the values.
    data_shape = (10,) + shape
    data = np.random.random_sample(data_shape)
    result = model.pipeline.transform(data)
    self.assertEqual(data_shape, result.data.shape)
    inverted = result.inverse_transform()
    self.assertEqual(inverted.shape, data.shape)
    np.testing.assert_allclose(inverted, data)
JohnVinyard/zounds
[ 22, 6, 22, 26, 1458698104 ]
def test_can_process_2d(self):
    """Round-trip a rank-2 feature shape through the pipeline."""
    shape = (3, 4)
    self._forward_backward(shape)
JohnVinyard/zounds
[ 22, 6, 22, 26, 1458698104 ]
def create_pos_n_neg(): for file_type in ['neg']:
Tianyi94/EC601Project_Somatic-Parkour-Game-based-on-OpenCV
[ 1, 2, 1, 5, 1506357464 ]
def add(a, b):
    """Return the sum of *a* and *b* (exposed over the RPC server in tests)."""
    total = a + b
    return total
CloudI/CloudI
[ 384, 53, 384, 4, 1251785476 ]
def test_default_pool(self, pool=None): """ Tests the default pool """ # Setup server server = PooledJSONRPCServer(("localhost", 0), thread_pool=pool) server.register_function(add) # Serve in a thread thread = threading.Thread(target=server.serve_forever) thread.daemon = True thread.start() # Find its port port = server.socket.getsockname()[1] # Make the client client = ServerProxy("http://localhost:{0}".format(port)) # Check calls for _ in range(10): a, b = random.random(), random.random() result = client.add(a, b) self.assertEqual(result, a+b) # Close server server.server_close() thread.join()
CloudI/CloudI
[ 384, 53, 384, 4, 1251785476 ]
def test_low(self):
    """clamp leaves an in-range value unchanged."""
    clamped = smart_open.utils.clamp(5, 0, 10)
    self.assertEqual(clamped, 5)
piskvorky/smart_open
[ 2791, 347, 2791, 76, 1420203952 ]
def inject_extra_field_to_model(from_model, field_name, field):
    """Attach *field* to *from_model* under *field_name*.

    Raises if *from_model* is a lazy string reference rather than a
    concrete model class.
    """
    # Guard clause: a string reference cannot receive a contributed field.
    if isinstance(from_model, six.string_types):
        raise Exception('from_model must be a Model Class')
    field.contribute_to_class(from_model, field_name)
ShenggaoZhu/django-sortedone2many
[ 10, 3, 10, 3, 1438174777 ]
def __init__(self, reportDate):
    # Download both report types for the given date, then parse them.
    self.DownloadSubscriptionEventReport(reportDate)
    self.DownloadSubscriptionReport(reportDate)
    self.FetchSubscriptionEventData(reportDate)
    self.FetchSubscriptionData(reportDate)
    # presumably removes the downloaded report files — verify CleanUp
    self.CleanUp(reportDate)
Acimaz/Google_Apple_Financial_Reporter
[ 1, 1, 1, 1, 1484910922 ]
def DownloadSubscriptionReport(self, date):
    # Download the summary Subscription report for `date` via Apple's
    # Reporter API (Python 2 code — print statements).
    print 'Downloading Apple Financial Report for Active Users (' + date + ')..'
    credentials = (self.userId, self.password, self.account, self.mode)
    command = 'Sales.getReport, {0},Subscription,Summary,{1},{2}'.format(self.vendorId, self.dateType, date)
    try:
        ApplePythonReporter.output_result(ApplePythonReporter.post_request(ApplePythonReporter.ENDPOINT_SALES,
                                                                           credentials, command))
    except:
        # NOTE(review): bare except deliberately ignores download failures
        # (best-effort), but it also swallows KeyboardInterrupt — consider
        # narrowing to Exception.
        pass
    #return iter(p.stdout.readline, b'')
Acimaz/Google_Apple_Financial_Reporter
[ 1, 1, 1, 1, 1484910922 ]
def FetchSubscriptionEventData(self, date):
    # Parse the Subscription_Event report for `date`, accumulating
    # 'Cancel' rows into self.cancellations and 'Subscribe' rows into
    # self.subscribers.
    fileName = 'Subscription_Event_'+self.vendorId+'_' + date + '.txt'
    attempts = 0
    # Poll (1s apart, up to self.maxAttempts) for the downloaded file.
    while not os.path.isfile(fileName):
        if(attempts >= self.maxAttempts):
            break
        attempts += 1
        time.sleep(1)
    if os.path.isfile(fileName):
        print 'Fetching SubscriptionEvents..'
        with open(fileName, 'rb') as inF:
            text = inF.read().splitlines()
            for row in text[1:]:
                line = string.split(row, '\t')
                # print line[self.eventIndex].__str__()
                # Only count rows whose first (date) column ends with the
                # report day-of-month.
                if line[0].__str__().endswith(date[-2:]):
                    if line[self.eventIndex] == 'Cancel':
                        self.cancellations += int(line[self.quantityIndex])
                    if line[self.eventIndex] == 'Subscribe':
                        self.subscribers += int(line[self.quantityIndex])
    else:
        print 'SubscriptionEvent: There were no sales for the date specified'
Acimaz/Google_Apple_Financial_Reporter
[ 1, 1, 1, 1, 1484910922 ]
def FetchSubscriptionData(self, date):
    # Parse the Subscription report for `date`, summing the active
    # subscriber column into self.activeSubscribers.
    fileName = 'Subscription_'+self.vendorId+'_' + date + '.txt'
    attempts = 0
    # Poll (1s apart, up to self.maxAttempts) for the downloaded file.
    while not os.path.isfile(fileName):
        if (attempts >= self.maxAttempts):
            break
        attempts += 1
        time.sleep(1)
    if os.path.isfile(fileName):
        print 'Fetching Subscriptions..'
        with open(fileName, 'rb') as inF:
            text = inF.read().splitlines()
            for row in text[1:]:
                line = string.split(row, '\t')
                # print line[0].__str__()
                self.activeSubscribers += int(line[self.activeSubscriberIndex])
    else:
        print 'Subscription: There were no sales for the date specified'
Acimaz/Google_Apple_Financial_Reporter
[ 1, 1, 1, 1, 1484910922 ]
def draw_model(model):
    """Visualize *model* with Keras' graphviz helpers (inline SVG + PNG file)."""
    from IPython.display import SVG
    from keras.utils.visualize_util import model_to_dot
    from keras.utils.visualize_util import plot

    #graph = to_graph(model, show_shape=True)
    #graph.write_png("UFCNN_1.png")

    # Render inline for notebooks, and also write a PNG to disk.
    SVG(model_to_dot(model).create(prog='dot', format='svg'))
    plot(model, to_file='UFCNN_1.png')
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def print_layers_shapes(model):
    """Print type, input shape and output shape for every layer of *model*."""
    for layer in model.layers:
        print("{} : {} : {}".format(type(layer), layer.input_shape, layer.output_shape))
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def save_neuralnet(model, model_name):
    """Persist *model* under ``path``: JSON architecture, HDF5 weights, YAML dump.

    Fix: the original wrote the JSON with ``open(...).write(...)``, leaking
    the file handle until GC; both files now use context managers.
    """
    json_string = model.to_json()
    with open(path + model_name + '_architecture.json', 'w') as json_file:
        json_file.write(json_string)
    model.save_weights(path + model_name + '_weights.h5', overwrite=True)

    yaml_string = model.to_yaml()
    with open(path + model_name + '_data.yml', 'w') as outfile:
        outfile.write(yaml_string)
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def ufcnn_model_concat(sequence_length=5000, features=1, nb_filter=150, filter_length=5, output_dim=1, optimizer='adagrad', loss='mse', regression = True, class_mode=None, activation="softplus", init="lecun_uniform"): #model = Graph()
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def ufcnn_model_deconv(sequence_length=5000,
                       features=4,
                       nb_filter=150,
                       filter_length=5,
                       output_dim=1,
                       optimizer='adagrad',
                       loss='mse',
                       regression=False,
                       class_mode=None,
                       activation="softplus",
                       init="lecun_uniform"):
    """Build the deconvolutional UFCNN using the Keras 1.x functional API.

    Five stacked 1-D convolutions form the contracting path; skip
    connections are concatenated back in on the expanding path, and a final
    convolution produces either raw outputs (regression) or softmax scores.
    """
    main_input = Input(name='input', shape=(None, features))
    # Zero-pad so the first 'valid' convolution cannot look ahead of the
    # current time step (lookahead bias).
    padded = (ZeroPadding1D(2))(main_input)

    conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                          border_mode='valid', init=init,
                          input_shape=(sequence_length, features))(padded)
    relu1 = (Activation(activation))(conv1)

    conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                           border_mode='same', init=init))(relu1)
    relu2 = (Activation(activation))(conv2)

    conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                           border_mode='same', init=init))(relu2)
    relu3 = (Activation(activation))(conv3)

    conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                           border_mode='same', init=init))(relu3)
    relu4 = (Activation(activation))(conv4)

    conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                           border_mode='same', init=init))(relu4)
    relu5 = (Activation(activation))(conv5)

    # Expanding path: concatenate skip connections along axis 1.
    conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
    relu6 = (Activation(activation))(conv6)

    conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
    relu7 = (Activation(activation))(conv7)

    conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
    relu8 = (Activation(activation))(conv8)

    # Head: one wide convolution; softmax only in classification mode.
    conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length,
                          border_mode='same', init=init)(relu8)
    if regression:
        output = conv9
    else:
        output = (Activation('softmax'))(conv9)

    model = Model(input=main_input, output=output)
    model.compile(optimizer=optimizer, loss=loss)

    return model
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def ufcnn_model(sequence_length=5000,
                features=1,
                nb_filter=150,
                filter_length=5,
                output_dim=1,
                optimizer='adagrad',
                loss='mse',
                regression=True,
                class_mode=None,
                init="lecun_uniform",
                mode='concat'):
    """Dispatch to a UFCNN variant by *mode* (only 'concat' is implemented).

    Bugs fixed:
    - the original passed ``init`` as the 10th positional argument of
      ``ufcnn_model_concat``, whose 10th parameter is ``activation`` — so
      "lecun_uniform" silently became the activation function. ``init`` is
      now passed by keyword, leaving ``activation`` at its default.
    - ``raise NotImplemented`` raises a TypeError (NotImplemented is a
      sentinel value, not an exception); use NotImplementedError.
    """
    if mode == 'concat':
        return ufcnn_model_concat(sequence_length, features, nb_filter,
                                  filter_length, output_dim, optimizer, loss,
                                  regression, class_mode, init=init)
    raise NotImplementedError
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5):
    """Fit *model* on a cosine toy signal and run prediction on the same data."""
    lahead = 1

    cos = gen_cosine_amp(xn = sequence_length * 100)

    # Target: mean of the next `lahead` samples for each position.
    expected_output = np.zeros((len(cos), 1, 1))
    for i in range(len(cos) - lahead):
        expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])

    print('Training')
    for i in range(epochs):
        print('Epoch', i, '/', epochs)
        # shuffle=False: the sequence order is the signal.
        model.fit({'input': cos, 'output': expected_output},
                  verbose=1,
                  nb_epoch=1,
                  shuffle=False,
                  batch_size=batch_size)

    print('Predicting')
    predicted_output = model.predict({'input': cos,}, batch_size=batch_size)
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def treat_X_tradcom(mean):
    """
    treat some columns of the dataframe together when normalizing the dataframe:
    col. 1, 2, 4 ... Mkt Price, Bid price, Ask Price
    col 3 and 5 ... Ask & Bid price
    """
    result = mean.copy()

    # Price-like columns (market / bid / ask) share one common maximum
    # so they are scaled identically.
    mkt = mean[1]
    bid_px = mean[2]
    ask_px = mean[4]
    px_max = max(mkt, bid_px, ask_px)
    result[1] = px_max
    result[2] = px_max
    result[4] = px_max

    # Columns 3 and 5 form the second group, sharing their own maximum.
    bid = mean[3]
    ask = mean[5]
    ba_max = max(bid, ask)
    result[3] = ba_max
    result[5] = ba_max

    print("Result after max", result)
    # Bug fix: the original never returned `result`, so callers such as
    # get_tradcom_normalization received None.
    return result
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def standardize_inputs(source, colgroups=None, mean=None, std=None): """ Standardize input features. Groups of features could be listed in order to be standardized together. source: Pandas.DataFrame or filename of csv file with features colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size) returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame """ import itertools import types
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def standardize_columns(colgroup): """ Standardize group of columns together colgroup: Pandas.DataFrame returns: Pandas.DataFrames: Colum Group standardized, Mean of the colgroup, stddeviation of the colgroup """ _me = np.mean(colgroup.values.flatten()) centered = colgroup.sub(_me) me = pd.DataFrame(np.full(len(colgroup.columns),_me), index=colgroup.columns) _st = np.std(colgroup.values.flatten()) standardized = centered.div(_st) st = pd.DataFrame(np.full(len(colgroup.columns),_st), index=colgroup.columns)
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def get_tradcom_normalization(filename, mean=None, std=None): """ read in all X Data Frames and find mean and std of all columns... """ Xdf = pd.read_csv(filename, sep=" ", index_col = 0, header = None) meanLoc = treat_X_tradcom(Xdf.mean()) print("Mean Loc") print (meanLoc) sys.stdout.flush() if mean is None: mean = meanLoc mean = mean.to_frame().transpose() meanDf=pd.concat([mean, meanLoc.to_frame().transpose()]) mean = meanDf.max() print("Mean") print (mean) sys.stdout.flush() stdLoc = treat_X_tradcom(Xdf.std()) print("Std Loc") print (stdLoc) sys.stdout.flush() if std is None: std = stdLoc std = std.to_frame().transpose() stdDf=pd.concat([std, stdLoc.to_frame().transpose()]) std = stdDf.max() print("Std") print (std)
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def prepare_tradcom_classification(training=True, ret_type='df', sequence_length=5000, features_list=[1,2,3,4], output_dim=3, file_list=None, mean=None, std=None, training_count=None): """ prepare the datasets for the trading competition. training determines which datasets will be read returns: X and y: Pandas.DataFrames or np-Arrays storing the X - and y values for the fitting.
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def generator(X, y): print("Call to generator") print(X.index.equals(y.index)) c = 1
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def train_and_predict_classification(model, sequence_length=5000, features=32, output_dim=3, batch_size=128, epochs=5, name = "model", training_count=3, testing_count=3): final_loss = 0 file_list = sorted(glob.glob('./training_data_large/prod_data_*v.txt')) if len(file_list) == 0: print ("Files ./training_data_large/product_data_*txt and signal_*.csv are needed. Please copy them in the ./training_data_large/ . Aborting.") sys.exit()
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def check_prediction(Xdf, y, yp, mean, std): """ Check the predicted classes and print results """ ## MSE for testing total_error = 0 correct_class= 0 y_pred_class = np.zeros((y.shape[2],)) y_corr_pred_class = np.zeros((y.shape[2],)) y_class = np.zeros((y.shape[2],)) y_labels = np.zeros((y.shape[1], y.shape[2])) a=['Buy','Sell','Hold'] for i in range (y.shape[1]): delta = 0. for j in range(y.shape[2]): delta += (y[0][i][j] - yp[0][i][j]) * (y[0][i][j] - yp[0][i][j])
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def get_pnl(df, max_position=1, comission=0):
    """Replay buy/sell signal rows of *df* and accumulate profit-and-loss.

    Uses columns 'buy', 'sell', 'askpx_', 'bidpx_'. Position is bounded by
    +/- max_position; any open 1-lot position is flattened at the last row's
    price. Returns (pnl, number_of_deals).
    """
    deals = []
    pnl = 0
    position = 0
    # Only rows carrying a signal matter for the replay.
    df_with_signals = df[(df['sell'] != 0) | (df['buy'] != 0)]
    for idx, row in df_with_signals.iterrows():
        if row['buy'] == 1 and position < max_position:
            print(row)
            # Buying lifts the ask: cash outflow (negative trade).
            current_trade = -row['buy'] * row["askpx_"]
            position += 1
            pnl = pnl + current_trade - comission
            deals.append(current_trade)
            print("Running PnL: {}, position: {}".format(pnl, position))
        elif row['sell'] == 1 and position > -max_position:
            print(row)
            # Selling hits the bid: cash inflow (positive trade).
            current_trade = row['sell'] * row["bidpx_"]
            position -= 1
            pnl = pnl + current_trade - comission
            deals.append(current_trade)
            print("Running PnL: {}, position: {}".format(pnl, position))

    # Flatten a remaining 1-lot position at end-of-day prices.
    if position == 1:
        day_closing_trade = df.iloc[-1]["bidpx_"]
        pnl = pnl + day_closing_trade - comission
        deals.append(day_closing_trade)
        print("Close last hanging deal on the end of the day, PnL: {}, position: {}".format(pnl, position))
    elif position == -1:
        day_closing_trade = -df.iloc[-1]["askpx_"]
        pnl = pnl + day_closing_trade - comission
        deals.append(day_closing_trade)
        print("Close last hanging deal on the end of the day, PnL: {}, position: {}".format(pnl, position))

    # Sanity check: running pnl differs from sum(deals) only by commissions.
    print("Check PnL: {} vs {}".format(pnl, np.sum(deals)))
    return pnl, len(deals)
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def get_tracking_data (sequence_length=5000, count=2000, D=10, delta=0.3, omega_w=0.005, omega_ny=0.005): """ get tracking data for a target moving in a square with 2D side length delta ... radius of the round target omega_ ... random noise strength """
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def get_simulation(write_spans = True): """ Make trading competition-like input and output data from the cosine function """ from signals import find_all_signals, make_spans, set_positions, pnl from datetime import date
lukovkin/ufcnn-keras
[ 93, 27, 93, 3, 1456850424 ]
def index():
    """Overview page: experiments grouped by day (newest first), best score first."""
    runs = get_experiments_list()
    # Newest day first.
    runs = sorted(runs, key=lambda r: r.timestamp.date(), reverse=True)
    grouped = [
        (day, list(items))
        for day, items in groupby(runs, lambda r: r.timestamp.date())
    ]
    # Within each day, rank results best-first.
    ranked = [
        (day, sorted(items, key=lambda r: r.score, reverse=True))
        for day, items in grouped
    ]
    return render_template('overview.html', experiments=ranked,
                           score_name=config.score_name)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def details(timestamp):
    """Details page for one experiment."""
    get_labels_predictions(timestamp)  # raises 404 for unknown experiments
    return render_template('details.html', timestamp=timestamp)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def normalized_confusion_matrix(timestamp):
    """Serve the normalized confusion matrix plot for one experiment."""
    labels, predictions = get_labels_predictions(timestamp)
    fig = plot_normalized_confusion_matrix(labels, predictions)
    return serve_matplotlib_fig(fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def feature_importances(timestamp):
    """Serve the feature-importance plot for one experiment."""
    names, weights = get_feature_importances(timestamp)
    fig = plot_feature_importances(names, weights)
    return serve_matplotlib_fig(fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def precision_recall(timestamp):
    """Serve the precision/recall plot for one experiment."""
    labels, predictions = get_labels_predictions(timestamp)
    fig = plot_precision_recall_n(labels, predictions)
    return serve_matplotlib_fig(fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def precision_cutoff(timestamp):
    """Serve the precision-at-cutoff plot for one experiment."""
    labels, predictions = get_labels_predictions(timestamp)
    fig = plot_precision_cutoff(labels, predictions)
    return serve_matplotlib_fig(fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def ROC(timestamp):
    """Serve the ROC curve plot for one experiment."""
    labels, predictions = get_labels_predictions(timestamp)
    fig = plot_ROC(labels, predictions)
    return serve_matplotlib_fig(fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def growth():
    """Serve a chart of the best score per day over time."""
    runs = get_experiments_list()
    # Group by date, newest first.
    runs = sorted(runs, key=lambda r: r.timestamp.date(), reverse=True)
    daily_best = []
    for day, items in groupby(runs, lambda r: r.timestamp.date()):
        # Keep only the top score of each day.
        best = max(items, key=lambda r: r.score)
        daily_best.append((day, best.score))
    growth_fig = plot_growth(daily_best)
    return serve_matplotlib_fig(growth_fig)
dssg/cincinnati2015-public
[ 3, 1, 3, 1, 1440710319 ]
def __init__(self, *args, **kwargs):
    super(ObjectInspector, self).__init__(*args, **kwargs)
    self._selected_widget = None  # widget currently under inspection
    self._ui = Ui_ObjectInspector()
    self._ui.setupUi(self)

    # Make everything monospace.
    font = QFont('Monospace')
    font.setStyleHint(QFont.TypeWriter)
    self._ui.teInspectionResults.setFont(font)

    # Register signals.
    # F7 refreshes the inspection output for the current widget.
    self._update_key = QShortcut(QKeySequence(Qt.Key_F7), self)
    self._ui.btnSelectParent.released.connect(self.select_parent)
    self._update_key.activated.connect(self.update_inspection)
zyantific/IDASkins
[ 885, 161, 885, 2, 1399390461 ]
def select_parent(self):
    """Move the inspection target up to the parent widget, if any."""
    widget = self._selected_widget
    if not widget:
        return
    parent = widget.parent()
    if parent and parent.inherits('QWidget'):
        self.update_selected_widget(parent)
zyantific/IDASkins
[ 885, 161, 885, 2, 1399390461 ]
def __init__(self, *args, **kwargs):
    # Intentionally a no-op: accepts any constructor arguments so this
    # stub can stand in for arbitrary classes.
    pass
benoitc/uzmq
[ 35, 3, 35, 1, 1352213467 ]
def __getattr__(cls, name):
    """Fabricate plausible attributes so imports of mocked modules succeed."""
    if name in ('__file__', '__path__'):
        return '/dev/null'
    if name[0] == name[0].upper():
        # Capitalised names are treated as classes: synthesize a type.
        stub = type(name, (), {})
        stub.__module__ = __name__
        return stub
    return Mock()
benoitc/uzmq
[ 35, 3, 35, 1, 1352213467 ]
def test_plot_aggregates(base_tpi, base_params, reform_tpi, reform_parms,
                         plot_type, vertical_line_years, plot_title):
    # NOTE(review): the parameter is spelled `reform_parms` while the body
    # references `reform_params`, which therefore resolves to a module-level
    # name rather than this argument — confirm against the (unseen)
    # parametrize decorator before renaming either side.
    fig = output_plots.plot_aggregates(
        base_tpi, base_params, reform_tpi=reform_tpi,
        reform_params=reform_params, var_list=['Y', 'r'],
        plot_type=plot_type, num_years_to_plot=20,
        vertical_line_years=vertical_line_years, plot_title=plot_title)
    assert fig
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_plot_aggregates_save_fig(tmpdir):
    """plot_aggregates writes a readable PNG when given a path."""
    out_path = os.path.join(tmpdir, 'test_plot.png')
    output_plots.plot_aggregates(base_tpi, base_params, plot_type='levels',
                                 path=out_path)
    img = mpimg.imread(out_path)
    assert isinstance(img, np.ndarray)
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
                        vertical_line_years, plot_title, plot_type):
    """plot_gdp_ratio returns a figure for each parameter combination."""
    result = output_plots.plot_gdp_ratio(
        base_tpi, base_params, reform_tpi=reform_tpi,
        reform_params=reform_params, plot_type=plot_type,
        vertical_line_years=vertical_line_years, plot_title=plot_title)
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_ability_bar():
    """ability_bar returns a figure."""
    result = output_plots.ability_bar(
        base_tpi, base_params, reform_tpi, reform_params,
        plot_title=' Test Plot Title')
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_ability_bar_ss():
    """ability_bar_ss returns a figure."""
    result = output_plots.ability_bar_ss(
        base_ss, base_params, reform_ss, reform_params,
        plot_title=' Test Plot Title')
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_ss_profiles(by_j, plot_data):
    """ss_profiles returns a figure for the given by_j/plot_data combo."""
    result = output_plots.ss_profiles(
        base_ss, base_params, reform_ss, reform_params,
        by_j=by_j, plot_data=plot_data, plot_title=' Test Plot Title')
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_tpi_profiles(by_j):
    """tpi_profiles returns a figure for either by_j setting."""
    result = output_plots.tpi_profiles(
        base_tpi, base_params, reform_tpi, reform_params,
        by_j=by_j, plot_title=' Test Plot Title')
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_tpi_profiles_save_fig(tmpdir):
    """tpi_profiles writes a readable PNG when given a path."""
    out_path = os.path.join(tmpdir, 'test_plot.png')
    output_plots.tpi_profiles(base_tpi, base_params, reform_tpi,
                              reform_params, path=out_path)
    img = mpimg.imread(out_path)
    assert isinstance(img, np.ndarray)
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_ss_3Dplot(base_params, base_ss, reform_params, reform_ss,
                   plot_type, plot_title):
    """ss_3Dplot returns a figure for each parameter combination."""
    result = output_plots.ss_3Dplot(
        base_params, base_ss, reform_params=reform_params,
        reform_ss=reform_ss, plot_type=plot_type, plot_title=plot_title)
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def test_inequality_plot(base_tpi, base_params, reform_tpi, reform_params,
                         ineq_measure, pctiles, plot_type):
    """inequality_plot should return a figure for every measure/percentile."""
    result = output_plots.inequality_plot(
        base_tpi, base_params,
        reform_tpi=reform_tpi,
        reform_params=reform_params,
        ineq_measure=ineq_measure,
        pctiles=pctiles,
        plot_type=plot_type)
    assert result
OpenSourcePolicyCenter/dynamic
[ 60, 94, 60, 26, 1422541994 ]
def read_f(name):
    """Return the UTF-8 text of *name*, resolved relative to ``current_dir``."""
    full_path = os.path.join(current_dir, name)
    with open(full_path, encoding='utf8') as handle:
        return handle.read()
mozillazg/python-shanbay-team-assistant
[ 18, 2, 18, 3, 1381847526 ]
def meta_info(meta, filename='shanbay_assistant/__init__.py', default=''):
    """Extract the value of a ``meta = 'value'`` assignment in *filename*.

    Returns *default* when no matching assignment is found.
    """
    pattern = (r"""%s\s+=\s+(?P<quote>['"])(?P<meta>.+?)(?P=quote)"""
               % re.escape(meta))
    match = re.search(pattern, read_f(filename))
    if match is None:
        return default
    return match.group('meta')
mozillazg/python-shanbay-team-assistant
[ 18, 2, 18, 3, 1381847526 ]
def __init__( self, table_name: str, region: Optional[str] = None, host: Optional[str] = None, connect_timeout_seconds: Optional[float] = None, read_timeout_seconds: Optional[float] = None, max_retry_attempts: Optional[int] = None, base_backoff_ms: Optional[int] = None, max_pool_connections: Optional[int] = None, extra_headers: Optional[Mapping[str, str]] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def get_meta_table(self, refresh: bool = False) -> MetaTable:
    """Return this table's metadata, re-fetching it when *refresh* is True."""
    return self.connection.get_meta_table(
        self.table_name, refresh=refresh)
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def delete_item( self, hash_key: str, range_key: Optional[str] = None, condition: Optional[Condition] = None, return_values: Optional[str] = None, return_consumed_capacity: Optional[str] = None, return_item_collection_metrics: Optional[str] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def update_item( self, hash_key: str, range_key: Optional[str] = None, actions: Optional[Sequence[Action]] = None, condition: Optional[Condition] = None, return_consumed_capacity: Optional[str] = None, return_item_collection_metrics: Optional[str] = None, return_values: Optional[str] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def put_item( self, hash_key: str, range_key: Optional[str] = None, attributes: Optional[Any] = None, condition: Optional[Condition] = None, return_values: Optional[str] = None, return_consumed_capacity: Optional[str] = None, return_item_collection_metrics: Optional[str] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def batch_write_item( self, put_items: Optional[Any] = None, delete_items: Optional[Any] = None, return_consumed_capacity: Optional[str] = None, return_item_collection_metrics: Optional[str] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def batch_get_item( self, keys: Sequence[str], consistent_read: Optional[bool] = None, return_consumed_capacity: Optional[str] = None, attributes_to_get: Optional[Any] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def get_item( self, hash_key: str, range_key: Optional[str] = None, consistent_read: bool = False, attributes_to_get: Optional[Any] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def scan( self, filter_condition: Optional[Any] = None, attributes_to_get: Optional[Any] = None, limit: Optional[int] = None, return_consumed_capacity: Optional[str] = None, segment: Optional[int] = None, total_segments: Optional[int] = None, exclusive_start_key: Optional[str] = None, consistent_read: Optional[bool] = None, index_name: Optional[str] = None, settings: OperationSettings = OperationSettings.default, select: Optional[str] = None,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def query( self, hash_key: str, range_key_condition: Optional[Condition] = None, filter_condition: Optional[Any] = None, attributes_to_get: Optional[Any] = None, consistent_read: bool = False, exclusive_start_key: Optional[Any] = None, index_name: Optional[str] = None, limit: Optional[int] = None, return_consumed_capacity: Optional[str] = None, scan_index_forward: Optional[bool] = None, select: Optional[str] = None, settings: OperationSettings = OperationSettings.default,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def describe_table(self) -> Dict:
    """Perform the DescribeTable operation for this table and return the result."""
    return self.connection.describe_table(self.table_name)
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def update_time_to_live(self, ttl_attr_name: str) -> Dict:
    """Perform the UpdateTimeToLive operation on *ttl_attr_name* and return the result."""
    return self.connection.update_time_to_live(
        self.table_name, ttl_attr_name)
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def create_table( self, attribute_definitions: Optional[Any] = None, key_schema: Optional[Any] = None, read_capacity_units: Optional[int] = None, write_capacity_units: Optional[int] = None, global_secondary_indexes: Optional[Any] = None, local_secondary_indexes: Optional[Any] = None, stream_specification: Optional[Dict] = None, billing_mode: str = DEFAULT_BILLING_MODE, tags: Optional[Dict[str, str]] = None,
jlafon/PynamoDB
[ 2128, 418, 2128, 256, 1390184315 ]
def alignment_mapping_from_file(f, char_mapping=DNA):
    """Create a mapping from a file of alignment columns.

    Each line of *f* contains an alignment column and an integer symbol.
    Returns ``(align_count, mapping)`` where *mapping* translates packed
    column indices (via *char_mapping*) to their symbols.
    """
    pairs = []
    for line in f:
        column, symbol = line.split()
        pairs.append((column, int(symbol)))
    # All columns are assumed to have the width of the first one.
    align_count = len(pairs[0][0])
    mapping = IntToIntMapping(char_mapping.get_out_size() ** align_count)
    for column, symbol in pairs:
        index = char_mapping.translate_list(list(column))[0]
        mapping.set_mapping(index, symbol)
    return align_count, mapping
bxlab/bx-python
[ 132, 51, 132, 16, 1463071183 ]
def test_join_overlapping():
    """join_overlapping merges touching/overlapping intervals."""
    join = annos.join_overlapping

    # Empty input yields empty starts and ends.
    starts, ends = join([], [])
    assert len(starts) == 0
    assert len(ends) == 0

    # Already-disjoint intervals come back unchanged.
    s = [1, 3, 6]
    e = [2, 4, 10]
    assert join(s, e) == (s, e)

    intervals = np.array([[1, 2], [3, 4], [4, 5], [6, 8], [8, 8],
                          [8, 9], [10, 15], [10, 11], [11, 14], [14, 16]])
    expect = [[1, 2], [3, 5], [6, 9], [10, 16]]
    merged = np.array(join(intervals[:, 0], intervals[:, 1])).T
    npt.assert_array_equal(merged, expect)
cangermueller/deepcpg
[ 130, 68, 130, 18, 1474306651 ]
def test_is_in():
    """is_in flags positions that fall inside any [start, end] interval."""
    starts = [2, 4, 12, 17]
    ends = [2, 8, 15, 18]
    positions = [-1, 2, 2, 3, 4, 8, 15, 16]
    expected = [False, True, True, False, True, True, True, False]
    npt.assert_array_equal(annos.is_in(positions, starts, ends), expected)
cangermueller/deepcpg
[ 130, 68, 130, 18, 1474306651 ]
def test_extend_frame():
    """extend_len_frame grows each interval to at least the requested length."""
    cols = ['chromo', 'start', 'end']
    frame = pd.DataFrame({
        'chromo': '1',
        'start': [2, 3, 3, 1, 1],
        'end': [3, 3, 8, 2, 1]
    }).loc[:, cols]
    expected = pd.DataFrame({
        'chromo': '1',
        'start': [1, 2, 3, 1, 1],
        'end': [4, 5, 8, 4, 4]
    }).loc[:, cols]
    result = annos.extend_len_frame(frame, 4)
    npt.assert_array_equal(result.values, expected.values)
cangermueller/deepcpg
[ 130, 68, 130, 18, 1474306651 ]
def __init__(self, module, weights, dropout=0, variational=False):
    """Wrap *module* and apply weight drop to the named *weights*.

    :param module: the wrapped module (e.g. an RNN)
    :param weights: iterable of weight attribute names to drop
    :param dropout: dropout probability applied to those weights
    :param variational: use the variational (per-row) dropout variant
    """
    super(WeightDrop, self).__init__()
    for name, value in (('module', module), ('weights', weights),
                        ('dropout', dropout), ('variational', variational)):
        setattr(self, name, value)
    self._setup()
eladhoffer/seq2seq.pytorch
[ 514, 79, 514, 9, 1496300739 ]
def _setup(self):
    """Re-register each dropped weight under a ``*_raw`` name.

    Terrible temporary solution to an issue regarding compacting weights
    re: CUDNN RNN — flatten_parameters is replaced with a no-op because
    the original weight names are deleted below.
    """
    if issubclass(type(self.module), torch.nn.RNNBase):
        self.module.flatten_parameters = self._dummy
    for weight_name in self.weights:
        print('Applying weight drop of {} to {}'.format(
            self.dropout, weight_name))
        raw_weight = getattr(self.module, weight_name)
        del self.module._parameters[weight_name]
        self.module.register_parameter(
            weight_name + '_raw', Parameter(raw_weight.data))
eladhoffer/seq2seq.pytorch
[ 514, 79, 514, 9, 1496300739 ]
def readme():
    """Return the contents of README.md from the current directory."""
    with open('README.md', 'r') as handle:
        return handle.read()
realcr/docker_machinator
[ 4, 2, 4, 1, 1484496824 ]
def __init__(self, message, error_number=None):
    """Record *message* through Exception and remember the errno, if any."""
    self.error_number = error_number
    Exception.__init__(self, message)
psistats/linux-client
[ 2, 1, 2, 7, 1406404291 ]
def init(config_filename=DEFAULT_CONFIG_FILENAME):
    """Open *config_filename* via libc and hand it to ``_init``.

    Raises OSError (with the C errno) when the file cannot be opened;
    the FILE* is always closed afterwards.
    """
    handle = stdc.fopen(config_filename.encode('utf-8'), b'r')
    if handle is None:
        err = get_errno()
        raise OSError(err, os.strerror(err), config_filename)
    try:
        _init(handle)
    finally:
        stdc.fclose(handle)
psistats/linux-client
[ 2, 1, 2, 7, 1406404291 ]
def __repr__(self):
    """Debug representation showing the feature's key fields."""
    fields = (self.__class__.__name__, self.name, self.number,
              self.type, self.mapping, self.flags)
    return '<%s name=%r number=%d type=%d mapping=%d flags=%08x>' % fields
psistats/linux-client
[ 2, 1, 2, 7, 1406404291 ]
def __repr__(self):
    """Debug representation showing name, number and type."""
    fields = (self.__class__.__name__, self.name, self.number, self.type)
    return '<%s name=%r number=%r type=%r>' % fields
psistats/linux-client
[ 2, 1, 2, 7, 1406404291 ]
def label(self):
    """Return this feature's label as a Unicode string.

    TODO Maybe this is a memory leak! (the buffer returned by
    ``_get_label`` may never be freed — confirm against the C API)
    """
    raw = _get_label(byref(self.chip), byref(self))
    return raw.decode('utf-8')
psistats/linux-client
[ 2, 1, 2, 7, 1406404291 ]