| file_name (large_string, len 4-140) | prefix (large_string, len 0-39k) | suffix (large_string, len 0-36.1k) | middle (large_string, len 0-29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
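Each row below pairs a source file's `prefix` and `suffix` with the held-out `middle` span, and `fim_type` records how that span was carved out: `random_line_split`, `identifier_body`, `identifier_name`, or `conditional_block`. Truncated cells (marked `...`) are preview artifacts; the full strings are what the length stats above describe. A minimal sketch of assembling one row into a fill-in-the-middle training example follows; the dataset id and the `<PRE>`/`<SUF>`/`<MID>` sentinel tokens are placeholder assumptions (sentinels differ between model families), not part of this dataset.

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the real dataset name.
ds = load_dataset("user/code-fim-preview", split="train")

# PSM-order sentinels; assumed here, use the ones your target model was trained with.
PRE, SUF, MID = "<PRE>", "<SUF>", "<MID>"

def to_fim_example(row):
    # prefix/suffix are the visible context; middle is the completion target.
    prompt = f"{PRE}{row['prefix']}{SUF}{row['suffix']}{MID}"
    return {"prompt": prompt, "target": row["middle"], "label": row["fim_type"]}

example = to_fim_example(ds[0])
print(example["label"], len(example["prompt"]))
```

The `fim_type` label is useful for stratified evaluation, e.g. scoring exact match separately on `identifier_name` rows (short, high-precision targets) versus `random_line_split` rows (arbitrary spans).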
lstm.py | '''
Created on 18-Nov-2019
@author: 91984
'''
import pandas as pd
import numpy as np
import sys
# from datetime import datetime
import statsmodels.api as sm
import matplotlib.pylab as plt
from audioop import rms
# df = pd.read_csv('C:\\Users\\91984\\Desktop\\shampoo.csv')
from datetime import datetime
fr... | # print(df_proj)
#
# plt.figure(figsize=(20, 5))
# plt.plot(df_proj.index, df_proj['data'])
# plt.plot(df_proj.index, df_proj['Prediction'], color='r')
# plt.legend(loc='best', fontsize='xx-large')
# plt.xticks(fontsize=18)
# plt.yticks(fontsize=16)
# plt.show()
# #
# # scaler = MinMaxScaler(feature_range... | # index=future_dates[-n_input:].index, columns=['Prediction'])
#
# df_proj = pd.concat([df,df_predict], axis=1)
#
| random_line_split |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
/... |
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))... | {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
le... | identifier_body |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
/... | (&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel... | write_rgb_separator | identifier_name |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
/... | if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO:... | current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line | random_line_split |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
/... |
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} el... | {
x = 0;
y += 1;
} | conditional_block |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmenta... | latest = self._latest_date()
for stmt in (
"SELECT * FROM cache WHERE size IS NOT NULL AND owner='orphans' AND creation_date < ?",
"SELECT * FROM cache WHERE size IS NOT NULL AND creation_date < ? ORDER BY last_access ASC",
):
for entr... |
with self.connection as db:
| random_line_split |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmenta... | (self):
if self._connection is None:
cache_dir = SETTINGS.get("cache-directory")
if not os.path.exists(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
cache_db = os.path.join(cache_dir, CACHE_DB)
LOG.debug("Cache database is %s", cache_db)
... | connection | identifier_name |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmenta... |
self._register_cache_file(
full,
"orphans",
None,
parent,
)
self._update_cache()
def _delete_file(self, path):
self._ensure_in_cache(path)
try:
if os.path.isdir(pa... | LOG.debug(
f"CliMetLab cache: orphan found: {full} with parent {parent}"
) | conditional_block |
caching.py | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmenta... |
def _housekeeping(self):
top = SETTINGS.get("cache-directory")
with self.connection as db:
for name in os.listdir(top):
if name == CACHE_DB:
continue
full = os.path.join(top, name)
count = db.execute(
... | """Update cache size and size of each file in the database ."""
with self.connection as db:
update = []
commit = False
for n in db.execute("SELECT path FROM cache WHERE size IS NULL"):
try:
path = n[0]
if os.path.isdir(p... | identifier_body |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#m... | playWork();
//////////////////////
// Projects section //
//////////////////////
projects.display = function() {
$("#projects").append(HTMLprojectStart);
for (key in projects.projects){
if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projec... | $("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace("%... | identifier_body |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#m... | if (projects.projects.hasOwnProperty(key)) {
var titleHTML = HTMLprojectTitle.replace("%data%", projects.projects[key].title);
$(".project-entry:last").append(titleHTML);
var datesHTML = HTMLprojectDates.replace("%data%", projects.projects[key].dates);
$(".proje... | $("#projects").append(HTMLprojectStart);
for (key in projects.projects){ | random_line_split |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#m... | }
}
}
projects.display();
/////////////////
// Map Section //
/////////////////
$("#mapDiv").append(googleMap);
// $(document).load(initializeMap());
| for (image in projects.projects[key].images) {
var imagesHTML = HTMLprojectImage.replace("%data%",projects.projects
[key].images[image]);
$(".project-entry:last").append(imagesHTML);
}
}
| conditional_block |
resumeBuilder.js | /*
This is empty on purpose! Your code to build the resume will go here.
*/
// var name = "Peter";
// var awesomeToughts = "I am " + name + " and I am awesome";
// // .replace([old],[new]);
// var funToughts = awesomeToughts.replace("awesome","fun")
// console.log(funToughts);
// console.log(awesomeToughts);
// $("#m... |
$("#workExperience").append(HTMLworkStart);
for (key in work.work){
if (work.work.hasOwnProperty(key)) {
var employerHTML = HTMLworkEmployer.replace("%data%", work.work[key].employer);
$(".work-entry:last").append(employerHTML);
var titleHTML = HTMLworkTitle.replace... | layWork() { | identifier_name |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/past... | var nodes node.List
mns, err := task.pastelClient.MasterNodesTop(ctx)
if err != nil {
return nil, err
}
count := 0
for _, mn := range mns {
count++
if count <= maxNode {
nodes = append(nodes, node.NewNode(task.Service.nodeClient, mn.ExtAddress, mn.ExtKey))
} else {
break
}
}
return nodes, nil
... | random_line_split | |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/past... |
if err := primary.Session(ctx, true); err != nil {
return nil, err
}
primary.SetPrimary(true)
if len(nodes) == 1 {
// If the number of nodes only have 1 node, we use this primary node and return directly
meshNodes.Add(primary)
return meshNodes, nil
}
nextConnCtx, nextConnCancel := context.WithCancel(ct... | {
return nil, err
} | conditional_block |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/past... | (ctx context.Context) error {
ctx = log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", logPrefix, task.ID()))
log.WithContext(ctx).Debugf("Start task")
defer log.WithContext(ctx).Debugf("End task")
defer close(task.resultChan)
defer close(task.resultChanGet)
if err := task.run(ctx); err != nil {
task.err = err
... | Run | identifier_name |
task.go | package userdataprocess
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/pastelnetwork/gonode/common/errgroup"
"github.com/pastelnetwork/gonode/common/errors"
"github.com/pastelnetwork/gonode/common/log"
"github.com/pastelnetwork/gonode/common/service/task"
"github.com/past... |
// SubscribeProcessResultGet returns the result state of userdata process
func (task *Task) SubscribeProcessResultGet() <-chan *userdata.ProcessRequest {
return task.resultChanGet
}
// NewTask returns a new Task instance.
func NewTask(service *Service, request *userdata.ProcessRequest, userpastelid string) *Task {
... | {
return task.resultChan
} | identifier_body |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf ... |
def __unicode__(self):
trans = None
# This might be provided using a .extra() clause to avoid hundreds of extra queries:
if hasattr(self, "preferred_translation"):
trans = getattr(self, "preferred_translation", u"")
else:
try:
trans = unicod... | super(MediaFileBase, self).__init__(*args, **kwargs)
if self.file and self.file.path:
self._original_file_path = self.file.path | identifier_body |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf ... |
count = 0
for zi in z.infolist():
if not zi.filename.endswith('/'):
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
bname = path.basename(zi.fil... | if not storage:
messages.error(request, _("Could not access storage"))
return | random_line_split |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf ... | :
verbose_name = _('media file translation')
verbose_name_plural = _('media file translations')
def __unicode__(self):
return self.caption
#-------------------------------------------------------------------------
class MediaFileTranslationInline(admin.StackedInline):
model = MediaFi... | Meta | identifier_name |
models.py | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from datetime import datetime
from django.contrib import admin, messages
from django.contrib.auth.decorators import permission_required
from django.conf ... |
return HttpResponseRedirect(reverse('admin:medialibrary_mediafile_changelist'))
def queryset(self, request):
qs = super(MediaFileAdmin, self).queryset(request)
# FIXME: This is an ugly hack but it avoids 1-3 queries per *FILE*
# retrieving the translation information
if d... | messages.error(request, _("No input file given")) | conditional_block |
transformer.py | """
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Attention for the change in `Scaled_dot_product`
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dr... | pre_process_layer(attn_output, preprocess_cmd, prepostprocess_dropout),
d_inner_hid, d_model, relu_dropout
)
return post_process_layer(attn_output, ffd_output,
postprocess_cmd, prepostprocess_dropout)
def encoder(enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inne... | attn_output = post_process_layer(enc_input, attn_output,
postprocess_cmd, prepostprocess_dropout)
ffd_output = positionwise_feed_forward( | random_line_split |
transformer.py | """
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Attention for the change in `Scaled_dot_product`
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dr... |
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
ctx_multiheads = scaled_dot_product_attention(q, k, v, pos_enc, attn_bias, d_key,
dropout_rate)
out = __combine_heads(... | """
Scaled Dot-Product Attention
Change:
- Different from the original one.
We will remove the scale factor math: \sqrt{d_k} according to the paper.
- Bias for attention and position encoding are added.
"""
# product = layers.matmul(x=q, y=k... | identifier_body |
transformer.py | """
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Attention for the change in `Scaled_dot_product`
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dr... | (enc_input, attn_bias, n_layer, n_head,
d_key, d_value, d_model, d_inner_hid, pos_enc,
preporstprocess_dropout, attention_dropout,
relu_dropout, preprocess_cmd='n',
postprocess_cmd='da'):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
Args:... | encoder | identifier_name |
transformer.py | """
Encoder part is inherited from
https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleNLP/machine_translation/transformer
Attention for the change in `Scaled_dot_product`
"""
from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
dr... |
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input, attn_bias, n_head, d_key,
d_value, d_model, d_inner_hid, pos_enc, prepostprocess_dropout,
attention_dropout, relu_dropout, preprocess... | if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
seed=dropout_seed,
is_test=False) | conditional_block |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte // hash of the latest block
DB *bolt.DB
}
func GetBlockChainObject(nodeID stri... | ash)
if hashInt.Cmp(big.NewInt(0))==0{
break
}
}
return Transaction{},errors.New("Transaction is not found")
}
func(bc *BlockChain) VerifyTransaction(tx *Transaction,txs []*Transaction) bool{
if tx.IsCoinBaseTransaction(){
return true
}
prevTxs := make(map[string]Transaction)
for _,vin := range tx.Vins{
... | {
if bytes.Compare(tx.TxHash,txHash)==0{
return *tx,nil
}
}
hashInt.SetBytes(block.PrevBlockH | conditional_block |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte // hash of the latest block
DB *bolt.DB
}
func GetBlockChainObject(nodeID stri... | func(blockchain *BlockChain)GetBalance(address string) int64{
utxos := blockchain.UnUTXOs(address,[]*Transaction{})
fmt.Println(utxos)
var amount int64
for _,utxo := range utxos{
amount += utxo.Output.Value
}
return amount
}
// digital signature
func(bc *BlockChain)SignTransaction(tx *Transaction,private ecdsa.PrivateKey... | return nil
})
} | random_line_split |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte // hash of the latest block
DB *bolt.DB
}
func GetBlockChainObject(nodeID stri... | if b!=nil{
// //blockHash := b.Get([]byte("l"))
// block := DeSerialize(b.Get(blc.Tip))
// height = block.Height+1
// preHash = block.Hash
// }
// return nil
// })
// if err!=nil{
// log.Panic(err)
// }
// // create a new block and add it to the database
//
// err = blc.DB.Update(func(tx *bolt.Tx) error{
// b := tx.Bucket([]byte(blockTable... |
os.Exit(1)
}
// when the database does not exist, create the genesis blockchain
fmt.Println("Creating genesis block...")
dbName := fmt.Sprintf(dbName,nodeID)
db, err := bolt.Open(dbName, 0600, nil)
if err != nil {
log.Fatal(err)
}
//defer db.Close()
var genesisBlock *Block
err = db.Update(func(tx *bolt.Tx)error{
b,err := tx.CreateBucket([]byte(blockTableName... | identifier_body |
BlockChain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
"time"
)
const dbName = "blockchain_%s.db"
const blockTableName = "blocks"
type BlockChain struct {
Tip []byte // hash of the latest block
DB *bolt.DB
}
func GetBlockChainObject(nodeID stri... | ripemd160Hash) {
key := hex.EncodeToString(in.TXHash)
spentTXOutputs[key] = append(spentTXOutputs[key], in.Vout)
}
}
}
}
fmt.Println(spentTXOutputs)
for _,tx:=range txs{
// if the current txHash has not been recorded as spent
spentArray,ok:=spentTXOutputs[hex.EncodeToString(tx.TxHash)]
if ok==false{
for index,out := r... | 60Hash( | identifier_name |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given p... | (y):
"""
The labels in logistic regression are interpreted as probabilities,
so this method transfers the labels to the range [0, 1]
:param y: labels
:return: labels as probability
"""
y[y == -1] = 0
return y
| change_labels_logistic | identifier_name |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given p... |
def report_prediction_accuracy_logistic(y, tx, w_best, verbose=True):
"""
Report the percentage of correct predictions of a model that is applied
on a set of labels. This method specifically works for logistic regression
since the prediction assumes that labels are between 0 and 1.
:param y: labe... | """
Perform cross_validation for a specific test set from the partitioned set.
:param y: label data
:param augmented_tx: augmented features
:param k_indices: An array of k sub-indices that are randomly partitioned
:param k: number of folds
:param lambda_: regularization parameters
:param rep... | identifier_body |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given p... |
print("\n... finished.")
return tx, header
def create_csv(output_file, y, tx, ids, header, is_test):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the je... | tx = np.delete(tx, col - num_removed, 1)
header = np.delete(header, col - num_removed + 2)
num_removed += 1 | conditional_block |
processing.py | import csv
import numpy as np
from implementations import ridge_regression
from helper_functions import load_csv_data
from helper_functions import compute_error
from helper_functions import compute_mse
from helper_functions import compute_rmse
def load(train_file, test_file):
"""
Load dataset from the given p... |
def split_data(y, tx, ids, jet_num):
"""
Split the given dataset such that only the data points with a certain
jet number remains, note that jet number is a discrete valued feature. In
other words, filter the dataset using the jet number.
:param y: known label data
:param tx: an array of traini... | for index in range(len(tx_row)):
dictionary[header[index + 2]] = float(tx_row[index])
writer.writerow(dictionary)
print('\n... finished.')
| random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's do... | let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader:... | vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone()); | random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's do... |
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
... | {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
} | identifier_body |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's do... | <TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let so... | upgrade | identifier_name |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand... | (red: u8, green: u8, blue:u8) -> Color {
return Color {r : red, g : green, b : blue};
}
/// Conctructor with random values for each color
pub fn new_random() -> Color {
let mut r = rand::thread_rng();
return Color {
r : r.gen::<u8>(),
g : r.gen::<u8>... | new | identifier_name |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand... |
/// Width's getter
pub fn width(&self) -> u32 {
return self.width;
}
/// Height's getter
pub fn height(&self) -> u32 {
return self.height;
}
/// Pixels getter
pub fn pixels(&self) -> &Vec<Color> {
return &self.pixels;
}
/// Equals()
pub fn eq(&sel... | {
return Image {width : width, height : height, pixels : pixels};
} | identifier_body |
lib.rs | // This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand... | pub fn blue(&self) -> u8 {
return self.b;
}
/// toString() to display a Color
pub fn display(&self) {
println!("r : {}, g : {}, b : {}", self.r, self.g, self.b);
}
/// Equals to determine if the two Colors in parameters are equal.
/// Return true if self and other are equal... | }
/// Blue's getter | random_line_split |
main.py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | "$TARGET"
]), "disassembler Output -> $TARGET"),
suffix=".dis"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
target_firm_elf = None
target_firm_hex = None
object_dump... | "$SOURCES",
">",
| random_line_split |
main.py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | (env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_ad... | _jlink_cmd_script | identifier_name |
main.py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protoc... | build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [
"h",
"loadbin %s, %s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")),
... | identifier_body |
main.py | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Bin Output -> $TARGET"),
suffix=".bin"
... | env.Replace(PROGNAME="firmware") | conditional_block |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct ... | dxMap := woiIdx.(map[string]beans.WxOpenId)
var wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: use... | form)
woiI | identifier_name |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct ... | Bean)
now := utils.GetNowTime()
var userUpdateBean = xorm.Users{
Token: token,
TokenCreated: now,
LastLoginTime: now,
}
var respUserBean xorm.Users
// begin the transaction
session := service.engine.NewSession()
err := session.Begin()
defer session.Close()
_, err = session.ID(userBean.Id).Update(userUpdateBean)
... | wxOpenId beans.WxOpenId
wxOpenId, bind = woiIdxMap[openId]
if bind {
userId := wxOpenId.UserId
uic, _ := ramcache.UserIdCard.Load(service.platform)
uicMap := uic.(map[int]beans.UserProfile)
userProfile := uicMap[userId]
userBean = xorm.Users{
Id: userId,
Phone: userProfile.Phone,
UserName:... | identifier_body |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct ... | rId: iUserId, Ip: service.ip, LoginTime: iNow, LoginFrom: service.loginFrom}
_, createErr = session.InsertOne(loginLog)
if createErr != nil {
session.Rollback()
//utils.ResFaiJSON(&ctx, createErr.Error(), "Failed to bind WeChat", config.NOTGETDATA)
return userBean, createErr
}
err := services.PromotionAward(service.platfor... | Bean, createErr
}
loginLog := xorm.UserLoginLogs{Use | conditional_block |
wxController.go | package frontEndControllers
import (
"encoding/json"
"errors"
goXorm "github.com/go-xorm/xorm"
"github.com/kataras/iris"
"qpgame/common/services"
"qpgame/common/utils"
"qpgame/config"
"qpgame/models"
"qpgame/models/beans"
"qpgame/models/xorm"
"qpgame/ramcache"
"strconv"
"time"
)
type WxController struct ... | utMap := ut.(map[string][]string)
sUserId := strconv.Itoa(iUserId)
utMap[userBean.UserName] = []string{sUserId, token, sTokenTime, "1"}
utils.UpdateUserIdCard(service.platform, iUserId, map[string]interface{}{
"Username": userBean.UserName,
"Token": userBean.Token,
"TokenCreated": sTokenTime, // 注意... | random_line_split | |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has bee... | (file_name):
"""Create a hash of a file."""
BLOCKSIZE = 65536
hasher = hashlib.sha1()
with open(file_name, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
def send_to_db... | hash_file | identifier_name |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has bee... |
else:
print("Can't find config file: " + config_file)
exit(1)
# --------------------------
# Parse configuration object
# --------------------------
data_path = config.get('global', 'data_path', 0)
log_timestamp_format = config.get('global', 'log_timestamp_format', 0)
log_file = config.get('global', 'log_fil... | config.read(config_file) | conditional_block |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has bee... | #
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
... | # python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi | random_line_split |
redcap2mysql.py | #!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has bee... |
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = pd.read_csv(csv_file, index_col=False)
... | """Get the sha1 hash of the previously uploaded data for a table."""
# See if the database contains the log_table (REDCap transfer log) table.
rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
row0 = rs.fetchone()
res = ''
if (row0 is not None) and (len(row0) != 0):
res = row0... | identifier_body |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Va... | if s == nil {
return fmt.Errorf("func AddSection: nil MessageCardSection received")
}
// Perform validation of all MessageCardSection fields in an effort to
// avoid adding a MessageCardSection with zero value fields. This is
// done to avoid generating an empty sections JSON array since the
// Sections... | func (mc *MessageCard) AddSection(section ...*MessageCardSection) error {
for _, s := range section {
// bail if a completely nil section provided | random_line_split |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Va... | (fact ...MessageCardSectionFact) error {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
}
/... | AddFact | identifier_name |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Va... |
// AddFactFromKeyValue accepts a key and slice of values and converts them to
// MessageCardSectionFact values
func (mcs *MessageCardSection) AddFactFromKeyValue(key string, values ...string) error {
// validate arguments
if key == "" {
return errors.New("empty key received for new fact")
}
if len(values) < 1... | {
for _, f := range fact {
if f.Name == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
if f.Value == "" {
return fmt.Errorf("empty Name field received for new fact: %+v", f)
}
}
mcs.Facts = append(mcs.Facts, fact...)
return nil
} | identifier_body |
messagecard.go | package goteamsnotify
import (
"errors"
"fmt"
"strings"
)
// MessageCardSectionFact represents a section fact entry that is usually
// displayed in a two-column key/value format.
type MessageCardSectionFact struct {
// Name is the key for an associated value in a key/value pair
Name string `json:"name"`
// Va... |
if heroImage.Title == "" {
return fmt.Errorf("cannot add empty hero image title")
}
mcs.HeroImage = &heroImage
// our validation checks didn't find any problems
return nil
}
// NewMessageCard creates a new message card with fields required by the
// legacy message card format already predefined
func NewMess... | {
return fmt.Errorf("cannot add empty hero image URL")
} | conditional_block |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can ... | (v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
... | timespec_to_opt_duration | identifier_name |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can ... |
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
... | {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
} | conditional_block |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can ... | },
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_... | Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64, | random_line_split |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
//// name of the fingerprint set file
var hashFileName string ... |
}
err = TemplateSaveFile(ORACLEBAKPATHTL, ORACLEBAKPATH, oracledir)
if err != nil {
logger.Println("生成oracledir.bat 失败" + err.Error())
}
var oracledatatmp []string = strings.Split(info.OracleURL, "@")
if oracledatatmp == nil || len(oracledatatmp) < 2 {
logger.Println("读取oracle配置信息失败")
}
oracleddata :... | string{
"Dir": dir,
"OracleBakPath": OracleBakPath, | conditional_block |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
//// name of the fingerprint set file
var hashFileName string ... | info os.FileInfo, err error) error {
// var file []byte
// if err != nil {
// return filepath.SkipDir
// }
// header, err := zip.FileInfoHeader(info) // convert to zip-format file info
// if err != nil {
// return filepath.SkipDir
// }
// header.Name, _ = filepath.Rel(filepath.Dir(frm), path)
// if !info.IsDir() {
// // 确... | path string, | identifier_name |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
//// name of the fingerprint set file
var hashFileName string ... | //cmd.Args = []string{"a",dst,frm};
//cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
logger.Println("执行7zip压缩命令错误: " + err.Error())
//logger.Fatal(err)
return err
}
logger.Println("in all caps: %s\n", out.String())
return nil
}
// invoke 7zi... | // invoke 7zip compression
func compress7zip(frm, dst string) error {
cmd := exec.Command("7z/7z.exe", "a", "-mx=1", "-v5g", dst, frm) | random_line_split |
Main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/alert0/backsyn/logger"
"github.com/jlaffaye/ftp"
)
var backFilePath string = "back.json"
//// name of the fingerprint set file
var hashFileName string ... | n err
}
xcplasttime = "01-02-2006"
}
if err := tarpath(info, lasttime, xcplasttime); err != nil {
logger.Println("复制文件失败" + err.Error())
}
if err := zipfiles(info.TargetPath, lasttime); err != nil {
logger.Println("压缩文件失败" + err.Error())
}
var remoteSavePath = lastmoth + "^" + strings.Replace(get_exter... | ecu(ORACLEBAKBATPATH)
if err != nil {
logger.Println("运行文件失败" + ORACLEBAKBATPATH + err.Error())
return err
}
return nil
}
func BakFiles(info Backinfo) error {
var xcplasttime = time.Now().AddDate(0, 0, -1).Format("01-02-2006")
var lasttime = time.Now().Format("2006-01-02")
var lastmoth = time.Now().Forma... | identifier_body |
spatial_ornstein_uhlenbeck.py | #/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
import... |
else:
set_xyz_lambda_zero = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration,\
"params":{"lambda":0.000},"update_mode":"replace","axes":["x","y","z"]}
perturbations_list.append(set_xyz_lambda_zero)
return perturbations_list
def main():
... | df = pd.read_csv(pert_file_path, sep = "\t")
headers_list = list(df)
for index, row in df.iterrows():
a_perturbation = {"start":perturbation_timepoint,\
"end":perturbation_timepoint + perturbation_duration}
required_headers_checker = {"params" : False, "values" : ... | conditional_block |
spatial_ornstein_uhlenbeck.py | #/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
import... |
def ensure_exists(output_dir):
"""
Ensure that output_dir exists
:param output_dir: path to output directory
"""
try:
makedirs(output_dir)
except OSError:
if not isdir(output_dir):
raise
def write_options_to_log(log, opts):
"""
Writes user's input option... | """
Raise ValueError if perturbation_timepoint is < 0 or >n_timepoints
:param perturbation_timepoint: defined timepoint for perturbation application
:param n_timepoints: number of timepoints
"""
if perturbation_timepoint and perturbation_timepoint >= n_timepoints:
raise ValueError("Perturb... | identifier_body |
spatial_ornstein_uhlenbeck.py | #/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
import... | (log, opts):
"""
Writes user's input options to log file
:param log: log filename
:param opts: options
"""
logfile = open(join(opts.output, log),"w+")
logfile_header = "#Karenina Simulation Logfile\n"
logfile.write(logfile_header)
logfile.write("Output folder: %s\n" %(str(opts.ou... | write_options_to_log | identifier_name |
spatial_ornstein_uhlenbeck.py | #/usr/bin/env python
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2016, The Karenina Project"
__credits__ = ["Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "0.0.1-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
import... | perturbations = parse_perturbation_file(opts.pert_file_path,\
opts.perturbation_timepoint, opts.perturbation_duration)
treatments = [[], perturbations]
treatment_names = opts.treatment_names.split(",")
if verbose:
print("Raw number of individuals from user:",opts.n_individuals)
prin... | #Set up the treatments to be applied
| random_line_split |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.co... | (configMap map[string]interface{}) *Config {
b := *c.commonBackfill(configMap)
if _, exists := configMap["log_file_name"]; !exists {
b.LogFileName = DefaultConfig.LogFileName
}
if _, exists := configMap["log_file_max_megabytes"]; !exists {
b.LogFileMaxMegabytes = DefaultConfig.LogFileMaxMegabytes
}
if _, exi... | Backfill | identifier_name |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.co... | CloudPrintingEnable bool `json:"cloud_printing_enable"`
// Associated with root account. XMPP credential.
XMPPJID string `json:"xmpp_jid,omitempty"`
// Associated with robot account. Used for acquiring OAuth access tokens.
RobotRefreshToken string `json:"robot_refresh_token,omitempty"`
// Associated with user ... | LocalPrintingEnable bool `json:"local_printing_enable"`
// Enable cloud discovery and printing. | random_line_split |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.co... |
if xdgCF, err := xdg.Config.Find(cf); err == nil {
// File exists in an XDG directory.
return xdgCF, true
}
// Default to relative path. This is probably what the user expects if
// it wasn't found anywhere else.
return absCF, false
}
// Backfill returns a copy of this config with all missing keys set to d... | {
// File exists on relative path.
return absCF, true
} | conditional_block |
config_unix.go | // Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
// +build linux darwin freebsd
package lib
import (
"os"
"path/filepath"
"reflect"
"github.co... | {
s := *c.commonSparse(context)
if !context.IsSet("log-file-max-megabytes") &&
s.LogFileMaxMegabytes == DefaultConfig.LogFileMaxMegabytes {
s.LogFileMaxMegabytes = 0
}
if !context.IsSet("log-max-files") &&
s.LogMaxFiles == DefaultConfig.LogMaxFiles {
s.LogMaxFiles = 0
}
if !context.IsSet("log-to-journal"... | identifier_body | |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, ... | // Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not presnet, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_... | .lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get(&quote);
let avr = match cached_avr { | random_line_split |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, ... | () {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
}
| test_erraneous_ias_response_from_client_response | identifier_name |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, ... |
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
... | {
// Start conversion, need to parse client_resposne
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading b... | identifier_body |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use bui... | FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but t... |
#[error("Attempted to fill a cell already marked as black")] | random_line_split |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use bui... | Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String ... | elf) -> | identifier_name |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use bui... | n unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} wo... | elf.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub f | identifier_body |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isCom... |
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; }
var value = node.value;
if (node.type !== 'jsx' || isComment(value)) {
return node;
}
var commentContent = COMMENT_CONTENT_REGEX.exec(value... | {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
} | identifier_body |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isCom... | if (node.data && node.data.jsxType === 'JSXElementWithHTMLComments') {
this._services.JSXElementsWithHTMLComments.push(node);
}
var value = node.value;
// fix #4
if (isComment(value)) {
return;
}
var _a = normalizePosition(node.position), l... | var _this = this; | random_line_split |
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isCom... | () {
// @internal
this._options = DEFAULT_PARSER_OPTIONS;
this.parse = this.parse.bind(this);
this.parseForESLint = this.parseForESLint.bind(this);
}
Parser.prototype.normalizeJsxNode = function (node, parent, options) {
if (options === void 0) { options = this._options; ... | Parser | identifier_name |
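
In `identifier_name` rows the middle is a single declared name (`Parser` here; `encoded_length`, `histogram_analysis`, `chooseRandomNumber`, and `select_a_friend` appear below), with the suffix resuming at the parameter list. For the Python files in this table that split can be derived with the standard `ast` module, as sketched below; the Rust and JavaScript rows would need a language-specific parser such as tree-sitter, which is assumed rather than shown:

```python
import ast

def identifier_name_sample(source: str) -> dict:
    """Mask the name of the first function definition found."""
    tree = ast.parse(source)
    fn = next(n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef))
    # Locate the name textually: it follows 'def ' on the definition line.
    lines = source.splitlines(keepends=True)
    line_start = sum(len(l) for l in lines[: fn.lineno - 1])
    name_at = source.index("def " + fn.name, line_start) + len("def ")
    return {
        "prefix": source[:name_at],
        "middle": fn.name,
        "suffix": source[name_at + len(fn.name):],
        "fim_type": "identifier_name",
    }

s = identifier_name_sample("def add(a, b):\n    return a + b\n")
assert s["middle"] == "add" and s["suffix"].startswith("(a, b)")
```
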
parser.js | import { __assign } from "tslib";
import path from 'path';
import remarkMdx from 'remark-mdx';
import remarkParse from 'remark-parse';
import unified from 'unified';
import { hasProperties, isJsxNode, last, normalizeParser, normalizePosition, restoreNodeLocation, } from './helper';
import { COMMENT_CONTENT_REGEX, isCom... |
/* istanbul ignore else */
if (options.filePath && this._options !== options) {
Object.assign(this._options, options);
}
var program;
var parseError;
for (var _i = 0, _a = this._parsers; _i < _a.length; _i++) {
var parser_1 = _a[_i];
t... | {
this._parsers = normalizeParser(options.parser);
} | conditional_block |
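
A `conditional_block` middle is the body of a branch: the `{ ... }` that re-normalizes the parser above, and the cheat-detection and validation branches in the index.js and program.py rows below. For C-style sources the span can be found by brace matching, sketched here; this deliberately ignores braces inside strings and comments, which a real generator would have to handle with a proper parser:

```python
def conditional_block_sample(source: str) -> dict:
    """Mask the first brace-delimited block that follows an 'if ('."""
    start = source.index("{", source.index("if ("))
    end = start
    depth = 0
    for i, ch in enumerate(source[start:], start):
        depth += ch == "{"   # True counts as 1
        depth -= ch == "}"
        if depth == 0:       # matching closing brace found
            end = i + 1
            break
    return {
        "prefix": source[:start],
        "middle": source[start:end],
        "suffix": source[end:],
        "fim_type": "conditional_block",
    }

s = conditional_block_sample("if (ok) {\n  run();\n}\nrest();\n")
assert s["middle"] == "{\n  run();\n}"
```
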
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
#############... |
def period_names_list(periods):
"""Returns a list using function period_name."""
return [period_name(period) for period in periods]
#%%
#==============================================================================
# PARAMETRIZATION OF THE OPTIMIZATION
#=========================================... | """Returns a string in the form '1980 - 1989'."""
year_start = period[0][:4]
year_end = period[1][:4]
return year_start + " - " + year_end | identifier_body |
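
The row above masks exactly the body of `period_name`, docstring included, while the `def` line stays in the prefix. For Python that boundary falls out of `ast` position attributes (Python 3.8+ provides `end_lineno` and `end_col_offset`); a minimal sketch:

```python
import ast

def identifier_body_sample(source: str, name: str) -> dict:
    """Mask the body of the named function, keeping its signature line."""
    tree = ast.parse(source)
    fn = next(n for n in ast.walk(tree)
              if isinstance(n, ast.FunctionDef) and n.name == name)
    # Cumulative character offset of each line start.
    offsets = [0]
    for line in source.splitlines(keepends=True):
        offsets.append(offsets[-1] + len(line))
    body_start = offsets[fn.body[0].lineno - 1] + fn.body[0].col_offset
    body_end = offsets[fn.body[-1].end_lineno - 1] + fn.body[-1].end_col_offset
    return {
        "prefix": source[:body_start],
        "middle": source[body_start:body_end],
        "suffix": source[body_end:],
        "fim_type": "identifier_body",
    }

s = identifier_body_sample('def f(x):\n    """Doc."""\n    return x + 1\n', "f")
assert s["middle"] == '"""Doc."""\n    return x + 1'
```
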
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
#############... |
#%%
# prints the portfolio composition over time
for i in range(4):
period = period_name(optimization_periods[i])
strategy_results[i].drop(["Return"], axis=1).plot.bar(stacked=True, figsize=(12,6))
plt.title("Portfolio composition for the period " + period)
plt.legend()
plt.show... | print(indicator)
strategy_results = []
params['X_macro'] = pd.DataFrame(X_macro[indicator])
for j, period in enumerate(optimization_periods):
params['start_date'], params['end_date'] = period
strategy_results.append(optimization(**params))
mydict[indic... | conditional_block |
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
#############... | # center on 0
#momentum_scores = [a - np.average(momentum_scores) for a in momentum_scores]
momentum_weights = momentum_weighting * np.array(momentum_scores) + (1 - momentum_weighting) * momentum_weights
scaled_weights = np.dot(momentum_weig... | random_line_split | |
ccp_project.py | import datetime as dt
import scipy.optimize as sco
import scipy.stats as scs
import statsmodels.regression.linear_model as sm
import pandas as pd
import pandas.tseries.offsets as pdtso
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
#############... | (optimization_periods, strategy_results, Y_assets, indicator='Sharpe Ratio'):
# histograms for analysis
my_df = strategy_analysis(optimization_periods, strategy_results, Y_assets, freq)
my_df.sort_index(axis=1).loc(axis=1)[:, 'Sharpe Ratio'].plot.bar(figsize=(12,6))
plt.show()
histogram_a... | histogram_analysis | identifier_name |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number gam... | (min, max) {
return Math.floor(Math.random() * (max - min + 1) + min);
}
//intros the game
console.log(
"Let's play a game where I (computer) pick a number between 1 and 100, and you (human) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = ... | chooseRandomNumber | identifier_name |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number gam... |
}
return false;
}
}
//intros the game
console.log(
"Let's play a game where you (human) pick a number between 1 and a maximum, and I (computer) try to guess it."
);
//declares wantToPlay variable to allow users to play multiple times
let wantToPlay = "y";
//while wantToPlay is yes ... | {
console.log(
`\nCheater, cheater pumpkin eater! You said the number was higher than ${
min - 1
}, so it can't also be lower than ${guess}!\n`
);
return true;
} | conditional_block |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number gam... | //sanitizes wantToPlay
wantToPlay = wantToPlay.trim().toLowerCase();
//if the user does not want to play again the game exits
if (wantToPlay === "n" || wantToPlay === "no") {
console.log("\nGoodbye, thanks for playing!");
process.exit();
}
}
//if... | random_line_split | |
index.js | //Boilerplate code set up correctly - used to accept input from user
const readline = require("readline");
const rl = readline.createInterface(process.stdin, process.stdout);
function ask(questionText) {
return new Promise((resolve, reject) => {
rl.question(questionText, resolve);
});
}
//Guess The Number gam... |
//cheat detector function that will return true if there is an issue with the response based on known range (true ==> lying, false ==> not lying)
function cheatDetector(min, max, guess, secretNumber, modifyRange) {
//if the computer's guess is the secret number but the user has said no, the computer calls the... | {
return min + Math.floor((max - min) / 2);
} | identifier_body |
codec.rs | //! encode and decode the frames for the mux protocol.
//! The frames include the length of a PDU as well as an identifier
//! that informs us how to decode it. The length, ident and serial
//! number are encoded using a variable length integer encoding.
//! Rather than rely solely on serde to serialize and deserializ... | (value: u64) -> usize {
struct NullWrite {};
impl std::io::Write for NullWrite {
fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
Ok(buf.len())
}
fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
Ok(())
}
}... | encoded_length | identifier_name |
codec.rs | //! encode and decode the frames for the mux protocol.
//! The frames include the length of a PDU as well as an identifier
//! that informs us how to decode it. The length, ident and serial
//! number are encoded using a variable length integer encoding.
//! Rather than rely solely on serde to serialize and deserializ... | pub serial: u64,
pub pdu: Pdu,
}
/// If the serialized size is larger than this, then we'll consider compressing it
const COMPRESS_THRESH: usize = 32;
fn serialize<T: serde::Serialize>(t: &T) -> Result<(Vec<u8>, bool), Error> {
let mut uncompressed = Vec::new();
let mut encode = varbincode::Serializer... | #[derive(Debug, PartialEq)]
pub struct DecodedPdu { | random_line_split |
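
To train or evaluate on rows like these, the three spans are usually joined around model-specific sentinel tokens, most commonly in prefix-suffix-middle (PSM) order. The sentinels below are placeholders, since every FIM-capable model defines its own; treat this as a format sketch rather than a recipe for any particular tokenizer:

```python
# Placeholder sentinels; substitute the target model's real FIM tokens.
FIM_PRE, FIM_SUF, FIM_MID = "<|fim_pre|>", "<|fim_suf|>", "<|fim_mid|>"

def to_psm_prompt(sample: dict) -> str:
    """PSM order: both context halves first, then the middle to be learned."""
    return (FIM_PRE + sample["prefix"]
            + FIM_SUF + sample["suffix"]
            + FIM_MID + sample["middle"])

prompt = to_psm_prompt({"prefix": "let x = ", "suffix": ";\n", "middle": "42"})
assert prompt.endswith("<|fim_mid|>42")
```
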
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use... | let public = self.p.eat_keyword(symbol::keywords::Pub);
let mut info = StructInfo {
name: try!(self.p.parse_ident()),
public: public,
deriving: vec![],
};
if self.p.eat(&token::Comma) { return Ok(info); }
let deriving = try!(self.p.parse_ident(... | random_line_split | |
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use... |
fn meta_item(cx: &ExtCtxt, s: &str) -> codemap::Spanned<ast::NestedMetaItemKind> {
codemap::Spanned {
node: ast::NestedMetaItemKind::MetaItem(cx.meta_word(codemap::DUMMY_SP, intern(s))),
span: cx.call_site(),
}
}
fn intern(s: &str) -> symbol::Symbol {
symbol::Symbol::intern(s)
}
fn ty_ve... | {
let sp = codemap::DUMMY_SP;
let its = items.into_iter().map(|s| meta_item(cx, s.borrow())).collect();
let mi = cx.meta_list(sp, intern(name.borrow()), its);
cx.attribute(sp, mi)
} | identifier_body |
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use... | (&self, cx: &ExtCtxt) -> Vec<ast::StructField> {
let mut fields: Vec<ast::StructField> = vec!();
for (atom, opts) in self.doc.parser().descs.iter() {
let name = ArgvMap::key_to_struct_field(&*atom.to_string());
let ty = match self.types.get(atom) {
None => self.pa... | struct_fields | identifier_name |
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it fe... |
return len(friends)
#Function to select a friend
def select_a_friend():
item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_numb... | print("Sorry, the friend cannot be a spy!") | conditional_block |
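
Files this long rarely fit a context window whole, so prefix and suffix are normally clipped to the characters nearest the hole: the tail of the prefix and the head of the suffix. A character-budget sketch follows; a production pipeline would budget in tokens with the model's tokenizer, but the shape is assumed to be the same:

```python
def clip_to_budget(sample: dict, budget: int) -> dict:
    """Split the character budget evenly between the two context halves."""
    half = budget // 2
    clipped = dict(sample)                        # shallow copy; middle untouched
    clipped["prefix"] = sample["prefix"][-half:]  # keep the end, nearest the hole
    clipped["suffix"] = sample["suffix"][:half]   # keep the start, nearest the hole
    return clipped

s = clip_to_budget({"prefix": "a" * 100, "middle": "m", "suffix": "b" * 100}, 40)
assert len(s["prefix"]) == 20 and len(s["suffix"]) == 20
```
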
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it fe... | ():
item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the frien... | select_a_friend | identifier_name |
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it fe... | else:
print ("Enter a valid spy rating")
else:
if spy.age <= 12:
print("Sorry, you are too young to become a spy!")
elif spy.ag... |
start_chat(spy)
| random_line_split |
program.py | # Importing the details of the spy
from spy_details import spy, Spy, friends, ChatMessage
# Importing steganography module
from steganography.steganography import Steganography
# List of status messages
STATUS_MESSAGES = ['Having Fun', 'Sunny Day', "Busy",
"Feeling Lazy", "Damm it it fe... |
#Function to send a message
def send_a_message():
friend_choice = select_a_friend()
original_image = raw_input("What is the name of the image?: ")
output_path = "output.jpg"
text = raw_input("What do you want to say? ")
Steganography.encode(origina... | item_number = 0
for friend in friends:
print ('%d. %s %s aged %d with rating %.2f is online' % (item_number + 1, friend.salutation, friend.name, friend.age, friend.rating))
item_number = item_number + 1
friend_choice = raw_input("Choose the index of the friend: ")
... | identifier_body |
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class ... | else:
raise Exception("!!! DOF " + dof.__str__() + " not in LTM " + self.ltm.name)
# Create FFT object.
u_fft = FFT(resp, x=self.u[c][i_dof, :], time=self.time[c])
# Plot the requested response fft.
fig = plt.fi... | if dof in self.ltm.acron_dofs:
i_dof = self.ltm.acron_dofs.index(dof)
elif dof in self.ltm.dofs:
i_dof = self.ltm.dofs.index(dof)
| random_line_split |
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class ... |
def load_ltm(self, **kwargs):
"""Method to load the LTM into the analysis.
Ex: scr.load_ltm(ltm='xp93zz_scr.pch)"""
ltm = kwargs['ltm']
self.ltm = LTM(ltm)
self.ltm.label_ltm(self.hwlist)
def load_eig(self, **kwargs):
"""Method to load the eigenv... | """Method to load the Hardware List (HWLIST) into the analysis.
Ex: scr.load_hwlist(hwlist='xp_hwlist.xls')
"""
hwlist = kwargs['hwlist']
self.hwlist = HWLIST(hwlist) | identifier_body |
scr.py | from PyLnD.loads.rf_functions import rf_mdof
from PyLnD.loads.pfile import modal_p
from PyLnD.loads.phi import PHI
from PyLnD.loads.hwlist import HWLIST
from PyLnD.loads.ltm import LTM
from PyLnD.loads.eig import EIG
from PyLnD.loads.pfile import PFILE
from pylab import *
class SCR:
"""Screening Class ... |
else:
desc = ''
# Loop and plot each requested dof.
fig = figure()
ax = subplot(111)
for item in items:
if item.__len__() != 3:
raise Exception('!!! You must supply (case, acron, dof) to plot !!!')
c = item[0]
... | desc = kwargs['desc'] | conditional_block |
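
Exactly four `fim_type` values occur across these rows: `random_line_split`, `identifier_name`, `identifier_body`, and `conditional_block`. Before training it is worth tallying them to see whether the classes need rebalancing; a sketch assuming the rows have already been parsed into dictionaries:

```python
from collections import Counter

def class_balance(rows) -> Counter:
    """Tally fim_type labels, e.g. to decide on resampling weights."""
    return Counter(row["fim_type"] for row in rows)

rows = [
    {"fim_type": "random_line_split"},
    {"fim_type": "identifier_name"},
    {"fim_type": "identifier_body"},
    {"fim_type": "conditional_block"},
    {"fim_type": "random_line_split"},
]
print(class_balance(rows))  # Counter({'random_line_split': 2, ...})
```
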