| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
server.go | can stop just those
_, err := s.AgentConns()
if err != nil {
return err
}
var wg sync.WaitGroup
errs := make(chan error, len(s.agentConns))
for _, conn := range s.agentConns {
conn := conn
wg.Add(1)
go func() {
defer wg.Done()
_, err := conn.AgentClient.StopAgent(context.Background(), &idl.Stop... | {
return xerrors.Errorf("saving hub configuration: %w", err)
} | conditional_block | |
server.go | () to Start() that
// Stop() had already beed called, so no need to do anything further
// in Start().
// Note that when used as a flag, nil value means that Stop() has
// been called.
stopped chan struct{}
daemon bool
}
type Connection struct {
Conn *grpc.ClientConn
AgentClient idl.AgentClient
H... |
func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}
func RestartAgents(ctx context.Context... | {
s.mu.Lock()
defer s.mu.Unlock()
// StopServices calls Stop(false) because it has already closed the agentConns
if closeAgentConns {
s.closeAgentConns()
}
if s.server != nil {
s.server.Stop()
<-s.stopped // block until it is OK to stop
}
// Mark this server stopped so that a concurrent Start() doesn't... | identifier_body |
server.go | Stop() to Start() that
// Stop() had already beed called, so no need to do anything further
// in Start().
// Note that when used as a flag, nil value means that Stop() has
// been called.
stopped chan struct{}
daemon bool
}
type Connection struct {
Conn *grpc.ClientConn
AgentClient idl.AgentClie... | daemon.Daemonize()
}
err = server.Serve(lis)
if err != nil {
err = xerrors.Errorf("serve: %w", err)
}
// inform Stop() that is it is OK to stop now
s.stopped <- struct{}{}
return err
}
func (s *Server) StopServices(ctx context.Context, in *idl.StopServicesRequest) (*idl.StopServicesReply, error) {
err :... |
if s.daemon {
fmt.Printf("Hub started on port %d (pid %d)\n", s.Port, os.Getpid()) | random_line_split |
console_test.go | ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Make sure we get a console PTY.
ptyMaster, err := re... | {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
... | identifier_body | |
console_test.go | , conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cle... | t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args = Args{
ID: ids[1],
Spec: testSpecs[1],
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont... | sock, err := socketPath(bundleDir)
if err != nil { | random_line_split |
console_test.go | , conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cle... |
ptyMaster.Close()
})
}
}
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err ... | {
t.Fatalf("error receiving console FD: %v", err)
} | conditional_block |
console_test.go | , conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cle... | (t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: ... | TestJobControlSignalExec | identifier_name |
build.py | env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.pa... |
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
"-t", dest_dir])
def install_d... | temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir) | conditional_block |
build.py | env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.pa... | (self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binut... | install | identifier_name |
build.py | env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdi... | def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, pr... | "-t", dest_dir])
| random_line_split |
build.py | )
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir)
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp"... | name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
... | identifier_body | |
response.rs | }
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use std::error::Error;
self.description().fmt(f)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.message {
None => "<no description available>",
... | (&self) -> Option<&error::Error> {
None
}
}
impl From<Status> for Error {
fn from(status: Status) -> Error {
Error::new(status, None)
}
}
impl From<(Status, &'static str)> for Error {
fn from(pair: (Status, &'static str)) -> Error {
Error::new(pair.0, Some(Cow::Borrowed(pair.1)... | cause | identifier_name |
response.rs | , jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, this method sends a 404 Not Found response.
SendFile(String)
}
/// Conversion from `()` into `End(None)`.
impl From<()> for Action {
fn from(_: ()) -> Action {
Action::E... | {
response.streaming
} | identifier_body | |
response.rs | to the given URL with a 3xx status (use 302 Found if unsure).
Redirect(Status, String),
/// Renders the template with the given name using the given JSON value.
///
/// If no Content-Type header is set, the content type is set to `text/html`.
Render(String, json::Value),
/// Sends the respons... | // probably not the best idea for big files, we should use stream instead in that case
match File::open(path) {
Ok(mut file) => {
let mut buf = Vec::with_capacity(file.metadata().ok().map_or(1024, |meta| meta.len() as usize));
if let Err(err) = file.read_to_en... | random_line_split | |
node.go | "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
co... |
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
/... |
return short
}
| identifier_body |
node.go | "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
co... | }
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.a... |
return
}
| conditional_block |
node.go | etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
confi... | flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.St... | egisterFlags( | identifier_name |
node.go | "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
co... | return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: ... | random_line_split | |
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: ... |
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf ou... | {
rPipe.Close()
return nil, 0, err
} | conditional_block |
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: ... |
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("... | {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
} | identifier_body |
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: ... | (ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not di... | testSingleCall | identifier_name |
perf_tool.go | arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: ... | func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if e... | })
}
| random_line_split |
dockerapi.go | []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFd... | {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
} | identifier_body | |
dockerapi.go | () {
readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
readBackOwnedContainers := readObj.(map[string]int)
if err == nil {
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Dele... | RemoveLiveContainersFromPreviousRun | identifier_name | |
dockerapi.go |
if err == nil {
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
... | readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
readBackOwnedContainers := readObj.(map[string]int) | random_line_split | |
dockerapi.go | client when launching and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for any API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err !=... |
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone check that no containers exist and that the system cleaned up otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := g... | {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
} | conditional_block |
set6.go |
}
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err... |
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Inte... | {
return err
} | conditional_block |
set6.go | Key(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := ... | {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = mi... | identifier_body | |
set6.go | ("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
... | } else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err | random_line_split | |
set6.go | () error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified... | problemTwo | identifier_name | |
OrganizationListDetail.ts | 、干部干事培训和考核、人才互荐交流、总结晚会等活动;负责与各学院的学生科创组织保持紧密联系,开展交流活动;负责与兄弟高校的学生科创组织进行交流合作。'
},
{
name: '科创竞赛部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-jingsai.png)`
},
introduction:
'科创竞赛部是学生科技联合会的竞技中心。以“科创点亮未来,竞赛成就梦想”为理念,主要负责开展和推广校内外科技竞赛... | introduction:
'研究生校园文化生活的缔造者和领跑人。于文,主办迎新晚会等大型活动,丰富研究生的课余生活,协助各分研会举办各类文艺活动,营造活跃向上的氛围。于体,参与组建、管理研究生各类球队,积极参加各类校级比赛,如运动会、“青春杯”篮球、足球赛、公园排球赛、校园马拉松等,宣传体育育人理念,提高研究生的综合素质。'
},
{
| random_line_split | |
dominogame.go | dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces) - 1)
player.ownedPieces = append(play... | }
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoP... | if firstTurn != 0 {
return game, firstTurn
} | random_line_split |
dominogame.go | inoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces) - 1)
player.ownedPieces = append(player.... | fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y... | {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
... | identifier_body |
dominogame.go | (gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces) - 1)
player.own... | assignPieces | identifier_name | |
dominogame.go | Double(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error pl... | {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
} | conditional_block | |
lib.rs | indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString; |
use indexmap::IndexMap;
use xml::common::XmlVersion as BaseXmlVersion;
/// Enumeration of XML versions
///
/// This exists solely because `xml-rs`'s `XmlVersion` doesn't implement Debug
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML Version 1.0
Version10,
/// XML Version 1.1
... |
pub use errors::*;
pub use builder::*; | random_line_split |
lib.rs | crate indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString;
pub use errors::*;
pub use builder::*;
use indexmap::IndexMap;
use xml::common::Xm... | (value: XmlVersion) -> BaseXmlVersion {
match value {
XmlVersion::Version10 => BaseXmlVersion::Version10,
XmlVersion::Version11 => BaseXmlVersion::Version11,
}
}
}
/// An XML element
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Element {
/// Tag prefix, used for nam... | from | identifier_name |
metamandering_north_carolina.py | import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def | (partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stor... | step_num | identifier_name |
metamandering_north_carolina.py | .metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
... |
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initialize populati... | g_sierpinsky.nodes[node]['population'] = 0 | conditional_block |
metamandering_north_carolina.py | import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.p... | import seaborn as sns
from functools import partial
import networkx as nx | random_line_split | |
metamandering_north_carolina.py | import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0... | print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distanc... | updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition... | identifier_body |
component.rs | .iter()
.filter_map(|init| match init {
GlobalInitializer::AlwaysTrap(i) => Some(i),
_ => None,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
... | {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
} | identifier_body | |
component.rs |
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compi... | let info = &self.inner.info.lowerings[index];
self.func(info)
}
pub(crate) fn always_trap_ptr(&self, index: RuntimeAlwaysTrapIndex) -> NonNull<VMFunctionBody> { | random_line_split | |
component.rs | ,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from ... | text | identifier_name | |
images.go | }
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("ann... | ); err != nil {
return fmt.Errorf("executing tarball callback: %w", err) | random_line_split | |
images.go | return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return t... | {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, versio... | identifier_body | |
images.go | return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball... | else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://gith... | {
return true, nil
} | conditional_block |
images.go | (impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
... | SetImpl | identifier_name | |
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGo... |
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() ... | {
return s
} | conditional_block |
settings.go | enPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use Gen... | Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is no... | random_line_split | |
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGo... |
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType,... | {
return _genPluginTypeToIsGo[g]
} | identifier_body |
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGo... | () string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associa... | String | identifier_name |
lib.rs | exchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- ... | test_sum | identifier_name | |
lib.rs | the MIT License - see the license file for details
//!
//!# Acknowledgements
//!
//!The fast.ai deep learning from the foundations course (https://course.fast.ai/part2) teaches a lot about how to make your own deep learning library
//!
//!Some of the resources that I found useful when working on this library include:
... |
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[t... | } | random_line_split |
lib.rs | - https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs... | {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sum(&a, 0).unwrap();
assert!((c.data == vec![5.0]) && (c.shape == vec![1]))
} | identifier_body | |
lib.rs | Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape(... | <'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, ... | fits | identifier_name |
lib.rs | 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_re... | let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part2_solution(&test_image, &result), 273); | random_line_split | |
lib.rs | Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape(... | let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.l... | {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
... | identifier_body |
tasty_trade_importer.py | '''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.ex... | trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity'])
print('calulated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return tra... | random_line_split | |
tasty_trade_importer.py | quantity: int
amount_bought
amount_sold
}
}
}
'''
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
# calulate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
... | print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
write... | identifier_body | |
tasty_trade_importer.py | '''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.ex... |
print('calulated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, ... | netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netA... | conditional_block |
tasty_trade_importer.py | '''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.ex... | (day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatt... | getFormattedRowsForMoneyMoneyMovement | identifier_name |
redis_performance_monitor.js | (err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this)... | {
try
{
fs.mkdirSync( __dirname + tempDir);
}
catch(e)
{
var ex = new CreateTmpDirError(e.message);
ex.message = e.message;
errorHandler(ex);
}
} | conditional_block | |
redis_performance_monitor.js | ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
passwd = '';
//... | random_line_split | ||
redis_performance_monitor.js | )
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '637... | {
if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof MetricNotFoundError)
{
console.log(err.message);
process.exit(err.code);
}
els... | identifier_body | |
redis_performance_monitor.js | 8:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '140... | (metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
// ############################################################################
// RATE PROCESSING
/**
* Process performance results... | output | identifier_name |
script.js | then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev... | getSpecificData | identifier_name | |
script.js | cases_time_series"][prev_index-1]["totalconfirmed"];
let prev_recoverd=data["cases_time_series"][prev_index-1]["totalrecovered"];
let prev_deaths=data["cases_time_series"][prev_index-1]["totaldeceased"];
//iterating only statewise object
$.each(data["statewise"],function(index,data){
//storing value... | let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the d... | //retrive the valu from array | random_line_split |
script.js | died pushing zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
//call functio to add data to tabel isstatus =0
... | {
//setting no active to 1
no_active=1;
$(".state-container").empty();
document.querySelector(".state-container").style.display="flex";
document.querySelector(".error_container").style.display="none";
let link="https://api.covid19india.org/v3/data-"+date+".json";
$.getJSON(link,function(datas){
for(data in da... | identifier_body | |
collect-raman.py | ")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
pr... | (self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
def set_modulation_enable(self, flag):
self.debug(f"setting laserModulationEnable... | set_gain_db | identifier_name |
collect-raman.py | ")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
pr... |
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum -= dark
spectra.ap... | print("*** not firing laser because --fire-laser not specified ***") | conditional_block |
collect-raman.py | ")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
pr... |
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i ... | self.min_laser_power_mW = self.unpack((3, 32, 4), "f") | random_line_split |
collect-raman.py | ")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
pr... |
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting raman... | self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0) | identifier_body |
MT3D_PP_viz.py | 4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, r... | conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for proc... | identifier_body | |
MT3D_PP_viz.py | (self):
"""TODO: Docs"""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# The definition of obs_sim_zone looks like:
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times... | compareAllObs | identifier_name | |
MT3D_PP_viz.py | ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in xrange(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_a... |
ax = fig.add_subplot(1, 3, 2) | random_line_split | |
MT3D_PP_viz.py | loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolo... |
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# for red the first column needs to be one
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone ==... | if col == nam:
rgb_ref += [rgb_all[index]]
# End if | conditional_block |
linebreak.rs | imal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'... | build_best_path | identifier_name | |
linebreak.rs |
}
// break_simple implements a "greedy" breaking algorithm: print words until
// maxlength would be exceeded, then print a linebreak and indent and continue.
fn break_simple<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
iter.try_fold((args.ini... | {
break_knuth_plass(p_words_words, &mut break_args)
} | conditional_block | |
linebreak.rs | _sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the ... | if uniform || newline {
if start || (newline && punct) {
2
} else { | random_line_split | |
Helper.js | .parts[0].jogByDistance
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogTime = jogDistance * jogPace
}
const sprintPaceFunction = (sprintPaceString) => new Function('targetPace', sprintPaceString) //same as jogPaceFunction
const sprintPace = sprintPaceFunction(fillerWorkout.parts[0... | {
try {
const raw = await fetch(url);
return await raw.json();
} catch (error) {
throw error;
}
} | identifier_body | |
Helper.js | 2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)... | (questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previous... | getUserInfo | identifier_name |
Helper.js | 2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)... | }
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDiffi... | }
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity); | random_line_split |
knapsack.py | else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] =... | new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
... | action = (np.argmax(qval))
# Take action, observe new state S' | random_line_split |
knapsack.py | else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] =... | xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action... | filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all ... | identifier_body |
knapsack.py | (file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
... | read_file | identifier_name | |
knapsack.py |
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] ... | return x_test, ticker | conditional_block | |
DAC16bit.py | self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in rang... | self._interface = interface
self.Fullrange = Fullrange | random_line_split | |
DAC16bit.py | self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connect... | (self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages... | do_get_dac | identifier_name |
DAC16bit.py | from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 1... | if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #110 is... | identifier_body | |
DAC16bit.py | i in range(self._numdacs):
# takes two bytes, converts it to a 16 bit int and then divides by
# the range and adds the offset due to the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communic... | return 'POS' | conditional_block | |
gm.go | �理模块消息
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.han... | _s | identifier_body | |
gm.go | WarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: ... |
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
... | seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
} | random_line_split |
gm.go | WarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: ... | *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.Base | " + proto.Cmd))
}
//gogen:iface
func (m | conditional_block |
gm.go | (m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.De... | e) processGoodsCm | identifier_name | |
prune_head_with_taylor.py | max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
#... |
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exis... | torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl') | conditional_block |
prune_head_with_taylor.py | if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0... | """Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a... | identifier_body | |
prune_head_with_taylor.py | max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
#... | (tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is ver... | _truncate_seq_pair | identifier_name |
prune_head_with_taylor.py | "rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
... | processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor, | random_line_split | |
family.go | Shaw",
"Snyder",
"Mason",
"Dixon",
"Munoz",
"Hunt",
"Hicks",
"Holmes",
"Palmer",
"Wagner",
"Black",
"Robertson",
"Boyd",
"Rose",
"Stone",
"Salazar",
"Fox",
"Warren",
"Mills",
"Meyer",
"Rice",
"Schmidt",
"Garza",
"Daniels",
"Ferguson",
"Nichols",
"Stephens",
"Soto",
"Weaver",
"Ryan",
"Gardn... | "Wu",
"Hines",
"Mullins",
"Castaneda",
"Malone",
"Cannon",
"Tate",
"Mack",
"Sherman",
"Hubbard",
"Hodges",
"Zhang",
"Guerra",
"Wolf",
"Valencia",
"Saunders",
"Franco",
"Rowe",
"Gallagher",
"Farmer",
"Hammond",
"Hampton",
"Townsend",
"Ingram",
"Wise",
"Gallegos",
"Clarke",
"Barton",
"Schroed... | "Todd", | random_line_split |
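The rows above come from a fill-in-the-middle (FIM) corpus: each example stores the `prefix`, `middle`, and `suffix` of a source file together with its `file_name` and one of four `fim_type` labels (`conditional_block`, `identifier_body`, `identifier_name`, `random_line_split`). The sketch below shows one way such a dataset could be loaded and an example reassembled, assuming the corpus is published on the Hugging Face Hub; the dataset path `user/fim-code-corpus` is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch: load a FIM dataset and reconstruct one example.
# Assumption: the corpus is hosted on the Hugging Face Hub; the path below is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/fim-code-corpus", split="train")  # hypothetical dataset path

example = ds[0]
# Reassemble the original file content from the three FIM segments.
reconstructed = example["prefix"] + example["middle"] + example["suffix"]

print(example["file_name"], example["fim_type"])
print(reconstructed[:200])  # first 200 characters of the reconstructed source
```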