content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!usr/bin/python
import re
target = open ("linkage_mates.txt","w")
with open ("mate_aligned.txt", "r") as f:
fam={}
fam_mate={}
lines = f.readlines()
for line in lines:
line_split = line.split("\t", 1)
try:
fam[line_split[0]]
fam[line_split[0]].append(line_split[1])
except KeyError:
fam[line_split[0]] = []
fam[line_split[0]].append(line_split[1])
for key1 in fam:
try:
if int(fam[key1][0].split("\t")[2]) > int(fam[key1][1].split("\t")[2]):
fam[key1][0] , fam[key1][1] = fam[key1][1] , fam[key1][0]
except IndexError:
pass
for key2 in fam:
if len(fam[key2]) == 2:
mate1_match = re.sub(r'[ATCGN]*$','',re.sub(r'^[ATCGN]*','', fam[key2][0].split()[8]))
mate2_match = re.sub(r'[ATCGN]*$','',re.sub(r'^[ATCGN]*','', fam[key2][1].split()[8]))
mate1_start = int(fam[key2][0].split()[2])
mate2_start = int(fam[key2][0].split()[6])
overlapping = mate1_start + len (mate1_match) - mate2_start
if overlapping >= 0:
mate2_start_chunked = mate2_start + overlapping
mate2_match_chunked = mate2_match[mate2_start_chunked-mate2_start:]
#find matches separately in mate1_match and mate2_match_chunked
link=callVariants (mate1_match, mate1_start, mate2_match_chunked, mate2_start_chunked)
if link is not None:
tmp1 = [(x,y) for x in link[0] for y in link[1]]
for key3 in tmp1:
target.write ("%s-%s_%s-%s\t%s\n" %(key3[0][0], key3[0][1], key3[1][0], key3[1][1], key2))
else:
#find matches separately in mate1_match and mate2_match
link=callVariants (mate1_match, mate1_start, mate2_match, mate2_start)
if link is not None:
tmp1 = [(x,y) for x in link[0] for y in link[1]]
for key3 in tmp1:
target.write ("%s-%s_%s-%s\t%s\n" %(key3[0][0], key3[0][1], key3[1][0], key3[1][1], key2))
else:
pass
| [
2,
0,
14629,
14,
8800,
14,
29412,
220,
198,
198,
11748,
302,
220,
198,
198,
16793,
796,
1280,
5855,
8726,
496,
62,
7300,
13,
14116,
2430,
86,
4943,
198,
198,
4480,
1280,
5855,
9830,
62,
41634,
13,
14116,
1600,
366,
81,
4943,
355,
... | 1.976861 | 994 |
x = False
while x == False:
value = input("Enter the number between 0-9 : ")
try :
value = int(value)
if value > 9:
print("Your value is over 9")
elif value < 0:
print("Your value is less than 0")
else:
print("Your value is ",value)
x = True
except ValueError:
print("Please enter number only")
| [
87,
796,
10352,
198,
4514,
2124,
6624,
10352,
25,
198,
220,
220,
220,
1988,
796,
5128,
7203,
17469,
262,
1271,
1022,
657,
12,
24,
1058,
366,
8,
198,
220,
220,
220,
1949,
1058,
198,
220,
220,
220,
220,
220,
220,
220,
1988,
796,
493... | 2.146739 | 184 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import multiprocessing
import os
import unittest
from typing import Callable, Optional
import torch
import torch.distributed as dist
from torchrec.distributed.comm import _CROSS_PG, _INTRA_PG
from torchrec.test_utils import (
get_free_port,
init_distributed_single_host,
seed_and_log,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
30277,
19193,
82,
11,
3457,
13,
290,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
76... | 3.152542 | 177 |
from typing import Any
from typing import Dict
| [
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
360,
713,
628
] | 4.363636 | 11 |
"""Test for asyncprawcore.auth.Authorizer classes."""
import pytest
import asyncprawcore
from ..conftest import two_factor_callback # noqa F401
from . import IntegrationTest
| [
37811,
14402,
329,
355,
2047,
13155,
1831,
7295,
13,
18439,
13,
13838,
7509,
6097,
526,
15931,
198,
11748,
12972,
9288,
198,
198,
11748,
355,
2047,
13155,
1831,
7295,
198,
198,
6738,
11485,
1102,
701,
395,
1330,
734,
62,
31412,
62,
4742... | 3.25 | 56 |
# https://github.com/pratogab/batch-transforms
import torch
class Normalize:
"""Applies the :class:`~torchvision.transforms.Normalize` transform to a batch of images.
.. note::
This transform acts out of place by default, i.e., it does not mutate the input tensor.
Args:
mean (sequence):
Sequence of means for each channel.
std (sequence):
Sequence of standard deviations for each channel.
inplace(bool,optional):
Bool to make this operation in-place.
dtype (torch.dtype,optional):
The data type of tensors to which the transform will be applied.
device (torch.device,optional):
The device of tensors to which the transform will be applied.
"""
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of size (N, C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor.
"""
if not self.inplace:
tensor = tensor.clone()
tensor.sub_(self.mean).div_(self.std)
return tensor
| [
2,
3740,
1378,
12567,
13,
785,
14,
1050,
265,
519,
397,
14,
43501,
12,
7645,
23914,
198,
198,
11748,
28034,
628,
198,
4871,
14435,
1096,
25,
198,
220,
220,
220,
37227,
4677,
13508,
262,
1058,
4871,
25,
63,
93,
13165,
354,
10178,
13,... | 2.358811 | 471 |
from flask import (
current_app,
flash,
Blueprint,
render_template,
request,
redirect,
url_for,
abort,
make_response,
)
from ..models import Option, Page, User, Category, Tag, Post, Comment, Link
from ..extensions import db
from ..forms import (
LoginForm,
PostForm,
PageForm,
CategoryForm,
TagForm,
CommentForm,
OptionForm,
LinkForm,
SearchForm,
)
from flask_login import login_required, current_user
from ..utils import redirect_back
admin_bp = Blueprint("admin", __name__)
@admin_bp.before_request
@login_required
@admin_bp.route("/option", methods=["GET", "POST"])
@admin_bp.route("/manage_pages")
@admin_bp.route("/edit_page/<int:page_id>", methods=["GET", "POST"])
@admin_bp.route("/add_page", methods=["GET", "POST"])
@admin_bp.route("/page/<int:page_id>/delete", methods=["POST"])
@admin_bp.route("/manage_categories")
@admin_bp.route("/edit_category/<int:category_id>", methods=["GET", "POST"])
@admin_bp.route("/add_category", methods=["GET", "POST"])
@admin_bp.route("/category/<int:category_id>/delete", methods=["POST"])
@admin_bp.route("/manage_tags")
@admin_bp.route("/edit_tag/<int:tag_id>", methods=["GET", "POST"])
@admin_bp.route("/add_tag", methods=["GET", "POST"])
@admin_bp.route("/tag/<int:tag_id>/delete", methods=["POST"])
@admin_bp.route("/manage_posts")
@admin_bp.route("/write_post", methods=["GET", "POST"])
@admin_bp.route("/edit_post/<int:post_id>", methods=["GET", "POST"])
@admin_bp.route("/post/<int:post_id>/delete", methods=["POST"])
@admin_bp.route("/manage_comments")
@admin_bp.route("/comment/<int:comment_id>/approve", methods=["POST"])
@admin_bp.route("/comment/<int:comment_id>/delete", methods=["POST"])
@admin_bp.route("/manage_links")
@admin_bp.route("/edit_link/<int:link_id>", methods=["GET", "POST"])
@admin_bp.route("/add_link", methods=["GET", "POST"])
@admin_bp.route("/link/<int:link_id>/delete", methods=["POST"]) | [
6738,
42903,
1330,
357,
198,
220,
220,
220,
1459,
62,
1324,
11,
198,
220,
220,
220,
7644,
11,
198,
220,
220,
220,
39932,
11,
198,
220,
220,
220,
8543,
62,
28243,
11,
198,
220,
220,
220,
2581,
11,
198,
220,
220,
220,
18941,
11,
1... | 2.498118 | 797 |
import http
import io
from unittest import mock
import pytest
import requests_mock
from django.conf import settings
from requests.exceptions import HTTPError
from core import helpers
from core.tests.helpers import create_response
@pytest.fixture(autouse=True)
@mock.patch('core.helpers.boto3')
@mock.patch('core.helpers.boto3')
@mock.patch('core.helpers.boto3')
@mock.patch.object(helpers.CompaniesHouseClient, 'retrieve_profile')
@mock.patch.object(helpers.CompaniesHouseClient, 'retrieve_profile')
| [
11748,
2638,
198,
11748,
33245,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
11748,
7007,
62,
76,
735,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
7007,
13,
1069,
11755,
1330,
14626,
12331,
19... | 2.994253 | 174 |
"""init file for py2nb"""
from .py2nb import py2nb
from .nb2py import nb2py | [
37811,
15003,
2393,
329,
12972,
17,
46803,
37811,
198,
198,
6738,
764,
9078,
17,
46803,
1330,
12972,
17,
46803,
198,
6738,
764,
46803,
17,
9078,
1330,
299,
65,
17,
9078
] | 2.533333 | 30 |
import os
import pytest
import boto3
import base64
from moto import mock_s3
from firehose_replicator import firehose_replicator
ECS_BUCKET_NAME = "the-ecs-bucket"
ECS_OBJECT_PREFIX = "cv/wydot"
os.environ["ECS_BUCKET_NAME"] = ECS_BUCKET_NAME
os.environ["ECS_OBJECT_PREFIX"] = ECS_OBJECT_PREFIX
@mock_s3
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
275,
2069,
18,
198,
11748,
2779,
2414,
198,
6738,
285,
2069,
1330,
15290,
62,
82,
18,
198,
6738,
2046,
71,
577,
62,
35666,
26407,
1330,
2046,
71,
577,
62,
35666,
26407,
198,
198,
2943... | 2.257353 | 136 |
from python_helper.api.src.service import LogHelper, ReflectionHelper
from python_helper.api.src.domain import Constant as c
| [
6738,
21015,
62,
2978,
525,
13,
15042,
13,
10677,
13,
15271,
1330,
5972,
47429,
11,
6524,
1564,
47429,
198,
6738,
21015,
62,
2978,
525,
13,
15042,
13,
10677,
13,
27830,
1330,
20217,
355,
269,
198
] | 3.571429 | 35 |
###################THIS FILE DOES NOT WORK#################
import requests
import html_to_json
#URL = "https://modelo-prueba.herokuapp.com/result"
url = "http://127.0.0.1:8080/result"
data = {'a':1, 'b':2, 'c':3, 'd':4}
####################Alternativa1###############################
r = requests.post(url,data=data)
r_json = html_to_json.convert(r.content)
print(r_json)
prob = r_json['body'][0]['div'][0]['div'][0]['h3'][0]['_value']
print(prob) | [
14468,
21017,
43559,
45811,
38359,
5626,
30936,
14468,
2,
198,
198,
11748,
7007,
198,
11748,
27711,
62,
1462,
62,
17752,
198,
198,
2,
21886,
796,
366,
5450,
1378,
19849,
78,
12,
1050,
518,
7012,
13,
11718,
23063,
1324,
13,
785,
14,
20... | 2.516667 | 180 |
from chris_plugin.types import ParameterType
import logging
from typing import Union
class Placeholders:
"""
The ChRIS store requires a default value to be provided for
arguments which are optional, however argparse doesn't care.
If the ChRIS plugin does not specify a default for an argument
that is optional, we will add these placeholders when serializing the parameters.
https://github.com/FNNDSC/ChRIS_store/blob/0295268f7cd65593a259a7a00b83eac8ae876c33/store_backend/plugins/serializers.py#L404-L407
"""
INT = -1
FLOAT = 0.0
STR = ""
BOOL = False
_logger = logging.getLogger(__name__)
@classmethod
@staticmethod
| [
6738,
442,
2442,
62,
33803,
13,
19199,
1330,
25139,
2357,
6030,
198,
11748,
18931,
198,
6738,
19720,
1330,
4479,
628,
198,
4871,
8474,
10476,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
383,
609,
49,
1797,
3650,
4433,
257,
4277... | 2.849372 | 239 |
import numpy as np
import random
from collections import deque
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
17268,
1330,
390,
4188,
198
] | 4.2 | 15 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
if __name__ == '__main__':
feature = torch.randn(1, 32, 128, 256)
# canvas_transform(feature)
# grid_test()
# grid_canvas = create_grid_canvas(feature)
# print(grid_canvas.shape)
# grid_canvas = normalize_grid_canvas(grid_canvas)
# print(grid_canvas.shape)
pointpropagation = PointPropagation(feature.shape)
output = pointpropagation.forward(feature)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
2... | 2.640884 | 181 |
#! /usr/local/bin/python3.5
import socket
import struct
import textwrap
import binascii
import struct
import sys
TAB_1 = '\t - '
TAB_2 = '\t\t - '
TAB_3 = '\t\t\t - '
TAB_4 = '\t\t\t\t - '
DATA_TAB_1 = '\t '
DATA_TAB_2 = '\t\t '
DATA_TAB_3 = '\t\t\t '
DATA_TAB_4 = '\t\t\t\t '
# Unpack Ethernet Frame
# Unpack IPv4 Packets Recieved
# Returns Formatted IP Address
# Unpacks for any ICMP Packet
# Unpacks for any TCP Packet
# Unpacks for any UDP Packet
# Formats the output line
main() | [
2,
0,
1220,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
13,
20,
198,
198,
11748,
17802,
198,
11748,
2878,
198,
11748,
2420,
37150,
198,
11748,
9874,
292,
979,
72,
198,
11748,
2878,
198,
11748,
25064,
198,
198,
5603,
33,
62,
16,
796,
... | 2.187234 | 235 |
# Copyright 2016 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DEFAULT_QUOTAS = {
u'quota_set': {
u'metadata_items': 128, u'subnet': 10,
u'floatingip': 50, u'gigabytes': 1000, u'backup_gigabytes': 1000,
u'ram': 51200, u'floating_ips': 10, u'snapshots': 10,
u'security_group_rule': 100,
u'instances': 10, u'key_pairs': 100, u'volumes': 10, u'router': 10,
u'security_group': 10, u'cores': 20, u'backups': 10, u'fixed_ips': -1,
u'port': 50, u'security_groups': 10, u'network': 10
}
}
| [
2,
15069,
1584,
7651,
16528,
9564,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
... | 2.578704 | 432 |
#!/usr/bin/python
#
# Generate test cases for version_test.go
#
# Herein lies my first ever python script...
#
import rpm
versions = [
"",
"0",
"1",
"2",
"10",
"100",
"0.0",
"0.1",
"0.10",
"0.99",
"1.0",
"1.99",
"2.0",
"0.0.0",
"0.0.1",
"0.0.2",
"0.0.10",
"0.0.99",
"0.1.0",
"0.2.0",
"0.10.0",
"0.99.0",
"0.100.0",
"0.0.0.0",
"0.0.0.1",
"0.0.0.10",
"0.0.1.0",
"0.0.01.0",
"1.2.3.4",
"1-2-3-4",
"20150101",
"20151212",
"20151212.0",
"20151212.1",
"2015.1.1",
"2015.02.02",
"2015.12.12",
"1.2.3a",
"1.2.3b",
"R16B",
"R16C",
"1.2.3.2016.1.1",
"0.5a1.dev",
"1.8.B59BrZX",
"0.07b4p1",
"3.99.5final.SP07",
"3.99.5final.SP08",
"0.4.tbb.20100203",
"0.5.20120830CVS.el7",
"1.el7",
"1.el6",
"10.el7",
"01.el7",
"0.17.20140318svn632.el7",
"0.17.20140318svn633.el7",
"1.20140522gitad6fb3e.el7",
"1.20140522hitad6fb3e.el7",
"8.20140605hgacf1c26e3019.el7",
"8.20140605hgacf1c26e3029.el7",
"22.svn457.el7",
"22.svn458.el7",
]
print "\t// tests generated with version_test.py"
print "\ttests := []VerTest{"
for x in versions:
for y in versions:
print "\t\tVerTest{\"" + x + "\", \"" + y + "\",", rpm.labelCompare(("0", "0", x), ("0", "0", y)), "},"
print "\t}"
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
198,
2,
2980,
378,
1332,
2663,
329,
2196,
62,
9288,
13,
2188,
198,
2,
198,
2,
3423,
259,
7363,
616,
717,
1683,
21015,
4226,
986,
198,
2,
198,
11748,
37542,
198,
198,
47178,
796,
... | 1.686016 | 758 |
import json
import csv
DISTRICT = 2
PARTY = 4
VOTE_PERCENTAGE = 8
WINNER = 11
PARTIES = ("NDP", "Liberal", "Conservative", "Bloc Quebecois", "Green")
json_file = open("electoral_districts.json")
districts = json.load(json_file)
## Parse csv file containing individual results
csv_file = "output.csv"
with open(csv_file, newline='') as csv_input:
csv_reader = csv.reader(csv_input, skipinitialspace=True)
row = next(csv_reader)
## Make a dictionary that maps FEDNUM to ({Party: percent}, winning_party))
party_percentage = {}
district_results = {}
district_winners = {}
prev_district = 0
for row in csv_reader:
curr_district = int(row[DISTRICT])
party = row[PARTY]
vote_percentage = row[VOTE_PERCENTAGE]
if party in PARTIES:
# new district -> map previous district to party percentages
if prev_district != 0 and curr_district != prev_district:
district_results[prev_district] = party_percentage
district_winners[prev_district] = prev_winner
party_percentage = {}
party_percentage[party] = vote_percentage
prev_district = curr_district
prev_winner = row[WINNER]
## Add the last district
district_results[prev_district] = party_percentage
district_winners[prev_district] = prev_winner
## Iterate over feature properties and add property party_percentages
## to feature with feature number FEDNUM
import operator
features = districts["features"]
districts_con = {"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}, "features": []}
districts_lib = {"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}, "features": []}
districts_ndp = {"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}, "features": []}
districts_green = {"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}, "features": []}
districts_bloc = {"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}, "features": []}
for feature in features:
fednum = feature["properties"]["FEDNUM"]
if fednum in district_results:
feature["properties"]["ELECTION_RESULTS"] = district_results[fednum]
winner = district_winners[fednum]#max(district_results[fednum].items(), key=operator.itemgetter(1))[0]
feature["properties"]["WINNER"] = winner
if winner == "Green":
feature["properties"]["fill"] = "#31a354"
districts_green["features"].append(feature)
elif winner == "NDP":
feature["properties"]["fill"] = "#feb24c"
districts_ndp["features"].append(feature)
elif winner == "Liberal":
feature["properties"]["fill"] = "#de2d26"
districts_lib["features"].append(feature)
elif winner == "Conservative":
feature["properties"]["fill"] = "#2c7fb8"
districts_con["features"].append(feature)
elif winner == "Bloc":
feature["properties"]["fill"] = "#a6bddb"
districts_bloc["features"].append(feature)
with open('districtResults.geojson', 'w') as out:
json.dump(districts, out)
with open('resultsGreen.geojson', 'w') as out:
json.dump(districts_green, out)
with open('resultsLiberal.geojson', 'w') as out:
json.dump(districts_lib, out)
with open('resultsConservative.geojson', 'w') as out:
json.dump(districts_con, out)
with open('resultsNDP.geojson', 'w') as out:
json.dump(districts_ndp, out)
with open('resultsBloc.geojson', 'w') as out:
json.dump(districts_bloc, out)
| [
11748,
33918,
198,
11748,
269,
21370,
198,
198,
26288,
5446,
18379,
796,
362,
198,
30709,
56,
796,
604,
198,
53,
23051,
62,
18973,
43960,
11879,
796,
807,
198,
37620,
21479,
796,
1367,
198,
30709,
11015,
796,
5855,
45,
6322,
1600,
366,
... | 2.409826 | 1,608 |
# Copyright 2020 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from v3iofs import path as v3path
import pytest
split_cases = [
# path, expected, raises
('', None, True),
('/', ('', ''), False),
('/a', ('a', ''), False),
('/a/b/c', ('a', 'b/c'), False),
]
@pytest.mark.parametrize('path, expected, raises', split_cases)
unslash_cases = [
# path, expected
('', ''),
('/', ''),
('/a', '/a'),
('/a/', '/a'),
]
@pytest.mark.parametrize('path, expected', unslash_cases)
| [
2,
15069,
12131,
314,
5162,
1031,
952,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 2.911681 | 351 |
import pathlib
import nibabel as nib
import numpy as np
import shutil
import sys
if __name__ == "__main__":
main()
| [
11748,
3108,
8019,
198,
11748,
33272,
9608,
355,
33272,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4423,
346,
198,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,... | 2.860465 | 43 |
# Copyright (c) 2018, NVIDIA CORPORATION.
"""
This file provide binding to the libgdf library.
"""
import contextlib
import itertools
import numpy as np
import pandas as pd
import pyarrow as pa
from libgdf_cffi import ffi, libgdf
from librmm_cffi import librmm as rmm
from . import cudautils
from .utils import calc_chunk_size, mask_dtype, mask_bitsize
def columnview(size, data, mask=None, dtype=None, null_count=None):
"""
Make a column view.
Parameters
----------
size : int
Data count.
data : Buffer
The data buffer.
mask : Buffer; optional
The mask buffer.
dtype : numpy.dtype; optional
The dtype of the data. Defaults to *data.dtype*.
"""
if mask is not None:
assert null_count is not None
dtype = dtype or data.dtype
return _columnview(size=size, data=unwrap(data), mask=unwrap(mask),
dtype=dtype, null_count=null_count)
def apply_binaryop(binop, lhs, rhs, out):
"""Apply binary operator *binop* to operands *lhs* and *rhs*.
The result is stored to *out*.
Returns the number of null values.
"""
args = (lhs.cffi_view, rhs.cffi_view, out.cffi_view)
# apply binary operator
binop(*args)
# validity mask
if out.has_null_mask:
return apply_mask_and(lhs, rhs, out)
else:
return 0
def apply_unaryop(unaop, inp, out):
"""Apply unary operator *unaop* to *inp* and store to *out*.
"""
args = (inp.cffi_view, out.cffi_view)
# apply unary operator
unaop(*args)
np_gdf_dict = {np.float64: libgdf.GDF_FLOAT64,
np.float32: libgdf.GDF_FLOAT32,
np.int64: libgdf.GDF_INT64,
np.int32: libgdf.GDF_INT32,
np.int16: libgdf.GDF_INT16,
np.int8: libgdf.GDF_INT8,
np.bool_: libgdf.GDF_INT8,
np.datetime64: libgdf.GDF_DATE64}
def np_to_gdf_dtype(dtype):
"""Util to convert numpy dtype to gdf dtype.
"""
if pd.api.types.is_categorical_dtype(dtype):
return libgdf.GDF_INT8
else:
return np_gdf_dict[np.dtype(dtype).type]
def gdf_to_np_dtype(dtype):
"""Util to convert gdf dtype to numpy dtype.
"""
return np.dtype({
libgdf.GDF_FLOAT64: np.float64,
libgdf.GDF_FLOAT32: np.float32,
libgdf.GDF_INT64: np.int64,
libgdf.GDF_INT32: np.int32,
libgdf.GDF_INT16: np.int16,
libgdf.GDF_INT8: np.int8,
libgdf.GDF_DATE64: np.datetime64,
libgdf.N_GDF_TYPES: np.int32,
libgdf.GDF_CATEGORY: np.int32,
}[dtype])
def np_to_pa_dtype(dtype):
"""Util to convert numpy dtype to PyArrow dtype.
"""
return {
np.float64: pa.float64(),
np.float32: pa.float32(),
np.int64: pa.int64(),
np.int32: pa.int32(),
np.int16: pa.int16(),
np.int8: pa.int8(),
np.bool_: pa.int8(),
np.datetime64: pa.date64(),
}[np.dtype(dtype).type]
def apply_sort(col_keys, col_vals, ascending=True):
"""Inplace sort
"""
nelem = len(col_keys)
begin_bit = 0
end_bit = col_keys.dtype.itemsize * 8
plan = libgdf.gdf_radixsort_plan(nelem, not ascending, begin_bit, end_bit)
sizeof_key = col_keys.dtype.itemsize
sizeof_val = col_vals.dtype.itemsize
try:
libgdf.gdf_radixsort_plan_setup(plan, sizeof_key, sizeof_val)
libgdf.gdf_radixsort_generic(plan,
col_keys.cffi_view,
col_vals.cffi_view)
finally:
libgdf.gdf_radixsort_plan_free(plan)
_join_how_api = {
'inner': libgdf.gdf_inner_join,
'outer': libgdf.gdf_full_join,
'left': libgdf.gdf_left_join,
}
_join_method_api = {
'sort': libgdf.GDF_SORT,
'hash': libgdf.GDF_HASH
}
@contextlib.contextmanager
def apply_join(col_lhs, col_rhs, how, method='hash'):
"""Returns a tuple of the left and right joined indices as gpu arrays.
"""
if(len(col_lhs) != len(col_rhs)):
msg = "Unequal #columns in list 'col_lhs' and list 'col_rhs'"
raise ValueError(msg)
joiner = _join_how_api[how]
method_api = _join_method_api[method]
gdf_context = ffi.new('gdf_context*')
if method == 'hash':
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)
elif method == 'sort':
libgdf.gdf_context_view(gdf_context, 1, method_api, 0, 0, 0)
else:
msg = "method not supported"
raise ValueError(msg)
col_result_l = columnview(0, None, dtype=np.int32)
col_result_r = columnview(0, None, dtype=np.int32)
if(how in ['left', 'inner']):
list_lhs = []
list_rhs = []
for i in range(len(col_lhs)):
list_lhs.append(col_lhs[i].cffi_view)
list_rhs.append(col_rhs[i].cffi_view)
# Call libgdf
joiner(len(col_lhs), list_lhs, list_rhs, col_result_l,
col_result_r, gdf_context)
else:
joiner(col_lhs[0].cffi_view, col_rhs[0].cffi_view, col_result_l,
col_result_r)
# Extract result
left = rmm.device_array_from_ptr(ptr=col_result_l.data,
nelem=col_result_l.size,
dtype=np.int32)
right = rmm.device_array_from_ptr(ptr=col_result_r.data,
nelem=col_result_r.size,
dtype=np.int32)
yield(left, right)
libgdf.gdf_column_free(col_result_l)
libgdf.gdf_column_free(col_result_r)
def apply_segsort(col_keys, col_vals, segments, descending=False,
plan=None):
"""Inplace segemented sort
Parameters
----------
col_keys : Column
col_vals : Column
segments : device array
"""
# prepare
nelem = len(col_keys)
if nelem == segments.size:
# As many seguments as there are elements.
# Nothing to do.
return
if plan is None:
plan = SegmentedRadixortPlan(nelem, col_keys.dtype, col_vals.dtype,
descending=descending)
plan.sort(segments, col_keys, col_vals)
return plan
def hash_columns(columns, result):
"""Hash the *columns* and store in *result*.
Returns *result*
"""
assert len(columns) > 0
assert result.dtype == np.int32
# No-op for 0-sized
if len(result) == 0:
return result
col_input = [col.cffi_view for col in columns]
col_out = result.cffi_view
ncols = len(col_input)
hashfn = libgdf.GDF_HASH_MURMUR3
libgdf.gdf_hash(ncols, col_input, hashfn, col_out)
return result
def hash_partition(input_columns, key_indices, nparts, output_columns):
"""Partition the input_columns by the hash values on the keys.
Parameters
----------
input_columns : sequence of Column
key_indices : sequence of int
Indices into `input_columns` that indicates the key columns.
nparts : int
number of partitions
Returns
-------
partition_offsets : list of int
Each index indicates the start of a partition.
"""
assert len(input_columns) == len(output_columns)
col_inputs = [col.cffi_view for col in input_columns]
col_outputs = [col.cffi_view for col in output_columns]
offsets = ffi.new('int[]', nparts)
hashfn = libgdf.GDF_HASH_MURMUR3
libgdf.gdf_hash_partition(
len(col_inputs),
col_inputs,
key_indices,
len(key_indices),
nparts,
col_outputs,
offsets,
hashfn
)
offsets = list(offsets)
return offsets
_GDF_COLORS = {
'green': libgdf.GDF_GREEN,
'blue': libgdf.GDF_BLUE,
'yellow': libgdf.GDF_YELLOW,
'purple': libgdf.GDF_PURPLE,
'cyan': libgdf.GDF_CYAN,
'red': libgdf.GDF_RED,
'white': libgdf.GDF_WHITE,
}
def str_to_gdf_color(s):
"""Util to convert str to gdf_color type.
"""
return _GDF_COLORS[s.lower()]
def nvtx_range_push(name, color='green'):
"""
Demarcate the beginning of a user-defined NVTX range.
Parameters
----------
name : str
The name of the NVTX range
color : str
The color to use for the range.
Can be named color or hex RGB string.
"""
name_c = ffi.new("char[]", name.encode('ascii'))
try:
color = int(color, 16) # only works if color is a hex string
libgdf.gdf_nvtx_range_push_hex(name_c, ffi.cast('unsigned int', color))
except ValueError:
color = str_to_gdf_color(color)
libgdf.gdf_nvtx_range_push(name_c, color)
def nvtx_range_pop():
""" Demarcate the end of the inner-most range.
"""
libgdf.gdf_nvtx_range_pop()
_GDF_QUANTILE_METHODS = {
'linear': libgdf.GDF_QUANT_LINEAR,
'lower': libgdf.GDF_QUANT_LOWER,
'higher': libgdf.GDF_QUANT_HIGHER,
'midpoint': libgdf.GDF_QUANT_MIDPOINT,
'nearest': libgdf.GDF_QUANT_NEAREST,
}
def get_quantile_method(method):
"""Util to convert method to gdf gdf_quantile_method.
"""
return _GDF_QUANTILE_METHODS[method]
def quantile(column, quant, method, exact):
""" Calculate the `quant` quantile for the column
Returns value with the quantile specified by quant
"""
gdf_context = ffi.new('gdf_context*')
method_api = _join_method_api['sort']
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)
# libgdf.gdf_context_view(gdf_context, 0, method_api, 0)
# px = ffi.new("double *")
res = []
for q in quant:
px = ffi.new("double *")
if exact:
libgdf.gdf_quantile_exact(column.cffi_view,
get_quantile_method(method),
q,
ffi.cast('void *', px),
gdf_context)
else:
libgdf.gdf_quantile_aprrox(column.cffi_view,
q,
ffi.cast('void *', px),
gdf_context)
res.append(px[0])
return res
| [
2,
15069,
357,
66,
8,
2864,
11,
15127,
23929,
44680,
6234,
13,
198,
198,
37811,
198,
1212,
2393,
2148,
12765,
284,
262,
9195,
70,
7568,
5888,
13,
198,
37811,
198,
11748,
4732,
8019,
198,
11748,
340,
861,
10141,
198,
198,
11748,
299,
... | 1.98216 | 5,157 |
"""
Basic transformer, which cleans up the static calm data before sending it off
to an elasticsearch index.
The raw data can be obtained by running:
python monitoring/scripts/download_oai_harvest.py
from the root of this repo. This will create a file called `calm_records.json`.
The elasticsearch credentials (url, username, and password) should be specified
by a `es_credentials.json` file in this directory.
"""
import os
import json
import subprocess
from tqdm import tqdm
from transform import transform
from elasticsearch import Elasticsearch
path_to_es_credentials = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "es_credentials.json"
)
es_credentials = json.load(open(path_to_es_credentials))
es = Elasticsearch(
es_credentials["url"],
http_auth=(es_credentials["username"], es_credentials["password"]),
)
path_to_raw_records = (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.strip()
.decode("utf8")
+ "/calm_records.json"
)
raw_records = json.load(open(path_to_raw_records))
for raw_record in tqdm(raw_records):
try:
record = transform(raw_record)
res = es.index(
index="calm", id=record["RecordID"], doc_type="calm_record", body=record
)
except Exception as e:
print(e)
| [
37811,
198,
26416,
47385,
11,
543,
20658,
510,
262,
9037,
9480,
1366,
878,
7216,
340,
572,
198,
1462,
281,
27468,
12947,
6376,
13,
198,
198,
464,
8246,
1366,
460,
307,
6492,
416,
2491,
25,
628,
220,
220,
220,
21015,
9904,
14,
46521,
... | 2.730689 | 479 |
from .extensions import db, resizer
if resizer:
for size in resizer.sizes.iterkeys():
setattr(Upload, size + '_name', db.Column(db.Unicode(255)))
setattr(Upload, size + '_url', db.Column(db.Unicode(255)))
| [
6738,
764,
2302,
5736,
1330,
20613,
11,
581,
7509,
628,
198,
361,
581,
7509,
25,
198,
220,
220,
220,
329,
2546,
287,
581,
7509,
13,
82,
4340,
13,
2676,
13083,
33529,
198,
220,
220,
220,
220,
220,
220,
220,
900,
35226,
7,
41592,
11... | 2.414894 | 94 |
"""
Train job interacts with SageMaker
"""
import os
import boto3
import numpy as np
import pandas as pd
import sagemaker
# We use the Estimator from the SageMaker Python SDK
from sagemaker.pytorch.estimator import PyTorch
from ...sm_utils import get_sm_execution_role
ON_SAGEMAKER_NOTEBOOK = False
# preparation
sm_boto3 = boto3.client('sagemaker')
sess = sagemaker.Session()
region = sess.boto_session.region_name
bucket = sess.default_bucket() # this could also be a hard-coded bucket name
print('Using bucket ' + bucket)
sm_role = get_sm_execution_role(ON_SAGEMAKER_NOTEBOOK, region)
trainpath = 's3://sagemaker-ap-southeast-2-454979696062/sagemaker/sklearncontainer/adult.csv'
pytorch_estimator = PyTorch(
entry_point='train.py',
source_dir=os.path.abspath(os.path.dirname(__file__)),
role = sm_role,
train_instance_count=1,
train_instance_type='ml.c5.xlarge',
framework_version='1.5.0',
base_job_name='fastai-pytorch',
metric_definitions=[
{'Name': 'Dice accuracy',
'Regex': "Dice accuracy: ([0-9.]+).*$"}],
hyperparameters = {'hidden_layer_1': 200,
'hidden_layer_2': 100})
# launch training job, with asynchronous call
pytorch_estimator.fit({'train':trainpath}, wait=False)
| [
37811,
198,
44077,
1693,
44020,
351,
28733,
48890,
198,
37811,
198,
11748,
28686,
198,
11748,
275,
2069,
18,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
45229,
32174,
198,
198,
2,
775,
779,
262,... | 2.53507 | 499 |
# Internal
from warnings import warn
from contextlib import AsyncExitStack
warn(
"async_tools.context.async_context_manager is deprecated, use contextlib instead",
DeprecationWarning,
)
__all__ = ("AsyncExitStack",)
| [
2,
18628,
198,
6738,
14601,
1330,
9828,
198,
6738,
4732,
8019,
1330,
1081,
13361,
30337,
25896,
198,
198,
40539,
7,
198,
220,
220,
220,
366,
292,
13361,
62,
31391,
13,
22866,
13,
292,
13361,
62,
22866,
62,
37153,
318,
39224,
11,
779,
... | 3.228571 | 70 |
#===============================================================================
# Imports
#===============================================================================
from evn.util import (
Constant,
)
#===============================================================================
# Constants
#===============================================================================
ActionType = _ActionType()
#===============================================================================
# Classes
#===============================================================================
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| [
2,
23926,
25609,
18604,
198,
2,
1846,
3742,
198,
2,
23926,
25609,
18604,
198,
6738,
819,
77,
13,
22602,
1330,
357,
198,
220,
220,
220,
20217,
11,
198,
8,
198,
198,
2,
23926,
25609,
18604,
198,
2,
4757,
1187,
198,
2,
23926,
25609,
... | 7.172414 | 87 |
from .experiment_runner import ExperimentRunner
if __name__ == '__main__':
exp_runner = ExperimentRunner()
print("Running Experiment\n")
exp_runner.run()
print("Experiment complete!.\n")
| [
6738,
764,
23100,
3681,
62,
16737,
1330,
29544,
49493,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1033,
62,
16737,
796,
29544,
49493,
3419,
628,
220,
220,
220,
3601,
7203,
28768,
29544,
59,
... | 3.104478 | 67 |
from suds.client import Client
client = Client('http://localhost:8000/?wsdl', cache=None)
print(client.service.say_fibonacci(10)) | [
6738,
424,
9310,
13,
16366,
1330,
20985,
198,
198,
16366,
796,
20985,
10786,
4023,
1378,
36750,
25,
33942,
20924,
18504,
25404,
3256,
12940,
28,
14202,
8,
198,
198,
4798,
7,
16366,
13,
15271,
13,
16706,
62,
69,
571,
261,
44456,
7,
940... | 3.046512 | 43 |
"""Stream type classes for tap-chromedata."""
import csv
from pathlib import Path
from typing import Any, Dict, Optional, Union, List, Iterable
from singer_sdk import typing as th # JSON Schema typing helpers
from singer_sdk.streams import Stream
import re
import ftplib
import io
import json
from zipfile import ZipFile
def camel_to_snake(name):
"""Convert camelCase words to snake_case"""
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
class ChromeDataStream(Stream):
"""Base stream class with config parameter getters, tranversers to ACES Mapping data and data cleaning"""
flo=""
dirname=""
mainfilename=""
@property
def url_base(self) -> str:
"""Return the API URL root, configurable via tap settings."""
return self.config["ftp_url"]
@property
def url_user(self) -> str:
"""Return the API URL user, configurable via tap settings."""
return self.config["ftp_user"]
@property
def url_pass(self) -> str:
"""Return the API URL password, configurable via tap settings."""
return self.config["ftp_pass"]
def data_cleaner(self,data):
"""Function to read and preprocessing data to UTF-8, converting headers to snake case and removing ~ as the quoting character in the data"""
for j in range(len(data)):
data[j]=data[j].decode('utf-8')
data[0]=camel_to_snake(data[0])
colnames=data[0].split(",")
for j in range(len(colnames)):
colnames[j]=colnames[j].replace("~","")
if '\r\n' in colnames[j]:
colnames[j]=colnames[j].replace("\r\n","")
datareader=csv.DictReader(data,quotechar='~',dialect='unix')
return datareader,colnames
class QuickDataStream(ChromeDataStream):
"""Class for reading the Quickdata records for all the years, zipped in year-by-year folder in the FTP server"""
name = "QuickData"
primary_keys = ["_autobuilder_style_id"]
replication_key = None
schema = th.PropertiesList(
th.Property("_model_year", th.IntegerType),
th.Property("_division_name", th.StringType),
th.Property("_model_name", th.StringType),
th.Property("_hist_style_id", th.IntegerType),
th.Property("_style_name", th.StringType),
th.Property("_style_name_wo_trim", th.StringType),
th.Property("_trim", th.StringType),
th.Property("_full_style_code", th.StringType),
th.Property("_style_sequence", th.IntegerType),
th.Property("msrp", th.NumberType),
th.Property("_invoice", th.NumberType),
th.Property("_destination", th.NumberType),
th.Property("_model_effective_date", th.StringType),
th.Property("_model_comment", th.StringType),
th.Property("_manufacturer_name", th.StringType),
th.Property("_manufacturer_id", th.IntegerType),
th.Property("_division_id", th.IntegerType),
th.Property("_hist_model_id", th.IntegerType),
th.Property("_market_class", th.StringType),
th.Property("_market_class_id", th.IntegerType),
th.Property("_subdivision_name", th.StringType),
th.Property("_subdivision_id", th.IntegerType),
th.Property("_style_id", th.IntegerType),
th.Property("_autobuilder_style_id", th.StringType)
).to_dict()
dirname="QuickData_ALL"
mainfilename="DeepLink.txt"
class AcesLegacyVehicleSchemaStream(ChromeDataStream):
"""Class for reading the ACES Legacy Vehicle records in the ACES Mapping folder of the FTP server"""
name = "AcesLegacyVehicle"
primary_keys = ["_vehicle_config_id","_legacy_vehicle_id"]
replication_key = None
schema = th.PropertiesList(
th.Property("_vehicle_config_id", th.IntegerType),
th.Property("_legacy_vehicle_id", th.IntegerType)
).to_dict()
dirname="ACES"
mainfilename="AcesLegacyVehicle.txt"
class AcesVehicleSchemaStream(ChromeDataStream):
"""Class for reading the ACES Vehicle records in the ACES Mapping folder of the FTP server"""
name = "AcesVehicle"
primary_keys = ["_vehicle_id"]
replication_key = None
schema = th.PropertiesList(
th.Property("_vehicle_id", th.IntegerType),
th.Property("_year_id", th.IntegerType),
th.Property("_make_id", th.IntegerType),
th.Property("_model_id", th.IntegerType),
th.Property("_sub_model_id", th.IntegerType),
th.Property("_region_id", th.IntegerType),
th.Property("_base_vehicle_id", th.IntegerType)
).to_dict()
dirname="ACES"
mainfilename="AcesVehicle.txt"
class AcesVehicleConfigSchemaStream(ChromeDataStream):
"""Class for reading the ACES Vehicle Config records in the ACES Mapping folder of the FTP server"""
name = "AcesVehicleConfigVehicle"
primary_keys = ["_aces_vehicle_config_id","_vehicle_config_id"]
replication_key = None
schema = th.PropertiesList(
th.Property("_aces_vehicle_config_id", th.IntegerType),
th.Property("_vehicle_config_id", th.IntegerType),
th.Property("_vehicle_id", th.IntegerType),
th.Property("_bed_config_id", th.IntegerType),
th.Property("_body_style_config_id", th.IntegerType),
th.Property("_brake_config_id", th.IntegerType),
th.Property("_drive_type_id", th.IntegerType),
th.Property("_engine_config_id", th.IntegerType),
th.Property("_transmission_id", th.IntegerType),
th.Property("_mfr_body_code_id", th.IntegerType),
th.Property("_wheel_base_id", th.IntegerType),
th.Property("_spring_type_config_id", th.IntegerType),
th.Property("_steering_config_id", th.IntegerType)
).to_dict()
dirname="ACES"
mainfilename="AcesVehicleConfig.txt"
class AcesVehicleMappingSchemaStream(ChromeDataStream):
"""Class for reading the ACES Vehicle Mapping records in the ACES Mapping folder of the FTP server"""
name = "AcesVehicleMappingVehicle"
primary_keys = ["_aces_vehicle_mapping_id"]
replication_key = None
schema = th.PropertiesList(
th.Property("_aces_vehicle_mapping_id", th.IntegerType),
th.Property("_vehicle_mapping_id", th.IntegerType),
th.Property("_vehicle_id", th.IntegerType),
th.Property("_aces_vehicle_config_id", th.IntegerType),
th.Property("_style_id", th.IntegerType),
th.Property("_option_codes", th.StringType)
).to_dict()
dirname="ACES"
mainfilename="AcesVehicleMapping.txt"
| [
37811,
12124,
2099,
6097,
329,
9814,
12,
28663,
276,
1045,
526,
15931,
198,
11748,
269,
21370,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
11,
4479,
11,
7343,
11,
40806,
540,
198,
19... | 2.489974 | 2,643 |
input_dir = 'data/raw'
output_dir = 'data/processed'
TLHW = ['top', 'left', 'height', 'width']
XYXY = ['x1', 'y1', 'x2', 'y2']
OUT_HEADER = ['file', *XYXY, 'body', 'color']
mappings = {
'supercab': 'ute',
'cab': 'ute',
'minivan': 'people-mover',
'wagon': 'station-wagon',
} | [
15414,
62,
15908,
796,
705,
7890,
14,
1831,
6,
198,
22915,
62,
15908,
796,
705,
7890,
14,
14681,
276,
6,
198,
198,
14990,
39,
54,
796,
37250,
4852,
3256,
705,
9464,
3256,
705,
17015,
3256,
705,
10394,
20520,
198,
34278,
34278,
796,
... | 2.171642 | 134 |
#!/Applications/anaconda/envs/Python3/bin
import socket
def main():
'''Example Creating a Simple Client Using the socket Module'''
HOST = '127.0.0.1'
PORT = 50002
incomingMessage = ''
outgoingMessage = ''
# s is socket object for IPv4, TCP
s = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
s.connect((HOST, PORT))
while(outgoingMessage != 'EXIT'):
outgoingMessage = input('Reply (type EXIT to esc): ')
s.send(outgoingMessage.encode('utf-8'))
incomingMessage = s.recv(1024).decode('utf-8')
if (incomingMessage == 'EXIT'):
print('The server has chosen to exit. Goodbye.')
break
print(incomingMessage)
s.close()
if __name__ == '__main__':
main()
# Run simpleServer.py first, then run simpleClient.py in separate terminal window
| [
2,
48443,
41995,
14,
272,
330,
13533,
14,
268,
14259,
14,
37906,
18,
14,
8800,
198,
198,
11748,
17802,
198,
198,
4299,
1388,
33529,
198,
220,
220,
220,
705,
7061,
16281,
30481,
257,
17427,
20985,
8554,
262,
17802,
19937,
7061,
6,
628,... | 2.498525 | 339 |
import numpy as np
if __name__ == '__main__':
main()
| [
11748,
299,
32152,
355,
45941,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.4 | 25 |
import pandas as pd
import re
import numpy as np
import math
import torch
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
28034,
628
] | 3.409091 | 22 |
# Zeit wird gezaehlt
import pygame, math, random, time
from pygame.locals import *
import PIL.Image as Image
import numpy as np
import random
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import os, sys
os.environ["SDL_VIDEODRIVER"] = "dummy"
#
| [
2,
47447,
266,
1447,
4903,
89,
3609,
71,
2528,
198,
11748,
12972,
6057,
11,
10688,
11,
4738,
11,
640,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
198,
11748,
350,
4146,
13,
5159,
355,
7412,
198,
11748,
299,
32152,
355,
... | 2.798077 | 104 |
import asyncio
import unittest
from typing import Awaitable, Callable
import asynctest
from checklisting.result import MultiTaskResult, TaskResult
from checklisting.result.status import TaskResultStatus
from checklisting.tasks.services.zookeeper import (ZookeeperMntrResonseValidator,
ZookeeperRuokResponseValidator,
ZookeeperTask)
from checklisting.testing import setup_tcp_server
| [
11748,
30351,
952,
198,
11748,
555,
715,
395,
198,
6738,
19720,
1330,
5851,
4548,
540,
11,
4889,
540,
198,
198,
11748,
355,
2047,
310,
395,
198,
198,
6738,
41859,
278,
13,
20274,
1330,
15237,
25714,
23004,
11,
15941,
23004,
198,
6738,
... | 2.331754 | 211 |
# coding: utf-8
import logging
from os import path
from .models.mdsp import Mdsp
from .utils.udefault import load_yaml
from .dao.mongo.daomongo import DaoMongo
from .dao.mongo.daogridfs import DaoGridFS
class Config(object):
''' load all info from etc when starting webservice '''
@staticmethod
@classmethod
def initialize(cls, cfgpath):
''' classmethod to be done at first '''
try:
with open(cfgpath) as file:
info = load_yaml(file.read())
except Exception as ex:
logging.error('[LOAD_CFG] {}'.format(ex))
exit()
def init_db(info):
''' initialize database after getting all config '''
mongoinfo = info['db']['mongo']['client']
gridfsinfo = info['db']['gridfs']['client']
mongo_db = DaoMongo.get_db(mongoinfo)
dsp_tabObj = DaoMongo.get_tab(mongo_db, mongoinfo['dsp_tab'])
adm_tabObj = DaoMongo.get_tab(mongo_db, mongoinfo['adm_tab'])
media_tabObj = DaoMongo.get_tab(mongo_db, mongoinfo['media_tab'])
gridfs_db = DaoGridFS.get_db(gridfsinfo)
mongoinfo['dbObj'] = mongo_db
mongoinfo['dsp_tabObj'] = dsp_tabObj
mongoinfo['adm_tabObj'] = adm_tabObj
mongoinfo['media_tabObj'] = media_tabObj
gridfsinfo['dbObj'] = gridfs_db
info['db']['mongo']['client'] = mongoinfo
info['db']['gridfs']['client'] = gridfsinfo
return info
info = init_db(info)
cls.__set_cfg(info)
@classmethod
def __set_cfg(cls, info):
''' prevent others to modify this config info '''
cls.cfg = info
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
18931,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
764,
27530,
13,
9132,
2777,
1330,
39762,
2777,
198,
6738,
764,
26791,
13,
463,
891,
1721,
1330,
3440,
62,
88,
43695,
198,
6738,
76... | 2.06152 | 829 |
from pytheas.services import service
from pytheas.viewmodels.viewmodelbase import ViewModelBase
| [
6738,
12972,
1169,
292,
13,
30416,
1330,
2139,
198,
6738,
12972,
1169,
292,
13,
1177,
27530,
13,
1177,
19849,
8692,
1330,
3582,
17633,
14881,
628
] | 3.88 | 25 |
import numpy as np
import scipy.io as sio
import os
import matplotlib.pyplot as plt
import pandas as pd
from preprocessing import method_name
from plot_toolbox import plot_error_rate
###############################################################################
### CLASSIFICATION EXPERIMENTS ###############################################
###############################################################################
errors = []
method_names = []
order = ['Random',
'PCA',
'Whiten PCA',
'LDA',
'LDA with normalised data',
'NPP (10 neighbors)',
'NPP (30 neighbors)']
# Retrieving the different perf files
root = os.path.join(os.getcwd(), './perf_results/')
for method_toplot in order:
for file in os.listdir(root):
if 'detail' in file:
continue
mn = method_name(file)
if mn != method_toplot:
continue
method_names.append(mn)
full_path = os.path.join(root, file)
data = pd.read_csv(full_path)
error = data['mean_error']
errors.append(np.array(error))
errors = np.matrix(errors).T
plot_error_rate(errors, method_names)
###############################################################################
### FAST ICA EXPERIMENTS ######################################################
###############################################################################
#
#n_images = 3
#np.random.seed(0)
#random_images_index = np.random.randint(0, fea.shape[0], n_images)
#random_images_index = [0, 260, 520]
#original_images = fea[random_images_index,:]
#
#mixing_matrix = np.matrix(np.random.rand(n_images, n_images))
#mixed_images = mixing_matrix * original_images
#
##from sklearn.decomposition import FastICA
##coefficients = np.matrix(FastICA(algorithm='deflation').fit_transform(mixed_images))
##recovered_images = coefficients * mixed_images
#recovered_images = FastICA.simple_projection(mixed_images)
#
#for i in range(original_images.shape[0]):
# img_tools.display_image(original_images[i,:], save_folder='images/FastICA/reconstruction/',
# name='original_%s' % i)
#
#for i in range(mixed_images.shape[0]):
# img_tools.display_image(mixed_images[i,:], save_folder='images/FastICA/reconstruction/',
# name='mixed_%s' % i)
#
#for i in range(recovered_images.shape[0]):
# img_tools.display_image(recovered_images[i,:], save_folder='images/FastICA/reconstruction/',
# name='recovered_%s' % i)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
662,
36948,
1330,
2446,... | 2.674051 | 948 |
import pprint
sl = Solution()
t1 = [[1,3],[2,6],[8,10],[15,18]]
t1 = [Interval(*x) for x in t1]
res = sl.merge(t1)
pprint.pprint(res)
sl = Solution()
t2 = [[1,4],[4,5]]
t2 = [Interval(*x) for x in t2]
res = sl.merge(t2)
print(res)
| [
11748,
279,
4798,
628,
198,
6649,
796,
28186,
3419,
198,
198,
83,
16,
796,
16410,
16,
11,
18,
38430,
17,
11,
21,
38430,
23,
11,
940,
38430,
1314,
11,
1507,
11907,
198,
83,
16,
796,
685,
9492,
2100,
46491,
87,
8,
329,
2124,
287,
... | 1.958678 | 121 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Program Name: data_mining.py
# Anthony Waldsmith
# 8/03/16
# Python Version 3.4
# Description: Datamining CSV google stocks
# Optional import for versions of python <= 2
from __future__ import print_function
#name: get_data_list
#param: FILE_NAME - the file's name you saved for the stock's prices
#brief: get a list of the stock's records' lists
#return: a list of lists
#name: get_monthly_averages
#param: data_list - the list that you will process
#brief: get a list of the stock's monthly averages and their corresponding dates
#return: a list
# THIS COULD BE OPTIMIZED, HOWEVER THIS WAY MAKES A BIT MORE SENSE TO ME
#name: print_info
#param: monthly_average_list - the list that you will process
#brief: print the monthly averages of Google stock
#return: None
main()
# 1) call get_data_list(FILE_NAME) function to get the data list.
# Return into variable data_list
# 2) call get_monthly_averages(data_list) function with variable data_list from above as argument.
# Return into variable monthly_average_list
# 3) call print_info(monthly_average_list) function with variable monthly_average_list from above as argument.
# Show the monthly_average_list as shown in the sample run below
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6118,
6530,
25,
1366,
62,
45374,
13,
9078,
198,
2,
9953,
24261,
21453,
198,
2,
807,
14,
3070,
14,
1433,
198,... | 3.346667 | 375 |
import os
| [
11748,
28686,
628
] | 3.666667 | 3 |
# Copyright (c) Ye Liu. All rights reserved.
import torch
import torch.nn as nn
import nncore
from ..builder import MESSAGE_PASSINGS
from ..bundle import Parameter
@MESSAGE_PASSINGS.register()
@nncore.bind_getter('in_features', 'out_features')
class GCN(nn.Module):
"""
Graph Convolutional Layer introduced in [1].
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
bias (bool, optional): Whether to add the bias term. Default: ``True``.
References:
1. Kipf et al. (https://arxiv.org/abs/1609.02907)
"""
def forward(self, x, graph):
"""
Args:
x (:obj:`torch.Tensor[N, M]`): The input node features.
graph (:obj:`torch.Tensor[N, N]`): The graph structure where
``graph[i, j] == n (n > 0)`` means there is an link with
weight ``n`` from node ``i`` to node ``j`` while
``graph[i, j] == 0`` means not.
"""
assert x.size(0) == graph.size(0) == graph.size(1)
n = self._compute_norm(graph)
h = torch.mm(x, self.weight)
y = torch.mm(n, h)
if self._with_bias:
y += self.bias
return y
@MESSAGE_PASSINGS.register()
@nncore.bind_getter('in_features', 'out_features', 'k')
class SGC(GCN):
"""
Simple Graph Convolutional Layer introduced in [1].
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
k (int, optional): Number of layers to be stacked.
bias (bool, optional): Whether to add the bias term. Default: ``True``.
References:
1. Wu et al. (https://arxiv.org/abs/1902.07153)
"""
@MESSAGE_PASSINGS.register()
@nncore.bind_getter('in_features', 'out_features', 'heads', 'p',
'negative_slope', 'concat', 'residual')
class GAT(nn.Module):
"""
Graph Attention Layer introduced in [1].
Args:
in_features (int): Number of input features.
out_features (int): Number of output features.
heads (int, optional): Number of attention heads. Default: ``1``.
p (float, optional): The dropout probability. Default: ``0``.
negative_slope (float, optional): The negative slope of
:obj:`LeakyReLU`. Default: ``0.2``.
concat (bool, optional): Whether to concatenate the features from
different attention heads. Default: ``True``.
residual (bool, optional): Whether to add residual connections.
Default: ``True``.
bias (bool, optional): Whether to add the bias term. Default: ``True``.
References:
1. Veličković et al. (https://arxiv.org/abs/1710.10903)
"""
def forward(self, x, graph):
"""
Args:
x (:obj:`torch.Tensor[N, M]`): The input node features.
graph (:obj:`torch.Tensor[N, N]`): The graph structure where
``graph[i, j] == n (n > 0)`` means there is a link from node
``i`` to node ``j`` while ``graph[i, j] == 0`` means not.
"""
assert x.size(0) == graph.size(0) == graph.size(1)
x = self.dropout(x)
h = torch.matmul(x[None, :], self.weight)
coe_i = torch.bmm(h, self.weight_i)
coe_j = torch.bmm(h, self.weight_j).transpose(1, 2)
coe = self.leaky_relu(coe_i + coe_j)
graph = torch.where(graph > 0, .0, float('-inf')).t()
att = self.dropout((coe + graph).softmax(dim=-1))
y = torch.bmm(att, h).transpose(0, 1).contiguous()
if self._residual:
if self._map_residual:
y += torch.mm(x, self.weight_r).view(-1, self._heads,
self._head_features)
else:
y += x[:, None]
if self._concat:
y = y.view(-1, self._out_features)
else:
y = y.mean(dim=1)
if self._with_bias:
y += self.bias
return y
| [
2,
15069,
357,
66,
8,
11609,
18258,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
11748,
299,
77,
7295,
198,
6738,
11485,
38272,
1330,
337,
1546,
4090,
8264,
62,
47924,
20754,... | 2.149468 | 1,880 |
import cv2
import numpy as np
from numba import njit
# https://automaticaddison.com/how-to-do-histogram-matching-using-opencv/
def calculate_cdf(histogram: np.ndarray) -> np.ndarray:
"""
This method calculates the cumulative distribution function
:param array histogram: The values of the histogram
:return: normalized_cdf: The normalized cumulative distribution function
:rtype: array
"""
# Get the cumulative sum of the elements
cdf = histogram.cumsum()
# Normalize the cdf
normalized_cdf = cdf / float(cdf.max())
return normalized_cdf
@njit
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
628,
198,
2,
3740,
1378,
37800,
2860,
1653,
13,
785,
14,
4919,
12,
1462,
12,
4598,
12,
10034,
21857,
12,
15699,
278,
12,
3500,
12,
9654,... | 2.985 | 200 |
import connect
error_message = 'Action is not defined. Please use create, truncate, or drop.'
| [
11748,
2018,
628,
198,
18224,
62,
20500,
796,
705,
12502,
318,
407,
5447,
13,
4222,
779,
2251,
11,
40122,
378,
11,
393,
4268,
2637,
628,
628,
628,
628,
628,
628,
628
] | 3.516129 | 31 |
{
'variables': {
# may be redefined in command line on configuration stage
'BUILD_LIBRDKAFKA%': 1,
},
"targets": [
{
"target_name": "bindings",
"sources": [ "<!@(ls -1 src/*.cc)", ],
"include_dirs": [
"<!(node -e \"require('nan')\")",
],
'conditions': [
# If BUILD_LIBRDKAFKA, then
# depend on librdkafka target, and use src-cpp when building
# this target.
[ '<(BUILD_LIBRDKAFKA)==1',
{
'dependencies': ['librdkafka'],
'include_dirs': [ "deps/librdkafka/src-cpp" ],
'libraries' : [ '-lz', '-lsasl2'],
},
# Else link against globally installed rdkafka and use
# globally installed headers. On Debian, you should
# install the librdkafka1, librdkafka++1, and librdkafka-dev
# .deb packages.
{
'libraries': ['-lz', '-lrdkafka', '-lrdkafka++'],
'include_dirs': [
"/usr/include/librdkafka",
"/usr/local/include/librdkafka"
],
},
],
[ 'OS=="mac"',
{
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.11',
'OTHER_CFLAGS' : ['-Wall', '-Wno-sign-compare', '-Wno-missing-field-initializers', '-std=c++11'],
},
}
],
[ 'OS=="linux"',
{
'cflags': ['-Wall', '-Wno-sign-compare', '-Wno-missing-field-initializers'],
}
]
]
},
{
"target_name": "librdkafka_config_h",
"type": "none",
"actions": [
{
'action_name': 'configure_librdkafka',
'message': 'configuring librdkafka...',
'inputs': [
'deps/librdkafka/configure',
],
'outputs': [
'deps/librdkafka/config.h',
],
'action': ['eval', 'cd deps/librdkafka && ./configure'],
},
],
},
{
"target_name": "librdkafka",
"type": "static_library",
'dependencies': [
'librdkafka_config_h',
],
"sources": [
"<!@(ls -1 ./deps/librdkafka/src/*.c)",
"<!@(ls -1 ./deps/librdkafka/src-cpp/*.cpp)",
],
'cflags_cc!': [ '-fno-rtti' ],
'conditions': [
[
'OS=="mac"',
{
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.11',
'OTHER_CFLAGS' : ['-Wno-sign-compare', '-Wno-missing-field-initializers'],
'GCC_ENABLE_CPP_RTTI': 'YES'
}
}
],[
'OS=="linux"',
{
'cflags' : [ '-Wno-sign-compare', '-Wno-missing-field-initializers', '-Wno-empty-body', '-g'],
}
]
]
}
]
}
| [
90,
198,
220,
705,
25641,
2977,
10354,
1391,
198,
220,
220,
220,
220,
220,
1303,
743,
307,
2266,
18156,
287,
3141,
1627,
319,
8398,
3800,
198,
220,
220,
220,
220,
220,
705,
19499,
26761,
62,
40347,
35257,
42,
8579,
25123,
4,
10354,
... | 1.715248 | 1,633 |
# Sampling from a bivariate normal density via Metropolis-Hastings
# %% Import packages
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
import torch
from torch.distributions import MultivariateNormal
# from torch.distributions import Normal
from torch.utils.data import DataLoader
from eeyore.datasets import EmptyXYDataset
from eeyore.models import DistributionModel
from eeyore.kernels import IsoSEKernel # , NormalKernel
from eeyore.samplers import MetropolisHastings
from eeyore.stats import mmd
# %% Set up unnormalized target density
# Using manually defined log_pdf function
# def log_pdf(theta, x, y):
# return -0.5*torch.sum(theta**2)
# Using log_pdf function based on Normal torch distribution
# pdf = Normal(torch.zeros(2), torch.ones(2))
# def log_pdf(theta, x, y):
# return torch.sum(pdf.log_prob(theta))
# Using log_pdf function based on MultivariateNormal torch distribution
pdf_dtype = torch.float32
pdf = MultivariateNormal(torch.zeros(2, dtype=pdf_dtype), covariance_matrix=torch.eye(2, dtype=pdf_dtype))
model = DistributionModel(log_pdf, 2, dtype=pdf.loc.dtype)
# %% Setup Metropolis-Hastings sampler
sampler = MetropolisHastings(
model,
theta0=torch.tensor([-1, 1], dtype=model.dtype),
dataloader=DataLoader(EmptyXYDataset()),
symmetric=True
)
# num_params = model.num_params()
# kernel = NormalKernel(torch.zeros(num_params), np.sqrt(2)*torch.ones(num_params))
# sampler = MetropolisHastings(model, theta0=torch.tensor([-1, 1], dtype=torch.float32), dataloader=dataloader, kernel)
# %% Run Metropolis-Hastings sampler
sampler.run(num_epochs=11000, num_burnin_epochs=1000)
# %% Compute acceptance rate
print('Acceptance rate: {}'.format(sampler.get_chain().acceptance_rate()))
# %% Compute Monte Carlo mean
print('Monte Carlo mean: {}'.format(sampler.get_chain().mean()))
# %% Compute Monte Carlo standard error
print('Monte Carlo standard error: {}'.format(sampler.get_chain().mc_se()))
# %% Compute multivariate ESS
print('Multivariate ESS: {}'.format(sampler.get_chain().multi_ess()))
# %% Plot traces of simulated Markov chain
for i in range(model.num_params()):
chain = sampler.get_param(i)
plt.figure()
sns.lineplot(x=range(len(chain)), y=chain)
plt.xlabel('Iteration')
plt.ylabel('Parameter value')
plt.title(r'Traceplot of $\theta_{{{0}}}$'.format(i+1))
# %% Plot histograms of marginals of simulated Markov chain
x_hist_range = np.linspace(-4, 4, 100)
for i in range(model.num_params()):
plt.figure()
plot = sns.kdeplot(sampler.get_param(i), color='blue', label='Simulated')
plot.set_xlabel('Parameter value')
plot.set_ylabel('Relative frequency')
plot.set_title(r'Traceplot of $\theta_{{{0}}}$'.format(i+1))
sns.lineplot(x=x_hist_range, y=stats.norm.pdf(x_hist_range, 0, 1), color='red', label='Target')
plot.legend()
# %% Plot scatter of simulated Markov chain
x_contour_range, y_contour_range = np.mgrid[-5:5:.01, -5:5:.01]
contour_grid = np.empty(x_contour_range.shape+(2,))
contour_grid[:, :, 0] = x_contour_range
contour_grid[:, :, 1] = y_contour_range
target = stats.multivariate_normal([0., 0.], [[1., 0.], [0., 1.]])
plt.scatter(x=sampler.get_param(0), y=sampler.get_param(1), marker='+')
plt.contour(x_contour_range, y_contour_range, target.pdf(contour_grid), cmap='copper')
plt.title('Countours of target and scatterplot of simulated chain');
# %% Sample using builtin PyTorch sample() method
num_samples = list(range(2, 101))
# num_samples = list(range(2, 10)) + list(range(10, 110, 10))
num_samples_max = max(num_samples)
sample2 = [pdf.sample([1]) for _ in range(num_samples_max)]
# %% Compute MMD between MCMC samples and samples generated by builtin PyTorch sample() method
mmd_vals = []
for n in num_samples:
mmd_vals.append(mmd(sampler.chain.vals['sample'][0:n], sample2[0:n], IsoSEKernel()).item())
# %% Plot MMD between MCMC samples and samples generated by builtin PyTorch sample() method
plot = sns.lineplot(x=num_samples, y=mmd_vals)
plot.set_title('MMD between MCMC samples and sample()');
| [
2,
3409,
11347,
422,
257,
275,
42524,
3487,
12109,
2884,
3395,
25986,
12,
39,
459,
654,
198,
198,
2,
43313,
17267,
10392,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
1174... | 2.682943 | 1,536 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START job_search_list_jobs]
from google.cloud import talent
import six
def list_jobs(project_id, tenant_id, filter_):
"""List Jobs"""
client = talent.JobServiceClient()
# project_id = 'Your Google Cloud Project ID'
# tenant_id = 'Your Tenant ID (using tenancy is optional)'
# filter_ = 'companyName=projects/my-project/companies/company-id'
if isinstance(project_id, six.binary_type):
project_id = project_id.decode("utf-8")
if isinstance(tenant_id, six.binary_type):
tenant_id = tenant_id.decode("utf-8")
if isinstance(filter_, six.binary_type):
filter_ = filter_.decode("utf-8")
parent = client.tenant_path(project_id, tenant_id)
# Iterate over all results
results = []
for job in client.list_jobs(parent, filter_):
results.append(job.name)
print("Job name: {}".format(job.name))
print("Job requisition ID: {}".format(job.requisition_id))
print("Job title: {}".format(job.title))
print("Job description: {}".format(job.description))
return results
# [END job_search_list_jobs]
list_jobs(
"python-docs-samples-tests",
"b603d325-3fb5-4979-8994-eba4ecf726f4",
'companyName="projects/{}/companies/{}"'.format(
"python-docs-samples-tests", "4c0b9887-8f69-429b-bc67-a072ef55ec3e"
),
)
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.75072 | 694 |
import os
import tensorflow as tf
from inception_resnet_v1 import inference
from triplet_loss_train import InceptionTripletLoss
import numpy as np
import unittest
| [
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
30839,
62,
411,
3262,
62,
85,
16,
1330,
32278,
198,
6738,
15055,
83,
62,
22462,
62,
27432,
1330,
554,
4516,
14824,
37069,
43,
793,
198,
11748,
299,
32152,
355,
45941,
... | 3.489362 | 47 |
#!/usr/bin/env python
from setuptools import find_packages,setup
version = '1.0.0'

# Console entry points exposed by the package.
_entry_points = {
    'console_scripts': [
        'dizicli = dizicli.bin:main',
        'filmcli = dizicli.bin_movie:main',
    ],
}

# Runtime dependencies.
_install_requires = ['requests', 'pyquery', 'demjson', 'pget', 'furl', 'PyExecJS']

setup(
    name='dizicli',
    version=version,
    description='Dizi Crawler',
    author='Batuhan Osman Taskaya',
    author_email='batuhanosmantaskaya@gmail.com',
    url='https://github.com/btaskaya/dizicli',
    download_url='https://github.com/btaskaya/dizicli',
    entry_points=_entry_points,
    install_requires=_install_requires,
    packages=find_packages(exclude=("tests", "tests.*")),
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
40406,
198,
198,
9641,
796,
705,
16,
13,
15,
13,
15,
6,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
67,
528,
291,
4528,
3256... | 2.251724 | 290 |
#!/usr/bin/env python3
# The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime factor of the number 600851475143?
def largest_prime_factor(n):
    """Return the largest prime factor of *n* (n >= 2).

    Trial division: each factor is stripped out as soon as it is found,
    so every divisor encountered is prime. The loop only runs while
    factor**2 <= n, which is fast enough for the 6e11 input below.

    Note: this function was referenced by the asserts below but was
    missing from the module, which made the script fail with NameError.
    """
    factor = 2
    largest = 1
    while factor * factor <= n:
        if n % factor == 0:
            n //= factor
            largest = factor
        else:
            factor += 1
    # Whatever remains is either 1 (n fully factored) or the largest prime.
    return n if n > 1 else largest


if __name__ == "__main__":
    # Sanity checks from the Project Euler #3 problem statement.
    assert largest_prime_factor(13195) == 29
    assert largest_prime_factor(600851475143) == 6857
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
383,
6994,
5087,
286,
1511,
22186,
389,
642,
11,
767,
11,
1511,
290,
2808,
13,
1867,
318,
262,
4387,
6994,
5766,
286,
262,
1271,
10053,
5332,
1415,
2425,
21139,
30,
628,
... | 2.89011 | 91 |
"""isort:skip_file"""
from dagster import solid
@solid
@solid
# start_marker
from dagster import AssetStore, ModeDefinition, pipeline, resource
@resource
@pipeline(mode_defs=[ModeDefinition(resource_defs={"asset_store": my_asset_store})])
# end_marker
| [
37811,
271,
419,
25,
48267,
62,
7753,
37811,
198,
6738,
48924,
1706,
1330,
4735,
628,
198,
31,
39390,
628,
198,
31,
39390,
628,
628,
198,
2,
923,
62,
4102,
263,
198,
6738,
48924,
1706,
1330,
31433,
22658,
11,
10363,
36621,
11,
11523,
... | 2.934066 | 91 |
import unittest
from bacdive.utils import check_allow
if __name__ == "__main__":
    # Run the tests defined in this module when executed as a script.
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
275,
330,
67,
425,
13,
26791,
1330,
2198,
62,
12154,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
628
] | 2.641026 | 39 |
from Dirs import *
import Layers
import Model
from Data import *
import pandas as pd
import numpy as np
import time
Lat_Gen()
| [
6738,
360,
17062,
1330,
1635,
198,
11748,
406,
6962,
198,
11748,
9104,
198,
6738,
6060,
1330,
1635,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
24220,
62,
13746,
3419,
198
] | 3.175 | 40 |
from grouper import permissions
from grouper.fe.forms import PermissionRequestsForm
from grouper.fe.util import GrouperHandler
from grouper.models import REQUEST_STATUS_CHOICES
class PermissionsRequests(GrouperHandler):
    """Allow a user to review a list of permission requests that they have.

    Request handler built on GrouperHandler; presumably renders data from
    the `permissions` module via PermissionRequestsForm and filters on
    REQUEST_STATUS_CHOICES (see imports above) — confirm against the
    handler methods, which are not visible in this excerpt.
    """
| [
6738,
1132,
525,
1330,
21627,
198,
6738,
1132,
525,
13,
5036,
13,
23914,
1330,
2448,
3411,
16844,
3558,
8479,
198,
6738,
1132,
525,
13,
5036,
13,
22602,
1330,
402,
472,
525,
25060,
198,
6738,
1132,
525,
13,
27530,
1330,
4526,
35780,
6... | 3.716049 | 81 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 mjirik <mjirik@mjirik-Latitude-E6520>
#
# Distributed under terms of the MIT license.
"""
Automatic exposure compensation
"""
import logging
logger = logging.getLogger(__name__)
import argparse
import numpy as np
import matplotlib.pyplot as plt
import scipy
if __name__ == "__main__":
    # Script entry point. `main` is expected to be defined earlier in this
    # file (not visible in this excerpt — confirm it exists).
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
69,
12685,
28,
40477,
12,
23,
198,
2,
198,
2,
15069,
10673,
1853,
285,
73,
343,
1134,
1279,
76,
7... | 2.5 | 160 |
# cports build template for the ninja build tool (chimera-linux packaging).
pkgname = "ninja"
pkgver = "1.10.2"
pkgrel = 0
# Build-time (host) dependencies only; ninja has no runtime deps here.
hostmakedepends = ["python"]
pkgdesc = "Small build system with a focus on speed"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Apache-2.0"
url = "https://ninja-build.org"
# Source tarball URL is derived from pkgver; sha256 must match it.
sources = [f"https://github.com/ninja-build/ninja/archive/v{pkgver}.tar.gz"]
sha256 = ["ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed"]
# The test suite is disabled for this package.
options = ["!check"]
# FIXME: docs, completions
| [
35339,
3672,
796,
366,
35073,
6592,
1,
198,
35339,
332,
796,
366,
16,
13,
940,
13,
17,
1,
198,
35339,
2411,
796,
657,
198,
4774,
76,
4335,
538,
2412,
796,
14631,
29412,
8973,
198,
35339,
20147,
796,
366,
18712,
1382,
1080,
351,
257,... | 2.38674 | 181 |
##lots of imports. Some are unnecessary but I left a lot just to be safe...
import matplotlib.pyplot as plt
import matplotlib
from astropy.io import fits
import numpy as np
import astropy.table as t
import matplotlib.image as img
from scipy.optimize import newton
from pathlib import Path
import math
import matplotlib.cm as cm
import matplotlib.mlab as mlab
from matplotlib.patches import Ellipse
import numpy.random as rnd
from matplotlib import patches
import sys
from scipy.optimize import curve_fit
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams['axes.facecolor'] = 'white'
#fun galaxy: 8332-12701
#from marvin.tools.cube import Cube
'''
import urllib.request
import urllib.parse
import urllib.request
'''
import re
import csv
import os
import requests
import numpy as np
from scipy.stats import chi2
#import pylab as mp
#from https://casper.berkeley.edu/astrobaki/index.php/Plotting_Ellipses_in_Python
##stop():
##ON:
##7977-12705
#Check later:
#7977-9102
#Good examples of boring, normal galaxies:
#7990-6101
#Place-IFU (plate # - number of fibers, number bundle that is out
#of the number of bundles with that number of fibers)
##Code from Zach, do not touch!!
def is_sf_array(n2, ha, o3, hb):
    '''
    Classify spaxels as star-forming from BPT line-ratio diagnostics.

    Returns an array of n2.shape holding 1 where the spaxel lies below both
    the Kewley+01 and Kauffmann+03 demarcation curves (star-forming), 0 where
    it does not, and NaN where the line ratios are not determinable.
    '''
    flags = np.zeros(n2.shape)
    log_n2ha = np.log10(n2 / ha)
    log_o3hb = np.log10(o3 / hb)
    # Star-forming: below both demarcation curves and log([NII]/Ha) < 0.
    below_kewley = log_o3hb < (0.61 / (log_n2ha - 0.47)) + 1.19
    below_kauffmann = log_o3hb < (0.61 / (log_n2ha - 0.05)) + 1.3
    star_forming = below_kewley & below_kauffmann & (log_n2ha < 0.0)
    # Undefined ratios (NaN or infinite logs) cannot be classified.
    undetermined = (np.isnan(log_n2ha) | np.isnan(log_o3hb) |
                    np.isinf(log_n2ha) | np.isinf(log_o3hb))
    flags[undetermined] = np.nan
    # Applied after the NaN mask, matching the original precedence.
    flags[star_forming] = 1
    return flags
def n2s2_dopita16_w_errs(ha, n22, s21, s22, ha_err, n22_err, s21_err, s22_err):
    '''
    N2S2 metallicity diagnostic from Dopita et al. (2016), with errors.

    Returns (met, met_err): 12+log(O/H) and its 1-sigma uncertainty as
    propagated from the supplied line-flux errors.
    '''
    s2_total = s21 + s22
    # Diagnostic: log(NII/SII) + 0.264 * log(NII/Ha), zero-pointed at 8.77.
    diagnostic = np.log10(n22 / s2_total) + 0.264 * np.log10(n22 / ha)
    met = 8.77 + diagnostic
    # Propagate the flux errors in quadrature; 1/ln(10) converts the
    # relative errors into an error on the base-10 logarithm.
    s2_total_err = np.sqrt(s21_err ** 2 + s22_err ** 2)
    met_err = (1.0 / np.log(10)) * np.sqrt(
        (1 + 0.264 ** 2) * (n22_err / n22) ** 2
        + (s2_total_err / s2_total) ** 2
        + (ha_err / ha) ** 2
    )
    return met, met_err
##Takes an input array and returns the values if a condition is met. Basically a glorified call to numpy.where
#It will take an image and radius array (distarr; same size as the image) as arguments. Because of the way it was coded up, it works best when the distarr array is in units of pixels, i.e. don't use the R_re array.
def radial_profile(image,centre=None,distarr=None,mask=None,binwidth=2,radtype='unweighted'):
    '''
    Azimuthally averaged radial profile of a 2D image.

    image   : 2D array to calculate the radial profile of.
    centre  : centre of the image in pixel coordinates; defaults to the array
              centre. Only used when distarr is not supplied.
    distarr : 2D array giving the distance of each pixel from the centre, in
              pixels. Computed from `centre` when omitted.
    mask    : 2D array, 1 to include a pixel, 0 to exclude it. Defaults to all.
    binwidth: width of the radial bins in pixels.
    radtype : 'unweighted' -> middle of each radial bin;
              'weighted'   -> mean radius of the pixels that fell in the bin.

    Returns (r, rp, nums, sig): bin radii in pixels, mean intensity per bin,
    number of finite pixels per bin, and the intensity std. dev. per bin.
    '''
    if centre is None:
        centre=np.array(image.shape,dtype=float)/2
    # Bug fix: the distance array must be built *before* rescaling by
    # binwidth. The original divided distarr by binwidth first, which raised
    # a TypeError whenever distarr was left as None (None / binwidth).
    if distarr is None:
        y,x=np.indices(image.shape)
        distarr=np.sqrt((x-centre[0])**2 + (y-centre[1])**2)
    if mask is None:
        mask=np.ones(image.shape)
    # Work in units of bins from here on; converted back to pixels at the end.
    distarr=distarr/binwidth
    rmax=int(np.max(distarr))
    r_edge=np.linspace(0,rmax,rmax+1)
    rp=np.zeros(len(r_edge)-1)*np.nan
    nums=np.zeros(len(r_edge)-1)*np.nan
    sig=np.zeros(len(r_edge)-1)*np.nan
    r=np.zeros(len(r_edge)-1)*np.nan
    for i in range(0,len(r)):
        # Pixels belonging to this annulus: inside the bin edges, not masked
        # out, and carrying a finite intensity.
        inbin=(distarr>=r_edge[i]) & (distarr<r_edge[i+1]) & (mask==1.0) & (np.isinf(image)==False)
        rp[i]=np.nanmean(image[np.where(inbin)])
        # Count only pixels that are finite AND not NaN.
        nums[i]=len(np.where(inbin & (np.isnan(image)==False))[0])
        sig[i]=np.nanstd(image[np.where(inbin)])
        if radtype=='unweighted':
            r[i]=(r_edge[i]+r_edge[i+1])/2.0
        elif radtype=='weighted':
            r[i]=np.nanmean(distarr[np.where(inbin)])
    # Convert bin units back to pixels.
    r=r*binwidth
    return r,rp,nums,sig
"""
scatter_if:
Creates a scatter plot from two arrays, but only plots points if a condition is met. All other plt.scatter functionality should be retained.
"""
#plate_num = ['8454', '9041', '7990', '8259', '8318', '9026']
#fiber_num = ['12703', '12701', '6104', '9101', '9101', '3703']
##plate_num = ['8455']
##fiber_num = ['3701']
##fiber_num = ['1901', '1902', '3701', '3702', '3703', '3704', '6101', '6102', '6103', '6104', '9101', '9102', '12701', '12702', '12704', '12705']
##After hoefully downloading all the required fits files, this will read all the names
#file_names = os.listdir("/home/celeste/Documents/astro_research/keepers")
#file_names = os.listdir("/home/celeste/Documents/astro_research/fits_files")
# Whitelist of galaxies (plate-IFU identifiers) to process this run.
filename = '/home/celeste/Documents/astro_research/thesis_git/Good_Galaxies_SPX_3_N2S2.txt'
file_names = np.genfromtxt(filename, usecols=(0), skip_header=1, dtype=str, delimiter=',')
# Raw per-galaxy mass table, kept as unparsed lines.
with open('/home/celeste/Documents/astro_research/thesis_git/mass_data.txt') as f:
    mass_data=[]
    for line in f:
        mass_data.append(line)
#print(file_names)
##creates the empty arrays to append the names of the files in the folder
plate_num = []
fiber_num = []
split = []
#file_open = open("error_files.txt", "w")
##Goes through all files in the folder
for ii in range(0, len(file_names)):
    ##Removes all non alphanumeric characters and only leaves numbers and periods
    file_names[ii] = re.sub("[^0-9-]", "", file_names[ii])
    #print(file_names[ii])
    #print(file_names[ii][4:])
    #print(file_names[ii][:4])
    ##splits the two numbers into a plate number and fiber number
    one, two = (str(file_names[ii]).split('-'))
    ##splits the two numbers into a plate number and fiber number
    plate_num.insert(ii, one)
    fiber_num.insert(ii, two)
#print(plate_num[0] + "-" + fiber_num[0])
#print(file_names[0])
##Main loop over all the plates
"""
Bad Plots?
8445-3701
8332-1902
8309-3703
"""
# NOTE(review): the two assignments below discard the full list parsed above
# and restrict the run to a single galaxy — presumably a debugging leftover;
# confirm before a production run.
plate_num=['7960']
fiber_num = ['12701']
#plate_num = ['8252', '8338', '8568', '9865']
#fiber_num = ['12705', '12701', '12702', '12705']
# Counters for galaxies skipped at each failure point in the main loop.
count_continue1 = 0
count_continue2 = 0
count_continue3 = 0
# Accumulated reports of data files that failed to open.
failed_maps = "failed maps\n"
failed_logcube = "failed_logcube\n"
failed_other = "failed_TYPERROR\n"
for i in range(0, len(plate_num)): ##len(plate_num)
##for j in range(0, len(fiber_num)):
print(plate_num[i] + '-' + fiber_num[i])
print("Index: " + str(i))
##some black magic
#hdulist = fits.open('/home/celeste/Documents/astro_research/keepers/manga-' + plate_num[i] + '-' + fiber_num[i] + '-MAPS-SPX-GAU-MILESHC.fits.gz')
#hdulist = fits.open('/home/celeste/Documents/astro_research/downloaded_data/MPL-7/manga-' + plate_num[i] + '-' + fiber_num[i] + '-HYB-SPX-GAU-MILESHC.fits.gz')
try:
hdulist = fits.open('/media/celeste/Hypatia/MPL7/HYB/allmaps/manga-' + plate_num[i] + '-' + fiber_num[i] + '-MAPS-HYB10-GAU-MILESHC.fits.gz')
except FileNotFoundError:
failed_maps = failed_maps + str(plate_num[i]) + "-" + str(fiber_num[i]) + "\n"
print("failed on the MAPS file.")
#print(failed_maps)
print("------------------------------------------")
continue
#logcube = fits.open('/home/celeste/Documents/astro_research/logcube_files/manga-'+ str(plate_num[i])+ '-' + str(fiber_num[i]) + '-LOGCUBE.fits.gz')
try:
logcube = fits.open('/media/celeste/Hypatia/MPL7/LOGCUBES/manga-'+ str(plate_num[i])+ '-' + str(fiber_num[i]) + '-LOGCUBE.fits.gz')
except FileNotFoundError:
failed_logcube = failed_logcube + str(plate_num[i]) + "-" + str(fiber_num[i]) + "\n"
print("failed on the LOGCUBE file.")
print(failed_logcube)
print("------------------------------------------")
continue
##assigns the plate id based on what is in the data cube
plate_id = hdulist['PRIMARY'].header['PLATEIFU']
##gets official plate number
plate_number = hdulist['PRIMARY'].header['PLATEID']
fiber_number = hdulist['PRIMARY'].header['IFUDSGN']
##gets the hydrogen alpha and hydrogen beta data
Ha = hdulist['EMLINE_GFLUX'].data[18,...]
Hb = hdulist['EMLINE_GFLUX'].data[1,...]
snmap = hdulist['SPX_SNR'].data
fluxes = hdulist['EMLINE_GFLUX'].data
#errs = hdulist['EMLINE_GFLUX_ERR'].data
errs=(hdulist['EMLINE_GFLUX_IVAR'].data)**-0.5
H_alpha = fluxes[18,:,:]
Ha = H_alpha
Ha_err = errs[18,:,:]
OIII = fluxes[13,:,:]
o3_err = errs[13,:,:]
H_beta = fluxes[11,:,:]
Hb_err = errs[11,:,:]
n2_err = errs[19,:,:]
NII = fluxes[19,:,:]
s21 = fluxes[20,:,:]
s22 = fluxes[21,:,:]
s21_err = errs[20,:,:]
s22_err = errs[21,:,:]
#I band for contours
contours_i = logcube['IIMG'].data
contours_i_same = contours_i
velocity = hdulist['EMLINE_GVEL'].data[18,...]
velocity_err = (hdulist['EMLINE_GVEL_IVAR'].data[18,...])**-0.5
ew_cut = hdulist['EMLINE_GEW'].data[18,...]
#print(hdulist['PRIMARY'].header)
#quit()
##Imports the thingy we need to get the images of the galaxy without having to download directly all the pictures. This also bypasses the password needed
import requests
"""
r = requests.get('https://data.sdss.org/sas/mangawork/manga/spectro/redux/v2_3_1/' + str(plate_num[i]) + '/stack/images/' + str(fiber_num[i]) + '.png', auth=('sdss', '2.5-meters'))
##Saves the image
with open('/home/celeste/Documents/astro_research/astro_images/marvin_images/' + str(plate_id) + '.png', 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
"""
##Also need OIII 5007 and NII 6584
##Gets the correct lines
""""
H_alpha = get_line_ew(hdulist, 'Ha-6564')
H_beta = get_line_ew(hdulist, 'Hb-4862')
OIII = get_line_ew(hdulist, 'OIII-5008')
NII = get_line_ew(hdulist, 'NII-6585')
"""
##line ratios
#x: n2/Halpha (plot the log)
O_B = OIII/H_beta
N_A = NII/H_alpha
R = O_B/N_A
logR = np.log10(R)
c0 = 0.281
c1 = (-4.765)
c2 = (-2.268)
cs = np.array([c0, c1, c2])
##A lambda function, do not touch!
x2logR = lambda x, cs: np.sum((c*x**p for p,c in zip(np.linspace(0, len(cs)-1, len(cs)), cs)))
x2logR_zero = lambda x, cs, logR: x2logR(x, cs)-logR-0.001
##takes the log of the OH12 array
#logOH12 = np.ma.array(np.zeros(logR.shape),mask=logR.mask)
logOH12_old = 8.73-0.32*np.log10(R)
logOH12, logOH12error = n2s2_dopita16_w_errs(H_alpha, NII, s21, s22, Ha_err, n2_err, s21_err, s22_err)
is_starforming = is_sf_array(NII,H_alpha,OIII, H_beta)
##Finds the standard deviation and mean for future use
std_dev = np.std(Ha)
mean = np.mean(Ha)
##if it deviates too much from the mean it is removed
for j in range(len(Ha)):
for k in range(len(Ha[0])):
if (Ha[j][k] > std_dev*20+mean):
Ha[j][k] = np.nan
##Creates a shape that is the same size as the h-alpha array
shape = (Ha.shape[1])
shapemap = [-.25*shape, .25*shape, -.25*shape, .25*shape]
##Changes the font size
matplotlib.rcParams.update({'font.size': 20})
#Second image we want?
fig = plt.figure(figsize=(30,18), facecolor='white')
#plt.plot(hd2[0], Ha[7])
#sky coordinates relative to center
#exent = .5
##places text on the plot
plateifu = plate_id
'''
r = requests.get('https://data.sdss.org/sas/mangawork/manga/spectro/redux/v2_3_1/' + str(plate_num[i]) + '/stack/images/' + str(fiber_num[i]) + '.png', auth=('sdss', '2.5-meters'))
##Saves the image
with open('/home/celeste/Documents/astro_research/astro_images/marvin_images/' + str(plate_id) + '.png', 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
'''
image = img.imread('/home/celeste/Documents/astro_research/astro_images/marvin_images/' + str(plateifu) + '.png')
drpall = t.Table.read('/home/celeste/Documents/astro_research/drpall-v2_3_1.fits')
r = hdulist['SPX_ELLCOO'].data[0, ...]
obj = drpall[drpall['plateifu']==plateifu][0]
#nsa_sersic_ba for axis ratio
#axis=drpall['nsa_sersic_ba'].data[0, ...]
Re = obj['nsa_elpetro_th50_r']
pa = obj['nsa_elpetro_phi']
ba = obj['nsa_elpetro_ba']
#radius of each spec in
r_Re = r/Re
r_Re = hdulist['SPX_ELLCOO'].data[1]
print(plateifu)
mass = math.log10(obj['nsa_elpetro_mass'])-np.log10(.49)
#petrosian
print("mass from data", mass)
axis=obj['nsa_sersic_ba']
#closest to 1, above .8
#print("Axis ratio: ", axis)
#print("Mass is " + str(mass))
if mass > 10.75:
m = -0.16878698011761817
b = 8.92174257450408
if mass > 10.50 and mass <= 10.75:
m = -0.19145937059393828
b = 8.898917413495317
if mass > 10.25 and mass <= 10.50:
m = -0.16938127151421675
b = 8.825998835583249
if mass > 10.00 and mass <= 10.25:
m = -0.1762907767970223
b = 8.713865209075324
if mass > 9.75 and mass <= 10.00:
m = -0.14756252418062643
b = 8.59167993089605
if mass > 9.50 and mass <= 9.75:
m = -0.07514461331863775
b = 8.36144939226056
if mass > 9.25 and mass <= 9.50:
m = -0.05300368644036175
b = 8.26602769508888
if mass <= 9.25:
m = -0.05059620593888811
b = 8.147647436306206
#print("Slope: ", m)
#print("intercept: ", b)
zeros= False
for element in range(0, len(O_B)):
for item in range(0, len(O_B[element])):
if O_B[element][item] >= 0:
zeros = True
##Adds another subplot with the plateifu
a = fig.add_subplot(1,2,1)
print("plate ifu for plotting image" + plateifu)
#print(plateifu)
try:
image = img.imread('/home/celeste/Documents/astro_research/astro_images/marvin_images/' + plateifu + '.png')
except ValueError:
print("No image.")
print("========================================================================================")
lum_img = image[:,:,0]
#plt.subplot(121)
imgplot = plt.imshow(image)
plt.title("Galaxy " + str(plate_number) + "-" + str(fiber_number))
#################################################################################
#
# BPT Diagram Creator
#
#################################################################################
#Sum the fluxes over a 3" center of the galaxy, put into is starforming
ax_bpt = fig.add_subplot(1, 2, 2)
if zeros == True:
total=0
sfr=0
nsfr=0
ax_bpt.set_aspect(1)
ax_bpt.set_title("BPT Diagram")
#Kewley
X = np.linspace(-1.5, 0.3)
Y = ((0.61/(X-0.47))+1.19)
#Kauffmann
Xk = np.linspace(-1.5,0.)
Yk= (0.61/(Xk-0.05)+1.3)
ax_bpt.plot(X, Y, '--', color = "red", lw = 1, label = "Kewley+01")
ax_bpt.plot(Xk, Yk, '-', color = "blue", lw = 1, label = "Kauffmann+03")
x=np.linspace(-0.133638005,0.75,100)
y=2.1445*x+0.465
ax_bpt.plot(x, y, '--', color = "green", lw = 1, label = "Seyfert/LINER")
bpt_n2ha = np.log10(NII/H_alpha)
bpt_o3hb = np.log10(OIII/H_beta)
badpix = ((Ha/Ha_err) < 5) | ((H_beta/Hb_err) < 5) | ((OIII/o3_err) < 3) | ((NII/n2_err) < 3) | np.isinf(bpt_n2ha) | np.isinf(bpt_o3hb)
bpt_n2ha[badpix] = np.nan
bpt_o3hb[badpix] = np.nan
bpt_o3hb95 = np.nanpercentile(bpt_o3hb, 98)
bpt_o3hb5 = np.nanpercentile(bpt_o3hb, 2)
xmin = np.nanmin(bpt_n2ha) - 0.1
xmax = np.nanmax(bpt_n2ha) + 0.1
#ymin = bpt_o3hb5 - 0.1
#ymax = bpt_o3hb95 + 0.1
ymin = np.nanmin(bpt_o3hb) - 0.1
ymax = np.nanmax(bpt_o3hb) + 0.1
plt.legend()
#bad = is_starforming != 1
#r_Rebpt = r_Re[bad]
scatter_if(bpt_n2ha, bpt_o3hb, (is_starforming == 1) | (is_starforming == 0), c=r_Re, marker = ".", s = 65, alpha = 0.5, cmap = 'jet_r')
#scatter_if(bpt_n2ha, bpt_o3hb, is_starforming == 0, c=r_Re, marker = ".", s = 65, alpha = 0.5, cmap = 'jet')
ax_bpt.set_xlim(xmin, xmax)
ax_bpt.set_ylim(ymin, ymax)
ax_bpt.set_aspect((xmax-xmin)/(ymax-ymin))
ax_bpt.set_xlabel('Log([NII]/H$\\alpha$)')
ax_bpt.set_ylabel('Log([OIII]/H$\\beta$)')
#cb_max = math.ceil(np.amax(r_Re))
cb_bpt = plt.colorbar(shrink = .7)
cb_bpt.set_label('r/$R_e$', rotation = 270, labelpad = 25)
#plt.axes().set_aspect('equal')
#plt.clim(0,20)
#first_time = 1
plt.tight_layout()
try:
plt.tight_layout()
except ValueError:
print("all NaN")
print("==========================================================================================")
#file_open = open("error_files.txt", "a")
#first_time = 0
#file_open.write(plateifu + "\n")
#file.close()
print("value error, all NaN")
count_continue1=count_continue1+1
continue
Ha[is_starforming==0]=np.nan
logOH12[is_starforming==0]=np.nan
#print("total", total)
#print("nsfr", nsfr)
#print("sfr", sfr)
#plt.show()
plt.savefig('/home/celeste/Documents/astro_research/paper_plots/bpt_dia/bpt_dia_image_' + str(plate_id) + '.png')
| [
2235,
75,
1747,
286,
17944,
13,
2773,
389,
13114,
475,
314,
1364,
257,
1256,
655,
284,
307,
3338,
986,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
198,
6738,
6468,
28338,
13,
952,
1330,... | 1.993074 | 9,529 |
from __future__ import absolute_import
import argparse
import cStringIO
import fnmatch
import os
import re
import struct
import textwrap
from itertools import groupby
from sc2reader.exceptions import FileError
LITTLE_ENDIAN,BIG_ENDIAN = '<','>'
class ReplayBuffer(object):
""" The ReplayBuffer is a wrapper over the cStringIO object and provides
convenience functions for reading structured data from Starcraft II
replay files. These convenience functions can be sorted into several
different categories providing an interface as follows:
Stream Manipulation::
tell(self)
skip(self, amount)
reset(self)
align(self)
seek(self, position, mode=SEEK_CUR)
Data Retrieval::
read_variable_int(self)
read_string(self,additional)
read_timestamp(self)
read_count(self)
read_data_structure(self)
read_object_type(self, read_modifier=False)
read_object_id(self)
read_coordinate(self)
read_bitmask(self)
read_range(self, start, end)
Basic Reading::
read_byte(self)
read_int(self, endian=LITTLE_ENDIAN)
read_short(self, endian=LITTLE_ENDIAN)
read_chars(self,length)
read_hex(self,length)
Core Reading::
shift(self,bits)
read(bytes,bits)
The ReplayBuffer additionally defines the following properties:
left
length
cursor
"""
'''
Additional Properties
'''
@property
@property
@property
'''
Stream manipulation functions
'''
'''
Read "basic" structures
'''
def read_byte(self):
""" Basic byte read """
if self.bit_shift==0:
return ord(self.read_basic(1))
else:
return self.read(1)[0]
def read_int(self, endian=LITTLE_ENDIAN):
""" int32 read """
chars = self.read_basic(4) if self.bit_shift==0 else self.read_chars(4)
return struct.unpack(endian+'I', chars)[0]
def read_short(self, endian=LITTLE_ENDIAN):
""" short16 read """
chars = self.read_basic(2) if self.bit_shift==0 else self.read_chars(2)
return struct.unpack(endian+'H', chars)[0]
'''
Read replay-specific structures
'''
    def read_variable_int(self):
        """ Blizzard VL integer """
        # Little-endian base-128 varint: the low 7 bits of each byte carry
        # payload, the high bit flags a continuation byte.
        byte = self.read_byte()
        shift, value = 1,byte & 0x7F
        while byte & 0x80 != 0:
            byte = self.read_byte()
            value = ((byte & 0x7F) << shift * 7) | value
            shift += 1
        #The last bit of the result is a sign flag
        return pow(-1, value & 0x1) * (value >> 1)
def read_string(self, length=None):
"""<length> ( <char>, .. ) as unicode"""
return self.read_chars(length if length!=None else self.read_byte())
    def read_timestamp(self):
        """
        Timestamps are 1-4 bytes long and represent a number of frames. Usually
        it is time elapsed since the last event. A frame is 1/16th of a second.
        The least significant 2 bits of the first byte specify how many extra
        bytes the timestamp has.
        """
        first = self.read_byte()
        # Upper 6 bits are data; the low 2 bits give the extra-byte count.
        time,count = first >> 2, first & 0x03
        if count == 0:
            return time
        elif count == 1:
            return time << 8 | self.read_byte()
        elif count == 2:
            return time << 16 | self.read_short()
        elif count == 3:
            return time << 24 | self.read_short() << 8 | self.read_byte()
        else:
            # Unreachable: `first & 0x03` can only be 0-3; kept as a guard.
            raise ValueError()
    def read_data_struct(self):
        """
        Read a Blizzard data-structure. Structure can contain strings, lists,
        dictionaries and custom integer types.

        Returns a str, list, dict, or int depending on the leading type-flag
        byte; raises TypeError for an unrecognised flag.
        """
        #The first byte serves as a flag for the type of data to follow
        datatype = self.read_byte()
        if datatype == 0x02:
            #0x02 is a byte string with the first byte indicating
            #the length of the byte string to follow
            count = self.read_count()
            return self.read_string(count)
        elif datatype == 0x04:
            #0x04 is an serialized data list with first two bytes always 01 00
            #and the next byte indicating the number of elements in the list
            #each element is a serialized data structure
            self.skip(2) #01 00
            return [self.read_data_struct() for i in range(self.read_count())]
        elif datatype == 0x05:
            #0x05 is a serialized key,value structure with the first byte
            #indicating the number of key,value pairs to follow
            #When looping through the pairs, the first byte is the key,
            #followed by the serialized data object value
            data = dict()
            for i in range(self.read_count()):
                count = self.read_count()
                key,value = count, self.read_data_struct()
                data[key] = value #Done like this to keep correct parse order
            return data
        elif datatype == 0x06:
            # 0x06: single byte value.
            return self.read_byte()
        elif datatype == 0x07:
            # 0x07: 4-byte little-endian integer.
            return self.read_int()
        elif datatype == 0x09:
            # 0x09: Blizzard variable-length (zigzag) integer.
            return self.read_variable_int()
        raise TypeError("Unknown Data Structure: '%s'" % datatype)
def read_object_type(self, read_modifier=False):
""" Object type is big-endian short16 """
type = self.read_short(endian=BIG_ENDIAN)
if read_modifier:
type = (type << 8) | self.read_byte()
return type
    def read_object_id(self):
        """ Object ID is big-endian int32 """
        # Object ids are serialized big-endian, unlike most replay integers.
        return self.read_int(endian=BIG_ENDIAN)
    def read_bitmask(self):
        """ Reads a bitmask given the current bitoffset """
        # A one-byte bit-length prefix, then ceil(length/8) bytes of mask.
        length = self.read_byte()
        bytes = reversed(self.read(bits=length))
        mask = 0
        # Fold the reversed bytes into one integer, most significant first.
        for byte in bytes:
            mask = (mask << 8) | byte
        # Turn things like 10010011 into [True, False, False, True,...]
        return list(reversed(_make_mask(mask, length)))
'''
Base read functions
'''
    def shift(self, bits):
        """
        The only valid use of Buffer.shift is when you know that there are
        enough bits left in the loaded byte to accommodate your request.

        If there is no loaded byte, or the loaded byte has been exhausted,
        then Buffer.shift(8) could technically be used to read a single
        byte-aligned byte.
        """
        try:
            #declaring locals instead of accessing dict on multiple use seems faster
            bit_shift = self.bit_shift
            new_shift = bit_shift+bits
            #make sure there are enough bits left in the byte
            if new_shift <= 8:
                if not bit_shift:
                    self.last_byte = ord(self.read_basic(1))
                #using a bit_mask_array tested out to be 20% faster, go figure
                ret = (self.last_byte >> bit_shift) & self.lo_masks[bits]
                #using an if for the special case tested out to be faster, hrm
                self.bit_shift = 0 if new_shift == 8 else new_shift
                return ret
            else:
                msg = "Cannot shift off %s bits. Only %s bits remaining."
                raise ValueError(msg % (bits, 8-self.bit_shift))
        except TypeError:
            # ord() raising TypeError means read_basic returned no data,
            # i.e. the underlying stream is exhausted.
            raise EOFError("Cannot shift requested bits. End of buffer reached")
class PersonDict(dict):
"""Delete is supported on the pid index only"""
class TimeDict(dict):
""" Dict with frames as key """
class Selection(TimeDict):
""" Buffer for tracking selections in-game """
    @classmethod
    def replace(cls, selection, indexes):
        """Return the objects of *selection* found at *indexes* — i.e. the
        new selection after a replace operation. (Docstring fixed: the
        previous text was copy-pasted from `deselect`.)"""
        return [ selection[i] for i in indexes ]
@classmethod
def deselect(cls, selection, indexes):
""" Deselect objects according to indexes """
return [ selection[i] for i in range(len(selection)) if i not in indexes ]
@classmethod
def mask(cls, selection, mask):
""" Deselect objects according to deselect mask """
if len(mask) < len(selection):
# pad to the right
mask = mask+[False,]*(len(selection)-len(mask))
return [ obj for (slct, obj) in filter(lambda (slct, obj): not slct, zip(mask, selection)) ]
import inspect
def read_header(file):
    ''' Parse the MPQ archive header of a Starcraft II replay file.

        See: http://wiki.devklog.net/index.php?title=The_MoPaQ_Archive_Format

        Returns (release, frames): the release/version array and the frame
        count used by sc2reader. For more detailed header information,
        access mpyq directly.
    '''
    buf = ReplayBuffer(file)
    # MPQ archives open with the magic bytes 'MPQ\x1b' (hex 4D50511B).
    if buf.empty or buf.read_hex(4).upper() != "4D50511B":
        raise FileError("File '%s' is not an MPQ file" % file.name)
    # Fixed-size header fields; the read order matters (stream side effects).
    max_data_size = buf.read_int(LITTLE_ENDIAN)
    header_offset = buf.read_int(LITTLE_ENDIAN)
    data_size = buf.read_int(LITTLE_ENDIAN)
    header_data = buf.read_data_struct()
    # header_data[1] -> [unknown, version, major, minor, build, unknown];
    # header_data[3] -> frame count.
    return header_data[1].values(), header_data[3]
from datetime import timedelta
class Formatter(argparse.RawTextHelpFormatter):
"""FlexiFormatter which respects new line formatting and wraps the rest
Example:
>>> parser = argparse.ArgumentParser(formatter_class=FlexiFormatter)
>>> parser.add_argument('a',help='''\
... This argument's help text will have this first long line\
... wrapped to fit the target window size so that your text\
... remains flexible.
...
... 1. This option list
... 2. is still persisted
... 3. and the option strings get wrapped like this\
... with an indent for readability.
...
... You must use backslashes at the end of lines to indicate that\
... you want the text to wrap instead of preserving the newline.
... ''')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
@classmethod
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
1822,
29572,
198,
11748,
269,
10100,
9399,
198,
11748,
24714,
15699,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2878,
198,
11748,
2420,
37150,
198,
198,
6738,
340,
861,... | 2.357654 | 4,468 |
import sqlite3 as sql
from flask import request
from flask_restful import Resource
| [
11748,
44161,
578,
18,
355,
44161,
198,
198,
6738,
42903,
1330,
2581,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
628
] | 4.047619 | 21 |
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import xmlrpclib
# Create one proxy per backend arithmetic server, each listening on its own
# port, so this intermediary can forward operations to them.
servidorSuma = xmlrpclib.ServerProxy('http://localhost:9998')
servidorSub = xmlrpclib.ServerProxy('http://localhost:9997')
servidorMul = xmlrpclib.ServerProxy('http://localhost:9996')
servidorDiv = xmlrpclib.ServerProxy('http://localhost:9995')
servidorRad = xmlrpclib.ServerProxy('http://localhost:9994')
servidorPow = xmlrpclib.ServerProxy('http://localhost:9993')
servidorLog = xmlrpclib.ServerProxy('http://localhost:9992')
# Restrict to a particular path.
# Create the intermediary server. NOTE(review): RequestHandler and MyFuncs
# are expected to be defined earlier in this file (not visible in this
# excerpt) — confirm before running.
server = SimpleXMLRPCServer(("localhost", 9999),
                            requestHandler=RequestHandler)
server.register_introspection_functions()
server.register_instance(MyFuncs())
print "servidorIntermedioCorriendo"
# Serve requests indefinitely (blocking call).
server.serve_forever()
| [
6738,
17427,
55,
5805,
49,
5662,
10697,
1330,
17427,
55,
5805,
49,
5662,
10697,
198,
6738,
17427,
55,
5805,
49,
5662,
10697,
1330,
17427,
55,
5805,
49,
5662,
18453,
25060,
198,
11748,
35555,
81,
79,
565,
571,
198,
198,
2,
8625,
1192,
... | 2.771014 | 345 |
# Generated by Django 3.2.8 on 2021-11-20 10:01
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
1238,
838,
25,
486,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import dict_tools
import pop.hub
import mock
import pytest
import random
import string
from typing import Any, Dict, List
@pytest.fixture(scope="session", autouse=True)
@pytest.fixture(scope="session", autouse=True)
@pytest.fixture(scope="session")
@pytest.mark.asyncio
@pytest.fixture(scope="module")
@pytest.fixture(scope="session", autouse=True)
| [
11748,
8633,
62,
31391,
198,
11748,
1461,
13,
40140,
198,
11748,
15290,
198,
11748,
12972,
9288,
198,
11748,
4738,
198,
11748,
4731,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
2998... | 2.918699 | 123 |
"""
The MIT License
Copyright (c) Tigran Hakobyan. http://tiggreen.me
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: Tigran Hakobyan (txh7358@rit.edu)
Version: 1.0
Date: 11/29/2014
WordCount program that inherits from the MapReduceInterface framework.
Counts the number of occurances of each word in the files.
"""
from map_reduce import *
"""
WordCount class.
"""
class WordCount(MapReduceInterface):
"""
files: the files that the WordCount program should run on.
mapper: the user defined map function.
reducer: the user defined reduce function.
"""
"""
The map function for WordCount program.
file_chunk is the same format file as the initial input files.
Produce a (key, value) pair for each word in the text.
"""
"""
Gets a list of [('a', 1), ('b', [1, 1, 1, 1]), ...]
and returns [(a, total_freq), (b, total_freq), ...].
"""
| [
37811,
198,
464,
17168,
13789,
198,
198,
15269,
357,
66,
8,
14189,
2596,
24734,
672,
4121,
13,
2638,
1378,
83,
6950,
1361,
13,
1326,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
... | 3.479167 | 528 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
198
] | 3.285714 | 21 |
import os
import sys
import datetime
import git
from jinja2 import Template
ADD_REQUIREMENTS_MACRO = """
{%- macro add_requirements(fname) -%}
# Requirements populated from {{fname}}
{% for requirement in read_requirements(fname) -%}
RUN pip install "{{ requirement }}"
{% endfor -%}
{%- endmacro -%}
"""
if __name__ == '__main__':
main(sys.argv[1])
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
4818,
8079,
198,
198,
11748,
17606,
198,
6738,
474,
259,
6592,
17,
1330,
37350,
628,
628,
198,
198,
29266,
62,
2200,
49128,
28957,
62,
44721,
13252,
796,
37227,
198,
90,
33963,
15021,
751,
62... | 2.757576 | 132 |
import calendar
print(calendar.month(2018, 8)) #This is for particular month calendar
print(calendar.calendar(2020)) #This if for printing whole year calendar
| [
11748,
11845,
201,
198,
201,
198,
4798,
7,
9948,
9239,
13,
8424,
7,
7908,
11,
807,
4008,
1303,
1212,
318,
329,
1948,
1227,
11845,
201,
198,
201,
198,
4798,
7,
9948,
9239,
13,
9948,
9239,
7,
42334,
4008,
1303,
1212,
611,
329,
13570,
... | 3.458333 | 48 |
"""Simulation"""
| [
37811,
8890,
1741,
37811,
198
] | 3.4 | 5 |
# https://leetcode.com/problems/all-oone-data-structure/
#
# algorithms
# Hard (29.33%)
# Total Accepted: 17,753
# Total Submissions: 60,523
# Your AllOne object will be instantiated and called as such:
# obj = AllOne()
# obj.inc(key)
# obj.dec(key)
# param_3 = obj.getMaxKey()
# param_4 = obj.getMinKey()
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
439,
12,
78,
505,
12,
7890,
12,
301,
5620,
14,
198,
2,
198,
2,
16113,
198,
2,
6912,
357,
1959,
13,
2091,
4407,
198,
2,
7472,
21699,
276,
25,
220,
220,
220,
1596,
11,... | 2.578512 | 121 |
"""
This app creates a simple sidebar layout using inline style arguments and the
dbc.Nav component.
dcc.Location is used to track the current location. There are two callbacks,
one uses the current location to render the appropriate page content, the other
uses the current location to toggle the "active" properties of the navigation
links.
For more details on building multi-page Dash applications, check out the Dash
documentation: https://dash.plot.ly/urls
"""
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# the style arguments for the sidebar. We use position:fixed and a fixed width
SIDEBAR_STYLE = {
"position": "fixed",
"top": 0,
"left": 0,
"bottom": 0,
"width": "16rem",
"padding": "2rem 1rem",
"background-color": "#f8f9fa",
}
# the styles for the main content position it to the right of the sidebar and
# add some padding.
CONTENT_STYLE = {
"margin-left": "18rem",
"margin-right": "2rem",
"padding": "2rem 1rem",
}
sidebar = html.Div(
[
html.H2("Graphs", className="display-4"),
html.Hr(),
html.P(
"GRAPH Data by ", className="lead"
),
dbc.Nav(
[
dbc.NavLink("States", href="/page-1", id="page-1-link"),
dbc.NavLink("Measurements", href="/page-4", id="page-4-link")
],
vertical=True,
pills=True,
),
],
style=SIDEBAR_STYLE,
)
content = html.Div(id="page-content", style=CONTENT_STYLE)
| [
37811,
198,
1212,
598,
8075,
257,
2829,
40217,
12461,
1262,
26098,
3918,
7159,
290,
262,
198,
9945,
66,
13,
30575,
7515,
13,
198,
198,
67,
535,
13,
14749,
318,
973,
284,
2610,
262,
1459,
4067,
13,
1318,
389,
734,
869,
10146,
11,
198... | 2.472622 | 694 |
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import csv
import tempfile
from pathlib import Path
from typing import cast
import click
import numpy as np
import pandas as pd # type: ignore
from ..optbench import Scenario, scenarios, sql, util
# import logging
# logging.basicConfig(encoding='utf-8', level=logging.DEBUG)
# Typer CLI Application
# ---------------------
@click.group()
@app.command()
@click.argument("scenario", **Arg.scenario)
@click.option("--db-port", **Opt.db_port)
@click.option("--db-host", **Opt.db_host)
@click.option("--db-user", **Opt.db_user)
def init(
scenario: Scenario,
db_port: int,
db_host: str,
db_user: str,
) -> None:
"""Initialize the DB under test for the given scenario."""
info(f'Initializing "{scenario}" as the DB under test')
try:
db = sql.Database(port=db_port, host=db_host, user=db_user)
db.drop_database(scenario)
db.create_database(scenario)
db.set_database(scenario)
db.execute_all(statements=sql.parse_from_file(scenario.schema_path()))
except Exception as e:
raise click.ClickException(f"init command failed: {e}")
@app.command()
@click.argument("scenario", **Arg.scenario)
@click.option("--samples", **Opt.samples)
@click.option("--repository", **Opt.repository)
@click.option("--print-results", **Opt.print_results)
@click.option("--db-port", **Opt.db_port)
@click.option("--db-host", **Opt.db_host)
@click.option("--db-user", **Opt.db_user)
def run(
scenario: Scenario,
samples: int,
repository: Path,
print_results: bool,
db_port: int,
db_host: str,
db_user: str,
) -> None:
"""Run benchmark in the DB under test for a given scenario."""
info(f'Running "{scenario}" scenario')
try:
db = sql.Database(port=db_port, host=db_host, user=db_user)
db.set_database(scenario)
df = pd.DataFrame(
data={
query.name(): np.array(
[
cast(
np.timedelta64,
db.explain(query, timing=True).optimization_time(),
).astype(int)
for _ in range(samples)
]
)
for query in [
sql.Query(query)
for query in sql.parse_from_file(scenario.workload_path())
]
}
)
if print_results:
print(df.to_string())
results_path = util.results_path(repository, scenario, db.mz_version())
info(f'Writing results to "{results_path}"')
df.to_csv(results_path, index=False, quoting=csv.QUOTE_MINIMAL)
except Exception as e:
raise click.ClickException(f"run command failed: {e}")
@app.command()
@click.argument("base", **Arg.base)
@click.argument("diff", **Arg.diff)
def compare(
base: Path,
diff: Path,
) -> None:
"""Compare the results of a base and diff benchmark runs."""
info(f'Compare experiment results between "{base}" and "{diff}"')
try:
base_df = pd.read_csv(base, quoting=csv.QUOTE_MINIMAL).agg(
[np.min, np.median, np.max]
)
diff_df = pd.read_csv(diff, quoting=csv.QUOTE_MINIMAL).agg(
[np.min, np.median, np.max]
)
# compute diff/base quotient for all (metric, query) pairs
quot_df = diff_df / base_df
# append average quotient across all queries for each metric
quot_df.insert(0, "Avg", quot_df.mean(axis=1))
# TODO: use styler to color-code the cells
print("base times")
print("----------")
print(base_df.to_string())
print("")
print("diff times")
print("----------")
print(diff_df.to_string())
print("")
print("diff/base ratio")
print("---------------")
print(quot_df.to_string())
except Exception as e:
raise click.ClickException(f"compare command failed: {e}")
# Utility methods
# ---------------
if __name__ == "__main__":
app()
| [
2,
15069,
14633,
1096,
11,
3457,
13,
290,
20420,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
5765,
286,
428,
3788,
318,
21825,
416,
262,
7320,
8090,
13789,
198,
2,
3017,
287,
262,
38559,
24290,
2393,
379,
262,
6808,
286,
428,
16099,
... | 2.314063 | 1,920 |
import numpy as np
import cv2
from tqdm import tqdm
"""
Recovery Function takes a query of frames from a compressed video and returns the original frames of the video,
function needs the compressed videos meta data and returns two lists, one list contains lists of rame sequences in the order
of the indexes passed to the function, the latter contains a flattten version of this list for visualization purposes
Inputs:
- frame_array : List of compressed video frame indexes being queried
- metadata : the metadata of the compressed video
Output:
- final_restruct_output_frames : List of Lists of frames
- final_restruct_output_frames_flat : List of frames
"""
#dummy class meant to simulate video info class
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
37811,
198,
6690,
6560,
15553,
2753,
257,
12405,
286,
13431,
422,
257,
25388,
2008,
290,
5860,
262,
2656,
13431,
286,
2... | 3.192 | 250 |
from .base import BaseTest
from .fixtures import fixtures_wrapper
| [
6738,
764,
8692,
1330,
7308,
14402,
198,
6738,
764,
69,
25506,
1330,
34609,
62,
48553,
198
] | 4.125 | 16 |
from ctypes import *
import math
import random
import os
import cv2
import numpy as np
import time
import darknet
import chcone
import serial
CAM_NUM = 3
DANGER = 150
a = range(-75,-26)
b = range(-26,-19)
c = range(-19,-13)
d = range(-13,-7)
e = range(-7,0)
f = range(0,7)
g = range(7,13)
h = range(13,19)
i = range(19,26)
j = range(26,75)
m = range(-90,-26)
n = range(-26,-12)
o = range(-12,0)
p = range(0,12)
q = range(12,26)
r = range(26,90)
s = serial.Serial('/dev/ttyACM0', 115200)
time.sleep(1.5)
netMain = None
metaMain = None
altNames = None
if __name__ == "__main__":
YOLO()
| [
6738,
269,
19199,
1330,
1635,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
3223,
3262,
198,
11748,
442,
49180,
198,
11748,
11389,
198,
... | 2.225092 | 271 |
#from domains import * | [
2,
6738,
18209,
1330,
1635
] | 4.4 | 5 |
import torch
import torch.nn as nn
import torch.nn.functional as func
from torchsupport.modules.normalization import AdaptiveBatchNorm
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
25439,
198,
198,
6738,
28034,
11284,
13,
18170,
13,
11265,
1634,
1330,
30019,
425,
33,
963,
35393,
198
] | 3.777778 | 36 |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
'''
#------------------------------------------------------------------------------
from iptest import *
from iptest.assert_util import *
skiptest("silverlight")
add_clr_assemblies("fieldtests", "typesamples")
if options.RUN_TESTS: #TODO - bug when generating Pydoc
from Merlin.Testing.FieldTest.Literal import *
from Merlin.Testing.TypeSample import *
types = [
StructWithLiterals,
GenericStructWithLiterals[int],
GenericStructWithLiterals[str],
ClassWithLiterals,
GenericClassWithLiterals[long],
GenericClassWithLiterals[object],
]
for i in range(len(types)):
exec("def test_%s_get_by_instance(): _test_get_by_instance(types[%s])" % (i, i))
exec("def test_%s_get_by_type(): _test_get_by_type(types[%s])" % (i, i))
exec("def test_%s_get_by_descriptor(): _test_get_by_descriptor(types[%s])" % (i, i))
exec("def test_%s_set_by_instance(): _test_set_by_instance(types[%s])" % (i, i))
exec("def test_%s_set_by_type(): _test_set_by_type(types[%s])" % (i, i))
exec("def test_%s_delete_via_type(): _test_delete_via_type(types[%s])" % (i, i))
exec("def test_%s_delete_via_instance(): _test_delete_via_instance(types[%s])" % (i, i))
run_test(__name__)
| [
29113,
29113,
14468,
4242,
2,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
2426,
284,
2846,
290,
3403,
286,
262,
5413,
5094,
13789,
13,
317,
220,
198,
2,
4... | 3.007669 | 652 |
from acondbs.github.ops import update_org_member_lists
##__________________________________________________________________||
##__________________________________________________________________||
| [
6738,
936,
623,
1443,
13,
12567,
13,
2840,
1330,
4296,
62,
2398,
62,
19522,
62,
20713,
198,
198,
2235,
27193,
834,
15886,
198,
198,
2235,
27193,
834,
15886,
198
] | 6.862069 | 29 |
from application import EnvironmentController
from domain.model.environment import EnvironmentRepository
from flask_restx import Namespace, fields, Resource
from flask_restx.reqparse import RequestParser
from .EnvironmentMapperImpl import EnvironmentMapperImpl
from .EnvironmentDTOImpl import EnvironmentDTOImpl
from injector import inject
api = Namespace("Environment", description="Datacenter Environment Functionality")
model = api.model("Environment", {
'environment_name': fields.String
})
@api.route("/")
| [
6738,
3586,
1330,
9344,
22130,
198,
6738,
7386,
13,
19849,
13,
38986,
1330,
9344,
6207,
13264,
198,
6738,
42903,
62,
2118,
87,
1330,
28531,
10223,
11,
7032,
11,
20857,
198,
6738,
42903,
62,
2118,
87,
13,
42180,
29572,
1330,
19390,
46677... | 4.11811 | 127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from communication import Result
from communication import State
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
6946,
1330,
25414,
198,
6738,
6946,
1330,
1812,
198
] | 3.363636 | 33 |
"""Email.Cloud Queue Management
This application allows you to manage the queue of Email.Cloud through a
webapplication.
Author: henk.vanachterberg@broadcom.com
"""
from app import create_app, db
from app.models import GlobalQueue, DomainQueue
from app.api import queryQueue
from random import randrange
app = create_app()
@app.shell_context_processor
@app.cli.command('q')
@app.cli.command('scramble')
| [
37811,
15333,
13,
18839,
4670,
518,
8549,
198,
198,
1212,
3586,
3578,
345,
284,
6687,
262,
16834,
286,
9570,
13,
18839,
832,
257,
198,
12384,
31438,
13,
198,
198,
13838,
25,
30963,
74,
13,
10438,
620,
353,
3900,
31,
36654,
785,
13,
... | 3.377049 | 122 |
# Copyright (c) 2020 Graphcore Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from functools import partial as bind
from optparse import OptionParser
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import numpy as np
import pytest
import tensorflow.compat.v1 as tf
from core.common import upsample
from tensorflow.python import ipu
tf.disable_v2_behavior()
tf.disable_eager_execution()
@pytest.mark.category1
| [
2,
15069,
357,
66,
8,
12131,
29681,
7295,
12052,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
118... | 3.459364 | 283 |
"""Setup the package."""
import os
from setuptools import setup, find_packages
# get the version
version = None
with open(os.path.join('sp_experiment', '__init__.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('\'')
break
if version is None:
raise RuntimeError('Could not determine version')
dependencies = [
'numpy~=1.16.1',
'scipy',
'matplotlib',
'pandas',
'pyopengl',
'pyglet~=1.3.0',
'pillow',
'moviepy',
'lxml',
'openpyxl',
'configobj',
'psychopy==3.0.0',
'pytest',
'pytest-cov',
'pyinstaller',
]
setup(name='sp_experiment',
version=version,
description='Implemetation of the Sampling Paradigm in PsychoPy',
url='http://github.com/sappelhoff/sp_experiment',
author='Stefan Appelhoff',
author_email='stefan.appelhoff@mailbox.org',
license='BSD 3-Clause License',
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: Microsoft :: Windows',
],
packages=find_packages(),
install_requires=dependencies,
package_data={'': ['*.json']},
zip_safe=False)
| [
37811,
40786,
262,
5301,
526,
15931,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
2,
651,
262,
2196,
198,
9641,
796,
6045,
198,
4480,
1280,
7,
418,
13,
6978,
13,
22179,
10786,
2777,
62,
... | 2.31162 | 568 |
import carla_utils as cu
import time
import numpy as np
import carla
if __name__ == "__main__":
client, world, town_map = cu.connect_to_server('127.0.0.1', 2000, timeout=2.0, map_name='None')
vehicle = cu.get_actor(world, 'vehicle.bh.crossbike', 'hero')
c = CapacController(vehicle, 10)
trajectory = {'time':1, 'x':1, 'y':1, 'vx':1, 'vy':1, 'ax':1, 'ay':1, 'a':1}
while True:
a = c.run_step(trajectory)
print(a)
time.sleep(0.1) | [
11748,
1097,
5031,
62,
26791,
355,
18912,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1097,
5031,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5456,
11,
995,
11,... | 2.146018 | 226 |
# pragma pylint: disable=missing-docstring
import json
from shapely.geometry import Point
from mobility_pipeline.lib.voronoi import load_cell
| [
2,
23864,
2611,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
198,
198,
11748,
33918,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6252,
198,
6738,
15873,
62,
79,
541,
4470,
13,
8019,
13,
20867,
261,
23013,
1330,
3440,
62,
3... | 3.222222 | 45 |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ attention.py ]
# Synopsis [ Sequence to sequence attention module for Tacotron ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
######################
# BAHDANAU ATTENTION #
######################
class BahdanauAttention(nn.Module):
"""
Args:
query: (batch, 1, dim) or (batch, dim)
processed_memory: (batch, max_time, dim)
"""
################
# LINEAR LAYER #
################
#####################
# CONVOLUTION LAYER #
#####################
##################
# LOCATION LAYER #
##################
################################
# LOCATION SENSITIVE ATTENTION #
################################
class LocationSensitiveAttention(nn.Module):
"""
Args:
query: (batch, 1, dim) or (batch, dim)
processed_memory: (batch, max_time, dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
"""
#################
# ATTENTION RNN #
#################
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
37811,
17174,
17174,
8412,
4557,
35625,
37811,
198,
2,
220,
220,
9220,
5376,
220,
220,
220,
220,
685,
3241,
13,
9078,
2361,
198,
2,
220,
220,
16065,
24608,
220,
... | 3.443325 | 397 |
import cv2
import threading
| [
11748,
269,
85,
17,
198,
11748,
4704,
278,
628
] | 3.222222 | 9 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipelines for mapreduce library."""
from __future__ import with_statement
import google
from appengine_pipeline.src import pipeline
from appengine_pipeline.src.pipeline import common as pipeline_common
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.ext.mapreduce import output_writers
from google.appengine.ext.mapreduce import base_handler
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import mapper_pipeline
from google.appengine.ext.mapreduce import shuffler
MapperPipeline = mapper_pipeline.MapperPipeline
ShufflePipeline = shuffler.ShufflePipeline
class KeyValueBlobstoreOutputWriter(
output_writers.BlobstoreRecordsOutputWriter):
"""Output writer for KeyValue records files in blobstore."""
class MapPipeline(base_handler.PipelineBase):
"""Runs the map stage of MapReduce.
Iterates over input reader and outputs data into key/value format
for shuffler consumption.
Args:
job_name: mapreduce job name as string.
mapper_spec: specification of map handler function as string.
input_reader_spec: input reader specification as string.
params: mapper and input reader parameters as dict.
shards: number of shards to start as int.
Returns:
list of filenames list sharded by hash code.
"""
class KeyValuesReader(input_readers.RecordsReader):
"""Reader to read KeyValues records files from Files API."""
expand_parameters = True
class ReducePipeline(base_handler.PipelineBase):
"""Runs the reduce stage of MapReduce.
Merge-reads input files and runs reducer function on them.
Args:
job_name: mapreduce job name as string.
reader_spec: specification of reduce function.
output_writer_spec: specification of output write to use with reduce
function.
params: mapper parameters to use as dict.
filenames: list of filenames to reduce.
Returns:
filenames from output writer.
"""
class MapreducePipeline(base_handler.PipelineBase):
"""Pipeline to execute MapReduce jobs.
Args:
job_name: job name as string.
mapper_spec: specification of mapper to use.
reader_spec: specification of reducer to use.
input_reader_spec: specification of input reader to read data from.
output_writer_spec: specification of output writer to save reduce output to.
mapper_params: parameters to use for mapper phase.
reducer_params: parameters to use for reduce phase.
shards: number of shards to use as int.
Returns:
filenames from output writer.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
4343,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.370021 | 954 |
filter(name__startswith='Re', age__lt=40, city__endswith='aulo')
| [
198,
24455,
7,
3672,
834,
9688,
2032,
342,
11639,
3041,
3256,
2479,
834,
2528,
28,
1821,
11,
1748,
834,
437,
2032,
342,
11639,
2518,
78,
11537,
198
] | 2.444444 | 27 |
import warnings
from . import ga
from .mv import Mv
# galgebra 0.5.0
warnings.warn(
"The `galgebra.deprecated` module is deprecated",
DeprecationWarning, stacklevel=2)
################################# MV class for backward compatibility ###################
class MV(Mv):
""" A deprecated version of :class:`galgebra.mv.Mv`. """
@staticmethod
@staticmethod
def setup(basis, metric=None, coords=None, rframe=False, debug=False, curv=(None, None)) -> list:
"""
This function allows a single geometric algebra to be created.
If the function is called more than once the old geometric algebra is
overwritten by the new geometric algebra. The named input ``metric``
is the same as the named input ``g`` in the current version of
*galgebra*. Likewise, ``basis``, ``coords``, and ``debug`` are the same
in the old and current versions of *galgebra* [17]_.
Due to improvements in *sympy* the inputs ``rframe`` and ``curv[1]`` are
no longer required. ``curv[0]`` is the vector function (list or tuple of
scalar functions) of the coordinates required to define a vector manifold.
For compatibility with the old version of *galgebra* if ``curv`` is used
``metric`` should be a orthonormal Euclidean metric of the same
dimension as ``curv[0]``.
It is strongly suggested that one use the new methods of defining a
geometric algebra on a manifold.
.. [17]
If the metric is input as a list or list or lists the object is no
longer quoted (input as a string). For example the old
``metric='[1,1,1]'`` becomes ``metric=[1,1,1]``.
"""
if isinstance(metric, str):
metric = MV.convert_metric(metric)
if curv != (None, None):
MV.GA = ga.Ga(basis, g=None, coords=coords, X=curv[0], debug=debug)
else:
MV.GA = ga.Ga(basis, g=metric, coords=coords, X=curv[0], debug=debug)
MV.I = MV.GA.i
MV.metric = MV.GA.g
if coords is not None:
MV.grad, MV.rgrad = MV.GA.grads()
return list(MV.GA.mv()) + [MV.grad]
else:
return list(MV.GA.mv())
def Fmt(self, fmt=1, title=None) -> None:
"""
``Fmt`` in ``MV`` has inputs identical to ``Fmt`` in ``Mv`` except that
if ``A`` is a multivector then ``A.Fmt(2,'A')`` executes a print
statement from ``MV`` and returns ``None``, while from ``Mv``,
``A.Fmt(2,'A')`` returns a string so that the function is compatible
with use in *ipython notebook*.
"""
print(Mv.Fmt(self, fmt=fmt, title=title))
| [
11748,
14601,
198,
198,
6738,
764,
1330,
31986,
198,
6738,
764,
76,
85,
1330,
337,
85,
198,
198,
2,
9426,
29230,
657,
13,
20,
13,
15,
198,
40539,
654,
13,
40539,
7,
198,
220,
220,
220,
366,
464,
4600,
13528,
29230,
13,
10378,
3102... | 2.454628 | 1,102 |
from EPUIKit.widget.flowlayout import FlowLayout
from EPUIKit.widget.label import Label
from EPUIKit.widget.layout import Layout
from EPUIKit.widget.quadrantslayout import QuadrantsLayout
from EPUIKit.widget.widget import Widget
| [
6738,
412,
5105,
18694,
270,
13,
42655,
13,
2704,
4883,
323,
448,
1330,
27782,
32517,
198,
6738,
412,
5105,
18694,
270,
13,
42655,
13,
18242,
1330,
36052,
198,
6738,
412,
5105,
18694,
270,
13,
42655,
13,
39786,
1330,
47639,
198,
6738,
... | 3.318841 | 69 |
"""Decrypt the keys of a settings JSON file."""
import argparse
import os
from intervention_system.deploy import settings_key_path as default_keyfile_path
from intervention_system.deploy import client_config_cipher_path as default_input_path
from intervention_system.deploy import client_configs_path
from intervention_system.util import config
from intervention_system.tools.config.encrypt_config import default_output_path as default_input_path
default_output_path = os.path.join(client_configs_path, 'settings_decrypted.json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Decrypt a settings file.')
parser.add_argument(
'-i', '--input', type=str, default=default_input_path,
help='Path of the file to decrypt. Default: {}'.format(default_input_path)
)
parser.add_argument(
'-k', '--key', type=str, default=default_keyfile_path,
help='Path to the key file. Default: {}'.format(default_keyfile_path)
)
parser.add_argument(
'-o', '--output', type=str, default=default_output_path,
help='Path to save the decrypted file. Default: {}'.format(default_output_path)
)
args = parser.parse_args()
main(args.input, args.key, args.output)
| [
37811,
10707,
6012,
262,
8251,
286,
257,
6460,
19449,
2393,
526,
15931,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
6738,
9572,
62,
10057,
13,
2934,
1420,
1330,
6460,
62,
2539,
62,
6978,
355,
4277,
62,
2539,
7753,
62,
6978,
... | 2.92723 | 426 |
import argparse
import json
import os
from snapcastr.snapcastr import Snapcastr
from xdg import XDG_CONFIG_HOME
CONFIG_FILE = f"{XDG_CONFIG_HOME}/snapcastr.json"
HOST="0.0.0.0"
PORT=5000
SC_HOST="localhost"
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
11495,
2701,
81,
13,
45380,
2701,
81,
1330,
16026,
2701,
81,
198,
6738,
2124,
67,
70,
1330,
46537,
38,
62,
10943,
16254,
62,
39069,
198,
198,
10943,
16254,
62,
25664,
... | 2.375 | 104 |
import argparse
import os
from Common.load_manager import SourceDataLoadManager
from Common.build_manager import GraphBuilder
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Transform data sources into KGX files. Build neo4j graphs.")
parser.add_argument('-t', '--test_mode', action='store_true', help='Test mode will load a small sample version of the data.')
args = parser.parse_args()
if 'DATA_SERVICES_TEST_MODE' in os.environ:
test_mode_from_env = os.environ['DATA_SERVICES_TEST_MODE']
else:
test_mode_from_env = False
loader_test_mode = args.test_mode or test_mode_from_env
load_manager = SourceDataLoadManager(test_mode=loader_test_mode)
load_manager.start()
graph_builder = GraphBuilder()
graph_builder.build_all_graphs()
| [
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
8070,
13,
2220,
62,
37153,
1330,
8090,
6601,
8912,
13511,
198,
6738,
8070,
13,
11249,
62,
37153,
1330,
29681,
32875,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
103... | 2.876761 | 284 |
from collections import deque
q = deque()
q.append(1)
q.append(2)
q.append(3)
print(q)
print(q.pop())
print(q.pop())
print(q.pop())
| [
6738,
17268,
1330,
390,
4188,
198,
198,
80,
796,
390,
4188,
3419,
198,
80,
13,
33295,
7,
16,
8,
198,
80,
13,
33295,
7,
17,
8,
198,
80,
13,
33295,
7,
18,
8,
198,
198,
4798,
7,
80,
8,
198,
198,
4798,
7,
80,
13,
12924,
28955,
... | 2.177419 | 62 |
'''
Longest Common Prefix
Write a function to find the longest common prefix string amongst an array of strings.
If there is no common prefix, return an empty string ''.
Input: ['flower', 'flow', 'flight']
Output: 'fl'
Input: ['dog', 'racecar', 'car']
Output: ''
Input: ['aa', 'a']
Output: 'a'
=========================================
Many solutions for this problem exist (Divide and Conquer, Trie, etc) but this is the simplest and the fastest one.
Use the first string as LCP and iterate the rest in each step compare it with another one.
Time Complexity: O(N*A) , N = number of strings, A = average chars, or simplest notation O(S) = total number of chars
Space Complexity: O(1)
'''
############
# Solution #
############
###########
# Testing #
###########
# Test 1
# Correct result => 'fl'
print(longest_common_prefix(['flower', 'flow', 'flight']))
# Test 2
# Correct result => ''
print(longest_common_prefix(['dog', 'racecar', 'car']))
# Test 3
# Correct result => 'a'
print(longest_common_prefix(['aa', 'a'])) | [
7061,
6,
198,
14617,
395,
8070,
3771,
13049,
198,
198,
16594,
257,
2163,
284,
1064,
262,
14069,
2219,
21231,
4731,
12077,
281,
7177,
286,
13042,
13,
198,
1532,
612,
318,
645,
2219,
21231,
11,
1441,
281,
6565,
4731,
705,
4458,
198,
198... | 3.222222 | 324 |
from distutils.core import setup, Command
from distutils.command.install_data import install_data
from distutils.dir_util import remove_tree
from distutils import log
from os import path, makedirs, walk, environ
from shutil import copyfile
from subprocess import call
from sys import version_info
if version_info[0] >= 3:
from io import FileIO
file = FileIO
from falias import __version__
environ.update({'PYTHONPATH': 'falias'})
kwargs = {
'name': "Falias",
'version': __version__,
'description': "Lightweight support python library",
'author': "Ondrej Tuma",
'author_email': "mcbig@zeropage.cz",
'url': "http://falias.zeropage.cz/",
'packages': ['falias'],
'data_files': [('share/doc/falias', ['LICENCE', 'README.rst']), ],
'license': "BSD",
'long_description': doc(),
'classifiers': [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Natural Language :: Czech",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules"
],
'cmdclass': {'test': PyTest,
'build_html': build_html,
'clean_html': clean_html,
'install_html': install_html},
}
setup(**kwargs)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
11,
9455,
198,
6738,
1233,
26791,
13,
21812,
13,
17350,
62,
7890,
1330,
2721,
62,
7890,
198,
6738,
1233,
26791,
13,
15908,
62,
22602,
1330,
4781,
62,
21048,
198,
6738,
1233,
26791,
1330,
2604,
... | 2.341722 | 755 |
from ..utils.preprocessing import data_preprocess
from ..utils.api import api_handler
from ..utils.decorators import detect_batch_decorator
@detect_batch_decorator
def facial_localization(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given an image, returns a list of faces found within the image.
For each face, we return a dictionary containing the upper left corner and lower right corner.
If crop is True, the cropped face is included in the dictionary.
Input should be in a numpy ndarray or a filename.
Example usage:
.. code-block:: python
>>> from indicoio import facial_localization
>>> import numpy as np
>>> img = np.zeros([image of a face])
>>> faces = facial_localization(img)
>>> len(faces)
1
:param image: The image to be analyzed.
:type image: filepath or ndarray
:rtype: List of faces (dict) found.
"""
image = data_preprocess(image, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(image, cloud=cloud, api="faciallocalization", url_params=url_params, **kwargs)
| [
6738,
11485,
26791,
13,
3866,
36948,
1330,
1366,
62,
3866,
14681,
198,
6738,
11485,
26791,
13,
15042,
1330,
40391,
62,
30281,
198,
6738,
11485,
26791,
13,
12501,
273,
2024,
1330,
4886,
62,
43501,
62,
12501,
273,
1352,
628,
198,
31,
1525... | 2.961832 | 393 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import web
import json
from bson.objectid import ObjectId
from config import setting
import app_helper
db = setting.db_web
# 病种分类入口(根目录)
url = ('/app2/category')
GEN_SIGN = {
0 : '',
1 : '●',
2 : ' ►',
3 : ' ★',
4 : ' ◆',
}
# -------------------
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
11748,
3992,
198,
11748,
33918,
198,
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
198,
6738,
4566,
1330,
... | 1.856322 | 174 |
import bs4 as bs
import datetime
import urllib.request
from django.views.generic import ListView
from .models import Job
from django.shortcuts import render, redirect
# last = Job.objects.raw('SELECT * FROM joblist_job LIMIT 1')[0]
# jobtime = last.date
# print(jobtime)
# time = datetime.date.today()
# print(time)
# print(last.date == time)
| [
11748,
275,
82,
19,
355,
275,
82,
198,
11748,
4818,
8079,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
198,
6738,
764,
27530,
1330,
15768,
198,
6738,
42625,
14208,
13,
19509,
... | 3 | 116 |