content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'skia_warnings_as_errors': 0,
},
'targets': [
{
'target_name': 'libSkKTX',
'type': 'static_library',
'include_dirs' : [
'../third_party/ktx',
'../include/gpu',
'../include/private',
'../src/core',
'../src/gpu',
'../src/utils',
],
'sources': [
'../third_party/ktx/ktx.cpp',
],
'dependencies': [
'core.gyp:*',
'etc1.gyp:libetc1',
],
'direct_dependent_settings': {
'include_dirs': [
'../third_party/ktx',
],
},
}],
}
| [
2,
15069,
1853,
3012,
3457,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
90,
198,
220,
705,
25641,
2977,
1035... | 2.065672 | 335 |
from abc import ABCMeta, abstractclassmethod
from UserPreferencePredictor.TrainDataMaker import Player
import typing
PlayerList = typing.List[Player]
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
4871,
24396,
198,
6738,
11787,
6719,
4288,
47,
17407,
273,
13,
44077,
6601,
48890,
1330,
7853,
198,
11748,
19720,
198,
198,
14140,
8053,
796,
19720,
13,
8053,
58,
14140,
60,
628,
198
] | 3.825 | 40 |
# Sortarea topologica returneaza o ordonare a nodurilor in asa fel incat niciun nod din lista nu are muchie catre
# un nod care e inaintea lui in lista. Sortarea Topologica merge doar pe grafuri aciclice orientate (DAG - directed acyclic graph)
# Pentru fiecare nod i care are muchie care alt nod j, gasim i inainte lui j in lista din sortarea topologica.
graph = [
[(1, 3), (2, 6)],
[(2, 4), (3, 4), (4, 11)],
[(3, 8), (6, 11)],
[(4, -4), (5, 5), (6, 2)],
[(7, 9)],
[(7, 1)],
[(7, 2)],
[]
]
print(len(graph))
print(graph)
N = 8
sortare = sortare_topologica(graph, N)
# for i in range(len(graph)):
# sortare[i] += 1
print(sortare)
dists = dagShortestPath(graph, N)
print(dists)
| [
2,
33947,
20337,
1353,
928,
3970,
1441,
68,
7056,
267,
2760,
261,
533,
257,
18666,
333,
346,
273,
287,
355,
64,
10756,
753,
265,
9200,
72,
403,
18666,
16278,
1351,
64,
14364,
389,
881,
494,
3797,
260,
201,
198,
2,
555,
18666,
1337,
... | 2.15562 | 347 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Dict, List, Set, TYPE_CHECKING
from tests.integration_tests.base_tests import login
from tests.integration_tests.dashboards.filter_sets.consts import (
DASHBOARD_OWNER_USERNAME,
FILTER_SET_OWNER_USERNAME,
REGULAR_USER,
)
from tests.integration_tests.dashboards.filter_sets.utils import (
call_get_filter_sets,
collect_all_ids,
)
if TYPE_CHECKING:
from flask.testing import FlaskClient
from superset.models.filter_set import FilterSet
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.533693 | 371 |
from typing import *
from functools import reduce
# =========== Aliases and TypeVar ==========
T = TypeVar('T', int, float)
Matrix = List[List[T]]
# =========== Structural Typing ==========
generic([['0']])
generic([[0]])
generic(((0,),))
| [
6738,
19720,
1330,
1635,
198,
6738,
1257,
310,
10141,
1330,
4646,
628,
198,
2,
796,
2559,
855,
12104,
1386,
290,
5994,
19852,
796,
2559,
28,
198,
198,
51,
796,
5994,
19852,
10786,
51,
3256,
493,
11,
12178,
8,
198,
198,
46912,
796,
7... | 2.863636 | 88 |
#Write your code below this line 👇
print(len(input("What is your Name?"))) | [
2,
16594,
534,
2438,
2174,
428,
1627,
50169,
229,
198,
4798,
7,
11925,
7,
15414,
7203,
2061,
318,
534,
6530,
1701,
22305
] | 3.363636 | 22 |
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
| [
198,
2,
15069,
2864,
12,
23344,
406,
8822,
42477,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
1... | 3.732143 | 168 |
"""
cryptography.py
Author: Emma Dunbar
Credit: Geoff Dunbar, Learn Python
Assignment:
Write and submit a program that encrypts and decrypts user data.
See the detailed requirements at https://github.com/HHS-IntroProgramming/Cryptography/blob/master/README.md
"""
associations = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 .,:;'\"/\\<>(){}[]-=_+?!"
while True:
what=input("Enter e to encrypt, d to decrypt, or q to quit: ")
if what=="q":
print("Goodbye!")
break
if what=="e":
m1=list(input("Message: "))
k1=list(input("Key: "))
l1=[]
l2=[]
for char in m1:
num=associations.find(char)
l1=l1+[num]
for char2 in k1:
got=associations.find(char2)
l2=l2+[got]
s=len(l1)/len(l2)+1
l2=int(s)*l2
l3=[]
h=('')
for i in range(0,len(l1)):
j=l1[i]+l2[i]
l3=l3+[j]
for f in l3:
if f>=len(associations):
f=f-len(associations)
e=associations[f]
h=h+e
print(h)
if what=="d":
m2=input("Message: ")
k2=input("Key: ")
l1=[]
l2=[]
for char in m2:
num=associations.find(char)
l1=l1+[num]
for char2 in k2:
got=associations.find(char2)
l2=l2+[got]
s=len(l1)/len(l2)+1
l2=int(s)*l2
l3=[]
h=('')
for i in range(0,len(l1)):
j=l1[i]-l2[i]
l3=l3+[j]
for f in l3:
e=associations[f]
h=h+e
print(h)
if (what!="q") and (what!="e") and (what!="d"):
print("Did not understand command, try again.")
continue | [
37811,
198,
29609,
4867,
13,
9078,
198,
13838,
25,
18966,
5648,
5657,
198,
23690,
25,
24688,
5648,
5657,
11,
14365,
11361,
198,
198,
8021,
16747,
25,
198,
198,
16594,
290,
9199,
257,
1430,
326,
34117,
82,
290,
42797,
82,
2836,
1366,
1... | 1.676966 | 1,068 |
import itertools
from heapq import heappush, heappop
REMOVED = '<removed-element>' # placeholder for a removed element
| [
11748,
340,
861,
10141,
198,
6738,
24575,
80,
1330,
339,
1324,
1530,
11,
339,
1324,
404,
628,
198,
40726,
8874,
1961,
796,
705,
27,
2787,
2668,
12,
30854,
29,
6,
220,
220,
220,
220,
220,
1303,
46076,
329,
257,
4615,
5002,
628
] | 3.02381 | 42 |
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import numpy as np
arr = np.array([[1,2,3],[4,5,6]])
print(arr)
# In[11]:
import numpy as np
arr = np.array([[1,2,3],[4,5,6]])
print("Array is of type: ", type(arr))
print("No. of dimensions: ", arr.ndim)
print("Shape of array: ", arr.shape)
print("Size of array: ", arr.size)
# In[4]:
import numpy as np
a_arr = np.zeros((2,2))
print(a_arr)
b_arr = np.ones((1,2))
print(b_arr)
d_arr = np.eye(2)
print(d_arr)
e_arr = np.random.random((2,2))
print(e_arr)
# In[12]:
import numpy as np
arr1 = np.arange(0, 30, 5)
print ( arr1)
arr2= np.linspace(0, 5, 10)
print ( arr2)
# Reshaping 3X4 array to 2X2X3 array
arr3 = np.array([[1, 2, 3, 4],
[5, 2, 4, 2],
[1, 2, 0, 1]])
newarr = arr3.reshape(2, 2, 3)
print ("Reshaped array:", newarr)
# Flatten array
arr4 = np.array([[1, 2, 3], [4, 5, 6]])
flarr = arr4.flatten()
print ("Fattened array:", flarr)
# In[17]:
import numpy as np
# An exemplar array
arr1 = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11,12],
[12, 13, 14, 15]])
# Slicing array
ans1 = arr1[:1, ::2]
print((ans1))
# Integer array indexing example
ans2 = arr1[[0, 1, 2, 3], [3, 2, 1, 0]]
print ("\nElements at indices (0, 3), (1, 2), (2, 1),"
"(3, 0):\n", ans2)
# boolean array indexing example
cond = arr > 2
ans3 = arr[cond]
print ("Elements greater than 0:", ans3)
# In[19]:
import numpy as np
arr = np.array([1, 2, 5, 3])
# add 1 to every element
print ( arr+1)
# subtract 3 from each element
print (arr-3)
# multiply each element by 10
print ( arr*10)
# square each element
print ( arr**2)
# In[21]:
a = np.array([[1, 2],
[3, 4]])
b = np.array([[5,6],
[7, 8]])
# add arrays
print ( a + b)
# multiply arrays (elementwise multiplication)
print ( a*b)
# In[22]:
import numpy as np
x = np.array([1, 2])
print(x.dtype)
x = np.array([1.0, 2.0])
print(x.dtype)
x = np.array([1, 2], dtype=np.int64)
print(x.dtype)
# In[23]:
import numpy as np
a = np.array([[1,2],[3,4]])
b = np.array([[5,6],[7,8]])
c = np.array([9,10])
d = np.array([11, 12])
# Inner product of vectors
print(a.dot(b))
print(np.dot(a, b))
# Matrix / vector product; both produce the rank 1 array [29 67]
print(c.dot(d))
print(np.dot(c,d))
# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
# [43 50]]
print(a.dot(c))
print(np.dot(b,d))
# In[24]:
import numpy as np
from matplotlib import pylot as plt
x=np.arrange(1,11)
y=2*x+5
plt.title("Matplotlib demo")
plt.xlabel("x axis caption")
plt.ylabel("y axis caption")
plt.plot(x,y,"ob")
plt.show()
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
24,
5974,
628,
198,
11748,
299,
32152,
355,
45941,
198,
198,
3258,
796,
45941,
13,
18747,
26933,
58,
16,
11,
17,
11,
18,
384... | 1.870025 | 1,608 |
# -*- coding: utf-8 -*-
from asyncio import TimeoutError, get_event_loop
from concurrent.futures._base import Error
from inspect import isawaitable
from typing import Callable, Optional, Union
from aiohttp import ClientError, ClientSession
from ._py3_patch import (NewResponse, NotSet, _ensure_can_be_await,
_exhaust_simple_coro, logger)
from .exceptions import FailureException, ValidationError
class Requests:
"""Lite wrapper for aiohttp for better performance.
Removes the frequency_controller & sync usage (task.x) & compatible args of requests for good performance, but remains retry / callback / referer_info.
referer_info: sometimes used for callback.
"""
@property
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
30351,
952,
1330,
3862,
448,
12331,
11,
651,
62,
15596,
62,
26268,
198,
6738,
24580,
13,
69,
315,
942,
13557,
8692,
1330,
13047,
198,
6738,
10104,
1330,
318,
... | 3.142857 | 231 |
# Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import List
# 1299. Replace Elements with Greatest Element on Right Side
# https://leetcode.com/problems/replace-elements-with-greatest-element-on-right-side/
# Given an array arr, replace every element in that array with the greatest element among the elements to its right,
# and replace the last element with -1.
#
# After doing so, return the array.
| [
2,
15069,
357,
66,
8,
33448,
532,
1944,
11,
5045,
333,
22323,
4914,
78,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
7... | 4.018939 | 264 |
""" Wrap items in an immutable, simplified interface """
if False: # type checking
from typing import *
I = TypeVar("I", bound="Item")
import collections
class Item(collections.Mapping):
""" Wrap objects in a consistent traversal interface. """
__slots__ = ("__item", "__parent", "__visitors", "__children_names")
# ------------------------------------------
# Internals
# ------------------------------------------
@property
def item(self): # type: (Any) -> Any
""" Access interal object """
return self.__item
@property
def parent(self): # type: (Any) -> I
""" Get previous object """
return self.__parent
# ------------------------------------------
# Building
# ------------------------------------------
@staticmethod
def is_this_type(item, parent): # type: (Any, Optional[I]) -> bool
""" Check if the passed in object represents the Object """
return False
@classmethod
def wrap(
cls, visitors, item, parent=None
): # type: (Sequence[Type[I]], Any, Optional[I]) -> I
""" Create an instance of Item, wrapping the provided object """
for visitor in visitors:
if visitor.is_this_type(item, parent):
return visitor(visitors, item, parent)
raise TypeError("Unhandled item {}".format(item))
# ------------------------------------------
# Traversing
# ------------------------------------------
def get_child(self, name): # type: (str) -> Any
""" Return a child of this item """
raise KeyError("Child {} not in {}".format(name, self.item))
def get_children_names(self): # type: () -> Sequence[str]
""" Return the names of all children in this item """
return []
# ------------------------------------------
# Plumbing
# ------------------------------------------
| [
37811,
41028,
3709,
287,
281,
40139,
11,
27009,
7071,
37227,
198,
198,
361,
10352,
25,
220,
1303,
2099,
10627,
198,
220,
220,
220,
422,
19720,
1330,
1635,
628,
220,
220,
220,
314,
796,
5994,
19852,
7203,
40,
1600,
5421,
2625,
7449,
49... | 3.073132 | 629 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
"""
from __future__ import absolute_import, division, print_function
from .core import (InconsistentTableError,
ParameterError,
NoType, StrType, NumType, FloatType, IntType, AllType,
Column,
BaseInputter, ContinuationLinesInputter,
BaseHeader,
BaseData,
BaseOutputter, TableOutputter,
BaseReader,
BaseSplitter, DefaultSplitter, WhitespaceSplitter,
convert_numpy,
masked
)
from .basic import (Basic, BasicHeader, BasicData,
Rdb,
Csv,
Tab,
NoHeader,
CommentedHeader)
from .fastbasic import (FastBasic,
FastCsv,
FastTab,
FastNoHeader,
FastCommentedHeader,
FastRdb)
from .cds import Cds
from .ecsv import Ecsv
from .latex import Latex, AASTex, latexdicts
from .html import HTML
from .ipac import Ipac
from .daophot import Daophot
from .sextractor import SExtractor
from .fixedwidth import (FixedWidth, FixedWidthNoHeader,
FixedWidthTwoLine, FixedWidthSplitter,
FixedWidthHeader, FixedWidthData)
from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace)
from . import connect
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
37811,
1052,
1070,
27339,
37101,
3084,
9173,
290,
6260,
13,
198,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
... | 1.947942 | 826 |
x = "123435"
print(x.count("3"))
| [
87,
796,
366,
1065,
2682,
2327,
1,
198,
4798,
7,
87,
13,
9127,
7203,
18,
48774,
198
] | 1.941176 | 17 |
import re
import time
import pprint
import logging
import contextlib
import multiprocessing
from pathlib import Path
from logging import info, debug, warning
from collections import defaultdict, Counter
from iproute2_parse import Iproute2_parse
from netstat_parse import Netstat_parse
from system_files_parse import System_files_parse
from system_commands import System_commands
from k8s_parse import K8s_parse
from pcap_parse import Pcap_parse
from anonymize import Anonymize
PROGRAM_VERSION = '0.1'
PROGRAM_HEADER = 'netmap v%s' % PROGRAM_VERSION
| [
11748,
302,
198,
11748,
640,
198,
11748,
279,
4798,
198,
11748,
18931,
198,
11748,
4732,
8019,
198,
11748,
18540,
305,
919,
278,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
18931,
1330,
7508,
11,
14257,
11,
6509,
198,
6738,
17268,
13... | 3.44375 | 160 |
from hexrd.wppf.WPPF import LeBail
from hexrd.wppf.WPPF import Rietveld | [
6738,
17910,
4372,
13,
86,
381,
69,
13,
54,
10246,
37,
1330,
1004,
33,
603,
198,
6738,
17910,
4372,
13,
86,
381,
69,
13,
54,
10246,
37,
1330,
371,
1155,
303,
335
] | 2.21875 | 32 |
import text.util
(X,terms,doc_ids,tfids, docs) = text.util.load_corpus( "data/month3.pkl" )
from gensim.corpora.dictionary import Dictionary
from gensim.models.nmf import Nmf
from gensim.models import CoherenceModel
from prettytable import PrettyTable
import itertools
import networkx as nx
import matplotlib.pyplot as plt
x = PrettyTable()
common_dictionary = Dictionary(docs)
common_corpus = [common_dictionary.doc2bow(text) for text in docs]
# for k in range(4, 10):
# nmf = Nmf(common_corpus, num_topics=k)
# c_model = CoherenceModel(model=nmf, corpus=common_corpus, dictionary=common_dictionary, texts=docs, coherence='c_v')
# print(k, c_model.get_coherence())
# x = PrettyTable()
# x.field_names = [''] + [ "t" + str(i+1) for i in range(0,10)]
# for i in range(0,k):
# x.add_row([i] + [ common_dictionary[term] for (term, w) in nmf.get_topic_terms(i)])
# print(x)
from gensim.matutils import jaccard
import random
nmf = Nmf(common_corpus, num_topics=9)
texts = random.choices(docs, k=20)
texts = [docs[0], docs[20], docs[80], docs[90], docs[200], docs[210]] #[docs[i] for i in range(0, len(docs), 30)]
colors = ["skyblue", "pink", "red", "green", "yellow", "cyan", "purple", "magenta", "orange", "blue"]
G = nx.Graph()
for i, _ in enumerate(texts):
G.add_node(i)
for (i1, i2) in itertools.combinations(range(len(texts)), 2):
bow1, bow2 = texts[i1], texts[i2]
distance = jaccard(bow1, bow2)
if(distance > 0.001):
G.add_edge(i1, i2, weight=1/distance)
pos = nx.spring_layout(G)
threshold = 1.04
elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] > threshold]
esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <= threshold]
node_colors = [get_node_color(i) for (i, _) in enumerate(texts)]
nx.draw_networkx_nodes(G, pos, node_size=700, node_color=node_colors)
nx.draw_networkx_edges(G,pos,edgelist=elarge, width=2)
nx.draw_networkx_edges(G,pos,edgelist=esmall, width=2, alpha=0.2, edge_color='b', style='dashed')
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.show() | [
11748,
2420,
13,
22602,
198,
7,
55,
11,
38707,
11,
15390,
62,
2340,
11,
27110,
2340,
11,
34165,
8,
796,
2420,
13,
22602,
13,
2220,
62,
10215,
79,
385,
7,
366,
7890,
14,
8424,
18,
13,
79,
41582,
1,
1267,
198,
198,
6738,
308,
641,... | 2.292709 | 919 |
import sys
import pandas as pd
if len(sys.argv) == 1:
print('Set arg n, like "python ch02/ans15.py 5"')
else:
n = int(sys.argv[1])
df = pd.read_csv('ch02/popular-names.txt', sep='\t', header=None)
nrow = -(-len(df) // n)
for i in range(n):
df.loc[nrow * i:nrow * (i + 1)].to_csv(f'ch02/ans16_{i}', sep='\t', index=False, header=None)
| [
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
6624,
352,
25,
198,
220,
220,
220,
3601,
10786,
7248,
1822,
299,
11,
588,
366,
29412,
442,
2999,
14,
504,
1314,
13,
9078,
642,
1,
... | 2.085714 | 175 |
from __future__ import division
import csv
from percept.conf.base import settings
from percept.utils.input import DataFormats
from percept.tests.framework import CSVInputTester
from percept.datahandlers.inputs import BaseInput
from percept.utils.models import get_namespace
import os
from itertools import chain
import logging
import json
import re
import pandas as pd
import subprocess
from pandas.io import sql
import sqlite3
import json
import requests
import subprocess
log = logging.getLogger(__name__)
class SenateInput(BaseInput):
"""
Extends baseinput to read simpsons scripts
"""
input_format = SenateFormats.mjson
help_text = "Read in music links data."
namespace = get_namespace(__module__)
def read_input(self, mfile, has_header=True):
"""
directory is a path to a directory with multiple csv files
"""
mjson= json.load(open(mfile))
for m in mjson:
m['ltype'] = m['ltype'].split("?")[0]
ltypes = list(set([m['ltype'] for m in mjson]))
for l in ltypes:
jp = join_path(settings.MUSIC_PATH,l)
if not os.path.isdir(jp):
os.mkdir(jp)
fpaths = []
for m in mjson:
fname = m['link'].split("/")[-1]
fpath = join_path(join_path(settings.MUSIC_PATH,m['ltype']),fname)
try:
if not os.path.isfile(fpath):
r = requests.get(m['link'])
f = open(fpath, 'wb')
f.write(r.content)
f.close()
fpaths.append({'type' : m['ltype'], 'path' : fpath})
except Exception:
log.exception("Could not get music file.")
for p in fpaths:
newfile = p['path'][:-4] + ".ogg"
if not os.path.isfile(newfile):
frommp3 = subprocess.Popen(['mpg123', '-w', '-', p['path']], stdout=subprocess.PIPE)
toogg = subprocess.Popen(['oggenc', '-'], stdin=frommp3.stdout, stdout=subprocess.PIPE)
with open(newfile, 'wb') as outfile:
while True:
data = toogg.stdout.read(1024 * 100)
if not data:
break
outfile.write(data)
p['newpath'] = newfile
self.data = fpaths | [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
269,
21370,
198,
6738,
34953,
13,
10414,
13,
8692,
1330,
6460,
198,
6738,
34953,
13,
26791,
13,
15414,
1330,
6060,
8479,
1381,
198,
6738,
34953,
13,
41989,
13,
30604,
1330,
44189,
20560,
... | 2.037768 | 1,165 |
from python_slack.slackobjects.base import SlackObject, SlackObjectDict
from python_slack.slackobjects.timeutils import Timestamp
| [
6738,
21015,
62,
6649,
441,
13,
6649,
441,
48205,
13,
8692,
1330,
36256,
10267,
11,
36256,
10267,
35,
713,
198,
6738,
21015,
62,
6649,
441,
13,
6649,
441,
48205,
13,
2435,
26791,
1330,
5045,
27823,
198,
220,
220,
220,
220,
198
] | 3.292683 | 41 |
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 200
LOOP_BW_MIN_EXP, LOOP_BW_MAX_EXP = -6, 0.0
GAIN_MU_MIN_EXP, GAIN_MU_MAX_EXP = -6, -0.301
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'const_rate', 5)
DEFAULT_WIN_SIZE = (500, 400)
DEFAULT_CONST_SIZE = gr.prefs().get_long('wxgui', 'const_size', 2048)
CONST_PLOT_COLOR_SPEC = (0, 0, 1)
MARKER_TYPES = (
('Dot Small', 1.0),
('Dot Medium', 2.0),
('Dot Large', 3.0),
('Line Link', None),
)
DEFAULT_MARKER_TYPE = 2.0
##################################################
# Constellation window control panel
##################################################
class control_panel(wx.Panel):
"""
A control panel with wx widgits to control the plotter.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#loop_bw
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Loop Bandwidth',
converter=forms.float_converter(),
ps=parent, key=LOOP_BW_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=LOOP_BW_MIN_EXP,
max_exp=LOOP_BW_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=LOOP_BW_KEY,
)
#gain_mu
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Gain Mu',
converter=forms.float_converter(),
ps=parent, key=GAIN_MU_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=GAIN_MU_MIN_EXP,
max_exp=GAIN_MU_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=GAIN_MU_KEY,
)
#marker
control_box.AddStretchSpacer()
forms.drop_down(
sizer=control_box, parent=self,
ps=parent, key=MARKER_KEY, label='Marker',
choices=map(lambda x: x[1], MARKER_TYPES),
labels=map(lambda x: x[0], MARKER_TYPES),
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Constellation window with plotter and control panel
##################################################
| [
2,
198,
2,
15069,
3648,
3232,
10442,
5693,
11,
3457,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
22961,
8829,
198,
2,
198,
2,
22961,
8829,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,... | 2.729955 | 1,322 |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 14:08:02 2017
@author: Elizabeth
"""
from olivine.SanCarlos import SanCarlos_spectra as SC
from pynams import styles
import olivine
high_ending = olivine.high_ending
low_ending = olivine.low_ending
#%% Range of 3 baselines for initial concentration estimates from SC1-1
spec = SC.SC_untreated_Ea
spec.make_baseline(curvature=0.04)
fig, ax = spec.plot_showbaseline()
fig.set_size_inches(14, 14)
spec.save_baseline()
spec.make_baseline(curvature=0.06)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=low_ending)
spec.make_baseline(curvature=-0.01, wn_low=3500, wn_high=3650)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=high_ending)
spec = SC.SC_untreated_Eb
spec.make_baseline(curvature=0.025, abs_smear_high=10)
fig, ax = spec.plot_showbaseline()
fig.set_size_inches(14, 14)
spec.save_baseline()
spec.make_baseline(force_through_wn=3350, abs_smear_high=10)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=high_ending)
spec.make_baseline(curvature=0.04, abs_smear_high=10)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=low_ending)
spec = SC.SC_untreated_Ec
spec.make_baseline(curvature=0.075, abs_smear_high=10, wn_high=3750)
fig, ax = spec.plot_showbaseline()
fig.set_size_inches(14, 14)
spec.save_baseline()
spec.make_baseline(curvature=0.09, abs_smear_high=10, wn_high=3800)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=low_ending)
spec.make_baseline(curvature=0.05, abs_smear_high=10)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=high_ending)
#%% final - SC1-2 after dehydration
spec = SC.SC_final_averaged
spec.make_baseline(curvature=0.04)
fig, ax = spec.plot_showbaseline()
fig.set_size_inches(14, 14)
spec.save_baseline()
spec.make_baseline(curvature=0.06)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=low_ending)
spec.make_baseline(force_through_wn=3550, wn_low=3350, wn_high=3650)
spec.plot_showbaseline(axes=ax, style_base=styles.style_3)
spec.save_baseline(baseline_ending=high_ending)
#%% SC1-7 hydrated
wb = SC.wb_1000C_SC1_7
spec7 = SC.spec7
init = SC.SC_untreated_Ea
fig, ax = init.plot_spectrum(style={'color':'r', 'linewidth':3}, offset=0.04)
baseline1 = {'abs_smear_low':10, 'abs_smear_high':10, 'wn_low':3100,
'curvature':0.075}
spec7.make_baseline(**baseline1)
spec7.save_baseline(folder=SC.FTIR_file_location)
spec7.plot_showbaseline(axes=ax)
baseline2 = {'abs_smear_low':10, 'abs_smear_high':10, 'wn_low':3100,
'curvature':0.09}
spec7.make_baseline(**baseline2)
spec7.save_baseline(baseline_ending=low_ending, folder=SC.FTIR_file_location)
spec7.plot_showbaseline(axes=ax)
baseline3 = {'abs_smear_low':10, 'abs_smear_high':10, 'wn_low':3200}
spec7.make_baseline(**baseline3)
spec7.save_baseline(baseline_ending=high_ending, folder=SC.FTIR_file_location)
spec7.plot_showbaseline(axes=ax)
wb.make_baselines(**baseline1)
wb.save_baselines()
wb.make_baselines(**baseline2)
wb.save_baselines(baseline_ending=low_ending)
wb.make_baselines(**baseline3)
wb.save_baselines(baseline_ending=high_ending)
#%% SC1-2 hydrated and dehydrated
spec2 = SC.spec2
baseline = {'abs_smear_high':10, 'wn_low':3200, 'curvature':0.05}
spec2.make_baseline(**baseline)
spec2.save_baseline()
baseline2 = {'wn_low':3400}
spec2.make_baseline(**baseline2)
spec2.save_baseline(baseline_ending=high_ending)
baseline3 = {'abs_smear_high':10, 'wn_low':3200, 'curvature':0.07}
spec2.make_baseline(**baseline3)
spec2.save_baseline(baseline_ending=low_ending)
wblist = [SC.wb_800C_hyd, SC.wb_800C_1hr, SC.wb_800C_3hr, SC.wb_800C_7hr,
SC.wb_800C_13hr, SC.wb_800C_19hr, SC.wb_800C_43hr, SC.wb_800C_68hr]
for wb in wblist:
wb.make_baselines(**baseline)
wb.save_baselines()
wb.make_baselines(**baseline2)
wb.save_baselines(baseline_ending=high_ending)
wb.make_baselines(**baseline3)
wb.save_baselines(baseline_ending=low_ending)
specs = [SC.wb_800C_3hr.profiles[0].spectra[0],
SC.wb_800C_13hr.profiles[0].spectra[0],
# SC.wb_800C_13hr.profiles[0].spectra[1],
SC.wb_800C_13hr.profiles[2].spectra[0],
SC.wb_800C_19hr.profiles[2].spectra[0],
SC.wb_800C_19hr.profiles[2].spectra[1],
SC.wb_800C_19hr.profiles[2].spectra[2],
SC.wb_800C_19hr.profiles[2].spectra[3],
SC.wb_800C_43hr.profiles[1].spectra[0],
SC.wb_800C_43hr.profiles[1].spectra[1],
SC.wb_800C_43hr.profiles[1].spectra[2],
SC.wb_800C_43hr.profiles[2].spectra[0],
SC.wb_800C_43hr.profiles[2].spectra[1],
SC.wb_800C_68hr.profiles[2].spectra[-1],
SC.wb_800C_68hr.profiles[2].spectra[-2],
SC.wb_800C_68hr.profiles[2].spectra[-3],
SC.wb_800C_68hr.profiles[2].spectra[-4]]
for spec in specs:
spec.get_baseline(baseline_ending=low_ending)
spec.save_baseline()
# spec.plot_showbaseline() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
1526,
838,
1478,
25,
2919,
25,
2999,
2177,
198,
198,
31,
9800,
25,
10674,
198,
37811,
198,
198,
6738,
267,
16017,
500,
13,
15017,
26886,
... | 2.184367 | 2,354 |
class Noble():
"""
Time Limit Exceeded. Your submission didn't complete in the allocated time limit.
"""
def noble_integer(self, A):
"""
"""
# A = [3, 2, 1, 3]
noble = -1
for i in set(A):
# print(i)
n = 0
for j in A:
if i < j:
n = n + 1
if i == n:
noble = 1
break
return noble
if __name__ == '__main__':
main()
| [
4871,
20833,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3862,
27272,
1475,
2707,
276,
13,
3406,
14498,
1422,
470,
1844,
287,
262,
19171,
640,
4179,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
825,
15581,
62,
41433,
... | 1.721649 | 291 |
### ServiceWeb
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from tastypie.serializers import Serializer
############
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from tastypie.http import HttpUnauthorized, HttpForbidden, HttpResponse
from django.conf.urls import url
from tastypie.utils import trailing_slash
#from django.contrib.auth.hashers import make_password, HASHERS
### Models
from accounts.models import *
class SectorTestResource(ModelResource):
""" Modelador Tabla """
"""Deserialize for multipart Data"""
""" Create """
#def obj_create(self, bundle, **kwargs):
# return super(SectorTestResource, self).obj_create(bundle, user=bundle.request.user)
""" Update """
class UserTestResource(ModelResource):
""" Modelador Tabla """
"""Deserialize for multipart Data"""
""" Include login in URL """
""" Function Login """
class SectorResource(ModelResource):
""" Modelador Tabla """
class UserResource(ModelResource):
""" Modelador User """
""" Deserialize for Content-type """
""" Include login in URL """
""" Function Login """
class SchoolResource(ModelResource):
""" FK """
sector = fields.ForeignKey(SectorResource, attribute='sector', null=True, full=True)
""" Modelador School """
################################################################################
### ANDROID
################################################################################
class RequirementResource(ModelResource):
""" FK """
school = fields.ForeignKey(SchoolResource, attribute='school', null=True, full=True)
user = fields.ForeignKey(UserResource, attribute='user', null=True, full=True)
""" Modelador Tabla """
""" Deserialize for Content-type"""
""" Update """
class VisitResource(ModelResource):
""" FK """
requirement = fields.ForeignKey(RequirementResource, attribute='requirement', null=True, full=True)
user = fields.ForeignKey(UserResource, attribute='user', null=True, full=True)
""" Modelador Tabla """
""" Deserialize for Content-type """
""" Update """
class TechnicalFormResource(ModelResource):
""" FK """
visit = fields.ForeignKey(VisitResource, attribute='visit', null=True, full=True)
""" Modelador Tabla """
""" Deserialize for Content-type """
""" Update """
class PedagogicalFormResource(ModelResource):
""" FK """
visit = fields.ForeignKey(VisitResource, attribute='visit', null=True, full=True)
""" Modelador Tabla """
""" Deserialize for Content-type"""
""" Update """
| [
21017,
4809,
13908,
198,
6738,
14854,
4464,
494,
13,
37540,
1330,
9104,
26198,
11,
11096,
11,
11096,
62,
54,
10554,
62,
16448,
18421,
198,
6738,
14854,
4464,
494,
1330,
7032,
198,
6738,
14854,
4464,
494,
13,
41299,
3299,
1330,
5949,
72,... | 3.438339 | 819 |
"""ListBox workaround.
WxFormBuilder on macOS currently freezes up when using a ListBox.
So to sidestep this issue, we will use a custom control, that is
actually just a ListBox. This way it doesn't try to render a live
preview of a ListBox and put us in an endless cycle of pain.
Not sure how ListBox behaves on other platforms.
"""
from __future__ import unicode_literals
import wx
class ListBox(wx.ListBox):
"""ListBox workaround."""
def __init__(self, parent, wx_id):
"""Initialize."""
wx.ListBox.__init__(self, parent, wx_id, style=wx.LB_SINGLE)
| [
37811,
8053,
14253,
46513,
13,
198,
198,
54,
87,
8479,
32875,
319,
40017,
3058,
44389,
510,
618,
1262,
257,
7343,
14253,
13,
198,
2396,
284,
9785,
395,
538,
428,
2071,
11,
356,
481,
779,
257,
2183,
1630,
11,
326,
318,
198,
37739,
65... | 3.005181 | 193 |
import src.tnet as tnet
import src.CARS as cars
import numpy as np
import copy
from src.utils import *
import matplotlib as mpl
from matplotlib import rc
import matplotlib.pyplot as plt
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('text', usetex=True)
netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('EMA', experiment_name='EMA_penRate_comparison-'+'REB')
#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('NYC_Uber_small', experiment_name='NYC_Uber_small_penRate_comparison')
#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('NYC_Uber_small_1', experiment_name='NYC_Uber_small_1_penRate_comparison-REB')
#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('Anaheim', experiment_name='Anaheim_test_CARSn')
#netFile, gFile, flowFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('Barcelons', experiment_name='Barcelona_buildNet')
#netFile, gFile, flowFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('ChicagoSketch', experiment_name='ChicagoSketch')
#netFile, gFile, flowFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('Sydeny', experiment_name='Sydeny')
demand_multiplier = list(np.linspace(0.8,1.8,2))
demand_multiplier = [1]
'''
print('---- solving NLP problem to set up a base ---')
real_obj = []
for g_multi in demand_multiplier:
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)
tNet.build_supergraph(walk_multiplier=1)
pedestrian = [(u, v) for (u, v, d) in tNet.G_supergraph.edges(data=True) if d['type'] == 'p']
connector = [(u, v) for (u, v, d) in tNet.G_supergraph.edges(data=True) if d['type'] == 'f']
g_per = tnet.perturbDemandConstant(tNet.g, g_multi)
tNet.set_g(g_per)
cars.solve_social_Julia(tNet, exogenous_G=False)
print('\t solve for g_multiplier = ' + str(round(g_multi,2)))
socialObj = tnet.get_totalTravelTime(tNet.G_supergraph, fcoeffs)
real_obj.append(socialObj)
print(socialObj)
'''
n = [2+i for i in range(4)]
print("\ntestCars progressBar:")
progBar = progressBar(len(n)*2*len(demand_multiplier))
progBar.set()
CARS = {}
for i in n:
CARS[i] = {}
for g_multi in demand_multiplier:
for linear in [True, False]:
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)
tNet.build_supergraph(walk_multiplier=1)
pedestrian = [(u, v) for (u, v, d) in tNet.G_supergraph.edges(data=True) if d['type'] == 'p']
connector = [(u, v) for (u, v, d) in tNet.G_supergraph.edges(data=True) if d['type'] == 'f']
g_per = tnet.perturbDemandConstant(tNet.g, g_multi)
tNet.set_g(g_per)
tNet, runtime, od_flows = cars.solve_CARSn(tNet, fcoeffs=fcoeffs, n=i, exogenous_G=False, rebalancing=False, linear=linear, method=1)
CARS2obj = tnet.get_totalTravelTime(tNet.G_supergraph, fcoeffs)
CARS[i][linear] = (CARS2obj-1630.1380990494615)/1630.1380990494615*100
progBar.tic()
del tNet
fig, ax = plt.subplots(figsize=(5,2))
ax.plot(n, [v[True] for k,v in CARS.items()], label = 'LP')
ax.plot(n, [v[False] for k,v in CARS.items()], label = 'QP')
ax.set_xlabel('n')
ax.set_ylabel('% deviation from NLP')
ax.set_xlim([n[0], n[-1]])
ax.legend(framealpha=1)
ax.grid(True)
#plt.tight_layout()
plt.show()
| [
11748,
12351,
13,
83,
3262,
355,
256,
3262,
198,
11748,
12351,
13,
20034,
50,
355,
5006,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4866,
198,
6738,
12351,
13,
26791,
1330,
1635,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
... | 2.254021 | 1,492 |
import os
import discord
import time
import logging
import json
from discord.ext import commands
from discord import Game, Embed, Color, Status, ChannelType
from random import randint, sample
from discord.ext.commands import cooldown
from os import path
#Logging
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='logs.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
if path.exists("config.json") == False:
with open('config.json', 'w') as configout:
json.dump({
"token": "Token goes here",
"prefix": "!",
"owner": 350765965278969860,
"danbooru_username": "",
"danbooru_key": ""
}, configout)
print("[INFO] config.json generated!!")
quit()
else:
with open("config.json") as f:
config = json.load(f)
# Creating bot instance
bot = commands.Bot(command_prefix=config.get('prefix'), self_bot=False, owner_id=config.get('owner'), case_insensitive=True, help_command=None)
#Loaading cogs
if __name__ == '__main__':
for extension in os.listdir("cogs"):
if extension == "__pycache__":
pass
else:
bot.load_extension("cogs."+extension[:-3])
#listeners
@bot.event
#Message on error event
@bot.event
# Authentication
if config.get('token') == "Token goes here":
print("[ERROR] Change token in config!")
elif config.get('token') == "":
print("[ERROR] No token present!")
else:
print("[INFO] Starting up and logging in...")
bot.run(config.get('token'), bot=True, reconnect=True)
| [
11748,
28686,
198,
11748,
36446,
198,
11748,
640,
198,
11748,
18931,
198,
11748,
33918,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
1330,
3776,
11,
13302,
276,
11,
5315,
11,
12678,
11,
11102,
6030,
198,
6738,
4738,
1330,
4... | 2.626563 | 640 |
#!/usr/bin/env python
import os
import urllib
import requests
import json
import redis
import uuid
from flask import Flask, g, request, redirect, url_for, render_template, jsonify, make_response
application = Flask(__name__)
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
API_ROOT = 'https://bbs.net9.org:8080'
application.secret_key = os.urandom(24)
@application.before_request
@application.route('/auth')
@application.route('/', methods=['GET', 'POST'])
@application.route('/config', methods=['GET', 'POST'])
if __name__ == '__main__':
application.debug = True
application.run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
2266,
271,
198,
11748,
334,
27112,
198,
6738,
42903,
1330,
46947,
11,
308,
11,
2581,
11,
18941,... | 2.90566 | 212 |
import os
from dateutil import parser as date_parser
from sqlalchemy import Column, Text, Integer, DateTime, ARRAY, Float, func
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from definitions import ROOT_DIR
from parser import Parser
engine_addr = 'postgresql+psycopg2://postgres:password@/iot_tweet?host=/cloudsql/iot-tweet:europe-west3:main-instance'
# engine_addr = 'postgresql+psycopg2://postgres:password@localhost:5431/iot_tweet'
engine = create_engine(engine_addr, echo=True)
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
corpus_path = os.path.join(ROOT_DIR, 'corpus/iot-tweets-2009-2016-completv3.tsv')
parser = Parser()
parser.load_w2v_model()
print('ok')
corpus = open(corpus_path, 'r', encoding='utf-8')
i = 0
corpus.readline()
for line in corpus:
print(line)
parts = line[:-1].split('\t')
cleaned_tweet = parser.clean_tweet(parts[-6])
urls = parts[5:-6]
t = Tweet(
id=int(parts[0]),
sentiment=parts[1],
topic_id=(None if parts[2] == 'None' else int(parts[2])),
country=parts[3],
gender=parts[4],
urls=' '.join(urls),
text=parts[-6],
user_id=(int(parts[-5]) if parts[-5] != '' else None),
user_name=parts[-4][1:-1],
date=(date_parser.parse(parts[-3][1:-1]) if parts[-3][1:-1] != '' else None),
hashtags=parts[-2],
indication=parts[-1],
cleaned_text=cleaned_tweet,
vector=parser.tweet2vec(cleaned_tweet)
)
session.add(t)
if i % 1000 == 0:
print('writing', i)
session.commit()
i += 1
corpus.close()
session.commit()
| [
11748,
28686,
198,
198,
6738,
3128,
22602,
1330,
30751,
355,
3128,
62,
48610,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
8255,
11,
34142,
11,
7536,
7575,
11,
5923,
30631,
11,
48436,
11,
25439,
198,
6738,
44161,
282,
26599,
1330,
225... | 2.504688 | 640 |
# Testing pandas.makes_up
import utipy as ut
import numpy as np
import pandas as pd
| [
2,
23983,
19798,
292,
13,
49123,
62,
929,
198,
198,
11748,
3384,
541,
88,
355,
3384,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
628,
628
] | 2.83871 | 31 |
import os
import webbrowser
import slicer
from SlicerDevelopmentToolboxUtils.mixins import ModuleWidgetMixin, ModuleLogicMixin
from SlicerPIRADSWidgets.ProstateSectorMapDialog import ProstateSectorMapDialog
| [
11748,
28686,
198,
11748,
3992,
40259,
198,
198,
11748,
14369,
263,
198,
198,
6738,
311,
677,
263,
41206,
25391,
3524,
18274,
4487,
13,
19816,
1040,
1330,
19937,
38300,
35608,
259,
11,
19937,
11187,
291,
35608,
259,
198,
198,
6738,
311,
... | 3.333333 | 63 |
import pygame as pg
from .. import tools
'''este codigo vai auxiliar nas ações do menu'''
| [
198,
198,
11748,
12972,
6057,
355,
23241,
198,
6738,
11485,
1330,
4899,
198,
7061,
6,
29872,
14873,
14031,
410,
1872,
27506,
4797,
25221,
257,
16175,
127,
113,
274,
466,
6859,
7061,
6,
198
] | 2.787879 | 33 |
import os
consumer_key = os.environ.get("consumer_key")
consumer_secret = os.environ.get("consumer_secret")
access_token = os.environ.get("access_token")
access_token_secret = os.environ.get("access_token_secret")
| [
11748,
28686,
198,
198,
49827,
62,
2539,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
49827,
62,
2539,
4943,
198,
49827,
62,
21078,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
49827,
62,
21078,
4943,
198,
15526,
62,
30001,
796,
28686,
13... | 2.986111 | 72 |
from django.conf.urls import url
from .views import UserRegisterAPIView, UserLoginAPIView
urlpatterns = [
url(r'^register/$', UserRegisterAPIView.as_view(), name='register'),
url(r'^login/$', UserLoginAPIView.as_view(), name='login'),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
201,
198,
6738,
764,
33571,
1330,
11787,
38804,
2969,
3824,
769,
11,
11787,
47790,
2969,
3824,
769,
201,
198,
201,
198,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
197,
197,... | 2.677419 | 93 |
# -*- coding:utf-8 -*-
"""
@file: user
@time: 2020/6/17 0:48
"""
from flask_restful import Api
from app.libs.lin_response import Resource
from flask.blueprints import Blueprint
from flask import request
from flask import make_response, jsonify
from app.ops.membership import get_membership_list
from app.ops.user import get_open_id, create_or_get_user
from app.libs.restful import gen_result_by_code
from app.ops.address import get_address_list
import app.libs.status_code as sc
user_bp = Blueprint("egg_user", __name__, url_prefix="/api/v1/user")
user_api = Api(user_bp)
@user_api.resource("/login")
@user_api.resource("/user_info")
@user_api.resource("/address_list")
@user_api.resource("/address")
@user_api.resource("/cards")
@user_api.resource("/card")
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
31,
7753,
25,
2836,
198,
31,
2435,
25,
12131,
14,
21,
14,
1558,
657,
25,
2780,
198,
37811,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
6738,
... | 2.824818 | 274 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from scuole.core.admin import ReadOnlyAdmin
from .models import District, DistrictStats
@admin.register(District)
@admin.register(DistrictStats)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
629,
84,
2305,... | 3.243902 | 82 |
# import inspect
# import sys
# from glob import glob
# from os.path import basename, dirname, join
#
# from oidcservice.service import Service
#
#
# def factory(req_name, **kwargs):
# pwd = dirname(__file__)
# if pwd not in sys.path:
# sys.path.insert(0, pwd)
# for x in glob(join(pwd, '*.py')):
# _mod = basename(x)[:-3]
# if not _mod.startswith('__'):
# # _mod = basename(x)[:-3]
# if _mod not in sys.modules:
# print('"{}" not in sys.modules'.format(_mod))
# __import__(_mod, globals(), locals())
#
# for name, obj in inspect.getmembers(sys.modules[_mod]):
# if inspect.isclass(obj) and issubclass(obj, Service):
# print('obj.__name__ = "{}"'.format(obj.__name__))
# try:
# if obj.__name__ == req_name:
# return obj(**kwargs)
# except AttributeError:
# pass
#
# print('Failed! pwd={}, req_name={}'.format(pwd, req_name)) | [
2,
1330,
10104,
198,
2,
1330,
25064,
198,
2,
422,
15095,
1330,
15095,
198,
2,
422,
28686,
13,
6978,
1330,
1615,
12453,
11,
26672,
3672,
11,
4654,
198,
2,
198,
2,
422,
267,
312,
66,
15271,
13,
15271,
1330,
4809,
198,
2,
198,
2,
1... | 1.878683 | 577 |
import pygame
from Game.Scenes import *
from Game.Shared import *
Sudoku().start()
| [
11748,
12972,
6057,
198,
198,
6738,
3776,
13,
3351,
18719,
1330,
1635,
198,
6738,
3776,
13,
2484,
1144,
1330,
1635,
628,
628,
198,
50,
463,
11601,
22446,
9688,
3419,
198
] | 2.933333 | 30 |
#!python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Outputs the current date and time information as a key-value file
appropriate for use with template_replace.py.
"""
import datetime
import optparse
import os
import sys
if __name__ == '__main__':
sys.exit(main())
| [
2,
0,
29412,
198,
2,
15069,
2321,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.72973 | 222 |
from snakemake.io import expand
from drop import utils
| [
6738,
17522,
15883,
13,
952,
1330,
4292,
198,
6738,
4268,
1330,
3384,
4487,
628
] | 4 | 14 |
# -*- coding: utf-8 -*-
output_file=open('/Users/harshfatepuria/Documents/Github/Evaluation-of-Content-Analysis-on-TREC-Polat-DD-Dataset/result/5-SizeSummary/sizeRatioSummary123.csv','w')
output_file.write("State,Solr Index Size,Actual File Size\n")
with open("/Users/harshfatepuria/Documents/Github/Evaluation-of-Content-Analysis-on-TREC-Polat-DD-Dataset/result/5-SizeSummary/sizeRatioSummary.json") as f:
newList=eval(f.read())
for newDict in newList:
output_file.write(newDict["type"]+","+str(newDict["ratio"])+"\n")
output_file.close()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
22915,
62,
7753,
28,
9654,
10786,
14,
14490,
14,
71,
5406,
69,
378,
14225,
544,
14,
38354,
14,
38,
10060,
14,
36,
2100,
2288,
12,
1659,
12,
19746,
12,
32750,
12... | 2.447368 | 228 |
from nonogram import *
from patterns import *
from multiprocessing import *
from time import time
from pprint import pprint
import sys
import itertools as it
'''
?????
oxoox
oxxoo
xoxoo
키 값이 이미 두여있는지 확인한다.
가장 처음 키값을 둔다.
'''
# n 개의 자리. m 개의 공
# 3 개의 자리. 2 개의 공
'''
(oo) () ()
() (oo) ()
() () (oo)
(o) (o) ()
(o) () (o)
() (o) (o)
'''
# 3 개의 자리. 3 개의 공
'''
(ooo) () ()
() (ooo) ()
() () (ooo)
(oo) (o) ()
(oo) () (o)
() (oo) (o)
(o) (oo) ()
(o) () (oo)
() (o) (oo)
가장 많이 뭉쳐있는 공이 m/2 보다 작아지면(이 경우 3/2)
더 이상 의미 없기 때문에 중단.
'''
'''
그러므로 m 이 짝수 일 때 가장 큰 공이
m 개 일 경우,
m-1 개 일 경우,
...
(m/2)+1 개 일 경우,
m/2 개 일 경우
로 나누어 생각할 수 있고
m 이 홀수 일 때 가장 큰 공이
m 개 일 경우,
m-1 개 일 경우,
...
(m/2)+3/2 개 일 경우,
(m/2)+1/2 개 일 경우,
로 나누어 생각할 수 있다.
각각의 경우는 모두 서로 다르다는 것이 자명한데
각각의 경우에서 가장 큰 공이 다르기 때문이다.
이때 각각의 경우에서 가장 큰 공을 제외한 나머지 공들로
위와 같이 경우의 수를 나누어 공들이 배치되는 경우의 수를 구할 수 있다.
'''
'''
m 개의 공을 n 개의 자리에 배치하는 경우의 수
1 개의 공을 1 개의 자리에 배치하는 경우의 수 == 1
(o)
1 개의 공을 2 개의 자리에 배치하는 경우의 수 == 2
(o) ()
() (o)
1 개의 공을 3 개의 자리에 배치하는 경우의 수 == 3
(o) () ()
() (o) ()
() () (o)
1 개의 공을 n 개의 자리에 배치하는 경우의 수 == n
2 개의 공을 1 개의 자리에 배치하는 경우의 수 == 1
(oo)
2 개의 공을 2 개의 자리에 배치하는 경우의 수 == 3
(oo) ()
() (oo)
(o) (o)
2 개의 공을 3 개의 자리에 배치하는 경우의 수 == 6
(oo) () ()
() (oo) ()
() () (oo)
(o) (o) ()
(o) () (o)
() (o) (o)
2 개의 공을 4 개의 자리에 배치하는 경우의 수 == 10
(oo) () () ()
() (oo) () ()
() () (oo) ()
() () () (oo)
(o) (o) () ()
(o) () (o) ()
(o) () () (o)
() (o) (o) ()
() (o) () (o)
() () (o) (o)
2 개의 공을 5 개의 자리에 배치하는 경우의 수 == 15
(oo) () () () ()
() (oo) () () ()
() () (oo) () ()
() () () (oo) ()
() () () () (oo)
(o) (o) () () ()
(o) () (o) () ()
(o) () () (o) ()
(o) () () () (o)
() (o) (o) () ()
() (o) () (o) ()
() (o) () () (o)
() () (o) (o) ()
() () (o) () (o)
() () () (o) (o)
2 개의 공을 n 개의 자리에 배치하는 경우의 수 == n(n+1)/2
3 개의 공을 1 개의 자리에 배치하는 경우의 수 == 1
(ooo)
3 개의 공을 2 개의 자리에 배치하는 경우의 수 == 4
(ooo) ()
() (ooo)
(oo) (o)
(o) (oo)
3 개의 공을 3 개의 자리에 배치하는 경우의 수 == 9
(ooo) () ()
() (ooo) ()
() () (ooo)
(oo) (o) ()
(oo) () (o)
(o) (oo) ()
() (oo) (o)
(o) () (oo)
() (o) (oo)
3 개의 공을 4 개의 자리에 배치하는 경우의 수 == 16
(ooo) () () ()
() (ooo) () ()
() () (ooo) ()
() () () (ooo)
(oo) (o) () ()
(oo) () (o) ()
(oo) () () (o)
(o) (oo) () ()
() (oo) (o) ()
() (oo) () (o)
(o) () (oo) ()
() (o) (oo) ()
() () (oo) (o)
(o) () () (oo)
() (o) () (oo)
() () (o) (oo)
3 개의 공을 n 개의 자리에 배치하는 경우의 수 == n^2
4 개의 공을 1 개의 자리에 배치하는 경우의 수 == 1
(oooo)
4 개의 공을 2 개의 자리에 배치하는 경우의 수 == 5
(oooo) ()
() (oooo)
(ooo) (o)
(o) (ooo)
(oo) (oo)
4 개의 공을 3 개의 자리에 배치하는 경우의 수 == 13
(oooo) () ()
() (oooo) ()
() () (oooo)
(ooo) (o) ()
(ooo) () (o)
(o) (ooo) ()
() (ooo) (o)
(o) () (ooo)
() (o) (ooo)
(oo) (oo) ()
(oo) () (oo)
(oo) (o) (o)
() (oo) (oo)
4 개의 공을 4 개의 자리에 배치하는 경우의 수 == 26
(oooo) () () ()
() (oooo) () ()
() () (oooo) ()
() () () (oooo)
(ooo) (o) () ()
(ooo) () (o) ()
(ooo) () () (o)
(o) (ooo) () ()
() (ooo) (o) ()
() (ooo) () (o)
(o) () (ooo) ()
() (o) (ooo) ()
() () (ooo) (o)
(o) () () (ooo)
() (o) () (ooo)
() () (o) (ooo)
(oo) (oo) () ()
(oo) () (oo) ()
(oo) () () (oo)
(oo) (o) (o) ()
(oo) (o) () (o)
(oo) () (o) (o)
() (oo) (oo) ()
() (oo) () (oo)
() (oo) (o) (o)
() () (oo) (oo)
4 개의 공을 n 개의 자리에 배치하는 경우의 수 ==
(1/6)n^3 + n^2 -(1/6)n ==
n(n^2 + 6n - 1) / 6 ==
n(n + 3 - 10^(1/2))(n + 3 + 10^(1/2))/6
'''
if __name__ == '__main__':
main(sys.argv)
# print(Pattern.patterns([2,2,1,1,1,1,2], 30))
# print(Pattern.patterns([3,1,1,1,1,1,1,2], 30))
# test_target = [1, 0, 0, 0]
# test_performance()
# test_processes()
| [
6738,
1729,
21857,
1330,
1635,
198,
6738,
7572,
1330,
1635,
198,
6738,
18540,
305,
919,
278,
1330,
1635,
198,
6738,
640,
1330,
640,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
25064,
198,
11748,
340,
861,
10141,
355,
340,
198,
... | 1.05036 | 3,336 |
#!/usr/bin/env python
import json
import requests
from pprint import pprint
hostname = 'r1.lab.local'
user = 'wwt'
password = 'WWTwwt1!'
# Suppress SSL certificate verification errors
# Using self-signed certificates in lab and therefore will otherwise report verbos
# SSL validation errors
requests.packages.urllib3.disable_warnings()
restconf_url = f"https://{hostname}/restconf/data/"
module_uri = "native"
# Ensure that the Content-Type and Accept header fields are set
headers = {
'Content-Type': 'application/yang-data+json',
'Accept': 'application/yang-data+json'
}
if __name__ == '__main__':
retrieve() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
11748,
33918,
201,
198,
11748,
7007,
201,
198,
6738,
279,
4798,
1330,
279,
4798,
220,
201,
198,
201,
198,
4774,
3672,
796,
705,
81,
16,
13,
23912,
13,
12001,
6,
201,
... | 2.877193 | 228 |
import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
"""
Structured SVM loss function, naive implementation (with loops).
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
dW = np.zeros(W.shape) # initialize the gradient as zero
# compute the loss and the gradient
num_classes = W.shape[1]
num_train = X.shape[0]
loss = 0.0
for i in xrange(num_train):
scores = X[i].dot(W)
correct_class_score = scores[y[i]]
for j in xrange(num_classes):
if j == y[i]:
continue
margin = scores[j] - correct_class_score + 1 # note delta = 1
if margin > 0:
loss += margin
dW[:,j] = dW[:,j] + X[i]
dW[:,y[i]] = dW[:,y[i]] - X[i]
# Right now the loss is a sum over all training examples, but we want it
# to be an average instead so we divide by num_train.
loss /= num_train
dW /= num_train
# Add regularization to the loss.
loss += reg * np.sum(W * W)
dW += 2 * reg * W
return loss, dW
def svm_loss_vectorized(W, X, y, reg):
"""
Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
loss = 0.0
num_train = X.shape[0]
dW = np.zeros(W.shape) # initialize the gradient as zero
scores = X.dot(W)
correct_class_score = scores[np.arange(num_train),y]
correct_class_score = np.reshape(correct_class_score,(num_train,1))
loss_matrix = ((scores+1) - correct_class_score)
loss_matrix[np.arange(num_train),y] = 0
loss_matrix[loss_matrix < 0] = 0
#grad_matrix = loss_matrix
loss = loss_matrix.sum()/num_train
loss += reg * np.sum(W * W)
loss_matrix[loss_matrix > 0] = 1
grad_coeff = loss_matrix.sum(axis=1)
loss_matrix[np.arange(num_train),y] = - grad_coeff
dW += X.T.dot(loss_matrix)
dW /= num_train
dW += (2 * reg * W)
return loss, dW
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
36273,
198,
6738,
1613,
13,
18780,
1040,
1330,
2124,
9521,
198,
198,
4299,
264,
14761,
62,
22462,
62,
2616,
425,
7,
54,
11,
1395,
11,
331,
11,
842,
2599,
198,
220,
37227,
198,
22... | 2.578485 | 911 |
from django.test import TestCase
from django.urls import reverse
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198
] | 3.611111 | 18 |
from .core.bbox.assigners.hungarian_assigner_3d import HungarianAssigner3D
from .core.bbox.coders.nms_free_coder import NMSFreeCoder
from .core.bbox.match_costs import BBox3DL1Cost
from .datasets import CustomNuScenesDataset, WaymoMultiViewDataset
from .datasets.pipelines import (
PhotoMetricDistortionMultiViewImage, PadMultiViewImage,
NormalizeMultiviewImage, CropMultiViewImage, RandomScaleImageMultiViewImage,
HorizontalRandomFlipMultiViewImage)
from .models.backbones.vovnet import VoVNet
from .models.detectors.obj_dgcnn import ObjDGCNN
from .models.detectors.detr3d import Detr3D
from .models.dense_heads.dgcnn3d_head import DGCNN3DHead
from .models.dense_heads.detr3d_head import Detr3DHead
from .models.utils.detr import Deformable3DDetrTransformerDecoder
from .models.utils.dgcnn_attn import DGCNNAttn
from .models.utils.detr3d_transformer import Detr3DTransformer, Detr3DTransformerDecoder, Detr3DCrossAtten
| [
6738,
764,
7295,
13,
65,
3524,
13,
562,
570,
364,
13,
43274,
3699,
62,
562,
570,
263,
62,
18,
67,
1330,
27304,
8021,
570,
263,
18,
35,
198,
6738,
764,
7295,
13,
65,
3524,
13,
19815,
364,
13,
77,
907,
62,
5787,
62,
66,
12342,
1... | 2.783784 | 333 |
import asyncio
import discord
from discord.ext import commands
# Define commonly used functions. We use a single underscore ('_') to let people know that we shouldn't access this
# outside of this module but still allow it
# Define the checks
def bypass_check(
predicate, **parameters
): # If the user is a bot mod this check will allow them to skip the check if it fails
"""If the user is a bot mod this check will allow them to skip the check if it fails. Auto-passes the ctx
parameter """
return pred
| [
11748,
30351,
952,
198,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
628,
198,
2,
2896,
500,
8811,
973,
5499,
13,
775,
779,
257,
2060,
44810,
19203,
62,
11537,
284,
1309,
661,
760,
326,
356,
6584,
470,
1895,
428,
198,
... | 3.639456 | 147 |
import json
import csv
from operator import itemgetter
def get_price(dict):
"""
This function is used to get the price of a manuscript.
"""
price = dict["price"]
return price
def get_all_prices(file):
"""
This function is used to produce a list of all prices.
:param file: a json file containing the mss
:return: a list
"""
prices_list = []
for mss in file["single_sale"]:
price = get_price(mss)
if price is not None:
prices_list.append(price)
for mss in file["multiple_sales"]:
for ms in mss["mss"]:
price = get_price(ms)
if price is not None:
prices_list.append(price)
return prices_list
def get_average(lst):
"""
This function is used to calculate the average of a list of float.
:param lst: a list
:return: a float
"""
sum = 0
if len(lst) != 0:
for i in lst:
if i is float or int:
sum = sum + i
average = sum / len(lst)
average = round(average, 2)
return average
else:
return None
def price_evolution(mss_dict):
"""
This function is used to get the evolution of the price for a multiple time sold manuscript.
:para mss_dict: the data of a manuscript, as a dict
:return: a dict containing data
"""
# This is the final dict.
data = {}
# This list contains all prices, used for the average.
prices_list = []
# This list contains price and sell date of each sell, it's a list of dicts.
sales_list = []
for mss in mss_dict["mss"]:
id = mss["id"]
# The two entries are overwrite : it's ok because we only want to keep one id and one desc.
data["id"] = id
data["author"] = mss["author"]
data["desc"] = mss["desc"]
price = get_price(mss)
date = mss["sell_date"]
# It's only usefull to retrive prices when we have both the price and the date.
if price and date is not None:
# This dict will contains two keys : the date and the price of the sell.
sales = {}
sales["price"] = price
sales["date"] = date
sales_list.append(sales)
if price is not None:
prices_list.append(price)
# Itemgetter is used to retrieve price by chronological order.
sales_list = sorted(sales_list, key=itemgetter('date'))
data["sales"] = sales_list
# Prices are sorted : the lowest to the highest.
prices_list.sort()
if prices_list != []:
data["average"] = get_average(prices_list)
data["highest_price"] = prices_list[-1]
data["lowest_price"] = prices_list[0]
return data
if __name__ == "__main__":
# First, we retrieve data from the JSON file.
with open('../output/reconciliated.json') as json_file:
data = json.load(json_file)
average = get_average(get_all_prices(data))
print("The average price is " + str(average))
with open('../output/price/price_evolution.csv', 'w+') as csv_file:
fieldnames = ['id', 'author', 'desc', 'sales', 'average', 'highest_price', 'lowest_price']
csv = csv.DictWriter(csv_file, fieldnames=fieldnames)
csv.writeheader()
for mss in data["multiple_sales"]:
data = price_evolution(mss)
csv.writerow(data)
| [
11748,
33918,
198,
11748,
269,
21370,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
198,
4299,
651,
62,
20888,
7,
11600,
2599,
198,
197,
37811,
198,
197,
1212,
2163,
318,
973,
284,
651,
262,
2756,
286,
257,
17116,
13,
198,
197,
37811,... | 2.700901 | 1,110 |
from GUI.Shapes.Shape import Shape
| [
6738,
25757,
13,
2484,
7916,
13,
33383,
1330,
25959,
628
] | 3.6 | 10 |
from rest_framework import serializers
from trades.models import Trade
class TradeSerializer(serializers.HyperlinkedModelSerializer):
"""
Regulate what goes over the wire for a `Trade` resource.
"""
def __init__(self, *args, **kwargs):
"""custom initialisation of serializer to support dynamic field list"""
fields = None
context = kwargs.get("context")
if context:
# Don not pass 'fields' to superclass
fields = context.pop("fields", None)
# Instantiate the superclass normally
super(TradeSerializer, self).__init__(*args, **kwargs)
if fields:
# Drop fields not specified in the `fields` argument.
for field_name in (set(self.fields.keys()) - set(fields)):
self.fields.pop(field_name)
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
17674,
13,
27530,
1330,
9601,
628,
198,
4871,
9601,
32634,
7509,
7,
46911,
11341,
13,
38197,
25614,
17633,
32634,
7509,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3310... | 2.615142 | 317 |
import base64
import urllib
from mimetypes import MimeTypes
from jinja2 import contextfilter
mime = MimeTypes()
@contextfilter
| [
201,
198,
11748,
2779,
2414,
201,
198,
11748,
2956,
297,
571,
201,
198,
6738,
17007,
2963,
12272,
1330,
337,
524,
31431,
201,
198,
201,
198,
6738,
474,
259,
6592,
17,
1330,
4732,
24455,
201,
198,
201,
198,
201,
198,
76,
524,
796,
33... | 2.54386 | 57 |
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name,unused-argument
"""Configuration and fixtures for unit test suite."""
import io
import os
import re
import shutil
import click
import pytest
from aiida.plugins import DataFactory
from aiida_pseudo.data.pseudo import PseudoPotentialData
from aiida_pseudo.groups.family import PseudoPotentialFamily, CutoffsFamily
pytest_plugins = ['aiida.manage.tests.pytest_fixtures'] # pylint: disable=invalid-name
@pytest.fixture
def clear_db(clear_database_before_test):
"""Alias for the `clear_database_before_test` fixture from `aiida-core`."""
yield
@pytest.fixture
def ctx():
"""Return an empty `click.Context` instance."""
return click.Context(click.Command(name='dummy'))
@pytest.fixture
def chtmpdir(tmpdir):
"""Change the current working directory to a temporary directory."""
with tmpdir.as_cwd():
yield
@pytest.fixture
def run_cli_command():
"""Run a `click` command with the given options.
The call will raise if the command triggered an exception or the exit code returned is non-zero.
"""
def _run_cli_command(command, options=None, raises=None):
"""Run the command and check the result.
:param command: the command to invoke
:param options: the list of command line options to pass to the command invocation
:param raises: optionally an exception class that is expected to be raised
"""
import traceback
from click.testing import CliRunner
runner = CliRunner()
result = runner.invoke(command, options or [])
if raises is not None:
assert result.exception is not None, result.output
assert result.exit_code != 0
else:
assert result.exception is None, ''.join(traceback.format_exception(*result.exc_info))
assert result.exit_code == 0, result.output
result.output_lines = [line.strip() for line in result.output.split('\n') if line.strip()]
return result
return _run_cli_command
@pytest.fixture
def filepath_fixtures() -> str:
"""Return the absolute filepath to the directory containing the file `fixtures`.
:return: absolute filepath to directory containing test fixture data.
"""
return os.path.join(os.path.dirname(__file__), 'fixtures')
@pytest.fixture
def filepath_pseudos(filepath_fixtures):
"""Return the absolute filepath to the directory containing the pseudo potential files.
:return: absolute filepath to directory containing test pseudo potentials.
"""
def _filepath_pseudos(entry_point='upf') -> str:
"""Return the absolute filepath containing the pseudo potential files for a given entry point.
:param entry_point: pseudo potential data entry point
:return: filepath to folder containing pseudo files.
"""
return os.path.join(filepath_fixtures, 'pseudos', entry_point)
return _filepath_pseudos
@pytest.fixture
def get_pseudo_potential_data(filepath_pseudos):
"""Return a factory for `PseudoPotentialData` nodes."""
def _get_pseudo_potential_data(element='Ar', entry_point=None) -> PseudoPotentialData:
"""Return a `PseudoPotentialData` for the given element.
:param element: one of the elements for which there is a UPF test file available.
:return: the `PseudoPotentialData`
"""
if entry_point is None:
cls = DataFactory('pseudo')
content = f'<UPF version="2.0.1"><PP_HEADER\nelement="{element}"\nz_valence="4.0"\n/></UPF>\n'
pseudo = cls(io.BytesIO(content.encode('utf-8')), f'{element}.pseudo')
pseudo.element = element
else:
cls = DataFactory(f'pseudo.{entry_point}')
filename = f'{element}.{entry_point}'
with open(os.path.join(filepath_pseudos(entry_point), filename), 'rb') as handle:
pseudo = cls(handle, filename)
return pseudo
return _get_pseudo_potential_data
@pytest.fixture
def generate_cutoffs():
"""Return a dictionary of cutoffs for all elements in a given family."""
def _generate_cutoffs(family):
"""Return a dictionary of cutoffs for a given family."""
return {element: {'cutoff_wfc': 1.0, 'cutoff_rho': 2.0} for element in family.elements}
return _generate_cutoffs
@pytest.fixture
def generate_cutoffs_dict(generate_cutoffs):
"""Return a dictionary of cutoffs for a given family with specified stringencies."""
def _generate_cutoffs_dict(family, stringencies=('normal',)):
"""Return a dictionary of cutoffs for a given family."""
cutoffs_dict = {}
for stringency in stringencies:
cutoffs_dict[stringency] = generate_cutoffs(family)
return cutoffs_dict
return _generate_cutoffs_dict
@pytest.fixture
def get_pseudo_family(tmpdir, filepath_pseudos):
"""Return a factory for a ``PseudoPotentialFamily`` instance."""
def _get_pseudo_family(
label='family',
cls=PseudoPotentialFamily,
pseudo_type=PseudoPotentialData,
elements=None,
cutoffs_dict=None,
unit=None,
default_stringency=None
) -> PseudoPotentialFamily:
"""Return an instance of `PseudoPotentialFamily` or subclass containing the given elements.
:param elements: optional list of elements to include instead of all the available ones
:params cutoffs_dict: optional dictionary of cutoffs to specify. Format: multiple sets of cutoffs can be
specified where the key represents the stringency, e.g. ``low`` or ``normal``. For each stringency, a
dictionary should be defined that for each element symbols for which the family contains a pseudopotential,
two values are specified, ``cutoff_wfc`` and ``cutoff_rho``, containing a float value with the recommended
cutoff to be used for the wave functions and charge density, respectively..
:param unit: string definition of a unit of energy as recognized by the ``UnitRegistry`` of the ``pint`` lib.
:param default_stringency: string with the default stringency name, if not specified, the first one specified in
the ``cutoffs`` argument will be used if specified.
:return: the pseudo family
"""
if elements is not None:
elements = {re.sub('[0-9]+', '', element) for element in elements}
if pseudo_type is PseudoPotentialData:
# There is no actual pseudopotential file fixtures for the base class, so default back to `.upf` files
extension = 'upf'
else:
extension = pseudo_type.get_entry_point_name()[len('pseudo.'):]
dirpath = filepath_pseudos(extension)
for pseudo in os.listdir(dirpath):
if elements is None or any(pseudo.startswith(element) for element in elements):
shutil.copyfile(os.path.join(dirpath, pseudo), os.path.join(str(tmpdir), pseudo))
family = cls.create_from_folder(str(tmpdir), label, pseudo_type=pseudo_type)
if cutoffs_dict is not None and isinstance(family, CutoffsFamily):
default_stringency = default_stringency or list(cutoffs_dict.keys())[0]
for stringency, cutoff_values in cutoffs_dict.items():
family.set_cutoffs(cutoff_values, stringency, unit)
family.set_default_stringency(default_stringency)
return family
return _get_pseudo_family
@pytest.fixture
def get_pseudo_archive(tmpdir, filepath_pseudos):
"""Create an archive with pseudos."""
return _get_pseudo_archive
@pytest.fixture
def generate_structure():
"""Return a ``StructureData``."""
def _generate_structure(elements=('Ar',)):
"""Return a ``StructureData``."""
from aiida.orm import StructureData
structure = StructureData(cell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
for index, element in enumerate(elements):
symbol = re.sub(r'[0-9]+', '', element)
structure.append_atom(position=(index * 0.5, index * 0.5, index * 0.5), symbols=symbol, name=element)
return structure
return _generate_structure
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
39605,
12,
3672,
11,
403,
1484,
12,
49140,
198,
37811,
38149,
290,
34609,
329,
4326,
1332,
18389,
526,
15931,
198,
11... | 2.693264 | 3,058 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.QuotaModifyDetail import QuotaModifyDetail
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
26209,
13,
2348,
541,
323,
31077,
1330,
978,... | 2.613333 | 75 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: list数据结构
Desc :
"""
def transpose_list():
"""矩阵转置"""
matrix = [[1, 2, 3, 4],[5, 6, 7, 8],[9, 10, 11, 12],]
result = zip(*matrix)
print(type(result))
for z in result: print(z)
# zip是一个可迭代对象,迭代完了就到尾了,后面木有元素了
result = list(result)
print(result)
if __name__ == '__main__':
transpose_list()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
33221,
25,
1351,
46763,
108,
162,
235,
106,
163,
119,
241,
162,
252,
226,
198,
24564,
1058,
220,
198,
220... | 1.633333 | 240 |
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from unittest.mock import patch
from api.test.api.responders_test.test_data import base
from api.test.api.responders_test.test_data import inventory
from api.test.api.test_base import TestBase
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
15069,
357,
66,
8,
2177,
12,
42334,
3374,
77,
16042,
357,
34,
4861,
11998,
828,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.541667 | 360 |
""" creating fake user for test"""
import os
import secrets
import django
import factory
from django.contrib.auth import get_user_model
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_playground.settings",)
django.setup()
user = get_user_model()
class UserFactory(factory.Factory):
"""
Using factory boy to generate random data
"""
class Meta:
"""
Setting up the model
"""
model = user
email = factory.Faker(provider="email")
password = factory.Faker(provider="password")
username = factory.Faker(provider="user_name")
bio = factory.Faker(provider="text")
full_name = factory.Faker(provider="name")
phone_num = factory.Faker(provider="phone_number")
def create_users(*, users: int = 5) -> None:
"""
create random users
"""
for _ in range(users):
email = UserFactory().email
password = UserFactory().password
username = UserFactory().username
bio = UserFactory().bio
full_name = UserFactory().full_name
phone_num = UserFactory().phone_num
user.objects.create(
email=email,
bio=bio,
full_name=full_name,
phone_num=phone_num,
username=username,
gender=secrets.choice(["Female", "Male", "Rather not say"]),
is_active=secrets.choice([True, False]),
is_staff=secrets.choice([True, False]),
is_superuser=secrets.choice([True, False]),
)
user.set_password(password)
user.save()
create_users(users=50)
| [
37811,
4441,
8390,
2836,
329,
1332,
37811,
198,
11748,
28686,
198,
11748,
13141,
198,
198,
11748,
42625,
14208,
198,
11748,
8860,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
418,
13,
268,... | 2.39759 | 664 |
import numpy as np
# Given the first Probability of our coin, calculate the entropy
# Calculate the entropy of our 3 coins
print(calc_entropy(0.5))
print(calc_entropy(0.9))
print(calc_entropy(0.1))
# Calculate the entropy of someone playing tennis from our tennis dataset
# (9 out of 14 people said no)
print(calc_entropy(9 / 14))
| [
11748,
299,
32152,
355,
45941,
628,
198,
2,
11259,
262,
717,
30873,
1799,
286,
674,
10752,
11,
15284,
262,
40709,
628,
198,
2,
27131,
378,
262,
40709,
286,
674,
513,
10796,
198,
4798,
7,
9948,
66,
62,
298,
28338,
7,
15,
13,
20,
40... | 3.054545 | 110 |
import logging
import os
from pathlib import Path
from urllib.parse import urlparse
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import file_uri
from cwltool.load_tool import resolve_and_validate_document, fetch_document
from cwltool.main import main as cwl_tool
def validate_cwl_doc_main(cwl_doc_path):
"""
Not currently used. Calls the main function of cwltool with validation parameters. Does a lot of extra stuff.
:param cwl_doc_path:
:return:
"""
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.ERROR)
cwl_doc_path = str(cwl_doc_path)
rv = cwl_tool(argsl=['--validate', '--disable-color', cwl_doc_path], logger_handler=stream_handler)
if rv != 0:
raise ValidationException(f"cwltool did not return a return value of 0 for {cwl_doc_path}")
return
def validate_cwl_doc(cwl_doc):
"""
This is adapted from cwltool.main.main and avoids the unnecessary stuff by using cwltool.main.main directly.
:param cwl_doc_path:
:return:
"""
if isinstance(cwl_doc, (Path, str)): # Can also be CWLObjectType
cwl_doc = str(cwl_doc)
if not (urlparse(cwl_doc)[0] and urlparse(cwl_doc)[0] in ['http', 'https', 'file']):
cwl_doc = file_uri(os.path.abspath(cwl_doc))
loading_context, workflow_object, uri = fetch_document(cwl_doc)
resolve_and_validate_document(loading_context, workflow_object, uri)
return
| [
11748,
18931,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
6738,
32815,
62,
21680,
324,
13,
1069,
11755,
1330,
3254,
24765,
16922,
198,
6738,
32815,
62,
21680,
324,
... | 2.619893 | 563 |
import os, sys
import numpy as np
import pandas as pd
import librosa
'''
Check if the the submssion folders are valid: all files must have the
correct format, shape and naming.
WORK IN PROGRESS...
'''
def validate_task1_submission(submission_folder, test_folder):
'''
Args:
- submission_folder: folder containing the model's output for task 1 (non zipped).
- test_folder: folder containing the released test data (non zipped).
'''
#read folders
contents_submitted = sorted(os.listdir(submission_folder))
contents_test = sorted(os.listdir(test_folder))
contents_submitted = [i for i in contents_submitted if 'DS_Store' not in i]
contents_test = [i for i in contents_test if 'DS_Store' not in i]
contents_test = [i for i in contents_test if '_B' not in i]
contents_test = [i.split('_')[0]+'.wav' for i in contents_test]
#check if non.npy files are present
non_npy = [x for x in contents_submitted if x[-4:] != '.npy'] #non .npy files
if len(non_npy) > 0:
raise AssertionError ('Non-.npy files present. Please include only .npy files '
'in the submission folder.')
#check total number of files
num_files = len(contents_submitted)
target_num_files = len(contents_test)
if not num_files == target_num_files:
raise AssertionError ('Wrong amount of files. Target:' + str(target_num_files) +
', detected:' + str(len(contents_submitted)))
#check files naming
names_submitted = [i.split('.')[0] for i in contents_submitted]
names_test = [i.split('.')[0] for i in contents_test]
names_submitted.sort()
names_test.sort()
if not names_submitted == names_test:
raise AssertionError ('Wrong file naming. Please name each output file '
'exactly as its input .wav file, but with .npy extension')
#check shape file-by-file
for i in contents_test:
submitted_path = os.path.join(submission_folder, i.split('.')[0]+'.npy')
test_path = os.path.join(test_folder, i.split('.')[0]+'_A.wav')
s = np.load(submitted_path, allow_pickle=True)
t, _ = librosa.load(test_path, 16000, mono=False)
target_shape = t.shape[-1]
if not s.shape[-1] == target_shape:
raise AssertionError ('Wrong shape for: ' + str(i) + '. Target: ' + str(target_shape) +
', detected:' + str(s.shape))
print ('The shape of your submission for Task 1 is valid!') | [
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9195,
4951,
64,
198,
7061,
6,
198,
9787,
611,
262,
262,
850,
76,
824,
295,
24512,
389,
4938,
25,
477,
3696,
1276,
423,
26... | 2.47456 | 1,022 |
import pandas as pd
import numpy as np
import sys
sys.path.append('./')
from train_base import write_csv, read_info, convert_to_loader, _run_language
from util import argparser
full_results = [['lang', 'artificial', 'avg_len', 'test_shannon', 'test_loss',
'test_acc', 'val_loss', 'val_acc', 'best_epoch']]
if __name__ == '__main__':
args = argparser.parse_args(csv_folder='artificial/%s/normal')
assert args.data == 'northeuralex', 'this script should only be run with northeuralex data'
fill_artificial_args(args)
run_languages(args)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
11537,
198,
6738,
4512,
62,
8692,
1330,
3551,
62,
40664,
11,
1100,
62,
10951,
11,
10385,
62,
... | 2.584071 | 226 |
import sklearn as sk
import sklearn.metrics
import torch
import torch_geometric as tg
import torch_geometric.data
from tqdm.auto import tqdm
from . import config, utils
import sys
sys.path.insert(0, '../..')
sys.path.insert(0, '../../pyged/lib')
import pyged | [
11748,
1341,
35720,
355,
1341,
198,
11748,
1341,
35720,
13,
4164,
10466,
198,
11748,
28034,
198,
11748,
28034,
62,
469,
16996,
355,
256,
70,
198,
11748,
28034,
62,
469,
16996,
13,
7890,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
... | 2.910112 | 89 |
try:
from PIL import Image, ImageOps, UnidentifiedImageError
from pyzbar import pyzbar
qr_available = True
except ImportError:
qr_available = False
image_error = IndexError if not qr_available else UnidentifiedImageError
from os import urandom
from hashlib import scrypt
from itertools import cycle
from time import time, ctime
from datetime import timedelta
from qrcode import make as make_qr
from os.path import split as path_split
from base64 import b64encode, b64decode, urlsafe_b64decode
from telethon import TelegramClient
from telethon.tl.types import CodeSettings
from telethon.sessions import StringSession
from telethon.errors import (
PhoneNumberInvalidError, SessionPasswordNeededError
)
from telethon.tl.functions.account import (
ChangePhoneRequest, SendChangePhoneCodeRequest
)
from telethon.tl.functions.auth import (
ResendCodeRequest, AcceptLoginTokenRequest
)
from reedsolo import RSCodec
from pyaes import AESModeOfOperationCBC, Encrypter, Decrypter
from pyaes.util import append_PKCS7_padding, strip_PKCS7_padding
VERSION = 'v4.0'
TelegramClient.__version__ = VERSION
RSC = RSCodec(222)
DEFAULT_SALT = b'\x82\xa1\x93<Zk2\x8b\x8ah|m\x04YC\x14\x97\xc4\nx\x14E?\xffmY\xa4\x9a*8\xc2\xb2'
def decode_restored(encoded_restored: list) -> list:
'''
Converts all elements in list from bytes
to the required types and decodes all
from base64 to correct format.
'''
try:
restored = encoded_restored[:]
restored[0] = b64decode(restored[0])
restored[1] = restored[1].decode()
restored[2] = float(restored[2])
restored[3] = b64decode(restored[3]).decode()
restored[4] = int(restored[4])
restored[5] = b64decode(restored[5]).decode()
except IndexError:
raise ValueError('Invalid decrypted restored. Bad decryption?')
return restored
| [
28311,
25,
198,
220,
220,
220,
422,
350,
4146,
1330,
7412,
11,
7412,
41472,
11,
791,
19107,
5159,
12331,
198,
220,
220,
220,
422,
12972,
89,
5657,
1330,
12972,
89,
5657,
198,
220,
220,
220,
10662,
81,
62,
15182,
796,
6407,
198,
1634... | 2.705202 | 692 |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from torch.autograd import gradcheck
from monai.networks.layers.filtering import BilateralFilter
from tests.utils import skip_if_no_cpp_extension, skip_if_no_cuda
TEST_CASES = [
[
# Case Description
"1 dimension, 1 channel, low spatial sigma, low color sigma",
# Spatial and Color Sigmas
(1, 0.2),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
],
# Batch 1
[
# Channel 0
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000]
],
],
],
[
# Case Description
"1 dimension, 1 channel, low spatial sigma, high color sigma",
# Spatial and Color Sigmas
(1, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.880626, 0.306148, 0.158734, 0.164534, 0.754386]
],
# Batch 1
[
# Channel 0
[0.019010, 0.104507, 0.605634, 0.183721, 0.045619]
],
],
],
[
# Case Description
"1 dimension, 1 channel, high spatial sigma, low color sigma",
# Spatial and Color Sigmas
(4, 0.2),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
],
# Batch 1
[
# Channel 0
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000]
],
],
],
[
# Case Description
"1 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.497667, 0.268683, 0.265026, 0.261467, 0.495981]
],
# Batch 1
[
# Channel 0
[0.149889, 0.148226, 0.367978, 0.144023, 0.141317]
],
],
],
[
# Case Description
"1 dimension, 4 channel, low spatial sigma, high color sigma",
# Spatial and Color Sigmas
(1, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 0],
# Channel 1
[1, 0, 1, 0, 0],
# Channel 2
[0, 0, 1, 0, 1],
# Channel 3
[0, 0, 0, 0, 1],
]
],
# Expected
[
# Batch 0
[
# Channel 0
[0.988107, 0.061340, 0.001565, 0.000011, 0.000000],
# Channel 1
[0.988107, 0.061340, 0.998000, 0.000016, 0.000123],
# Channel 2
[0.000000, 0.000000, 0.996435, 0.000006, 0.999236],
# Channel 3
[0.000000, 0.000000, 0.000000, 0.000000, 0.999113],
]
],
],
[
# Case Description
"2 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]]
],
# Batch 1
[
# Channel 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
[0.211469, 0.094356, 0.092973, 0.091650, 0.211894],
[0.093755, 0.091753, 0.090524, 0.089343, 0.088384],
[0.091803, 0.089783, 0.088409, 0.087346, 0.086927],
[0.089938, 0.088126, 0.086613, 0.085601, 0.085535],
[0.208359, 0.086535, 0.085179, 0.084210, 0.205858],
]
],
# Batch 1
[
# Channel 0
[
[0.032760, 0.030146, 0.027442, 0.024643, 0.021744],
[0.030955, 0.029416, 0.026574, 0.023629, 0.020841],
[0.028915, 0.027834, 0.115442, 0.022515, 0.020442],
[0.026589, 0.025447, 0.024319, 0.021286, 0.019964],
[0.023913, 0.022704, 0.021510, 0.020388, 0.019379],
]
],
],
],
[
# Case Description
"2 dimension, 4 channel, high spatial sigma, high color sigma",
# Spatial and Color Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]],
# Channel 1
[[1, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 1]],
# Channel 2
[[0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 1, 0, 0]],
# Channel 3
[[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]],
]
],
# Expected
[
# Batch 0
[
# Channel 0
[
[0.557349, 0.011031, 0.001800, 0.011265, 0.000631],
[0.009824, 0.010361, 0.010429, 0.010506, 0.010595],
[0.008709, 0.009252, 0.009688, 0.009714, 0.009744],
[0.007589, 0.008042, 0.008576, 0.008887, 0.008852],
[0.000420, 0.006827, 0.001048, 0.007763, 0.190722],
],
# Channel 1
[
[0.614072, 0.011045, 0.925766, 0.011287, 0.007548],
[0.009838, 0.010382, 0.010454, 0.010536, 0.010630],
[0.008727, 0.009277, 0.009720, 0.009751, 0.009787],
[0.007611, 0.008071, 0.008613, 0.008932, 0.008904],
[0.027088, 0.006859, 0.950749, 0.007815, 0.230270],
],
# Channel 2
[
[0.056723, 0.000150, 0.973790, 0.000233, 0.990814],
[0.000151, 0.000214, 0.000257, 0.000307, 0.000364],
[0.000186, 0.000257, 0.000328, 0.000384, 0.000449],
[0.000221, 0.000295, 0.000382, 0.000465, 0.000538],
[0.993884, 0.000333, 0.984743, 0.000532, 0.039548],
],
# Channel 3
[
[0.000000, 0.000136, 0.049824, 0.000210, 0.983897],
[0.000136, 0.000193, 0.000232, 0.000277, 0.000329],
[0.000168, 0.000232, 0.000297, 0.000347, 0.000405],
[0.000200, 0.000266, 0.000345, 0.000420, 0.000485],
[0.967217, 0.000301, 0.035041, 0.000481, 0.000000],
],
]
],
],
[
# Case Description
"3 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
]
]
],
# Expected
[
# Batch 0
[
# Channel 0
[
# Frame 0
[
[0.085451, 0.037820, 0.036880, 0.035978, 0.084296],
[0.037939, 0.036953, 0.036155, 0.035385, 0.034640],
[0.037167, 0.036302, 0.035603, 0.034931, 0.034465],
[0.036469, 0.035724, 0.035137, 0.034572, 0.034480],
[0.088942, 0.035193, 0.034682, 0.034266, 0.090568],
],
# Frame 1
[
[0.037125, 0.035944, 0.035103, 0.033429, 0.033498],
[0.033380, 0.032653, 0.033748, 0.033073, 0.032549],
[0.034834, 0.034001, 0.033500, 0.032902, 0.032560],
[0.033972, 0.033554, 0.033220, 0.032765, 0.032570],
[0.033590, 0.033222, 0.032927, 0.032689, 0.032629],
],
# Frame 2
[
[0.035635, 0.034468, 0.033551, 0.032818, 0.032302],
[0.034523, 0.032830, 0.032146, 0.031536, 0.031149],
[0.033612, 0.032011, 0.031664, 0.031128, 0.030839],
[0.032801, 0.031668, 0.031529, 0.031198, 0.030978],
[0.032337, 0.031550, 0.031419, 0.031383, 0.031211],
],
# Frame 3
[
[0.034300, 0.033236, 0.032239, 0.031517, 0.031133],
[0.033357, 0.031842, 0.031035, 0.030471, 0.030126],
[0.032563, 0.031094, 0.030156, 0.029703, 0.029324],
[0.031850, 0.030505, 0.030027, 0.029802, 0.029461],
[0.031555, 0.030121, 0.029943, 0.030000, 0.029700],
],
# Frame 4
[
[0.083156, 0.032122, 0.031204, 0.030380, 0.080582],
[0.032296, 0.030936, 0.030170, 0.029557, 0.029124],
[0.031617, 0.030293, 0.029377, 0.028886, 0.028431],
[0.031084, 0.029859, 0.028839, 0.028439, 0.027973],
[0.164616, 0.029457, 0.028484, 0.028532, 0.211082],
],
]
]
],
],
]
@skip_if_no_cuda
@skip_if_no_cpp_extension
if __name__ == "__main__":
unittest.main()
| [
2,
15069,
12131,
532,
33448,
25000,
20185,
42727,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921... | 1.502538 | 8,274 |
if __name__ == '__main__':
import sys
import json
from copy import copy
import numpy as np
from hangar import planes
from engines import engine_db
requirements = {}
with open('requirements.json') as f:
requirements = json.load(f)
print('Mission Requirements:')
print(json.dumps(requirements, indent=2))
print('')
occupant = requirements['occupant']
occupant_count = requirements['occupant_count']
required_payload = occupant_count * (occupant['weight'] + occupant['baggage_weight'])
required_range = requirements['range']
required_V_approach = requirements['landing']['approach_speed']
required_d_runway = requirements['runway_length']
if len(sys.argv) > 1:
planes = {sys.argv[1]: planes[sys.argv[1]]}
# Create a giant list of each plane in hangar, matched up with engine in
# engines.csv. The number of engines will be varied from 1 to 5. This
# means there will be (num_planes*num_engines*5) potential combinations
for plane_name, plane in planes.items():
x = np.linspace(500, 1200, 1400)
y = np.linspace(40, 70, 60)
xv, yv = np.meshgrid(x, y)
results = np.zeros((y.shape[0], x.shape[0], 4))
for i in range(x.shape[0]):
for j in range(y.shape[0]):
experiment = copy(plane)
experiment.S = x[i]
experiment.b = y[j]
experiment.W_payload = required_payload
h = 0
experiment.set_altitude(h)
V_cruise = experiment.speed_carson()
roc = plane.rate_of_climb(plane.drag(plane.Cd(plane.Cd_i(plane.Cl(V_cruise))), V_cruise), V_cruise)
while roc > 0:
h += 500
experiment.set_altitude(h)
V_cruise = experiment.speed_carson()
roc = plane.rate_of_climb(plane.drag(plane.Cd(plane.Cd_i(plane.Cl(V_cruise))), V_cruise), V_cruise)
# solve for max payload, and run the rest of the
# calculations assuming the plane is 100% full
# payload = max_payload(experiment, V_cruise)
# if payload > required_payload:
# experiment.W_payload = required_payload
d_range = experiment.max_range_const_speed(V_cruise)
# assume that...
# our airport is at sea level
# no flaps/slats are used during takeoff
# wings are 4m off the ground
# rolling coefficient is 0.02
# these assumptions aren't terribly important since
# we're just worried about maximizing things
experiment.set_altitude(0)
d_takeoff = experiment.d_takeoff(0.02, 4)
# assume that Cl_max increases by 50% because of
# flaps/slats during landing
experiment.Cl_max *= 1.5
# make sure all computations use the appropriate weight
experiment.W_fuel = 0
V_approach = experiment.speed_landing()
# ignore n_struct, since we don't know it
n_cl_max = experiment.n_cl_max(V_approach)
n_thrust = experiment.n_thrust(V_approach)
n = min(n_cl_max, n_thrust)
if n**2 - 1 <= 0:
r_pattern = 0
else:
r_pattern = experiment.turning_radius(V_approach, n)
# assume we can deliver 30% reverse thrust
d_landing = experiment.d_landing(0.02, 4, 0.30)
"""
NOW WE HAVE RESULTS:
cost
V_cruise
payload
range
d_takeoff
d_landing
r_pattern
"""
d_runway = max(d_takeoff, d_landing)
# Save results to judge based on priorities later on
results[j, i] = np.array([
V_cruise,
d_range,
d_runway,
r_pattern,
])
print(results.shape)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
print(xv.shape)
print(yv.shape)
print(results.shape)
ax.set_xlabel('Surface Area [m^2]')
ax.set_ylabel('Span [m]')
ax.set_zlabel('Range [km]')
ax.plot_wireframe(X=xv, Y=yv, Z=results[:,:,1]/1000)
plt.show()
# if len(results) > 0:
# # convert to numpy and make everything dimensionless
# results_np = np.array(results)
# results_np /= results_np.max(axis=0)
# # put avg value at 0
# results_np -= results_np.mean(axis=0)
# # judge each plane based on priorities
# scores = (results_np * priorities).sum(axis=1)
# winner = scores.argmax()
#
# print('Finished testing ' + plane_name + ':')
# print(' There are {} combinations that meet requirements'.format(len(results)))
# # print top 5 engine combinations
# print(' These are the best engine combinations:')
# for i in range(5):
# winner = scores.argmax()
# scores[winner] = scores.min()
# winner_name = result_names[winner][len(plane_name)+1:]
# engine_count = winner_name[-1]
# winner_name = winner_name[:-2]
# print(' [{}] '.format(i+1) + engine_count + ' x ' + winner_name)
# print(' Plane Cost: {}'.format(results[winner][0]))
# print(' Sufficient Payload: {}'.format(result_reqs_met[winner][0]))
# print(' Sufficient Range: {}'.format(result_reqs_met[winner][1]))
# print(' Satisfactory Runway: {}'.format(result_reqs_met[winner][2]))
# print(' Satisfactory Approach: {}'.format(result_reqs_met[winner][3]))
#
# if len(sys.argv) > 2 and sys.argv[2] == 'a':
# print(' These are all possible engine combinations:')
# for i in range(len(results)):
# config_name = result_names[i][len(plane_name) + 1:]
# engine_count = config_name[-1]
# config_name = config_name[:-2]
# print(' [{}] '.format(i + 1) + engine_count + ' x ' + config_name)
# print(' Plane Cost: {}'.format(results[i][0]))
# print(' Sufficient Payload: {}'.format(result_reqs_met[i][0]))
# print(' Sufficient Range: {}'.format(result_reqs_met[i][1]))
# print(' Satisfactory Runway: {}'.format(result_reqs_met[i][2]))
# print(' Satisfactory Approach: {}'.format(result_reqs_met[i][3]))
# else:
# print(plane_name + ' cannot meet requirements, regardless of engine')
# print('')
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
1330,
33918,
198,
220,
220,
220,
422,
4866,
1330,
4866,
198,
220,
220,
220,
1330,
299,
32152,
355,
45941,
198,
... | 1.958626 | 3,698 |
from django.db import migrations
from django.db.models import Case, Value, When
| [
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
8913,
11,
11052,
11,
1649,
628,
198
] | 3.416667 | 24 |
#from mpc import mpc
#from mpc.mpc import QuadCost, LinDx, GradMethods
import torch
import numpy as np
import torch.nn as nn
import pdb
from ..scene_funcs.cnn import CNN
from ..scene_funcs.scene_funcs import scene_funcs
from .. import augmentation
import time
from .utils import *
import cv2
import trajnetbaselines
import warnings
warnings.filterwarnings("ignore")
| [
2,
6738,
285,
14751,
1330,
285,
14751,
201,
198,
2,
6738,
285,
14751,
13,
3149,
66,
1330,
20648,
13729,
11,
5164,
35,
87,
11,
17701,
46202,
201,
198,
11748,
28034,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
13,
... | 2.879699 | 133 |
# -*- coding:utf-8 -*-
import threading
import time
"""
多个线程方法中可以共用全局变量.
查看work1线程对全局变量的修改,
在work2中能否查看修改后的结果.
"""
"""
# 定义全局变量
num = 0
# work1
def work1():
# 声明num是一个全局变量
global num
for i in range(10):
num += 1
print("work1--------",num)
# work2
def work2():
# num可以在多个线程中共享.
print("work2=======",num)
if __name__=="__main__":
# 创建2个子线程
t1 = threading.Thread(target=work1)
t2 = threading.Thread(target=work2)
# 启动线程
t1.start()
t2.start()
# 判断线程数量不等于1,一直循环睡眠,保证print时,在t1和t2执行结束后,在print主线程.
while len(threading.enumerate()) != 1:
time.sleep(1)
# 在t1和t2,线程执行完毕后再打印num
print("main-------------",num)
"""
"""
多线程--共享全局变量问题
1.问题:
假设两个线程t1和t2都要对全局变量num(默认是0)进行加1运算,t1和t2都各对num加10次,num的最终结果为20.
但是由于是多线程同时操作,有可能出现下列情况:
1) 在num=0时,t1取得num=0,此时系统把t1调度为"sleeping"状态,把t2转换为"running"状态,t2也获得num=0
2) 然后t2对得到的值进行加1并赋给num,获得num=1.
3) 然后系统又把t2调度为"sleeping",把t2转为"running",线程t1又把它之前得到的0加1后赋值给num.
4) 这样导致虽然t1和t2都对num加1,但结果仍然是num=1
"""
# 定义全局变量
num = 0
# work1
# work2
if __name__=="__main__":
# 创建2个子线程
t1 = threading.Thread(target=work1)
t2 = threading.Thread(target=work2)
# 启动线程
t1.start()
# 优先让t1线程优先执行,t1执行完毕后,t2才能执行.
t1.join()
t2.start()
# 判断线程数量不等于1,一直循环睡眠,保证print时,在t1和t2执行结束后,在print主线程.
while len(threading.enumerate()) != 1:
time.sleep(1)
# 在t1和t2,线程执行完毕后再打印num
print("main-------------",num)
# 结论:当多个线程修改同一个资源时,会出现资源竞争,导致计算结果有误. | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
4704,
278,
198,
11748,
640,
220,
198,
198,
37811,
198,
13783,
248,
10310,
103,
163,
118,
123,
163,
101,
233,
43095,
37345,
243,
40792,
20998,
107,
20015,
98,
17... | 1.036262 | 1,434 |
from django.urls import path
from . import views
app_name = 'contact'
urlpatterns = [
path('',views.send_email , name='send_email' ),
path('success/' , views.send_success , name='send_success'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
628,
198,
198,
1324,
62,
3672,
796,
705,
32057,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
33571,
13,
21280,
62,
12888,
83... | 2.786667 | 75 |
import tempfile
import os
from django.conf import settings
from twiggy_goodies.threading import log
from allmychanges.downloaders.vcs.git import (
do,
_download,
_guess)
from allmychanges.vcs_extractor import (
get_versions_from_vcs,
choose_version_extractor)
from allmychanges.crawler import _extract_version
from allmychanges.env import Environment, serialize_envs
from allmychanges.utils import cd
def guess(*args, **kwargs):
"""We build changelog from commit messages only if there are
tags like version numbers or a special version extractor is
available for this repository.
"""
with log.name_and_fields('vcs.git_commits'):
return _guess(callback=callback, *args, **kwargs)
| [
11748,
20218,
7753,
198,
11748,
28686,
628,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
665,
328,
1360,
62,
11274,
444,
13,
16663,
278,
1330,
2604,
198,
6738,
477,
1820,
36653,
13,
15002,
364,
13,
85,
6359,
13,
18300,
1... | 3.020576 | 243 |
#!/usr/bin/env python
"""
Compute the convex hull of a given mesh.
"""
import argparse
import pymesh
import numpy as np
if __name__ == "__main__":
main();
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
37811,
201,
198,
7293,
1133,
262,
24748,
87,
23644,
286,
257,
1813,
19609,
13,
201,
198,
37811,
201,
198,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
279,
4948,
5069,... | 2.416667 | 72 |
# https://stackoverflow.com/questions/65528568/how-do-i-load-the-celeba-dataset-on-google-colab-using-torch-vision-without-ru
import os
import zipfile
import gdown
import torch
from natsort import natsorted
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
## Setup
# Number of gpus available
ngpu = 1
device = torch.device('cuda:0' if (
torch.cuda.is_available() and ngpu > 0) else 'cpu')
## Fetch data from Google Drive
# Root directory for the dataset
data_root = 'dat/celeba'
# Path to folder with the dataset
dataset_folder = f'{data_root}/img_align_celeba'
# URL for the CelebA dataset
url = 'https://drive.google.com/uc?id=1cNIac61PSA_LqDFYFUeyaQYekYPc75NH'
# Path to download the dataset to
download_path = f'{data_root}/img_align_celeba.zip'
# Create required directories
if not os.path.exists(data_root):
os.makedirs(data_root)
os.makedirs(dataset_folder)
# Download the dataset from google drive
gdown.download(url, download_path, quiet=False)
# Unzip the downloaded file
with zipfile.ZipFile(download_path, 'r') as ziphandler:
ziphandler.extractall(dataset_folder)
## Create a custom Dataset class
## Load the dataset
# Path to directory with all the images
img_folder = f'{dataset_folder}/img_align_celeba'
# Spatial size of training images, images are resized to this size.
image_size = 64
# Transformations to be applied to each individual image sample
transform=transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
# Load the dataset from file and apply transformations
celeba_dataset = CelebADataset(img_folder, transform)
## Create a dataloader
# Batch size during training
batch_size = 128
# Number of workers for the dataloader
num_workers = 0 if device.type == 'cuda' else 2
# Whether to put fetched data tensors to pinned memory
pin_memory = True if device.type == 'cuda' else False
celeba_dataloader = torch.utils.data.DataLoader(celeba_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
shuffle=True) | [
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
35916,
26279,
3104,
14,
4919,
12,
4598,
12,
72,
12,
2220,
12,
1169,
12,
49840,
7012,
12,
19608,
292,
316,
12,
261,
12,
13297,
12,
4033,
397,
12,
3500,
12,
13165,
354,
... | 2.50105 | 952 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from __future__ import annotations
from collections import Counter
from collections import defaultdict
from collections.abc import Hashable
from collections.abc import Iterable
from collections.abc import Sequence
from dataclasses import dataclass
from dataclasses import field
from re import compile
from typing import DefaultDict
import numpy as np
from lmfit import Parameters as ParametersLF
from chemex.configuration.methods import Method
from chemex.configuration.parameters import DefaultListType
from chemex.messages import print_status_changes
from chemex.model import model
from chemex.nmr.rates import rate_functions
from chemex.parameters.name import ParamName
from chemex.parameters.setting import Parameters
from chemex.parameters.setting import ParamSetting
_FLOAT = r"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
_RE_PARAM_NAME = compile(r"\[(.+?)\]")
_RE_GRID_DEFINITION = compile(
rf"(lin[(]{_FLOAT},{_FLOAT},\d+[)]$)|"
rf"(log[(]{_FLOAT},{_FLOAT},\d+[)]$)|"
rf"([(](({_FLOAT})(,|[)]$))+)"
)
@dataclass
@dataclass
_parameter_catalog = ParameterCatalog()
_parameter_catalog_mf = ParameterCatalog()
_manager = ParamManager(_parameter_catalog, _parameter_catalog_mf)
set_param_vary = _manager.set_vary
set_param_expressions = _manager.set_expressions
add_parameters = _manager.add_multiple
add_parameters_mf = _manager.add_multiple_mf
get_parameters = _manager.get_parameters
build_lmfit_params = _manager.build_lmfit_params
update_from_parameters = _manager.update_from_parameters
parse_grid = _manager.parse_grid
set_param_values = _manager.set_values
set_param_defaults = _manager.set_defaults
sort_parameters = _manager.sort
fix_all_parameters = _manager.fix_all
def set_parameter_status(method: Method):
"""Set whether or not to vary a fitting parameter or to use a mathemetical
expression."""
matches_con = set_param_expressions(method.constraints)
matches_fix = set_param_vary(method.fix, vary=False)
matches_fit = set_param_vary(method.fit, vary=True)
print_status_changes(matches_fit, matches_fix, matches_con)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
17268,
1330,
15034,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
17268,
13,
39305,
1330,
21059,
540,
198,
6738,
17268,
13,
39305,
1330,
40806,
540,
198,
6738,
17268,
13,
39305,
1330... | 2.910615 | 716 |
import socketserver
from serve.message import Message
from serve.response import Response
from util.debug import debug
| [
11748,
37037,
18497,
198,
6738,
4691,
13,
20500,
1330,
16000,
198,
6738,
4691,
13,
26209,
1330,
18261,
198,
6738,
7736,
13,
24442,
1330,
14257,
628,
628,
628
] | 4.592593 | 27 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA basic Classifiers"""
import numpy as np
from mvpa2.testing import *
from mvpa2.testing import _ENFORCE_CA_ENABLED
from mvpa2.testing.datasets import *
from mvpa2.testing.clfs import *
from mvpa2.support.copy import deepcopy
from mvpa2.base.node import ChainNode
from mvpa2.base import externals
from mvpa2.datasets.base import dataset_wizard
from mvpa2.generators.partition import NFoldPartitioner, OddEvenPartitioner
from mvpa2.generators.permutation import AttributePermutator
from mvpa2.generators.resampling import Balancer
from mvpa2.generators.splitters import Splitter
from mvpa2.misc.exceptions import UnknownStateError
from mvpa2.misc.errorfx import mean_mismatch_error
from mvpa2.base.learner import DegenerateInputError, FailedToTrainError, \
FailedToPredictError
from mvpa2.clfs.meta import CombinedClassifier, \
BinaryClassifier, MulticlassClassifier, \
SplitClassifier, MappedClassifier, FeatureSelectionClassifier, \
TreeClassifier, RegressionAsClassifier, MaximalVote
from mvpa2.measures.base import TransferMeasure, ProxyMeasure, CrossValidation
from mvpa2.mappers.flatten import mask_mapper
from mvpa2.misc.attrmap import AttributeMap
from mvpa2.mappers.fx import mean_sample, BinaryFxNode
# What exceptions to allow while testing degenerate cases.
# If it pukes -- it is ok -- user will notice that something
# is wrong
_degenerate_allowed_exceptions = [
DegenerateInputError, FailedToTrainError, FailedToPredictError]
if __name__ == '__main__': # pragma: no cover
import runner
runner.run()
| [
2,
795,
16436,
25,
532,
9,
12,
4235,
25,
21015,
26,
12972,
12,
521,
298,
12,
28968,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
532,
9,
12,
198,
2,
25357,
25,
900,
10117,
28,
29412,
39747,
28,
19,
40379,
28,
19,
15... | 3.023077 | 650 |
from __future__ import annotations
from labster.lib.workflow import Workflow
from .states import ALL_STATES, EN_EDITION
from .transitions import ABANDONNER, ACCUSER_RECEPTION, COMMENTER, \
CONFIRMER_FINALISATION_DGRTT, CONFIRMER_RECEVABILITE_DGRTT, DESARCHIVER, \
PRENDRE_LA_MAIN_DGRTT, PRENDRE_LA_MAIN_GESTIONNAIRE, REJETER_DGRTT, \
REQUERIR_MODIFICATION_DGRTT, REQUERIR_MODIFICATION_DIR, SOUMETTRE, \
VALIDER_DIR
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
2248,
1706,
13,
8019,
13,
1818,
11125,
1330,
5521,
11125,
198,
198,
6738,
764,
27219,
1330,
11096,
62,
2257,
29462,
11,
12964,
62,
1961,
17941,
198,
6738,
764,
7645,
1756,
1330,
9564... | 2.438202 | 178 |
import datetime
import toga
import toga_dummy
from toga_dummy.utils import TestCase
| [
11748,
4818,
8079,
198,
198,
11748,
284,
4908,
198,
11748,
284,
4908,
62,
67,
13513,
198,
6738,
284,
4908,
62,
67,
13513,
13,
26791,
1330,
6208,
20448,
628
] | 3.071429 | 28 |
from django.apps import AppConfig | [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934
] | 4.125 | 8 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 28 13:36:21 2019
@author: Gunardi Saputra
"""
#! python3
# bulletPointAdder.py = Adds Wikipedia bullet points to the start
# of each line of text on the clipboard
import pyperclip
text = pyperclip.paste()
pyperclip.copy(text)
# Separate lines and add stars.
lines = text.split("\n")
for i in range(len(lines)): # loop throug all indexes in the "lines" list
lines[i] = "* " + lines[i] # add star to each string in "lines" list
text = "\n".join(lines)
pyperclip.copy(text)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2556,
2579,
1511,
25,
2623,
25,
2481,
13130,
198,
198,
31,
9800,
25,
6748,
22490,
35980,
35076,
198,
37811,
198,
198,
2,
0,
21015,
18,
19... | 2.833333 | 186 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import call
from oslo_concurrency import processutils
from designate.backend.agent_backend import impl_knot2
from designate import exceptions
import designate.tests
from designate.tests.unit.agent import backends
| [
2,
15069,
1584,
30446,
15503,
6400,
446,
14973,
7712,
5834,
18470,
198,
2,
198,
2,
6434,
25,
35089,
3713,
17419,
45807,
1279,
69,
5702,
3713,
13,
2189,
45807,
31,
71,
431,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
... | 3.773279 | 247 |
import numpy as np
import sncosmo
import glob
from astropy import units as u
import os
import gzip
import cPickle
def save(object, filename, protocol=-1):
"""Saves a compressed object to disk
"""
file = gzip.GzipFile(filename, 'wb')
cPickle.dump(object, file, protocol)
file.close()
def load( filename ):
"""Loads a compressed object from disk
"""
file = gzip.GzipFile(filename, 'rb')
object = cPickle.load( file )
file.close()
return object
if __name__ == '__main__':
get_JLA_bandpasses()
register_JLA_magsys()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
3013,
6966,
5908,
198,
11748,
15095,
198,
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
11748,
28686,
198,
11748,
308,
13344,
198,
11748,
269,
31686,
293,
198,
220,
220,
220,
220,
198,
4299,
36... | 2.623853 | 218 |
'''
In social network like Facebook or Twitter, people send friend requests and accept others’ requests as well. Now given two tables as below:
Table: friend_request
| sender_id | send_to_id |request_date|
|-----------|------------|------------|
| 1 | 2 | 2016_06-01 |
| 1 | 3 | 2016_06-01 |
| 1 | 4 | 2016_06-01 |
| 2 | 3 | 2016_06-02 |
| 3 | 4 | 2016-06-09 |
Table: request_accepted
| requester_id | accepter_id |accept_date |
|--------------|-------------|------------|
| 1 | 2 | 2016_06-03 |
| 1 | 3 | 2016-06-08 |
| 2 | 3 | 2016-06-08 |
| 3 | 4 | 2016-06-09 |
| 3 | 4 | 2016-06-10 |
Write a query to find the overall acceptance rate of requests rounded to 2 decimals, which is the number of acceptance divide the number of requests.
For the sample data above, your query should return the following result.
|accept_rate|
|-----------|
| 0.80|
Note:
The accepted requests are not necessarily from the table friend_request. In this case, you just need to simply count the total accepted requests (no matter whether they are in the original requests), and divide it by the number of requests to get the acceptance rate.
It is possible that a sender sends multiple requests to the same receiver, and a request could be accepted more than once. In this case, the ‘duplicated’ requests or acceptances are only counted once.
If there is no requests at all, you should return 0.00 as the accept_rate.
Explanation: There are 4 unique accepted requests, and there are 5 requests in total. So the rate is 0.80.
Follow-up:
Can you write a query to return the accept rate but for every month?
How about the cumulative accept rate for every day?
'''
# Write your MySQL query statement below
select if (f.ct = 0, 0.00, cast(r.ct / f.ct as decimal(4, 2))) as accept_rate
from (select count(distinct sender_id, send_to_id) as ct from friend_request) as f
join
(select count(distinct requester_id, accepter_id) as ct from request_accepted) as r
| [
7061,
6,
198,
818,
1919,
3127,
588,
3203,
393,
3009,
11,
661,
3758,
1545,
7007,
290,
2453,
1854,
447,
247,
7007,
355,
880,
13,
2735,
1813,
734,
8893,
355,
2174,
25,
198,
198,
10962,
25,
1545,
62,
25927,
198,
198,
91,
29788,
62,
31... | 2.777922 | 770 |
from flask_unchained.cli import cli, click
from ..vendor_bundle.commands import foo_group
@foo_group.command()
def baz():
"""myapp docstring"""
click.echo('myapp')
@click.group()
def goo_group():
"""myapp docstring"""
@goo_group.command()
@cli.command()
| [
6738,
42903,
62,
3316,
1328,
13,
44506,
1330,
537,
72,
11,
3904,
198,
198,
6738,
11485,
85,
18738,
62,
65,
31249,
13,
9503,
1746,
1330,
22944,
62,
8094,
628,
198,
31,
21943,
62,
8094,
13,
21812,
3419,
198,
4299,
275,
1031,
33529,
19... | 2.644231 | 104 |
#!/usr/bin/env python
#
# Copyright 2009-2021 NTESS. Under the terms
# of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Copyright (c) 2009-2021, NTESS
# All rights reserved.
#
# This file is part of the SST software package. For license
# information, see the LICENSE file in the top level directory of the
# distribution.
import sst
from sst.merlin.base import *
from sst.merlin.endpoint import *
from sst.merlin.interface import *
from sst.merlin.topology import *
if __name__ == "__main__":
### Setup the topology
topo = topoDragonFly()
topo.hosts_per_router = 4
topo.routers_per_group = 8
topo.intergroup_links = 4
topo.num_groups = 4
topo.algorithm = ["minimal","ugal"]
# Set up the routers
router = hr_router()
router.link_bw = "4GB/s"
router.flit_size = "8B"
router.xbar_bw = "6GB/s"
router.input_latency = "20ns"
router.output_latency = "20ns"
router.input_buf_size = "4kB"
router.output_buf_size = "4kB"
router.num_vns = 2
router.xbar_arb = "merlin.xbar_arb_lru"
topo.router = router
topo.link_latency = "20ns"
### set up the endpoint
networkif = LinkControl()
networkif.link_bw = "4GB/s"
networkif.input_buf_size = "1kB"
networkif.output_buf_size = "1kB"
networkif2 = LinkControl()
networkif2.link_bw = "4GB/s"
networkif2.input_buf_size = "1kB"
networkif2.output_buf_size = "1kB"
# Set up VN remapping
networkif.vn_remap = [0]
networkif2.vn_remap = [1]
ep = TestJob(0,topo.getNumNodes() // 2)
ep.network_interface = networkif
#ep.num_messages = 10
#ep.message_size = "8B"
#ep.send_untimed_bcast = False
ep2 = TestJob(1,topo.getNumNodes() // 2)
ep2.network_interface = networkif2
#ep.num_messages = 10
#ep.message_size = "8B"
#ep.send_untimed_bcast = False
system = System()
system.setTopology(topo)
system.allocateNodes(ep,"linear")
system.allocateNodes(ep2,"linear")
system.build()
# sst.setStatisticLoadLevel(9)
# sst.setStatisticOutput("sst.statOutputCSV");
# sst.setStatisticOutputOptions({
# "filepath" : "stats.csv",
# "separator" : ", "
# })
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
3717,
12,
1238,
2481,
24563,
7597,
13,
4698,
262,
2846,
198,
2,
286,
17453,
5550,
12,
4535,
830,
2327,
1495,
351,
24563,
7597,
11,
262,
471,
13,
50,
13,
198,
2,
... | 2.296781 | 994 |
import os
import glob
import Go
import time
timestr = time.strftime("%Y%-m%/d--%H-%M-%S")
print("Stating at "+timestr)
path = 'Data/20181218natsukaze_self/01'
output = open("9x9binary.txt", 'w+')
board_size = 9
total_pos = 19
for infile in glob.glob(os.path.join(path, '*.sgf')):
# print("current file is: " + infile)
file = open(infile, 'r')
lines = file.readlines()
result, nTab = board(lines)
game = Go.Binput(board_size, nTab)
# print(game)
wb_bit = convert(game)
wb_bit = wb_bit + str(result)
output.write(wb_bit + "\n")
output.close()
timestr = time.strftime("%Y%m%d-%H%M%S")
print("Stopping at " + timestr)
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
1514,
198,
11748,
640,
628,
198,
16514,
395,
81,
796,
640,
13,
2536,
31387,
7203,
4,
56,
33963,
76,
4,
14,
67,
438,
4,
39,
12,
4,
44,
12,
4,
50,
4943,
198,
4798,
7203,
1273,
803,
379,... | 2.245734 | 293 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.tpu import node_pb2
from google3.cloud.graphite.mmv2.services.google.tpu import node_pb2_grpc
from typing import List
| [
2,
15069,
33448,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.577093 | 227 |
import os
from pathlib import Path
from typing import Sequence
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from whitenoise import WhiteNoise
frontend_dist_directory: str = get_frontend_assets_path()
app: Flask = Flask(
__name__,
template_folder=frontend_dist_directory,
static_folder=frontend_dist_directory,
)
csrf = CSRFProtect(app)
# pyre-ignore[8]: incompatible attribute type
app.wsgi_app = WhiteNoise(app.wsgi_app)
# pyre-ignore[16]: undefined attribute
app.wsgi_app.add_files(frontend_dist_directory)
app.config.from_mapping(
{
"DEBUG": True,
"CACHE_TYPE": "filesystem",
"CACHE_DIR": "/tmp/mariner/",
"CACHE_DEFAULT_TIMEOUT": 300,
"SECRET_KEY": os.urandom(16),
}
)
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
45835,
198,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
86,
27110,
13,
6359,
41871,
1330,
9429,
32754,
41426,
198,
6738,
20542,
23397,
786,
1330,
2635,
294... | 2.52 | 300 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import boto3
from botocore.exceptions import ClientError
import pytest
from moto import mock_sagemaker
import sure # noqa
from moto.sagemaker.models import VpcConfig
@mock_sagemaker
@mock_sagemaker
@mock_sagemaker
@mock_sagemaker
@mock_sagemaker
@mock_sagemaker
@mock_sagemaker
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
275,
2069,
18,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
11748,
... | 2.524476 | 143 |
"""
Repeatedly run single integration steps for some initial conditions until some stopping
conditions.
"""
import logging
import time
from decimal import Decimal
import numpy as np
from orbsim.r4b_3d import UNIT_TIME
from new_ephemerides import (
get_coordinates_on_day_rad,
get_ephemerides,
get_ephemerides_on_day,
)
# from ctypes import cdll
from ctypes import *
cudasim = cdll.LoadLibrary("./libcudasim.so")
from math import pi
def simulate(
psi,
max_year="2039",
h=1 / UNIT_TIME,
max_duration=1 * 3600 * 24 / UNIT_TIME,
max_iter=int(1e6),
):
"""Simple simulator that will run a LEO until duration or max_iter is reached.
Keyword Arguments:
psi {tuple} -- Initial conditions: (day, Q0, B0, burn)
max_year {string} -- Max year for ephemerides table (default: "2020")
h {float} -- Initial time step size (default: 1/UNIT_LENGTH = 1 second in years)
max_duration {int} -- Max duration of simulation (in years) (default: {1 day})
max_iter {int} -- Max number of iterations of simulation (default: {1e6})
(1e6 iterations corresponds to ~11 days with h = 1 s)
Returns:
[type] -- [description]
"""
logging.info("STARTING: Simple simulation.")
t0 = time.time()
max_iter = int(max_iter)
# Unpack psi
days = np.array(psi[0])
ts = days * (3600 * 24) / UNIT_TIME
Qs = np.array(psi[1])
Bs = np.array(psi[2])
nPaths = Qs.shape[0]
# Read ephemerides
logging.debug("Getting ephemerides tables")
ephemerides = get_ephemerides(max_year=max_year)
earth = np.array(ephemerides['earth'])
mars = np.array(ephemerides['mars'])
"""
make list of all paths to integrate
"""
ts = np.asarray(ts)
Rs = np.array(Qs[:,0])
thetas = np.array(Qs[:,1])
phis = np.array(Qs[:,2])
B_Rs = np.array(Bs[:,0])
B_thetas = np.array(Bs[:,1])
B_phis = np.array(Bs[:,2])
arives = np.zeros(nPaths)
scores = np.zeros(nPaths)
cudasim.simulate.restype = None
cudasim.simulate.argtypes = [
c_int,
c_double,
c_double,
c_int,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
c_int,
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
POINTER(c_double),
]
earth_R = earth[:,3].astype(np.float64)
earth_theta = earth[:,4].astype(np.float64) * pi / 180
earth_phi = earth[:,5].astype(np.float64) * pi / 180
mars_R = mars[:,3].astype(np.float64)
mars_theta = mars[:,4].astype(np.float64) * pi / 180
mars_phi = mars[:,5].astype(np.float64) * pi / 180
ts_ctype = ts.ctypes.data_as(POINTER(c_double))
Rs_ctype = Rs.ctypes.data_as(POINTER(c_double))
thetas_ctype = thetas.ctypes.data_as(POINTER(c_double))
phis_ctype = phis.ctypes.data_as(POINTER(c_double))
B_Rs_ctype = B_Rs.ctypes.data_as(POINTER(c_double))
B_thetas_ctype = B_thetas.ctypes.data_as(POINTER(c_double))
B_phis_ctype = B_phis.ctypes.data_as(POINTER(c_double))
earth_R_ctype = earth_R.ctypes.data_as(POINTER(c_double))
earth_theta_ctype = earth_theta.ctypes.data_as(POINTER(c_double))
earth_phi_ctype = earth_phi.ctypes.data_as(POINTER(c_double))
mars_R_ctype = mars_R.ctypes.data_as(POINTER(c_double))
mars_theta_ctype = mars_theta.ctypes.data_as(POINTER(c_double))
mars_phi_ctype = mars_phi.ctypes.data_as(POINTER(c_double))
arive_ctype = arives.ctypes.data_as(POINTER(c_double))
score_ctype = scores.ctypes.data_as(POINTER(c_double))
cudasim.simulate(
nPaths,
h,
max_duration,
int(max_iter),
ts_ctype,
Rs_ctype,
thetas_ctype,
phis_ctype,
B_Rs_ctype,
B_thetas_ctype,
B_phis_ctype,
int(earth_R.size),
earth_R_ctype,
earth_theta_ctype,
earth_phi_ctype,
mars_R_ctype,
mars_theta_ctype,
mars_phi_ctype,
arive_ctype,
score_ctype,
)
return arives, scores
def format_time(time_value, time_unit="seconds"):
"""Format time from a single unit (by default seconds) to a DDD:HH:MM:SS string
Arguments:
time {[float]} -- [Time value in some unit]
Keyword Arguments:
time_unit {str} -- [Time unit] (default: {"seconds"})
Raises:
ValueError -- [Unsupported input time unit]
Returns:
[str] -- [String of time formatted as DDD:HH:MM:SS]
"""
if time_unit == "years":
time_value = time_value * UNIT_TIME
elif time_unit == "seconds":
pass
else:
raise ValueError("Input time must be either 'years' or 'seconds' (default)")
days = int(time_value // (3600 * 24))
time_value %= 3600 * 24
hours = int(time_value // 3600)
time_value %= 3600
minutes = int(time_value // 60)
time_value %= 60
seconds = time_value
text = f"{days:0>3d}:{hours:0>2d}:{minutes:0>2d}:{seconds:0>5.2f}"
return text
# if __name__ == "__main__":
# simulate()
| [
37811,
198,
47541,
515,
306,
1057,
2060,
11812,
4831,
329,
617,
4238,
3403,
1566,
617,
12225,
198,
17561,
1756,
13,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
640,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
299,
32152,
355,
45... | 2.137778 | 2,475 |
# __all__ = ['ScheduleEditor', 'PulseDesigner', '...']
from .ScheduleDesigner import *
from .PulseDesigner import * | [
2,
11593,
439,
834,
796,
37250,
27054,
5950,
17171,
3256,
705,
47,
9615,
23067,
263,
3256,
705,
986,
20520,
198,
6738,
764,
27054,
5950,
23067,
263,
1330,
1635,
198,
6738,
764,
47,
9615,
23067,
263,
1330,
1635
] | 3.108108 | 37 |
import os
import plenum.config as plenum_config
| [
11748,
28686,
198,
198,
11748,
458,
44709,
13,
11250,
355,
458,
44709,
62,
11250,
628,
628
] | 3.25 | 16 |
#Calculator Description This mini project Calculator is the implementation of Calculator which can perform different arithematic operations. Few of the most required arithematic operations that must be present in this mini-projects are
#Result after each operation should be stored for future operations Addition of 2 numbers Subtraction of 2 numbers Multiplication of 2 numbers Division of 2 numbers, handle DivisionByZero error Calculating Values of Trignometric Ratio, sin, cos, tan, cot, sec, cosine
print("This Is A Calculator")
print("The Following Task Can Be Done:")
print("--------------------------------")
print("Press 1 For Addition")
print("Press 2 For Substraction")
print("Press 3 For Muliplication")
print("Press 4 For Divison")
print("Press 5 For Calculating Trignometric Ratioes")
print("---------------------------------------------")
print("Please Choose Any")
#Here We are Calling Functions
while(True):
press=int(input())
if press==1:
print("You Have Choosed Addition")
print("--------------------------")
a = int(input("Please Enter The First No"))
b = int(input("Please Enter The Secound No"))
sum = a + b
print("The result is",sum)
print("--------------------------------------")
statment()
while(True):
key=input()
if key=='Y':
call()
stroke=int(input())
if stroke==1:
a=int(input("Please Enter The No"))
sum=sum+a
print("The result is",sum)
call()
if stroke==2:
a=int(input("Please Enter The No"))
sum=sum-a
print("The result is",sum)
call()
if stroke==3:
a=int(input("Please Enter The No"))
sum=sum*a
print("The result is",sum)
call()
if stroke==4:
a=int(input("Please Enter The No"))
sum=sum/a
print("The result is",sum)
call()
if key=='N':
print("Quitting Now")
calling()
break
if press==2:
print("You Have Choosed Substraction")
print("--------------------------")
a = int(input("Please Enter The First No"))
b = int(input("Please Enter The Secound No"))
sum = a-b
print("The result is",sum)
print("--------------------------------------")
statment()
while(True):
key=input()
if key=='Y':
call()
stroke=int(input())
if stroke==1:
a=int(input("Please Enter The No"))
sum=sum+a
print("The result is",sum)
call()
if stroke==2:
a=int(input("Please Enter The No"))
sum=sum-a
print("The result is",sum)
call()
if stroke==3:
a=int(input("Please Enter The No"))
sum=sum*a
print("The result is",sum)
call()
if stroke==4:
a=int(input("Please Enter The No"))
sum=sum/a
print("The result is",sum)
call()
if key=='N':
print("Quitting Now")
calling()
break
if press==3:
print("You Have Choosed Multiplication")
print("--------------------------")
a = int(input("Please Enter The First No"))
b = int(input("Please Enter The Secound No"))
sum = a*b
print("The result is",sum)
print("--------------------------------------")
statment()
while(True):
key=input()
if key=='Y':
call()
stroke=int(input())
if stroke==1:
a=int(input("Please Enter The No"))
sum=sum+a
print("The result is",sum)
call()
if stroke==2:
a=int(input("Please Enter The No"))
sum=sum-a
print("The result is",sum)
call()
if stroke==3:
a=int(input("Please Enter The No"))
sum=sum*a
print("The result is",sum)
call()
if stroke==4:
a=int(input("Please Enter The No"))
sum=sum/a
print("The result is",sum)
call()
if key=='N':
print("Quitting Now")
calling()
break
if press==4:
print("You Have Choosed Divison")
print("--------------------------")
a = int(input("Please Enter The First No"))
b = int(input("Please Enter The Secound No"))
sum = a/b
print("The result is",sum)
print("--------------------------------------")
statment()
while(True):
key=input()
if key=='Y':
call()
stroke=int(input())
if stroke==1:
a=int(input("Please Enter The No"))
sum=sum+a
print("The result is",sum)
call()
if stroke==2:
a=int(input("Please Enter The No"))
sum=sum-a
print("The result is",sum)
call()
if stroke==3:
a=int(input("Please Enter The No"))
sum=sum*a
print("The result is",sum)
call()
if stroke==4:
a=int(input("Please Enter The No"))
sum=sum/a
print("The result is",sum)
call()
if key=='N':
print("Quitting Now")
calling()
break
        # Trigonometric branch of the top-level calculator menu: prints the
        # cosine and sine (radians) of a single number, then falls into the
        # same running-result loop as the arithmetic branches.
        # NOTE(review): `press`, `sum`, `call()`, `statment()` and `calling()`
        # are defined earlier in the file (outside this excerpt) -- confirm.
        if press==5:
            print("You Have Choosed Calculation For Trignomertric Finction")
            print("--------------------------")
            a = int(input("Please Enter A Number"))
            import math
            print("The Cos of",a,"is:",math.cos(a))
            print("The Sine of",a,"is:",math.sin(a))
            print("--------------------------------------")
            statment()
            # NOTE(review): this loop operates on `sum` from an earlier
            # branch; this branch never assigns `sum` itself.
            while(True):
                key=input()
                if key=='Y':
                    call()
                    stroke=int(input())
                    # stroke selects the follow-up operation:
                    # 1=add, 2=subtract, 3=multiply, 4=divide.
                    if stroke==1:
                        a=int(input("Please Enter The No"))
                        sum=sum+a
                        print("The result is",sum)
                        call()
                    if stroke==2:
                        a=int(input("Please Enter The No"))
                        sum=sum-a
                        print("The result is",sum)
                        call()
                    if stroke==3:
                        a=int(input("Please Enter The No"))
                        sum=sum*a
                        print("The result is",sum)
                        call()
                    if stroke==4:
                        a=int(input("Please Enter The No"))
                        sum=sum/a
                        print("The result is",sum)
                        call()
                if key=='N':
                    print("Quitting Now")
                    calling()
                    break
| [
2,
9771,
3129,
1352,
12489,
770,
9927,
1628,
43597,
318,
262,
7822,
286,
43597,
543,
460,
1620,
1180,
610,
270,
23380,
4560,
13,
20463,
286,
262,
749,
2672,
610,
270,
23380,
4560,
326,
1276,
307,
1944,
287,
428,
9927,
12,
42068,
389,
... | 1.811307 | 4,245 |
import pyperclip
from orangeshare import Config
from orangeshare.notify import notify
def handle_file(file: str, file_name: str):
    """
    Placeholder handler for copying a received file to the clipboard.

    Copying files (as opposed to plain text) is not implemented yet, so the
    arguments are ignored and an informational message is returned instead.

    :param file: The file content as received from the request.
    :param file_name: The original filename.
    :return: response dict for the request
    """
    # Planned implementation, kept for reference until the feature lands
    # (note: it must use `file_name`, not the undefined `filename`):
    # config = Config.get_config()
    # if config.config.getboolean("CLIPBOARD", "notification", fallback=True):
    #     notify("Copied File to clipboard: \"{}\"".format(file_name))
    # return {"success": True}
    return {"message": "Copying files to clipboard is not yet implemented"}
def handle_text(text: str, *args):
    """
    Place the given text on the system clipboard.

    When enabled in the CLIPBOARD section of the configuration, a desktop
    notification is shown afterwards; the notification includes the copied
    text itself unless ``notification_content`` is disabled.

    :param text: The text to copy.
    :return: response dict for the request
    """
    pyperclip.copy(text)
    cfg = Config.get_config().config
    if cfg.getboolean("CLIPBOARD", "notification", fallback=True):
        with_content = cfg.getboolean("CLIPBOARD", "notification_content", fallback=True)
        if with_content:
            notify("Copied Text to clipboard:\n" + text)
        else:
            notify("Copied Text to clipboard")
    return {'success': True}
| [
11748,
12972,
525,
15036,
198,
198,
6738,
393,
648,
5069,
533,
1330,
17056,
198,
6738,
393,
648,
5069,
533,
13,
1662,
1958,
1330,
19361,
628,
198,
4299,
5412,
62,
7753,
7,
7753,
25,
965,
11,
2393,
62,
3672,
25,
965,
2599,
198,
220,
... | 2.834123 | 422 |
from app import app
from flask import render_template
@app.route('/usuarios') | [
6738,
598,
1330,
598,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
31,
1324,
13,
38629,
10786,
14,
385,
84,
13010,
11537
] | 3.391304 | 23 |
# Generated by Django 3.0.6 on 2020-06-23 17:09
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
3312,
12,
1954,
1596,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from gevent import monkey, sleep, spawn
monkey.patch_all() # NOQA
from sqlalchemy import create_engine
from collections import deque
import logging
import ujson
import errno
import time
import os
from iris.api import load_config
from iris import metrics
# metrics
stats_reset = {
'sql_errors': 0,
'deleted_messages': 0,
'deleted_incidents': 0,
'deleted_comments': 0
}
# logging
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
log_file = os.environ.get('RETENTION_LOG_FILE')
if log_file:
ch = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=10485760, backupCount=10)
else:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(ch)
# pidfile
pidfile = os.environ.get('RETENTION_PIDFILE')
if pidfile:
try:
pid = os.getpid()
with open(pidfile, 'w') as h:
h.write('%s\n' % pid)
logger.info('Wrote pid %s to %s', pid, pidfile)
except IOError:
logger.exception('Failed writing pid to %s', pidfile)
# Avoid using DictCursor; manually handle columns/offsets here, and only create dict
# when time to archive and dump json. XXX: make sure ID is first
incident_fields = (
('`incident`.`id`', 'incident_id'),
('`incident`.`created`', 'created'),
('`incident`.`context`', 'context'),
('`incident`.`plan_id`', 'plan_id'),
('`plan`.`name`', 'plan_name'),
('`application`.`name`', 'application_name'),
('`target`.`name`', 'owner'),
)
message_fields = (
('`message`.`id`', 'message_id'),
('`message`.`incident_id`', 'incident_id'),
('`mode`.`name`', 'mode'),
('`priority`.`name`', 'priority'),
('`target`.`name`', 'target'),
('`template`.`name`', 'template'),
('`message`.`subject`', 'subject'),
('`message`.`template_id`', 'template_id'),
('`message`.`body`', 'body'),
('`message`.`created`', 'created'),
)
comment_fields = (
('`comment`.`id`', 'comment_id'),
('`comment`.`incident_id`', 'incident_id'),
('`target`.`name`', 'author'),
('`comment`.`content`', 'content'),
('`comment`.`created`', 'created'),
)
| [
2,
15069,
357,
66,
8,
27133,
10501,
13,
1439,
2489,
10395,
13,
49962,
739,
262,
347,
10305,
12,
17,
28081,
5964,
13,
198,
2,
4091,
38559,
24290,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
198,
6738,
4903,
1151,
1330,
21657,
11,... | 2.59436 | 922 |
import random
import time
import string
# Fixed seed so every run emits an identical, reproducible SQL script.
random.seed(a=5)
# Generate a bulk INSERT for 200 accounts; each account gets two
# REPLENISHMENT rows (late May / mid June 2019) followed by two WITHDRAWAL
# rows (mid / late June 2019) capped at half the matching replenishment.
# NOTE(review): `random_string()` and `randomDate()` are assumed to be
# defined elsewhere in this file -- confirm.
with open('transfer_operation.sql', 'w') as output:
    output.write('SET SEARCH_PATH = crypto_exchange;\n\nINSERT INTO transfer_operation (account_id, operation_code, operation_amt, external_wallet_no, operation_dttm)\nVALUES\n')
    for i in range(1, 201):
        x = round(random.random() * 3, 8)
        y = round(random.random() * 3, 8)
        query1 = ''' ({}, '{}', {}, '{}', '{}'),\n'''.format(i, 'REPLENISHMENT', x, random_string(), randomDate('31-5-2019 0:0:0', '15-6-2019 0:0:0', random.random()))
        query2 = ''' ({}, '{}', {}, '{}', '{}'),\n'''.format(i, 'REPLENISHMENT', y, random_string(), randomDate('31-5-2019 0:0:0', '15-6-2019 0:0:0', random.random()))
        query3 = ''' ({}, '{}', {}, '{}', '{}'),\n'''.format(i, 'WITHDRAWAL', min(round(random.random() / 4, 8), x / 2), random_string(), randomDate('15-6-2019 0:0:0', '20-6-2019 0:0:0', random.random()))
        query4 = ''' ({}, '{}', {}, '{}', '{}'),\n'''.format(i, 'WITHDRAWAL', min(round(random.random() / 4, 8), y / 2), random_string(), randomDate('15-6-2019 0:0:0', '20-6-2019 0:0:0', random.random()))
        output.write(query1)
        output.write(query2)
        output.write(query3)
        output.write(query4)
| [
11748,
4738,
201,
198,
11748,
640,
201,
198,
11748,
4731,
201,
198,
201,
198,
25120,
13,
28826,
7,
64,
28,
20,
8,
201,
198,
201,
198,
4480,
1280,
10786,
39437,
62,
27184,
13,
25410,
3256,
705,
86,
11537,
355,
5072,
25,
201,
198,
2... | 2.185059 | 589 |
#! /usr/bin/env python
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import array
import matplotlib.cm as cm
from mpl_toolkits.basemap import Basemap
#import cmocean as cm
import glob
import struct
from importlib import import_module
import datetime
import time
import sys
import os
import re
# Pick the per-machine python_utils path and scratch directory from the
# HOST environment variable: NASA Pleiades front-ends match "pfe",
# NCCS Discover matches "discover"; anything else uses the default layout.
host=os.environ['HOST']
if re.match(r"^pfe", host):
    sys.path.append('/home6/bzhao/python_utils')
    NOBACKUP='/nobackup/bzhao'
elif re.match(r"^discover", host):
    sys.path.append('/home/bzhao/python_utils')
    NOBACKUP='/discover/nobackup/bzhao'
else:
    sys.path.append('/home/bzhao/python_utils')
    NOBACKUP='/nobackup/bzhao'
import read_utils
import data_utils
import plot_utils
import math_utils
#import get_info
#from pylab import *
# Figure and contour-level configuration. POLE selects the hemisphere;
# the Northern-hemisphere thickness levels extend to 5 m.
POLE='N'
fig_index=1
#cmp = cm.cm.ice
#fig = plt.figure(num=fig_index, figsize=(8,5), facecolor='w')
fig = plt.figure(num=fig_index, figsize=(14,14), facecolor='w')
if POLE=='N':
    fbot_levels = np.array([0.1, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0])
    #fbot_levels = np.arange(0, 3.75, 0.25)
else:
    fbot_levels = np.array([0.1, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0])
# Colormaps rescaled to the uneven level spacing (from local plot_utils).
cmap2,norm=plot_utils.rescaled_cmap(fbot_levels)
# Ice-concentration levels (fraction); defined but only the thickness
# levels are used by the plots below.
aice_levels = np.array([0.12, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,
                        0.8, 0.9, 0.95, 0.99])
cmap1,norm1=plot_utils.rescaled_cmap(aice_levels)
#cmap1.set_under('w')
#cmap2.set_under('w')
# Level used to draw the ice-edge contour (15% concentration).
line_levs = np.array([0.15])
# ICESat campaign years: Feb-Mar (fm) 2004-2008 and Oct-Nov (on) 2003-2007.
is_fm_yrs=['04', '05', '06', '07', '08']
is_on_yrs=['03','04', '05', '06', '07']
# Each campaign file holds is_N records (a 140x140 grid) with 5 columns;
# column 0/1 are lat/lon and the last column is ice thickness in cm.
is_N = 19600
is_x = 140
is_y = 140
isfm=np.zeros((5,is_N,5))
ison=np.zeros((5,is_N,5))
is_dir=NOBACKUP+'/ObservationData/ICESat/'
for n in range(1,len(is_fm_yrs)+1,1):
    is_name=is_dir+'icesat_icethk_fm'+is_fm_yrs[n-1]+'_filled.dat'
    isfm[n-1]=np.loadtxt(is_name)
for n in range(1,len(is_on_yrs)+1,1):
    is_name=is_dir+'icesat_icethk_on'+is_on_yrs[n-1]+'_filled.dat'
    ison[n-1]=np.loadtxt(is_name)
#print isfm.shape, ison.shape
#print isfm[:,0,:]
# Mask fill values (9999.0) and flagged cells (-1.0) before averaging.
isfmo = ma.masked_where(isfm==9999.0,isfm)
isono = ma.masked_where(ison==9999.0,ison)
isfmo = ma.masked_where(isfmo==-1.0,isfmo)
isono = ma.masked_where(isono==-1.0,isono)
#isfmo[isfmo==-1.0]=0.0
#isono[isono==-1.0]=0.0
#print isfmo.shape
#print isfmo[:,0,:]
# Multi-year campaign means (average over the 5 years).
isfm_m=np.mean(isfmo,axis=0)
ison_m=np.mean(isono,axis=0)
#print isfm_m.shape, ison_m.shape
#print isfm_m[0,:]
#SEASON='M08'
#YEAR='1973'
#YEAR='2010'
# Reshape the flat records onto the 140x140 grid and convert cm -> m.
isfmg = np.reshape(isfm_m[:,-1], (is_x, is_y))
isfmlon = np.reshape(isfm_m[:,1], (is_x, is_y))
isfmlat = np.reshape(isfm_m[:,0], (is_x, is_y))
isfmg *= 0.01
isong = np.reshape(ison_m[:,-1], (is_x, is_y))
isonlon = np.reshape(ison_m[:,1], (is_x, is_y))
isonlat = np.reshape(ison_m[:,0], (is_x, is_y))
isong *= 0.01
# Experiment configuration. First try to import sys.argv[1] as a python
# config module exposing data_path/expid/plot_path; if that fails, treat
# the argument as a plain experiment directory path.
try:
    exp=import_module(sys.argv[1])
    EXPDIR=exp.data_path
    HOMDIR=os.environ['HOMDIR']
    EXPID=exp.expid
    PLOT_PATH=exp.plot_path
    try:
        os.makedirs(PLOT_PATH)
    except OSError:
        # Directory already exists (or is not creatable); proceed anyway.
        pass
    pngname = 'hice_icesat'
except ImportError:
    EXPDIR=sys.argv[1]
    HOMDIR=EXPDIR
    EXPID=EXPDIR.split('/')[-1]
    PLOT_PATH = './'
    pngname = EXPID+'_HICE_ICESAT'
# Model output collection and first season (March climatology).
COLLECTION='geosgcm_seaice'
#EXPDIR=sys.argv[1]
#EXPID=EXPDIR.split('/')[-1]
SEASON='M03'
fname=EXPDIR+'/'+COLLECTION+'/'+EXPID+'.'+COLLECTION+'.monthly.clim.'+SEASON+'.nc4'
print fname
# Load HICE (ice thickness) for March: prefer the precomputed climatology
# file; otherwise average all monthly files for that calendar month.
# The grid (LON/LAT/TMASK) is read once alongside the first season.
if os.path.isfile(fname):
   ncfile = Dataset(fname, 'r', format='NETCDF4')
   hi03=ncfile.variables['HICE'][0]
   LON=ncfile.variables['LON'][:]
   LAT=ncfile.variables['LAT'][:]
   lon = LON
   lat = LAT
   tmask=ncfile.variables['TMASK'][0]
   ncfile.close()
else:
   files = glob.glob(EXPDIR+'/'+COLLECTION+'/*monthly.????'+SEASON[-2:]+'.nc4')
   files.sort()
   ncfile = Dataset(files[0], 'r', format='NETCDF4')
   LON=ncfile.variables['LON'][:]
   LAT=ncfile.variables['LAT'][:]
   lon = LON
   lat = LAT
   tmask=ncfile.variables['TMASK'][0]
   ncfile.close()
   hi03=np.zeros(tmask.shape)
   for f in files:
      ncfile = Dataset(f, 'r', format='NETCDF4')
      hi=ncfile.variables['HICE'][0]
      ncfile.close()
      hi03 += hi
   hi03 /= float(len(files))
# Same pattern for October.
SEASON='M10'
fname=EXPDIR+'/'+COLLECTION+'/'+EXPID+'.'+COLLECTION+'.monthly.clim.'+SEASON+'.nc4'
print fname
if os.path.isfile(fname):
   ncfile = Dataset(fname, 'r', format='NETCDF4')
   hi10=ncfile.variables['HICE'][0]
   ncfile.close()
else:
   files = glob.glob(EXPDIR+'/'+COLLECTION+'/*monthly.????'+SEASON[-2:]+'.nc4')
   files.sort()
   hi10=np.zeros(tmask.shape)
   for f in files:
      ncfile = Dataset(f, 'r', format='NETCDF4')
      hi=ncfile.variables['HICE'][0]
      ncfile.close()
      hi10 += hi
   hi10 /= float(len(files))
# Same pattern for November.
SEASON='M11'
fname=EXPDIR+'/'+COLLECTION+'/'+EXPID+'.'+COLLECTION+'.monthly.clim.'+SEASON+'.nc4'
print fname
if os.path.isfile(fname):
   ncfile = Dataset(fname, 'r', format='NETCDF4')
   hi11=ncfile.variables['HICE'][0]
   ncfile.close()
else:
   files = glob.glob(EXPDIR+'/'+COLLECTION+'/*monthly.????'+SEASON[-2:]+'.nc4')
   files.sort()
   hi11=np.zeros(tmask.shape)
   for f in files:
      ncfile = Dataset(f, 'r', format='NETCDF4')
      hi=ncfile.variables['HICE'][0]
      ncfile.close()
      hi11 += hi
   hi11 /= float(len(files))
# Day-weighted Oct-Nov composite (31 days of Oct + 30 of Nov); spring
# panel is simply March.
hifall=(hi10*31.0+hi11*30.0)/(31.0+30.0)
hispr=hi03
print LON.shape
# Mask out land/non-ocean cells using the tracer mask.
hifall = ma.masked_where(tmask<0.5, hifall)
hispr = ma.masked_where(tmask<0.5, hispr)
# 2x2 panel layout: model spring, model fall, ICESat spring, ICESat fall.
# NOTE(review): plot_pole_new is not defined in this excerpt -- presumably
# provided elsewhere in the file or by a local utility module; confirm.
titlestr=EXPID+' Feb-Mar'
#ax1 = plt.axes([-0.05, 0.225, 0.6, 0.6])
plt.subplot(2,2,1)
meridians=[1,0,1,1]
#plot_utils.plot_pole(lon,lat,aicem[0,:,:],aice_levels,'',POLE,'cont',meridians)
plot_pole_new(lon,lat,hispr,fbot_levels,cmap2,norm,'',POLE,'cont',meridians)
plt.title(titlestr,y=1.1,size=20)
#coloraxis = [0.05, 0.1, 0.4, 0.035]
#cx = fig.add_axes(coloraxis, label='m', title='1')
cbar=plt.colorbar(orientation='vertical',ticks=list(fbot_levels),extend='both',shrink=0.8)
titlestr=EXPID+' Oct-Nov'
#ax2 = plt.axes([0.425, 0.225, 0.6, 0.6])
plt.subplot(2,2,2)
meridians=[1,0,1,1]
#plot_utils.plot_pole(lon,lat,aicem[0,:,:],aice_levels,'',POLE,'cont',meridians)
plot_pole_new(lon,lat,hifall,fbot_levels,cmap2,norm,'',POLE,'cont',meridians)
plt.title(titlestr,y=1.1,size=20)
#plt.suptitle(EXPID,y=0.96,fontsize=25,fontweight='bold')
coloraxis = [0.5, 0.1, 0.5, 0.035]
#cx = fig.add_axes(coloraxis, label='m', title='m')
#cbar=plt.colorbar(cax=cx,orientation='horizontal',ticks=list(fbot_levels),extend='both')
cbar=plt.colorbar(orientation='vertical',ticks=list(fbot_levels),extend='both',shrink=0.8)
titlestr='ICESat Feb-Mar 2004-2008'
#ax1 = plt.axes([-0.05, 0.225, 0.6, 0.6])
plt.subplot(2,2,3)
meridians=[1,0,1,1]
#plot_utils.plot_pole(lon,lat,aicem[0,:,:],aice_levels,'',POLE,'cont',meridians)
plot_pole_new(isfmlon,isfmlat,isfmg,fbot_levels,cmap2,norm,'',POLE,'cont',meridians)
plt.title(titlestr,y=1.1,size=20)
#coloraxis = [0.05, 0.1, 0.4, 0.035]
#cx = fig.add_axes(coloraxis, label='m', title='1')
cbar=plt.colorbar(orientation='vertical',ticks=list(fbot_levels),extend='both',shrink=0.8)
titlestr='ICESat Oct-Nov 2003-2007'
#ax1 = plt.axes([-0.05, 0.225, 0.6, 0.6])
plt.subplot(2,2,4)
meridians=[1,0,1,1]
#plot_utils.plot_pole(lon,lat,aicem[0,:,:],aice_levels,'',POLE,'cont',meridians)
plot_pole_new(isonlon,isonlat,isong,fbot_levels,cmap2,norm,'',POLE,'cont',meridians)
plt.title(titlestr,y=1.1,size=20)
#coloraxis = [0.05, 0.1, 0.4, 0.035]
#cx = fig.add_axes(coloraxis, label='m', title='1')
cbar=plt.colorbar(orientation='vertical',ticks=list(fbot_levels),extend='both',shrink=0.8)
#plt.suptitle(EXPID,y=0.96,fontsize=16,fontweight='bold')
#pngname=EXPID+'_HICE_ICESat'
#print pngname
# Write the final figure to the configured plot directory.
plt.savefig(PLOT_PATH+'/'+pngname)
#pcolor(lon,lat,aicem[0,:,:])
#colorbar()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
2010,
34,
8068,
19,
1330,
16092,
292,
316,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,... | 1.978013 | 3,866 |
# $Id: f8ce5bf718c826df5fb3cd06701dc2bf6e144acb $
"""
Network-related methods and classes.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext en'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import urlparse
import shutil
import tempfile
import urllib2
import logging
import os
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['download']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
log = logging.getLogger('grizzled.net')
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def download(url, directory=None, bufsize=8192):
    """
    Download the specified URL to a directory. This module properly handles
    HTTP authentication for URLs like this one::

        https://user:password@localhost:8080/foo/bar/baz.tgz

    Note, however, that user/password authentication is only supported for
    "http" and "https" URLs.

    :Parameters:
        url : str
            the URL to download

        directory : str
            The directory to receive the downloaded file. If this parameter is
            omitted, ``download()`` will create a temporary directory to
            contain the file.

        bufsize : int
            buffer size to use when reading URL

    :rtype: tuple
    :return: A (*download_directory*, *downloaded_file*) tuple
    """
    pieces = urlparse.urlparse(url)
    path = pieces.path
    if not directory:
        directory = tempfile.mkdtemp(prefix='download')

    outputPath = os.path.join(directory, os.path.basename(path))

    # Handle user/password explicitly.
    if pieces.scheme.startswith('http') and pieces.username:
        # Initialize basic HTTP authentication for this URL.
        #
        # NOTE: This is necessary because urllib doesn't handle URLs like
        # http://user:password@host:port/...

        # Get the user name and password from the URL.
        user, password = pieces.username, pieces.password

        # Rebuild the netloc WITHOUT the credentials. ParseResult attributes
        # are read-only, so the host[:port] string is built in a local
        # variable (the original code tried to assign to pieces.hostname,
        # which raises AttributeError).
        netloc = pieces.hostname
        if pieces.port:
            netloc += ':%d' % pieces.port

        # urlunparse() expects (scheme, netloc, path, params, query,
        # fragment) -- params before query.
        newPieces = (pieces.scheme, netloc, pieces.path, pieces.params,
                     pieces.query, pieces.fragment)
        url = urlparse.urlunparse(newPieces)
        log.debug('Installing authorization handler for URL %s' % url)
        passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passwordMgr.add_password(realm=None,
                                 uri=url,
                                 user=user,
                                 passwd=password)
        authHandler = urllib2.HTTPBasicAuthHandler(passwordMgr)
        opener = urllib2.build_opener(authHandler)
        # Install the opener so the urlopen() below uses it; do NOT issue a
        # throwaway opener.open() request first (the original fetched the
        # URL twice).
        urllib2.install_opener(opener)

    log.debug('Downloading "%s" to "%s"' % (url, outputPath))
    shutil.copyfileobj(urllib2.urlopen(url), open(outputPath, 'wb'), bufsize)
    return (outputPath, directory)
| [
2,
720,
7390,
25,
277,
23,
344,
20,
19881,
45720,
66,
23,
2075,
7568,
20,
21855,
18,
10210,
15,
3134,
486,
17896,
17,
19881,
21,
68,
18444,
330,
65,
720,
198,
198,
37811,
198,
26245,
12,
5363,
5050,
290,
6097,
13,
198,
37811,
198,... | 3.019279 | 1,193 |