id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
1787866 |
from .shadowstack import ShadowStack
from .cpuid import CpuId
from .shiftstack import ShiftStack
from .adversarial import Adversarial
from .binary_optimization import BinaryOptimization
from .simple_ptr_enc import SimplePointerEncryption
| StarcoderdataPython |
399425 | #!/usr/bin/env python
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2017, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
import datetime
import socket
import time
import gevent
import gevent.socket
import gevent.monkey; gevent.monkey.patch_all()
import pyasn1.error
from pyasn1.codec.der.decoder import decode
from pyasn1.codec.native.encoder import encode
from ait.core import log
import ait.dsn.sle
import ait.dsn.sle.frames
from ait.dsn.sle.pdu.raf import *
def process_pdu(raf_mngr):
    """Drain the RAF manager's PDU queue forever, forwarding telemetry frames.

    Each decoded transfer-buffer payload is wrapped in ``TMTransFrame`` and its
    first data field is sent as a UDP datagram to localhost:3076 (the GUI).

    Parameters
    ----------
    raf_mngr : ait.dsn.sle.RAF
        A connected/bound RAF manager whose ``_data_queue`` is being filled.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    while True:
        # Yield to other greenlets; this busy loop otherwise never blocks.
        gevent.sleep(0)
        if raf_mngr._data_queue.empty():
            continue
        # (Removed leftover debug log that unconditionally printed
        # "Empty False" for every processed PDU.)
        pdu = raf_mngr._data_queue.get()
        try:
            decoded_pdu, remainder = raf_mngr.decode(pdu)
        except pyasn1.error.PyAsn1Error as e:
            # Include the decoder's message so failures are diagnosable.
            log.error('Unable to decode PDU. Skipping ... ({})'.format(e))
            continue
        except TypeError as e:
            log.error('Unable to decode PDU due to type error ... ({})'.format(e))
            continue
        if ('data' in decoded_pdu['rafTransferBuffer'][0]['annotatedFrame'] and
                decoded_pdu['rafTransferBuffer'][0]['annotatedFrame']['data'].isValue):
            # Data is present and initialized. Processing telemetry ...
            trans_data = decoded_pdu['rafTransferBuffer'][0]['annotatedFrame']['data'].asOctets()
        else:
            # Object does not contain data or data is not initialized. Skipping ...
            continue
        tmf = ait.dsn.sle.frames.TMTransFrame(trans_data)
        log.info('Emitting {} bytes of telemetry to GUI'.format(len(tmf._data[0])))
        sock.sendto(tmf._data[0], ('localhost', 3076))
if __name__ == '__main__':
    # NOTE(review): hostname/port/inst_id are placeholders that must be filled
    # in before this script can run.
    raf_mngr = ait.dsn.sle.RAF(hostname="INSERT RAF HOSTNAME", port="INSERT RAF PORT", inst_id="INSERT RAF_ONLINE_INST_ID")
    raf_mngr.connect()
    time.sleep(1)
    raf_mngr.bind()
    time.sleep(1)
    # start and stop times, respectively
    raf_mngr.start(datetime.datetime(2019, 1, 1), datetime.datetime(2027, 2, 1))
    tlm_monitor = gevent.spawn(process_pdu, raf_mngr)
    gevent.sleep(0)
    log.info('Processing telemetry. Press <Ctrl-c> to terminate connection ...')
    try:
        while True:
            gevent.sleep(0)
    except KeyboardInterrupt:
        # BUG FIX: was a bare ``except: pass`` that silently swallowed every
        # error; only Ctrl-C (as advertised above) is treated as a clean exit,
        # anything else propagates after the finally-cleanup below.
        pass
    finally:
        tlm_monitor.kill()
        raf_mngr.stop()
        time.sleep(1)
        raf_mngr.unbind()
        time.sleep(1)
| StarcoderdataPython |
12823421 | #!/usr/bin/env pybricks-micropython
import math
from pybricks.hubs import EV3Brick
from pybricks.parameters import Color
from pybricks.tools import wait
from pybricks.media.ev3dev import Font, Image
# Initialize the EV3
ev3 = EV3Brick()
# SPLIT SCREEN ################################################################
# Make a sub-image for the left half of the screen
left = Image(ev3.screen, sub=True, x1=0, y1=0,
             x2=ev3.screen.width // 2 - 1, y2=ev3.screen.height - 1)
# Make a sub-image for the right half of the screen
right = Image(ev3.screen, sub=True, x1=ev3.screen.width // 2, y1=0,
              x2=ev3.screen.width - 1, y2=ev3.screen.height - 1)
# Use a monospaced font so that text is vertically aligned when we print
right.set_font(Font(size=8, monospace=True))
# Graphing y = sin(x)
def f(x):
    """Return sin(x) -- the function being graphed on the left half."""
    return math.sin(x)
# Scrolling strip-chart: each iteration draws one new column on the left pane
# and prints every 10th (x, y) sample on the right pane.
for t in range(200):
    # Graph on left side
    # Scale t to x-axis and compute y values
    x0 = (t - 1) * 2 * math.pi / left.width
    y0 = f(x0)
    x1 = t * 2 * math.pi / left.width
    y1 = f(x1)
    # Scale y values to screen coordinates (y is inverted: screen origin is top-left)
    sy0 = (-y0 + 1) * left.height / 2
    sy1 = (-y1 + 1) * left.height / 2
    # Shift the current graph to the left one pixel
    left.draw_image(-1, 0, left)
    # Fill the last column with white to erase the previous plot point
    left.draw_line(left.width - 1, 0, left.width - 1, left.height - 1, 1, Color.WHITE)
    # Draw the new value of the graph in the last column
    left.draw_line(left.width - 2, int(sy0), left.width - 1, int(sy1), 3)
    # Print every 10th value on right side
    if t % 10 == 0:
        right.print('{:10.2f}{:10.2f}'.format(x1, y1))
    # ~10 updates per second
    wait(100)
# SPRITE ANIMATION ############################################################
# Copy of screen for double-buffering
buf = Image(ev3.screen)
# Load images from file
bg = Image('background.png')
sprite = Image('sprite.png')
# Number of cells in each sprite animation
NUM_CELLS = 8
# Each cell in the sprite is 75 x 100 pixels
CELL_WIDTH, CELL_HEIGHT = 75, 100
# Get sub-images for each individual cell
# This is more efficient that loading individual images
walk_right = [Image(sprite, sub=True, x1=x * CELL_WIDTH, y1=0,
                    x2=(x + 1) * CELL_WIDTH - 1, y2=CELL_HEIGHT - 1)
              for x in range(NUM_CELLS)]
walk_left = [Image(sprite, sub=True, x1=x * CELL_WIDTH, y1=CELL_HEIGHT,
                   x2=(x + 1) * CELL_WIDTH - 1, y2=2 * CELL_HEIGHT - 1)
             for x in range(NUM_CELLS)]
# Walk from left to right
for x in range(-100, 200, 2):
    # Start with the background image
    buf.draw_image(0, 0, bg)
    # Draw the current sprite - purple is treated as transparent
    buf.draw_image(x, 5, walk_right[x // 5 % NUM_CELLS], Color.PURPLE)
    # Copy the double-buffer to the screen
    ev3.screen.draw_image(0, 0, buf)
    # 20 frames per second
    wait(50)
# Walk from right to left
for x in range(200, -100, -2):
    buf.draw_image(0, 0, bg)
    buf.draw_image(x, 5, walk_left[x // 5 % NUM_CELLS], Color.PURPLE)
    ev3.screen.draw_image(0, 0, buf)
    wait(50)
wait(1000)
| StarcoderdataPython |
6605754 | # -*- coding: utf-8 -*-
import math
import logging
logger = logging.getLogger(__name__)
class NoPruning:
    """Pruning strategy that prunes nothing.

    Enumerates every pair (i, j) with 1 <= i < j <= len(Pe), where i and j
    are 1-based indices into the entity's position list ``Pe``.
    """

    @classmethod
    def filter(cls, Pe, Le, Te, *args):
        """Yield every span (i, j), i < j, over the 1-based indices of Pe."""
        upper = len(Pe) + 1
        for start in range(1, upper):
            yield from ((start, stop) for stop in range(start + 1, upper))
class LazyCountPruning:
    """Lazy-count pruning (Lemma 3): skip entity e entirely when |Pe| < Tl."""

    @classmethod
    def filter(cls, Pe, Le, Te, Tl, *args):
        """Yield all spans of Pe, or nothing if the position list is below Tl."""
        # lazy-count pruning: |Pe| <= Tl < T (Lemma 3)
        if len(Pe) < Tl:
            return
        yield from NoPruning.filter(Pe, Le, Te)
class BucketCountPruning:
    """Bucket-count pruning: partition Pe into buckets of nearby positions.

    Consecutive positions whose document span would exceed the (tightened)
    length bound cannot belong to one candidate window, so ``Pe`` is split
    into buckets and only buckets holding at least ``Tl`` positions survive.
    """

    @classmethod
    def filter(cls, Pe, Le, Te, Tl, tighter_bound_func, *bound_args):
        """Yield bucket spans (i, j), 1-based inclusive indices of Pe.

        ``tighter_bound_func(*bound_args)`` may provide a tighter gap bound;
        when it is unavailable/fails we fall back to ``Te - Tl``.
        """
        # lazy-count pruning: |Pe| <= Tl < T (Lemma 3)
        if len(Pe) >= Tl:
            try:
                Te_diff_Tl = tighter_bound_func(*bound_args)
            except Exception:
                # tighter bound is not supported for jaccard, cosine and dice
                # -- fall back to Te - Tl.  (BUG FIX: was a bare ``except:``,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                Te_diff_Tl = Te - Tl
            for i, j in cls.iter_bucket_spans(Pe, Te_diff_Tl):
                # only buckets big enough to ever reach the threshold survive
                if j - i + 1 >= Tl:
                    yield i, j

    @classmethod
    def iter_bucket_spans(cls, Pe, t):
        """Yield maximal spans (k, l) of Pe whose adjacent-position gaps fit in t."""
        i, j = 1, 2
        # initialize bucket with starting position
        k = i
        while True:
            try:
                pi, pj = Pe[i-1], Pe[j-1]
            except IndexError:
                # ran off the end of Pe: close and emit the final bucket
                l = i
                yield k, l
                break
            else:
                if pj - pi + 1 > t:
                    # gap too large: close the current bucket, start a new one at j
                    l = i
                    yield k, l
                    k = j
                i += 1
                j += 1
class BatchCountPruning:
    """Batch-count pruning: locate candidate windows with binary search.

    ``filter`` enumerates *possible* candidate windows over the position list
    ``Pe`` (via ``binary_span``/``binary_shift``) and verifies each against
    Definition 3 before yielding it for counting.
    """

    @classmethod
    def filter(cls, Pe, Le, Te, Tl, tighter_bound_func, *bound_args):
        """Yield actual candidate windows (i, j), 1-based inclusive indices of Pe.

        ``tighter_bound_func(|e|, |Pe[i...j]|, t)`` may compute a tighter upper
        bound; when unavailable/fails we fall back to ``Te``.
        """
        # lazy-count pruning: |Pe| <= Tl < T (Lemma 3)
        if len(Pe) >= Tl:
            # find possible candidate windows using ``binary_span`` and ``binary_shift``
            for i, j in cls.iter_possible_candidate_windows(Pe, Te, Tl):
                try:
                    # |e|, |Pe[i...j]|, t
                    tighter_Te = tighter_bound_func(bound_args[0], j-i+1, bound_args[1])
                except Exception:
                    # tighter bound is not supported for edit distance and
                    # similarity -- fall back to Te.  (BUG FIX: was a bare
                    # ``except:``, which also swallowed KeyboardInterrupt.)
                    tighter_Te = Te
                # check if possible candidate window is an actual candidate window
                if cls.check_possible_candidate_window(i, j, Pe, Le, Te, Tl, tighter_Te):
                    # return the span for counting
                    yield i, j

    @classmethod
    def check_possible_candidate_window(cls, i, j, Pe, Le, Te, Tl, tighter_Te=None):
        """Return True iff Pe[i...j] is a valid *and* candidate window (Definition 3)."""
        # (j-1)+1 = j (-1 due to 0-based indexing and +1 because python slicing is non-inclusive)
        Pe_ij = Pe[i-1:j]
        # valid window: Tl <= |Pe[i...j]| <= Te (Definition 3, condition 1);
        # partially redundant since ``iter_possible_candidate_windows`` ensures it
        if Tl <= len(Pe_ij) <= Te:
            pi = Pe[i-1]
            pj = Pe[j-1]
            if tighter_Te is None:
                tighter_Te = Te
            # candidate window: Le <= |D[pi...pj]| <= tighter_Te (Definition 3, condition 2)
            if Le <= pj - pi + 1 <= tighter_Te:
                return True
        return False

    @classmethod
    def iter_possible_candidate_windows(cls, Pe, Te, Tl):
        """Yield spans (i, j) of Pe that could possibly be candidate windows."""
        i = 1
        while i <= len(Pe) - Tl + 1:
            # pg. 535 left column, last line (initially Pe[1..Tl])
            j = i + Tl - 1
            # 0-based indexing; add -1
            pj, pi = Pe[j-1], Pe[i-1]
            # length of substring |D[pi...pj]| = pj-pi+1 must not exceed the upper bound
            if (pj - pi + 1) <= Te:
                # valid substring with size Tl <= |Pe[i...j]| <= Te; extend the
                # right edge as far as the bound allows via binary search
                mid = cls.binary_span(i, j, Pe, Te)
                yield i, mid
                i += 1
            else:
                # candidate windows are too long -- binary-search the next start
                i = cls.binary_shift(i, j, Pe, Te, Tl)

    @classmethod
    def binary_shift(cls, i, j, Pe, Te, Tl):
        """Binary-search the smallest shifted start i whose window can fit in Te."""
        lower = i
        upper = j
        while lower <= upper:
            mid = math.ceil((lower + upper) / 2)
            pmid, pj = Pe[mid-1], Pe[j-1]
            if ((pj + (mid - i)) - pmid + 1) > Te:
                lower = mid + 1
            else:
                upper = mid - 1
        i = lower
        j = i + Tl - 1
        # if j jumps over, clip it to the length of the position list
        if j > len(Pe):
            j = len(Pe)
        pi, pj = Pe[i-1], Pe[j-1]
        if (pj - pi + 1) > Te:
            # still too long: shift again from the new window
            i = cls.binary_shift(i, j, Pe, Te, Tl)
        return i

    @classmethod
    def binary_span(cls, i, j, Pe, Te):
        """Binary-search the rightmost j' >= j with |D[pi...pj']| <= Te."""
        lower = j
        upper = i + Te - 1
        while lower <= upper:
            # mid is the new right span, eventually larger than j (i.e. lower);
            # if mid jumped out of len(Pe) it would raise IndexError, hence the guard
            mid = math.ceil((upper + lower)/2)
            if mid <= len(Pe):
                pmid, pi = Pe[mid-1], Pe[i-1]
                if (pmid - pi + 1 > Te):
                    upper = mid - 1
                else:
                    lower = mid + 1
            else:
                # heuristic: if mid exceeds the length, shrink it; without this
                # we miss candidate windows near the end, e.g. 'surauijt ch'
                # in the Table 1 document for entity 'surajit ch'
                upper = mid - 1
        mid = upper
        return mid
| StarcoderdataPython |
6529215 | <filename>flowchem/components/stdlib/y_mixer.py
from typing import Optional
from flowchem.components.properties import PassiveMixer
class YMixer(PassiveMixer):
    """
    A Y mixer.
    This is an alias of `Component`.
    Arguments:
    - `name`: The name of the mixer.
    Attributes:
    - See arguments.
    """
    def __init__(self, name: Optional[str] = None):
        # No behavior of its own; delegates entirely to PassiveMixer.
        super().__init__(name=name)
| StarcoderdataPython |
6494727 | <gh_stars>0
from testdata import PROBLEMS
from testdata import GOOGLE_PROBLEMS
import unittest
from clausefinder import ClauseFinder
from clausefinder import googlenlp
class GoogleTest(unittest.TestCase):
    """Test ClauseFinder using Google NLP."""

    def test0_JsonProblems(self):
        """Verify clause extraction against pre-parsed Google NLP JSON fixtures."""
        if GOOGLE_PROBLEMS is None:
            return
        # (Removed an unused manual ``i`` counter from the original loop.)
        for p in GOOGLE_PROBLEMS:
            # BUG FIX: dict.has_key() does not exist in Python 3 -- use ``in``.
            if 'preprocessed' in p:
                doc = googlenlp.Doc(p['preprocessed']['google'])
            else:
                doc = googlenlp.Doc(p['google'])
            cf = ClauseFinder(doc)
            clauses = []
            for sent in doc.sents:
                clauses.extend(cf.find_clauses(sent))
            self.assertEqual(len(clauses), len(p['clauses']), doc.text)
            for expect, actual in zip(p['clauses'], clauses):
                self.assertEqual(expect['type'], actual.type, doc.text)
                self.assertEqual(expect['text'], actual.text, doc.text)

    def disabled_test1_TextProblems(self):
        """Parse raw sentences live (disabled: requires Google NLP access)."""
        nlp = googlenlp.GoogleNLP()
        for p in PROBLEMS:
            result = nlp.parse(p['sentence'])
            self.assertIsNotNone(result)
            doc = googlenlp.Doc(result)
            cf = ClauseFinder(doc)
            clauses = []
            for sent in doc.sents:
                clauses.extend(cf.find_clauses(sent))
            # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
            # use assertEqual throughout.
            self.assertEqual(len(clauses), len(p['clauses']))
            for expect, actual in zip(p['clauses'], clauses):
                self.assertEqual(expect['type'], actual.type)
                self.assertEqual(expect['text'], actual.text)
def run_tests():
    """Load the GoogleTest case and run it with verbose output."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(GoogleTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
if __name__ == '__main__':
    # Running this module directly executes the full unittest runner.
    unittest.main()
| StarcoderdataPython |
3412321 | import base64
import random
import string
from rotkehlchen.fval import FVal
from rotkehlchen.utils.misc import ts_now
def make_random_bytes(size):
    """Return ``size`` pseudo-random bytes from the ``random`` PRNG.

    Not cryptographically secure -- intended for test fixtures only.
    """
    return bytes(random.getrandbits(8) for _ in range(size))
def make_random_b64bytes(size):
    """Return ``size`` pseudo-random bytes, base64-encoded (``bytes`` result)."""
    raw = make_random_bytes(size)
    return base64.b64encode(raw)
def make_random_uppercasenumeric_string(size):
    """Return a pseudo-random string of ``size`` uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=size))
def make_random_positive_fval(max_num=1000000):
    # Random FVal in [0, max_num); uses random.uniform, so not crypto-safe.
    return FVal(random.uniform(0, max_num))
def make_random_timestamp(start=1451606400, end=None):
    """Return a pseudo-random Unix timestamp in [start, end].

    ``end`` defaults to the current time (``ts_now``) when not given;
    ``start`` defaults to 2016-01-01 UTC.
    """
    upper = ts_now() if end is None else end
    return random.randint(start, upper)
| StarcoderdataPython |
6620211 | <gh_stars>1-10
from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
    def run(self, s):
        """AoC 2021 day 9 part 1: sum of risk levels (1 + height) of low points."""
        grid = s.strip().splitlines()
        height = len(grid)
        width = len(grid[0])

        def cell(col, row):
            # Out-of-bounds cells act as walls higher than any digit (0-9).
            if 0 <= col < width and 0 <= row < height:
                return int(grid[row][col])
            return 10

        total = 0
        for col in range(width):
            for row in range(height):
                here = cell(col, row)
                neighbours = (cell(col - 1, row), cell(col + 1, row),
                              cell(col, row - 1), cell(col, row + 1))
                # a low point is strictly lower than all four neighbours
                if all(here < nb for nb in neighbours):
                    total += 1 + here
        return total
def test_jon():
    """
    Run `python -m pytest ./day-09/part-1/jon.py` to test the submission.
    """
    # 15 is the expected risk sum for the puzzle's sample grid.
    assert (
        JonSubmission().run(
            """
2199943210
3987894921
9856789892
8767896789
9899965678
""".strip()
        )
        == 15
    )
| StarcoderdataPython |
3387939 | <reponame>awesome-archive/Dragon<gh_stars>0
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""The Implementation of the data layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
class DataLayer(Layer):
    """
    The implementation of ``DataLayer``.
    Different from ``Caffe``, we force to use `LMDB`_ backend.
    Parameters
    ----------
    source : str
        The path of database. Refer `DataParameter.source`_.
    prefetch: int
        The prefetch count. Refer `DataParameter.prefetch`_.
    batch_size : int
        The size of a mini-batch. Refer `DataParameter.batch_size`_.
    phase : caffe_pb2.Phase
        The phase of layer. Refer `LayerParameter.phase`_.
    mirrow : boolean
        Whether to randomly mirror. Refer `TransformationParameter.mirror`_.
    crop_size : int
        The crop size. Refer `TransformationParameter.crop_size`_.
    force_color : boolean
        Force to have 3 channels. Refer `TransformationParameter.force_color`_.
    color_augmentation : boolean
        Whether to distort colors. Extension of `TransformationParameter`_.
    padding : int
        The padding size. Extension of `TransformationParameter`_.
    min_random_scale : float
        The min scale of the images. Extension of `TransformationParameter`_.
    max_random_scale : float
        The max scale of the images. Extension of `TransformationParameter`_.
    dtype : caffe_pb2.MemoryDataParameter.DataType
        The output data type. ``FLOAT32`` or ``FLOAT16``.
    mean_value : list of float
        The mean of each channel. Refer `TransformationParameter.mean_value`_.
    scale : float
        The scaling factor. Refer `TransformationParameter.scale`_.
    """
    def __init__(self, LayerParameter):
        # Translate the caffe-style proto parameters into dragon op arguments.
        super(DataLayer, self).__init__(LayerParameter)
        param = LayerParameter.data_param
        memory_param = LayerParameter.memory_data_param
        transform_param = LayerParameter.transform_param
        parallel_param = LayerParameter.parallel_param
        self.arguments = {
            'source': param.source,
            'prefetch': param.prefetch,
            'batch_size': param.batch_size,
            'phase': {0: 'TRAIN', 1: 'TEST'}[int(LayerParameter.phase)],
            'mirror': transform_param.mirror,
            'crop_size': transform_param.crop_size,
            'force_color': transform_param.force_color,
            'color_augmentation': transform_param.color_augmentation,
            'padding': transform_param.padding,
            'min_random_scale': transform_param.min_random_scale,
            'max_random_scale': transform_param.max_random_scale,
            'shuffle': parallel_param.shuffle,
            'multiple_nodes': parallel_param.multiple_nodes,
            'partition': parallel_param.partition,
            'dtype': {0: 'float32', 1: 'float16'}[memory_param.dtype],
            'data_format': 'NCHW',
        }
        if len(transform_param.mean_value) > 0:
            self.arguments['mean_values'] = [float(element)
                for element in transform_param.mean_value]
        if transform_param.scale != 1:
            # NOTE(review): this overwrites any 'mean_values' set just above
            # with [1/scale]*3 instead of combining them -- looks like it may
            # have been intended to set a separate 'scale' key; confirm intent.
            self.arguments['mean_values'] = \
                [1. / transform_param.scale] * 3
    def LayerSetup(self, bottom):
        # Read (data, label) pairs from LMDB, then decode/transform the images.
        data, label = dragon.ops.LMDBData(**self.arguments)
        return dragon.ops.ImageData(data, **self.arguments), label
class MemoryDataLayer(Layer):
    """The implementation of ``MemoryDataLayer``.
    We extend it with ``FP16`` and ``NHWC => NCHW``.
    Parameters
    ----------
    dtype : caffe_pb2.MemoryDataParameter.DataType
        The output data type. ``FLOAT32`` or ``FLOAT16``.
    mean_value : list of float
        The mean of each channel. Refer `TransformationParameter.mean_value`_.
    scale : float
        The scaling factor. Refer `TransformationParameter.scale`_.
    """
    def __init__(self, LayerParameter):
        # Translate the caffe-style proto parameters into dragon op arguments.
        super(MemoryDataLayer, self).__init__(LayerParameter)
        param = LayerParameter.memory_data_param
        transform_param = LayerParameter.transform_param
        self.arguments = {
            'dtype': {0: 'float32', 1: 'float16'}[param.dtype],
            'data_format': 'NCHW',
        }
        if len(transform_param.mean_value) > 0:
            self.arguments['mean_values'] = \
                [float(element) for element in transform_param.mean_value]
        if transform_param.scale != 1:
            # NOTE(review): this overwrites any 'mean_values' set just above
            # with [1/scale]*3 (same pattern as DataLayer); confirm intent.
            self.arguments['mean_values'] = \
                [1. / transform_param.scale] * 3
    def LayerSetup(self, bottom):
        # Wrap the in-memory blob with the image decoding/transform op.
        return dragon.ops.ImageData(bottom, **self.arguments) | StarcoderdataPython |
6614168 | <reponame>vhn0912/python-snippets
import numpy as np
# Demo: assigning into NumPy arrays via scalar indexing, row indexing,
# np.ix_ fancy indexing, and slice assignment with broadcasting.
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[0, 0] = 100
print(a_2d)
# [[100 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[0] = 100
print(a_2d)
# [[100 100 100 100]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[np.ix_([False, True, True], [1, 3])] = 200
print(a_2d)
# [[100 100 100 100]
# [ 4 200 6 200]
# [ 8 200 10 200]]
# Slice assignment with an array of exactly matching shape (2, 3).
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(6).reshape(2, 3) * 100)
# [[ 0 100 200]
# [300 400 500]]
a_2d[::2, :3] = np.arange(6).reshape(2, 3) * 100
print(a_2d)
# [[ 0 100 200 3]
# [ 4 5 6 7]
# [300 400 500 11]]
# Slice assignment with a (3,) array broadcast across both selected rows.
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(3) * 100)
# [ 0 100 200]
a_2d[::2, :3] = np.arange(3) * 100
print(a_2d)
# [[ 0 100 200 3]
# [ 4 5 6 7]
# [ 0 100 200 11]]
# A (2,) array cannot broadcast into the (2, 3) selection -> ValueError.
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(2) * 100)
# [ 0 100]
# a_2d[::2, :3] = np.arange(2) * 100
# ValueError: could not broadcast input array from shape (2) into shape (2,3)
| StarcoderdataPython |
4900096 | <reponame>jay-johnson/celery-loaders
import celery
from spylunking.log.setup_logging import build_colorized_logger
log = build_colorized_logger(
name='custom-task')
class CustomTask(celery.Task):
    """Celery base Task that logs success and failure via the module logger."""
    # Label prefixed to every log line emitted by the callbacks below.
    log_label = "custom_task"
    def on_success(self, retval, task_id, args, kwargs):
        """on_success
        http://docs.celeryproject.org/en/latest/reference/celery.app.task.html
        :param retval: return value
        :param task_id: celery task id
        :param args: arguments passed into task
        :param kwargs: keyword arguments passed into task
        """
        log.info(("{} SUCCESS - retval={} task_id={} "
                  "args={} kwargs={}")
                 .format(
                    self.log_label,
                    retval,
                    task_id,
                    args,
                    kwargs))
    # end of on_success
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """on_failure
        http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance
        :param exc: exception
        :param task_id: task id
        :param args: arguments passed into task
        :param kwargs: keyword arguments passed into task
        :param einfo: exception info
        """
        # Note: task_id and einfo are intentionally not logged here.
        use_exc = str(exc)
        log.error(("{} FAIL - exc={} "
                   "args={} kwargs={}")
                  .format(
                    self.log_label,
                    use_exc,
                    args,
                    kwargs))
    # end of on_failure
# end of CustomTask
| StarcoderdataPython |
4831285 | #!/usr/bin/env python
"""
Normalise count file by RPM
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
# argv[1]: htseq-count TSV file; argv[2]: total mapped reads; argv[3]: output prefix.
htseq = open(sys.argv[1], "r")
htseq_readlines = htseq.readlines()
htseq.close()
total_reads = float(sys.argv[2])
# RPM scaling factor: reads-per-million = count / (total_reads / 1e6)
scaling_factor = float(total_reads/1000000)
raw_and_rpm_outfile = open(sys.argv[3] + "_raw-rpm.tsv", "w")
rpm_outfile = open(sys.argv[3] + "_rpm.count", "w")
for line in htseq_readlines:
    strpline = line.strip().split("\t")
    if line.startswith("Feature"):
        # Header row: re-emit with the added ReadsPerMillion column.
        raw_and_rpm_outfile.write(strpline[0] + "\t" + strpline[1] + "\tReadsPerMillion\n")
    else:
        read_count = int(strpline[1])
        if line.startswith("__"):
            # Skip htseq-count summary rows (__no_feature, __ambiguous, ...).
            pass
        else:
            rpm = read_count/scaling_factor
            raw_and_rpm_outfile.write(strpline[0] + "\t" + str(read_count) + "\t" + str(rpm) + "\n")
            rpm_outfile.write(strpline[0] + "\t" + str(rpm) + "\n")
raw_and_rpm_outfile.close()
rpm_outfile.close()
| StarcoderdataPython |
11266089 | import pytest
import scipy.optimize as sopt
import scipy.sparse as sp
import numpy as np
from numpy.testing import assert_array_almost_equal
from nnls import block_pivoting, lawson_hanson
def test_block_pivoting():
    """Check block_pivoting against scipy.optimize.nnls and on sparse inputs."""
    # design matrix size (square)
    n = 100
    # ------------------------------------------------------------------------
    # test same output as scipy.nnls
    # eye with noise (only useful for full rank A)
    rng = np.random.RandomState(10293)
    A = np.eye(n) + 0.1*rng.rand(n,n)
    b = np.arange(n)
    scipy_sol = sopt.nnls(A, b)[0]
    bp_sol = block_pivoting(A, b)
    assert_array_almost_equal(scipy_sol, bp_sol)
    # ------------------------------------------------------------------------
    # test sparse: 90% of the noise entries are zeroed out
    A = np.eye(n)
    idx = rng.choice(np.arange(n**2), int(0.9*n**2), replace=False)
    noise = 0.1*rng.rand(n,n)
    noise[np.unravel_index(idx, (n,n))] = 0
    A += noise
    csr_A = sp.csr_matrix(A.copy())
    csc_A = sp.csc_matrix(A.copy())
    dense_sol = block_pivoting(A, b)
    csr_sol = block_pivoting(csr_A, b)
    csc_sol = block_pivoting(csc_A, b)
    # check cs*_A still sparse (i.e. the solver did not densify its input)
    assert sp.issparse(csr_A)
    assert sp.issparse(csc_A)
    assert_array_almost_equal(csr_sol, dense_sol)
    assert_array_almost_equal(csc_sol, dense_sol)
def test_lawson_hanson():
    """Check lawson_hanson against scipy.optimize.nnls and on sparse inputs."""
    # design matrix size (square)
    n = 100
    # ------------------------------------------------------------------------
    # test same output as scipy.nnls
    # A is the n x n Hilbert matrix (classic ill-conditioned test matrix)
    A = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
    b = np.ones(n)
    scipy_sol = sopt.nnls(A, b)[0]
    lh_sol = lawson_hanson(A, b)
    assert_array_almost_equal(scipy_sol, lh_sol)
    # ------------------------------------------------------------------------
    # test sparse: 90% of the noise entries are zeroed out
    rng = np.random.RandomState(10293)
    A = np.eye(n)
    idx = rng.choice(np.arange(n**2), int(0.9*n**2), replace=False)
    noise = 0.1*rng.rand(n,n)
    noise[np.unravel_index(idx, (n,n))] = 0
    A += noise
    b = np.arange(n)
    csr_A = sp.csr_matrix(A.copy())
    csc_A = sp.csc_matrix(A.copy())
    dense_sol = lawson_hanson(A, b)
    csr_sol = lawson_hanson(csr_A, b)
    csc_sol = lawson_hanson(csc_A, b)
    # check cs*_A still sparse (i.e. the solver did not densify its input)
    assert sp.issparse(csr_A)
    assert sp.issparse(csc_A)
    assert_array_almost_equal(csr_sol, dense_sol)
    assert_array_almost_equal(csc_sol, dense_sol)
| StarcoderdataPython |
11231569 | <filename>tests/conftest.py<gh_stars>0
import hypothesis
# 'dev': quick local runs (10 examples); 'dist': thorough CI/release runs (100).
hypothesis.settings.register_profile('dev', max_examples=10)
hypothesis.settings.register_profile('dist', max_examples=100)
| StarcoderdataPython |
9763827 | from django.shortcuts import render
from django.shortcuts import redirect
from InvManage.models import EventCard, HistoryFilterState
from InvManage.filters import EventCardFilter
from django.http import JsonResponse, HttpResponse
from django.template.response import TemplateResponse
from InvManage.serializers import HistoryFilterStateSerializer
from rest_framework.renderers import JSONRenderer
from InvManage.forms import HistoryForm
import json
def display_history_view(request):
    """
    Retrieves the list of events on ``GET`` request. The ``create``, ``update``, and ``delete`` events are registered for each model.
    .. http:get:: /history/
        Gets the list of all history items.
        **Example request**:
        .. sourcecode:: http
            GET /history/ HTTP/1.1
            Host: localhost:8000
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
            [
                {
                    "objname": {
                        ""
                    },
                    "operation": {
                        "Created",
                        "Updated",
                        "Deleted"
                    },
                    "objmodel": {
                        "Company",
                        "Vendor",
                        "PurchaseOrder",
                        "Product",
                        "Consumer",
                        "SalesOrder",
                        "GoodsReceiptNote"
                    },
                    "history-qlen": {
                        "10"
                    },
                    "date__gt": {
                        "11/01/2020"
                    },
                    "date__lt": {
                        "09/26/2021"
                    }
                }
            ]
        **Example response**:
        .. sourcecode:: http
            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/html; charset=utf-8
        :reqheader Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
        :statuscode 200: List of filtered history events received successfully.
    """
    if request.method == 'GET':
        # Create a dictionary of all events
        events = EventCard.objects.all().order_by('-date') # Fetches event cards ordering them with recent events first
        qlenForm = HistoryForm(request.GET, prefix='history')
        # Get filter parameters
        # NOTE(review): .first() returns None when no HistoryFilterState row
        # exists yet; both branches below dereference ``state`` -- confirm a
        # row is always pre-created, otherwise this raises AttributeError.
        state = HistoryFilterState.objects.all().first() # Get saved filter state
        if len(request.GET) == 0: # on page reload there are no parameters in the request
            jsonDec = json.decoder.JSONDecoder() # Instantiate decoder
            filterParams = jsonDec.decode(state.params) # Decode JSON string to python dictionary
            myFilter = EventCardFilter(filterParams, queryset=events)
            queryset = myFilter.qs[:int(filterParams['history-qlen'])]
            qlenForm = HistoryForm({'history-qlen':int(filterParams['history-qlen'])})
        else:
            # Explicit GET parameters: persist them as the new saved filter state.
            if qlenForm.is_valid():
                qlen = qlenForm.cleaned_data['qlen']
                params = {
                    'operation' : request.GET.getlist('operation'),
                    'objmodel' : request.GET.getlist('objmodel'),
                    'date__gt' : request.GET.get('date__gt'),
                    'date__lt' : request.GET.get('date__lt'),
                    'history-qlen' : qlen
                }
                state.params = json.dumps(params)
                state.save()
            myFilter = EventCardFilter(request.GET, queryset=events)
            queryset = myFilter.qs[:int(request.GET['history-qlen'])]
        dictionaries = []
        for event in queryset:
            dictionaries.append(event.__dict__)
        # Create a lookup dictionary for urls to be embedded in the event cards
        lookup = {'Company':'/company/update',
                  'Vendor': '/vendor/update',
                  'Consumer': '/consumer/update',
                  'Product': '/product/update',
                  'PurchaseOrder': '/purchase_order/update',
                  'SalesOrder': '/sales_order/update',
                  'GoodsReceiptNote':'/grn/update'}
        return render(request, 'history/history.html',{'dicts': dictionaries,
                                                       'lookupRoute':lookup,
                                                       'myFilter':myFilter,
                                                       'qlenForm': qlenForm})
| StarcoderdataPython |
3278256 | <filename>docs/_mocked_modules/ctypes/__init__.py
"""Bare minimum mock version of ctypes.
This shadows the real ctypes module when building the documentation,
so that :mod:`rubicon.objc` can be imported by Sphinx autodoc even when no Objective-C runtime is available.
This module only emulates enough of ctypes to make the docs build.
Most parts are in no way accurately implemented, and some ctypes features are missing entirely.
Parts of this file are based on the source code of the ctypes module from CPython,
under the terms of the PSF License Version 2, included below.
The code in question has all parts removed that we don't need,
and any remaining dependencies on the native _ctypes module have been replaced with pure Python code.
Specifically, the following parts are (partially) based on CPython source code:
* the definitions of the "ctypes primitive types" (the :class:`_SimpleCData` subclasses and their aliases)
* the implementations of :func:`CFUNCTYPE` and :func:`PYFUNCTYPE`
* the implementations of :class:`CDLL`, :class:`PyDLL` and :class:`LibraryLoader`
* the definitions of the :data:`pythonapi`, :data:`cdll` and :data:`pydll` globals
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
All Rights Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
"""
import struct
# We pretend to be a 64-bit system.
_POINTER_SIZE = 8
class ArgumentError(Exception):
pass
_array_type_cache = {}
class _CDataMeta(type):
def __mul__(self, count):
try:
return _array_type_cache[self, count]
except KeyError:
array_type = type(
"{}_Array_{}".format(self.__name__, str(count)),
(Array,),
{'_type_': self, '_length_': count},
)
_array_type_cache[self, count] = array_type
return array_type
class _CData(object, metaclass=_CDataMeta):
@classmethod
def from_address(cls, address):
return cls()
@classmethod
def in_dll(cls, dll, name):
return cls()
def _auto_unwrap(self):
return self
class _SimpleCData(_CData):
@classmethod
def _sizeof(cls):
return struct.calcsize(cls._type_)
def __new__(cls, value=None):
self = super().__new__(cls)
self.value = value if value is not None else cls._DEFAULT_VALUE
return self
def __init__(self, value=None):
pass
def _auto_unwrap(self):
if _SimpleCData in type(self).__bases__:
return self.value
else:
return self
# --- Scalar C types -------------------------------------------------------
# Each class pairs a struct format character (_type_) with a default value.
# Sizes come from struct.calcsize, except the pointer-sized types, which
# override _sizeof with the pretended 64-bit pointer width.

class py_object(_SimpleCData):
    _type_ = "O"
    _DEFAULT_VALUE = None

    @classmethod
    def _sizeof(cls):
        # A PyObject* slot is pointer-sized.
        return _POINTER_SIZE

class c_short(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "h"

class c_ushort(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "H"

class c_long(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "l"

class c_ulong(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "L"

class c_int(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "i"

class c_uint(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "I"

class c_float(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "f"

class c_double(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "d"

class c_longdouble(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "g"

# LP64 model: long and long long are both 64-bit, so the types are aliased.
c_longlong = c_long
c_ulonglong = c_ulong

class c_ubyte(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "B"

class c_byte(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "b"

class c_char(_SimpleCData):
    # A single byte; default is NUL.
    _DEFAULT_VALUE = b'\x00'
    _type_ = "c"

class c_char_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "z"

    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE

class c_void_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "P"

    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE

class c_bool(_SimpleCData):
    _DEFAULT_VALUE = False
    _type_ = "?"

class c_wchar_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "Z"

    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE

class c_wchar(_SimpleCData):
    # A single wide character; default is NUL.
    _DEFAULT_VALUE = '\x00'
    _type_ = "u"

# Fixed-width and platform-alias names, again assuming a 64-bit platform.
c_size_t = c_ulong
c_ssize_t = c_long
c_int8 = c_byte
c_uint8 = c_ubyte
c_int16 = c_short
c_uint16 = c_ushort
c_int32 = c_int
c_uint32 = c_uint
c_int64 = c_long
c_uint64 = c_ulong
class _Pointer(_CData):
    # Base class for generated pointer types (see POINTER below).
    pass

# Cache of generated pointer types; None maps to the generic void pointer.
_pointer_type_cache = {None: c_void_p}

def POINTER(ctype):
    """Return (and cache) the pointer type for *ctype*, named LP_<name>."""
    try:
        return _pointer_type_cache[ctype]
    except KeyError:
        pointer_ctype = type('LP_{}'.format(ctype.__name__), (_Pointer,), {'_type_': ctype})
        _pointer_type_cache[ctype] = pointer_ctype
        return pointer_ctype

def pointer(cvalue):
    # NOTE(review): generated pointer types inherit _CData, which defines no
    # constructor taking arguments, so calling the type with `cvalue` looks
    # like it would raise TypeError -- confirm intended usage.
    return POINTER(type(cvalue))(cvalue)
class Array(_CData):
    """Mock fixed-length array type; concrete subclasses come from _CDataMeta."""

class Structure(_CData):
    """Mock C struct; subclasses declare their layout in `_fields_`."""

    def __init__(self, *args):
        super().__init__()
        if not args:
            # No positional values: populate every field with its type's default.
            for field_name, field_ctype in type(self)._fields_:
                setattr(self, field_name, field_ctype()._auto_unwrap())
        else:
            # Positional values are matched to fields in declaration order;
            # zip stops at the shorter sequence, so surplus fields or surplus
            # arguments are silently ignored.
            for (field_name, _field_ctype), field_value in zip(type(self)._fields_, args):
                setattr(self, field_name, field_value)

class Union(_CData):
    """Mock C union; behaves like a plain attribute bag here."""
class CFuncPtr(_CData):
    """Mock foreign-function pointer.

    Calling an instance never dispatches to native code: it fabricates a
    default instance of the declared restype (see __call__).
    """

    _restype_ = None
    _argtypes_ = ()

    def __init__(self, src):
        super().__init__()
        if isinstance(src, tuple):
            # (function_name, dll) pair, as produced by CDLL.__getitem__.
            (name, dll) = src
            self._func_name = name
            self._dll_name = dll._name
        else:
            self._func_name = None
            self._dll_name = None
        self.restype = type(self)._restype_
        self.argtypes = type(self)._argtypes_

    def __call__(self, *args):
        if self.restype is None:
            return None
        else:
            if self._dll_name == 'objc' and self._func_name in {'objc_getClass', 'objc_getProtocol'}:
                # Special-case Objective-C runtime lookups: derive the result
                # from the hash of the requested name so the same name yields
                # a stable, distinct value.
                res = self.restype(hash(args[0]))
            else:
                res = self.restype()
            return res._auto_unwrap()
# Cache of generated C function-pointer types, keyed by (restype, argtypes).
_c_functype_cache = {}

def CFUNCTYPE(restype, *argtypes):
    """Return a cached CFuncPtr subclass for the given signature."""
    key = (restype, argtypes)
    if key not in _c_functype_cache:
        class CFunctionType(CFuncPtr):
            _argtypes_ = argtypes
            _restype_ = restype
        _c_functype_cache[key] = CFunctionType
    return _c_functype_cache[key]

def PYFUNCTYPE(restype, *argtypes):
    """Like CFUNCTYPE but uncached, matching ctypes' Python-callconv variant."""
    class CFunctionType(CFuncPtr):
        _argtypes_ = argtypes
        _restype_ = restype
    return CFunctionType
def sizeof(ctype):
    # Size in bytes, delegated to the type's struct-format calculation.
    return ctype._sizeof()

def addressof(cvalue):
    # The CPython object id doubles as a stable fake memory address.
    return id(cvalue)

def alignment(ctype):
    # Crude model: assume every type is aligned to its own size.
    return sizeof(ctype)

def byref(ctype):
    # Mock: byref is indistinguishable from an ordinary pointer here.
    return pointer(ctype)

def cast(cvalue, ctype):
    # Already the right type: return unchanged; otherwise rewrap the value.
    if isinstance(cvalue, ctype):
        return cvalue
    else:
        return ctype(cvalue.value)

def memmove(dst, src, count):
    # Raw memory copies cannot be emulated; fail loudly instead of silently
    # corrupting state.
    raise NotImplementedError('memmove({}, {}, {})'.format(dst, src, count))

def string_at(address):
    # Mock: there is no real memory to read, so always report an empty C string.
    return c_char_p(b'')
class CDLL(object):
    """Mock shared library; attribute access fabricates function pointers."""

    # Default restype for functions looked up on this library.
    _func_restype_ = c_int

    def __init__(self, name):
        super().__init__()
        self._name = name
        # Per-instance function-pointer class. A class body can read names
        # from the enclosing function scope, so _restype_ picks up this
        # library's default restype.
        class _FuncPtr(CFuncPtr):
            _restype_ = self._func_restype_
        self._FuncPtr = _FuncPtr

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Never fabricate dunder attributes; that would confuse protocols
            # (copy, pickle, ...) that probe for them.
            raise AttributeError(name)
        func = self.__getitem__(name)
        # Cache on the instance so __getattr__ only fires once per name.
        setattr(self, name, func)
        return func

    def __getitem__(self, name_or_ordinal):
        func = self._FuncPtr((name_or_ordinal, self))
        if not isinstance(name_or_ordinal, int):
            func.__name__ = name_or_ordinal
        return func

# Mock of ctypes.PyDLL (calls that keep the GIL); behaviourally identical here.
class PyDLL(CDLL):
    pass

pythonapi = PyDLL(None)
class LibraryLoader(object):
    """Loads libraries on attribute access, e.g. ``cdll.libc`` -> CDLL('libc')."""

    def __init__(self, dlltype):
        self._dlltype = dlltype

    def __getattr__(self, name):
        # Private-looking names are never treated as library names.
        if name[0] == '_':
            raise AttributeError(name)
        library = self._dlltype(name)
        # Cache on the instance so later lookups bypass __getattr__.
        setattr(self, name, library)
        return library

    def __getitem__(self, name):
        return getattr(self, name)

    def LoadLibrary(self, name):
        """Explicitly load *name*, always returning a fresh library object."""
        return self._dlltype(name)
# Ready-made loaders mirroring ctypes' module-level conveniences.
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
| StarcoderdataPython |
5027443 | #! /usr/bin/env python
import pathlib
import re
import shutil
import os
import git
from ha import prepare_homeassistant
from const import (
TMP_DIR, PACKAGE_DIR, REQUIREMENTS_FILE, CONST_FILE, REQUIREMENTS_FILE_DEV, LICENSE_FILE_HA,
LICENSE_FILE_NEW, path, files, requirements_remove, HA_VERSION_FILE,
)
# Start from a clean slate: drop any previously generated package and
# requirements file so stale artifacts never leak into a new release.
if os.path.isdir(PACKAGE_DIR):
    shutil.rmtree(PACKAGE_DIR)
if os.path.isfile(REQUIREMENTS_FILE):
    os.remove(REQUIREMENTS_FILE)
# Fetch/refresh the Home Assistant checkout; returns its version string.
ha_version = prepare_homeassistant()
# Version of Home Assistant this package was last generated against.
with open(HA_VERSION_FILE, 'r') as f:
    current_version = f.read()
print(f"Current Version: {current_version}")
def process_files():
    """Regenerate the pytest-homeassistant-custom-component package.

    Copies the relevant test helpers out of the Home Assistant checkout in
    TMP_DIR into PACKAGE_DIR, rewrites their imports, splits the upstream
    test requirements into runtime/dev requirement files, and refreshes the
    version line in README.md.
    """
    os.mkdir(PACKAGE_DIR)
    os.mkdir(os.path.join(PACKAGE_DIR, "test_util"))
    # Copy the upstream requirements plus the helper modules the package
    # re-exports.
    shutil.copy2(os.path.join(TMP_DIR, REQUIREMENTS_FILE), REQUIREMENTS_FILE)
    shutil.copy2(
        os.path.join(TMP_DIR, "homeassistant", CONST_FILE),
        os.path.join(PACKAGE_DIR, CONST_FILE),
    )
    shutil.copy2(
        os.path.join(TMP_DIR, "tests", "test_util", "aiohttp.py"),
        os.path.join(PACKAGE_DIR, "test_util", "aiohttp.py"),
    )
    shutil.copy2(
        os.path.join(TMP_DIR, "tests", "test_util", "__init__.py"),
        os.path.join(PACKAGE_DIR, "test_util", "__init__.py"),
    )
    shutil.copy2(
        os.path.join(TMP_DIR, LICENSE_FILE_HA),
        LICENSE_FILE_NEW,
    )
    for f in files:
        shutil.copy2(os.path.join(TMP_DIR, "tests", f), os.path.join(PACKAGE_DIR, f))
        # The copied modules import each other as "tests.x"; rewrite those
        # into package-relative imports.
        filename = os.path.join(PACKAGE_DIR, f)
        with open(filename, "r") as file:
            filedata = file.read()
        filedata = filedata.replace("tests.", ".")
        with open(filename, "w") as file:
            file.write(filedata)
    # conftest.py cannot be imported under that name from a package; expose
    # it as plugins.py instead.
    os.rename(
        os.path.join(PACKAGE_DIR, "conftest.py"), os.path.join(PACKAGE_DIR, "plugins.py")
    )
    # Keep only the header of const.py (first 14 lines) and drop its
    # homeassistant imports, which would pull in the full package.
    with open(os.path.join(PACKAGE_DIR, CONST_FILE), "r") as original_file:
        data = original_file.readlines()
    data = [d for d in data[:14] if "from homeassistant." not in d]
    with open(os.path.join(PACKAGE_DIR, CONST_FILE), "w") as new_file:
        new_file.write("".join(data))
    # Stamp every generated module's docstring with a provenance note.
    added_text = "This file is originally from homeassistant/core and modified by pytest-homeassistant-custom-component.\n"
    triple_quote = '"""\n'
    for f in pathlib.Path(PACKAGE_DIR).rglob("*.py"):
        with open(f, "r") as original_file:
            data = original_file.readlines()
        # Assumes each file starts with a one-line """...""" docstring:
        # strip the quotes, then re-emit with the provenance note appended.
        old_docstring = data[0][3:][:-4]
        new_docstring = f"{triple_quote}{old_docstring}\n\n{added_text}{triple_quote}"
        body = "".join(data[1:])
        with open(f, "w") as new_file:
            new_file.write("".join([new_docstring, body]))
    added_text = "# This file is originally from homeassistant/core and modified by pytest-homeassistant-custom-component.\n"
    with open(REQUIREMENTS_FILE, "r") as original_file:
        data = original_file.readlines()

    def is_test_requirement(requirement):
        """Return True if this requirements line belongs in the runtime file."""
        # if == not in the line this is either a comment or unknown package:
        # include it.
        if "==" not in requirement:
            return True
        # Type-stub packages are dev-only.
        regex = re.compile('types-.+')
        if re.match(regex, requirement):
            return False
        # BUG FIX: this previously read the caller's loop variable `d` from
        # the enclosing scope instead of the `requirement` argument; it only
        # worked by accident because the caller happened to pass `d`.
        if requirement.split("==")[0] in requirements_remove:
            return False
        return True

    # Split requirements into runtime (new_data) and dev-only (removed_data).
    new_data = []
    removed_data = []
    for d in data:
        if is_test_requirement(d):
            new_data.append(d)
        else:
            removed_data.append(d)
    new_data.append(f"homeassistant=={ha_version}\n")
    new_data.insert(0, added_text)

    def find_sqlalchemy(data):
        """Return the pinned sqlalchemy line from requirements_all.txt."""
        for d in data:
            if "sqlalchemy" in d:
                return d
        raise ValueError("could not find sqlalchemy")

    # The recorder helpers need sqlalchemy pinned to the same version HA uses.
    with open(os.path.join(TMP_DIR, "requirements_all.txt"), "r") as f:
        data = f.readlines()
    sqlalchemy = find_sqlalchemy(data)
    if not "\n" == sqlalchemy[-2:]:
        sqlalchemy = f"{sqlalchemy}\n"
    new_data.append(sqlalchemy)
    removed_data.insert(0, added_text)
    with open(REQUIREMENTS_FILE, "w") as new_file:
        new_file.writelines(new_data)
    with open(REQUIREMENTS_FILE_DEV, "w") as new_file:
        new_file.writelines(removed_data)
    # Import late: const.py (which defines __version__) was regenerated above.
    from pytest_homeassistant_custom_component.const import __version__
    with open("README.md", "r") as original_file:
        data = original_file.readlines()
    # NOTE(review): this blanks README line 3; the original f-string content
    # appears to have been lost upstream -- confirm what belongs here.
    data[
        2
    ] = f"\n"
    with open("README.md", "w") as new_file:
        new_file.write("".join(data))
    print(f"New Version: {__version__}")
    # Patch common.py so load_fixture resolves fixture paths relative to the
    # *calling* test file (found via the stack) instead of this package.
    with open(os.path.join(PACKAGE_DIR, "common.py"), "r") as original_file:
        data = original_file.readlines()
    import_time_lineno = [i for i, line in enumerate(data) if "from time" in line]
    assert len(import_time_lineno) == 1
    data.insert(import_time_lineno[0] + 1, "import traceback\n")
    fixture_path_lineno = [i for i, line in enumerate(data) if "def get_fixture_path" in line]
    assert len(fixture_path_lineno) == 1
    data.insert(fixture_path_lineno[0] + 2, "    start_path = traceback.extract_stack()[-3].filename\n")
    data[fixture_path_lineno[0] + 7] = data[fixture_path_lineno[0] + 7].replace("__file__", "start_path")
    data[fixture_path_lineno[0] + 9] = data[fixture_path_lineno[0] + 9].replace("__file__", "start_path")
    with open(os.path.join(PACKAGE_DIR, "common.py"), "w") as new_file:
        new_file.writelines(data)
# Only regenerate when the upstream Home Assistant version changed, then
# record the version we generated against.
if ha_version != current_version:
    process_files()
    with open(HA_VERSION_FILE, 'w') as f:
        f.write(ha_version)
else:
    print("Already up to date")
| StarcoderdataPython |
def solution(S):
    """Count the substrings of S whose characters could form a palindrome.

    A substring qualifies when the number of characters occurring an odd
    number of times equals its length mod 2 (0 for even length, 1 for odd).
    """
    total = 0
    for end in range(1, len(S) + 1):
        # Walk the substring S[start:end] backwards from the end, toggling
        # membership so `odd_chars` always holds the odd-count characters.
        odd_chars = set()
        for start in range(end - 1, -1, -1):
            ch = S[start]
            if ch in odd_chars:
                odd_chars.discard(ch)
            else:
                odd_chars.add(ch)
            length = end - start
            if len(odd_chars) == length % 2:
                total += 1
    return total
def test():
    """Sanity-check solution() against the known reference cases."""
    cases = [
        ("02002", 11),
        ("09345", 5),
        ("09355", 7),
        ("00000", 15),
    ]
    for digits, expected in cases:
        assert solution(digits) == expected
if __name__ == "__main__":
    import sys
    # Usage: script.py <digits>  (or the literal "test" to run self-checks).
    S = sys.argv[1]
    if S == "test":
        test()
    else:
        print(solution(S))
| StarcoderdataPython |
1735895 | import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
class Preprocessor:
    """Dataframe preparation helpers (scaling, sampling, encoding) for a loan
    dataset whose "Status" column is either "paid" or "defaulted"."""

    def __init__(self):
        pass

    def normalize(self, df, columns):
        """Min-max scale *columns* into [0, 1].

        NOTE: mutates *df* in place and returns it.
        """
        min_max_scaler = preprocessing.MinMaxScaler()
        df[columns] = min_max_scaler.fit_transform(df[columns])
        return df

    def select(self, input_df, columns):
        """Return only the given columns."""
        return input_df[columns]

    def filter(self, input_df, col, valid_status):
        """Return the rows whose *col* value is one of *valid_status*."""
        return input_df[input_df[col].isin(valid_status)]

    def transform(self):
        pass

    def _classes(self, input_df):
        """Return (paid_rows, defaulted_rows) partitions of *input_df*."""
        df_paid = input_df[input_df["Status"] == "paid"]
        df_defaulted = input_df[input_df["Status"] == "defaulted"]
        return df_paid, df_defaulted

    def under_sample(self, input_df, ratio=1.0, random_state=3):
        """Undersample the "paid" class down to ``ratio *`` the number of
        "defaulted" rows (ratio=1 balances the classes).

        BUG FIX: the class counts were previously obtained by unpacking
        ``value_counts()``, which orders by frequency -- the classes were
        silently swapped whenever "defaulted" happened to be the majority.
        """
        df_class_0, df_class_1 = self._classes(input_df)
        df_class_0_under = df_class_0.sample(
            int(ratio * len(df_class_1)), random_state=random_state
        )
        df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)
        return df_train_under

    def over_sample(self, input_df, ratio=1.0, random_state=3):
        """Oversample the "defaulted" class (with replacement) up to
        ``ratio *`` the number of "paid" rows (ratio=1 balances the classes).

        Same count-ordering fix as under_sample.
        """
        df_class_0, df_class_1 = self._classes(input_df)
        df_class_1_over = df_class_1.sample(
            int(ratio * len(df_class_0)), replace=True, random_state=random_state
        )
        df_train_over = pd.concat([df_class_0, df_class_1_over], axis=0)
        return df_train_over

    def split(self, input_df, test_size=0.3, random_state=3):
        """Random train/test split; returns (train, test)."""
        train, test = train_test_split(input_df, test_size=test_size, random_state=random_state)
        return train, test

    def ohe_encode(self, input_df, categorical_columns, ordinal_columns):
        """One-hot encode *categorical_columns* and append *ordinal_columns*
        unchanged; returns a new dataframe of the encoded features."""
        ohe = preprocessing.OneHotEncoder(handle_unknown="ignore", sparse=False)
        X = np.transpose(ohe.fit_transform(input_df[categorical_columns]))
        for c in ordinal_columns:
            X = np.vstack([X, input_df[c]])
        X = np.transpose(X)
        features = ohe.get_feature_names(categorical_columns).tolist()
        for c in ordinal_columns:
            features.append(c)
        X_df = pd.DataFrame(X, columns=features)
        return X_df

    def label_encode(self, df, categorical_columns):
        """Integer-encode *categorical_columns* in place (pandas category codes)."""
        for cal in categorical_columns:
            df[cal] = df[cal].astype('category')
        cat_cols = df.select_dtypes(['category']).columns
        df[cat_cols] = df[cat_cols].apply(lambda x: x.cat.codes)
        return df

    def write_to_csv(self, input_df, path):
        """Persist *input_df* to *path* as CSV (index included)."""
        input_df.to_csv(path)

    def transformFundedTime(self, df):
        """Add "Funded Time" = year + 0.833 * month.

        NOTE(review): 0.833 looks like it was meant to be 1/12 (0.0833) to
        express the month as a fraction of a year -- confirm before changing.
        """
        df["Funded Time"] = df.apply(lambda row: row['Funded Date.year'] + 0.833 * row['Funded Date.month'], axis=1)
        return df

    def transformCountryCurrency(self, df):
        """Add a combined "Country Currency" key column, e.g. "US_USD"."""
        df['Country Currency'] = df.apply(lambda row: row.Country + '_' + row.Currency, axis=1)
        return df

    def transformStatus(self, df):
        """Binarise "Status": 1 for defaulted, 0 for paid.

        NOTE(review): raises KeyError when no "defaulted" value occurs, since
        get_dummies only creates columns for observed values.
        """
        df['Status'] = pd.get_dummies(df["Status"], columns=["Status"])["defaulted"]
        return df
| StarcoderdataPython |
8002493 | <gh_stars>0
#pip install lxml
#pip install pyOpenSSL
import requests
import json
from bs4 import BeautifulSoup as bs
import ssl
import time
'''
The elsevier search is kind of a tree structure:
"keyword --> a list of journals (a journal contain many articles) --> lists of articles
'''
# Shared crawl state: journal "most downloaded" pages, article URLs found on
# them, and the scraped abstracts keyed by "<year><title>".
journals = []
articles = []
abstracts = {}
# Publication years considered relevant for the crawl.
years = ['1990', '1991', '1992', '1993', '1994', '1995', '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
def getJournals(url):
    """Query the Elsevier site-search API at *url* and append each journal's
    "most downloaded articles" page URL to the global `journals` list.

    (An unused ssl context was previously built here; requests never used it,
    so it has been removed.)
    """
    global journals
    html = requests.get(url)
    # The response is JSON; the target URLs are hidden deep inside nested
    # dictionaries and lists.
    data = json.loads(html.content)
    hits = data['hits']['hits']
    for hit in hits:
        journals.append(str(hit['_source']['url'] + '/most-downloaded-articles'))
    return None
def getArtciels(url):
    """Scrape a journal page at *url* and collect ScienceDirect article links
    into the global `articles` list.

    (Function name typo kept for backward compatibility with callers.)
    """
    # BUG FIX: this previously declared `global artciles` (misspelled), which
    # bound nothing; it only worked because list.append does not need a
    # global declaration. The dead, unused ssl context was removed too.
    global articles
    html = requests.get(url)
    bsObj = bs(html.content, features='html.parser')
    for link in bsObj.findAll('a'):
        if 'href' in link.attrs:
            rawlink = link.attrs['href']
            if rawlink.startswith('https://www.sciencedirect.com/science/article/pii/'):
                articles.append(rawlink)
    return None
def getAbstracts(url):
    """Fetch one article's XML from the Elsevier content API and store its
    abstract in the global `abstracts` dict keyed by "<year><title>".

    Only articles whose cover year (chars [-11:-7] of dc:identifier) falls in
    `years` are kept. Malformed responses are skipped silently.
    """
    global abstracts
    global years
    # e.g. https://www.sciencedirect.com/science/article/pii/S0043135419301794
    #  ->  https://api.elsevier.com/content/article/pii/S0043135419301794
    html = requests.get(url)
    bsObj = bs(html.content, features='lxml')
    title_tag = bsObj.find('dc:title')
    identifier_tag = bsObj.find('dc:identifier')
    description_tag = bsObj.find('dc:description')
    # BUG FIX: the tags were previously dereferenced (.text) before any
    # None-check, so a response missing one of them raised AttributeError.
    if title_tag is None or identifier_tag is None or description_tag is None:
        return None
    iden = identifier_tag.text
    if title_tag.text and iden and iden[-11:-7] in years:
        title = title_tag.text
        abstract = description_tag.text.replace('Abstract', ' ').strip()
        preKey = iden[-11:-7] + title
        abstracts[preKey] = abstract
    return None
start = time.time()

# Build the site-search query; spaces must be URL-encoded.
pre_keyword = input("Please type in your keyword: ")
keyword = pre_keyword.replace(' ', '%20')
address = 'https://site-search-api.prod.ecommerce.elsevier.com/search?query=' + keyword + '&labels=journals&start=0&limit=800&lang=en-xs'
######################################################################################
####### increase the number following "&limit=" to increase the number of URLs #######
######################################################################################

# Crawl the tree: keyword -> journals -> articles -> abstracts.
getJournals(address)
for url in journals:
    getArtciels(url)
for article in articles:
    # Re-target the scraped ScienceDirect URL at the content API (same PII).
    link = 'https://api.elsevier.com/content/article/pii/' + article[-17::] + '?APIKey=<KEY>'
    getAbstracts(link)

# Write one JSON file per (keyword, year): {title: abstract, ...}.
# BUG FIX: this previously wrote each abstract as a separate json.dumps()
# appended to the file, producing concatenated JSON objects that no parser
# could read back (the original code even carried a comment flagging the
# problem). Abstracts are now grouped per file and dumped exactly once.
grouped = {}
for key, value in abstracts.items():
    filename = str(pre_keyword) + '_' + key[0:4] + '.json'
    grouped.setdefault(filename, {})[key[4::]] = value
for filename, data in grouped.items():
    with open(filename, 'w') as file:
        file.write(json.dumps(data))

end = time.time()
print(end - start)
| StarcoderdataPython |
3495297 | <filename>The-Sieve-of-Eratosthenes/SoE2.py
# iterator version
# returns a generator directly
# Iterator version of the Sieve of Eratosthenes: myClass(n) computes all
# primes <= n and iterating an instance yields them in ascending order.
class myClass():
    # Class-level defaults; each is shadowed by an instance attribute in
    # __init__ (index is kept for backward compatibility).
    prime_numbers = []
    index = 0
    count = 0

    def __init__(self, num):
        """Validate *num* and precompute every prime <= num.

        Raises RuntimeError for non-positive values, for 1, and for
        non-integer input (checked in that order, as before).
        """
        if num <= 0:
            raise RuntimeError("Not positive integer")
        elif num == 1:
            raise RuntimeError("equal 1")
        elif type(num) != int:
            raise RuntimeError("not integer")
        # Classic boolean sieve, O(n log log n). Replaces the previous
        # quadratic approach of repeatedly calling list.remove() on a list
        # of candidates.
        is_prime = [True] * (num + 1)
        is_prime[0] = is_prime[1] = False
        for candidate in range(2, int(num ** 0.5) + 1):
            if is_prime[candidate]:
                # Start at candidate^2: smaller multiples were already marked.
                for multiple in range(candidate * candidate, num + 1, candidate):
                    is_prime[multiple] = False
        self.prime_numbers = [i for i, prime in enumerate(is_prime) if prime]
        self.count = len(self.prime_numbers)

    def __iter__(self):
        # A fresh generator per iteration over the precomputed primes.
        # (The commented-out manual __next__ variant was dead code and has
        # been removed.)
        return (i for i in self.prime_numbers)

for i in myClass(20):
    print(i)
| StarcoderdataPython |
6456118 | <gh_stars>1-10
import tensorflow as tf
from tensorflow.python.framework import graph_util
# Load a frozen GraphDef, print its node names, then re-freeze the current
# checkpoint (variables baked into constants) back into expert-graph.pb.
with tf.Session() as sess:
    with open('./expert-graph.pb', 'rb') as graph:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph.read())
        for i in graph_def.node:
            print(i.name)
        output = tf.import_graph_def(graph_def, return_elements=['out:0'])
# NOTE(review): a fresh session is created here, replacing the one above
# (which is closed on leaving the `with`) -- presumably intentional so the
# restore happens in a clean session; confirm.
sess = tf.Session()
# Load the model structure from the checkpoint's meta graph.
saver = tf.train.import_meta_graph('./interFS-0.meta')
# Pointing at the directory is enough to recover all variable values.
saver.restore(sess, tf.train.latest_checkpoint('./'))
[i.name for i in sess.graph_def.node]
# The key step: convert variables to constants so the graph and its weights
# can be saved together as a single .pb file.
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def,output_node_names=[i.name for i in sess.graph_def.node])
tf.train.write_graph(constant_graph, './', 'expert-graph.pb', as_text=False)
8102385 | # coding: utf-8
import logging
from typing import List, Optional
from .api_area_block import ApiAreaBlock
from ..web.block_obj import BlockObj
from ..dataclass.ns import Ns
from ..dataclass.area import Area
from ..common.regx import pattern_http
from ..common.constants import URL_SPLIT
from ..common.log_load import Log
log = Log()
class ApiArea(BlockObj):
    """Parses the <area> tags of an image-map block into Area objects."""

    def __init__(self, block: ApiAreaBlock):
        self._block: ApiAreaBlock = block
        super().__init__(self._block.soup)
        # Lazily built result cache; None means "not parsed yet".
        self._data = None

    def _log_missing(self, for_str: Optional[str] = None):
        # Warn about missing data, optionally naming what was missing.
        if for_str:
            f_str = f" for {for_str}"
        else:
            f_str = ''
        log.logger.warning(
            "ApiArea.get_obj() Failed to get find data%s. Url: %s", f_str, self.url_obj.url)

    def _get_ns(self, name: str, href: str) -> Ns:
        """Derive a dotted namespace from an href such as
        "interfacecom/sun/star/.../XShapeDescriptor.html"."""
        parts = href.split(sep=URL_SPLIT)
        parts[0] = 'com'  # rename interfacecom etc...
        parts.pop()  # drop XShapeDescriptor.html etc...
        ns = ".".join(parts)
        return Ns(name=name, namespace=ns)

    def get_obj(self) -> List[Area]:
        """List of area items (parsed once, then cached)."""
        # IDIOM FIX: was `if not self._data is None`; `is not None` is the
        # standard form. Note a failed parse still caches an empty list.
        if self._data is not None:
            return self._data
        self._data = []
        tag = self._block.get_obj()
        if not tag:
            self._log_missing('ApiAreaBlock instance')
            return self._data
        rs = tag.select('area')
        if not rs:
            self._log_missing('area tags')
            return self._data
        for el in rs:
            # Validate each attribute in turn; a missing/invalid one logs a
            # warning and skips just that <area>.
            href = el.get('href', None)
            if not href:
                self._log_missing('area href')
                continue
            name = el.get('alt', None)
            if not name:
                self._log_missing('area alt')
                continue
            coords = el.get('coords', None)
            if not coords:
                # FIX: log message previously said "cords".
                self._log_missing('area coords')
                continue
            title = el.get('title', '')
            a_coords = coords.split(',')
            if len(a_coords) != 4:
                log.logger.warning(
                    "ApiArea.get_obj() Bad Coords for %s. Url: %s", name, self.url_obj.url)
                continue
            ns = self._get_ns(name=name, href=href)
            x1 = int(a_coords[0].strip())
            y1 = int(a_coords[1].strip())
            x2 = int(a_coords[2].strip())
            y2 = int(a_coords[3].strip())
            # Relative hrefs are resolved against the site base URL.
            m = pattern_http.match(href)
            if not m:
                href = self.url_obj.url_base + '/' + href
            area = Area(name=name, ns=ns, href=href, x1=x1,
                        y1=y1, x2=x2, y2=y2, title=title)
            self._data.append(area)
        return self._data
| StarcoderdataPython |
11236883 | <gh_stars>100-1000
import argparse
import logging
import uuid
from urllib.parse import urljoin, urlparse
import os
import requests
import requests.exceptions
import tldextract
from bs4 import BeautifulSoup
from py_ms_cognitive import PyMsCognitiveWebSearch, PyMsCognitiveImageSearch
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
from helpers import utils
DESC = """File sample crawler"""
# Bing Cognitive Search API key -- placeholder, must be supplied by the user.
MS_KEY = "YOUR_KEY"
# Example queries; QUERY is not referenced by the crawler itself.
# QUERY = "inurl:(htm|html|php) intitle:\"index of\""
QUERY = "surfing ext:pdf"
# Per-request timeout (seconds) and cap on search results per query.
MAX_TIMEOUT = 2
LIMIT_RESULTS = 10
headers = {
    'User-Agent': 'TUBBOT',
}
# File extensions treated as images (routed to Bing image search).
image_list = ["jpg", "png", "gif", "tif"]
def download_files_from_website(website, filetype, out_dir):
    """Download every link on *website* that ends in *filetype* into *out_dir*.

    Each file is saved under a fresh "<filetype>_<uuid4>" name. Returns the
    number of matching links found (all of which are downloaded).
    """
    r = requests.get(website)
    soup = BeautifulSoup(r.text, "html.parser")
    link_anchors = soup.find_all("a")
    links = list(map(lambda x: x.get("href"), link_anchors))  # type: [str]
    links = list(filter(lambda x: x is not None and x.lower().endswith(filetype), links))
    # Create the target directory once, up front (previously re-checked on
    # every iteration of the download loop).
    if links:
        os.makedirs(out_dir, exist_ok=True)
    for link in links:
        # urljoin handles both relative and absolute hrefs.
        rurl = urljoin(website, link)
        print("Yielded", rurl)
        filename = filetype + "_" + str(uuid.uuid4())
        utils.download_seed_to_folder(download_link=rurl, to_directory=out_dir, filename=filename)
    return len(links)
class FileCrawler:
    """Crawls the web (via the Bing search API) for sample files of a given
    filetype and downloads them into a target directory."""

    def __init__(self, filetype: str, ms_key: str, out_dir: str):
        """
        :param filetype: The filetype to crawl for (stored lower-cased).
        :param ms_key: The api key for the bing search api.
        :param out_dir: Directory the downloaded samples are written to.
        """
        self.filetype = filetype.lower()
        self.ms_key = ms_key
        print("KEY", self.ms_key)
        self.out_dir = out_dir
        self.search_service = None

    @staticmethod
    def is_valid_file(filetype, url: str) -> bool:
        """Return True if *url* appears to serve a file of *filetype* (i.e.
        it is not an HTML page), judged by a HEAD request and, failing that,
        by the URL's extension."""
        utils.temp_print("Trying", url)
        try:
            # HEAD keeps this cheap: headers only, no body download.
            response = requests.head(url, timeout=MAX_TIMEOUT, headers=headers)
        except Exception:
            return False
        if response.headers.get("content-type") is not None:
            if "text/html" in response.headers["content-type"]:
                return False
            if filetype in response.headers["content-type"]:
                return True
        # Fall back to the URL's extension (rpartition[2] is after the dot).
        part = url.rpartition(".")
        if part[2].lower() == filetype:
            return True
        return False

    def website_crawl(self, query):
        """
        Issue *query* to Bing, then crawl the result websites for links to a
        file. To be used with queries such as "jpg example file" or
        "inurl:(avi) intitle:index of".
        :return: A generator of candidate file URLs.
        """
        self.search_service = PyMsCognitiveWebSearch(self.ms_key, query)
        self.search_service.SEARCH_WEB_BASE = "https://api.cognitive.microsoft.com/bing/v7.0/search"
        results = self.search_service.search_all(format="json", quota=LIMIT_RESULTS)
        print(len(results))
        for item in results:
            try:
                r = requests.get(item.url, timeout=MAX_TIMEOUT)
            except Exception as e:
                print("Skipping ", item.url, "because of Exception", str(e))
                continue
            parsed_uri = urlparse(r.url)
            subdomain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
            # extract the top level domain:
            rootdomain = '{uri.scheme}://{ext.domain}.{ext.suffix}'.format(uri=parsed_uri,
                                                                           ext=tldextract.extract(r.url))
            # Politeness heuristic: only crawl sites that publish a robots.txt.
            try:
                if requests.head(subdomain + "/robots.txt").status_code == 404 and requests.head(
                        rootdomain + "/robots.txt").status_code == 404:
                    print("Skipping", subdomain, "because it does not contain a robots.txt")
                    continue
            except Exception as e:
                print("Skipping", subdomain, "because of exception", str(e))
                continue
            print("Now scanning through", r.url)
            html_text = r.text
            if "index of" in query and not "index of" in html_text:
                # TODO: Really really hacky. This if statement should only be
                # in place when issuing the "index of" query. Without the
                # phrase in the page we probably did not reach a file listing.
                continue
            soup = BeautifulSoup(html_text, "html.parser")
            link_anchors = soup.find_all("a")
            links = list(map(lambda x: x.get("href"), link_anchors))  # type: [str]
            links = list(filter(lambda x: x is not None and x.lower().endswith(self.filetype), links))
            for link in links:
                # urljoin handles both relative and absolute hrefs.
                filelink = urljoin(r.url, link)
                if self.is_valid_file(self.filetype, filelink):
                    print("Yielding", filelink)
                    yield filelink

    def try_filetype_crawl(self):
        """
        Try to find download links to files of the given file format.
        :return: A generator of candidate file URLs, best sources first.
        """
        # Strategy 1: a plain "filetype:" query - works for some, but not
        # all, filetypes.
        query = "filetype:" + self.filetype
        PyMsCognitiveWebSearch.SEARCH_WEB_BASE = "https://api.cognitive.microsoft.com/bing/v7.0/search"
        self.search_service = PyMsCognitiveWebSearch(self.ms_key, query)
        results = self.search_service.search_all(format="json", quota=LIMIT_RESULTS + 20)
        for item in results:
            try:
                # Resolve redirects so is_valid_file sees the final URL.
                r = requests.get(item.url, timeout=MAX_TIMEOUT, headers=headers)
            except Exception as e:
                print("Skipping ", item.url, "because of Exception", str(e))
                continue
            if self.is_valid_file(self.filetype, r.url):
                print("Yielding ", r.url)
                yield r.url
        # Strategy 2: if the requested filetype is an image, run an image search.
        if self.filetype in image_list:
            query = self.filetype + " sample"
            PyMsCognitiveImageSearch.SEARCH_IMAGE_BASE = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
            self.search_service = PyMsCognitiveImageSearch(self.ms_key, query)
            results = self.search_service._search(limit=LIMIT_RESULTS,
                                                  format="json")  # TODO: Class does not implement pagination? :(
            for item in results:
                utils.temp_print("Checking item", item.content_url)
                try:
                    r = requests.get(item.content_url, timeout=MAX_TIMEOUT, headers=headers)
                except Exception as e:
                    print("Skipping ", item.url, "because of Exception", str(e))
                    continue
                print("Url is", r.url)
                if self.is_valid_file(self.filetype, r.url):
                    print("Yielding ", r.url)
                    yield r.url
        # Strategy 3: generic "example file" / "sample file" website crawls.
        for result in self.website_crawl("." + self.filetype + " example file"):
            print("Yielding", result)
            yield result
        for result in self.website_crawl("." + self.filetype + " sample file"):
            print("Yielding", result)
            yield result
        # Last resort: the "index of" trick. Note that this can yield some
        # undesired file samples, use with caution!
        query = "inurl:(" + self.filetype + ") intitle:\"index of:\""
        self.search_service = PyMsCognitiveWebSearch(self.ms_key, query)
        results = self.search_service.search_all(format="json", quota=LIMIT_RESULTS)
        print(len(results))
        for item in results:
            try:
                r = requests.get(item.url, timeout=MAX_TIMEOUT)
            except Exception as e:
                print("Skipping ", item.url, "because of Exception", str(e))
                continue
            parsed_uri = urlparse(r.url)
            domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
            try:
                if requests.head(domain + "/robots.txt").status_code == 404:
                    print("Skipping", domain, "because it does not contain a robots.txt")
                    continue
            except Exception as e:
                print("Skipping", domain, "because of exception", str(e))
                continue
            print("Now scanning through", r.url)
            html_text = r.text
            if not "index of" in html_text:
                # We probably did not reach a file repository.
                continue
            soup = BeautifulSoup(html_text, "html.parser")
            link_anchors = soup.find_all("a")
            links = list(map(lambda x: x.get("href"), link_anchors))  # type: [str]
            links = list(filter(lambda x: x is not None and x.lower().endswith(self.filetype), links))
            for link in links:
                filelink = urljoin(r.url, link)
                if self.is_valid_file(self.filetype, filelink):
                    print("Yielding", filelink)
                    yield filelink

    def download(self, max_download=1) -> int:
        """
        Download up to *max_download* sample files of the configured filetype
        into self.out_dir.
        :return: The amount of downloaded files.
        """
        print("MAX", max_download)
        # exist_ok avoids the racy exists()-then-mkdir check the loop
        # previously repeated; commented-out dead download code was removed.
        os.makedirs(self.out_dir, exist_ok=True)
        downloaded = 0
        for rurl in self.try_filetype_crawl():
            print("Yielded", rurl)
            filename = self.filetype + "_" + str(uuid.uuid4())
            utils.download_seed_to_folder(download_link=rurl, to_directory=self.out_dir, filename=filename)
            downloaded += 1
            if downloaded >= max_download:
                return max_download
        return downloaded
def main():
    # Parse CLI arguments: an input file listing filetypes, the Bing key,
    # the output directory, and the per-filetype download quota.
    argParser = argparse.ArgumentParser(DESC)
    argParser.add_argument('-in', '--infile', type=str, required=True,
                           help="""List of new line seperated URLs to seeds""")
    argParser.add_argument("-k", "--key", type=str, required=True, help="The MS Key")
    argParser.add_argument("-d", "--dir", type=str, required=True, help="The directory to save the crawled files to.")
    argParser.add_argument("-m", "--max", type=int, required=False,
                           help="The number of samples to download per filetype. Default 2", default=2)
    _args = argParser.parse_args()
    ms_key = _args.key
    out_dir = _args.dir
    if not os.path.exists(out_dir):
        raise Exception("The specified dir " + str(out_dir) + " does not exist.")
    infile_path = _args.infile
    logging.basicConfig(filename="results.log", filemode="w", level=logging.INFO)
    # Track which filetypes yielded at least one sample.
    success = []
    failures = []
    total = []
    with open(infile_path, "r") as infile:
        for line in infile:
            # One filetype per line; tolerate a leading dot and mixed case.
            filetype = line.strip()
            if filetype[0] == ".":
                filetype = filetype[1:]
            filetype = filetype.lower()
            total.append(filetype)
            filetype_out_dir = out_dir + "/" + filetype + "_samples"
            filetype_max = _args.max  # The maximum for this filetype
            if not os.path.exists(filetype_out_dir):
                pass
            else:
                # We already have files - only download so much that we reach the maximum threshold
                filetype_max = filetype_max - len(
                    list(filter(lambda x: x.lower().endswith(filetype), os.listdir(filetype_out_dir))))
            if filetype_max > 0:
                print("Crawling for", filetype, "with max", filetype_max)
                fcrawler = FileCrawler(filetype=filetype, ms_key=ms_key, out_dir=filetype_out_dir)
                results = fcrawler.download(max_download=filetype_max)
                if results <= 0:
                    failures.append(str(filetype))
                    logging.warning("Found no samples for " + str(filetype))
                else:
                    success.append(str(filetype))
            else:
                # Quota already met by files on disk.
                success.append(str(filetype))
                print("Skipping", filetype, "already have", abs(filetype_max - _args.max))
    print("Found", len(success), "/", len(total), "filetypes")
    print("Done")

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3437009 | <reponame>harry1911/CoolCompiler<gh_stars>0
from general import visitor, errors
from general import ast_hierarchy as ast
from .type import Type
class TypeBuilderVisitor:
    """Semantic pass that populates each declared Cool type with its attributes
    and methods, reporting inheritance, redefinition and missing-type errors.

    Dispatch is done by the project's ``visitor`` decorators: every ``visit``
    method is registered for a specific AST node class.
    """

    def __init__(self, enviroment):
        # 'enviroment' (spelling kept from the project) maps type names to
        # Type objects collected by an earlier pass.
        self.enviroment = enviroment
        self.current_type = None # type(current_type) = Type

    @visitor.on('node')
    def visit(self, node):
        # Generic fallback; concrete overloads below are selected by node type.
        pass

    @visitor.when(ast.ProgramNode)
    def visit(self, node):
        # Visit every class declaration in the program.
        for _class in node.class_list:
            self.visit(_class)

    @visitor.when(ast.ClassNode)
    def visit(self, node):
        self.current_type = self.enviroment.get_type(node.name)
        parent_type = self.enviroment.get_type(node.parent)
        # NOTE(review): if node.name == "Object" and parent_type is None, the
        # attribute access below would fail; assumes throw_error aborts or that
        # Object's parent always resolves -- confirm.
        if parent_type is None and node.name != "Object":
            errors.throw_error(errors.TypeError(text=f"In class '{self.current_type.name}' parent type '{node.parent}' is missing.", line=node.line, column=node.column))
        # Cool forbids inheriting from the basic value classes.
        if parent_type.name in ['Int', 'String', 'Bool']:
            errors.throw_error(errors.SemanticError(text=f"In class '{self.current_type.name}' it is an error to inherit from basic class '{node.parent}'.", line=node.line, column=node.column))
        for attribute in node.attribute_list:
            self.visit(attribute)
        for method in node.method_list:
            self.visit(method)

    @visitor.when(ast.FeatureAttributeNode)
    def visit(self, node):
        # node.type_attribute can be SELF_TYPE
        if node.type_attribute == 'SELF_TYPE':
            # SELF_TYPE is resolved relative to the class being defined.
            attribute_type = Type('SELF_TYPE', self.current_type.name)
        else:
            attribute_type = self.enviroment.get_type(node.type_attribute)
        if attribute_type is not None:
            if node.name == "self":
                errors.throw_error(errors.SemanticError(text=f"Name attribute can not be self.", line=node.line, column=node.column))
            else:
                # define_attribute returns False on duplicate definitions.
                ans = self.current_type.define_attribute(node.name, node.type_attribute, node.line, node.column)
                if not ans:
                    errors.throw_error(errors.SemanticError(text=f"In class '{self.current_type.name}' attribute '{node.name}' is defined multiple times.", line=node.line, column=node.column))
        else:
            errors.throw_error(errors.TypeError(text=f"The type '{node.type_attribute}' of attribute '{node.name}' is missing.", line=node.line, column=node.column))

    @visitor.when(ast.FeatureMethodNode)
    def visit(self, node):
        # node.return_type_method can be SELF_TYPE
        if node.return_type_method == 'SELF_TYPE':
            return_type = Type('SELF_TYPE', self.current_type.name)
        else:
            return_type = self.enviroment.get_type(node.return_type_method)
        if return_type is not None:
            # formal_parameter_list
            # First pass over the formals: validate names and collect them.
            argument_list = []
            for parameter in node.formal_parameter_list:
                if parameter.name == 'self':
                    errors.throw_error(errors.SemanticError(text=f"In method '{node.name}' it is an error to bind self as a formal parameter.", line=node.line, column=node.column))
                if parameter.name in argument_list:
                    errors.throw_error(errors.SemanticError(text=f"In method '{node.name}' the argument '{parameter.name}' is defined multiple times.", line=node.line, column=node.column))
                argument_list.append(parameter.name)
            # Second pass: validate the declared type of every formal.
            argument_types = []
            for parameter in node.formal_parameter_list:
                if parameter.type_parameter == 'SELF_TYPE':
                    errors.throw_error(errors.TypeError(text=f"In method '{node.name}' the type of argument '{parameter.name}' cannot be SELF_TYPE.", line=node.line, column=node.column))
                _type = self.enviroment.get_type(parameter.type_parameter)
                if _type is not None:
                    argument_types.append(parameter.type_parameter)
                else:
                    errors.throw_error(errors.TypeError(text=f"The type of the parameter '{parameter.name}' in method '{node.name}' is missing.", line=node.line, column=node.column))
            # define_method returns False when the method was already defined.
            ans = self.current_type.define_method(node.name, node.return_type_method, argument_list, argument_types, node.line, node.column)
            if not ans:
                errors.throw_error(errors.SemanticError(text=f"In class '{self.current_type.name}' method '{node.name}' is defined multiple times.", line=node.line, column=node.column))
        else:
            errors.throw_error(errors.TypeError(text=f"In class '{self.current_type.name}' return type of method '{node.name}' is missing.", line=node.line, column=node.column))
| StarcoderdataPython |
5195081 | from Duelist_Algorithm import Duelist_Algorithm
import math
def f(x1, x2):
    """Levi function N.13, a standard 2-D optimization benchmark.

    f(x1, x2) = sin^2(3*pi*x1)
              + (x1 - 1)^2 * (1 + sin^2(3*pi*x2))
              + (x2 - 1)^2 * (1 + sin^2(2*pi*x2))

    Its global minimum is f(1, 1) = 0.  The original built the inputs into a
    throwaway list via repeated append; the terms are now computed directly.
    """
    term1 = math.sin(3 * x1 * math.pi) ** 2
    term2 = ((x1 - 1) ** 2) * (1 + math.sin(3 * x2 * math.pi) ** 2)
    term3 = ((x2 - 1) ** 2) * (1 + math.sin(2 * x2 * math.pi) ** 2)
    return term1 + term2 + term3
# Any of the test functions below can be substituted for obj inside the function f:
#ackley obj = -20*math.exp(-0.2*math.sqrt(0.5*(x1*x1*x2*x2)))-math.exp(0.5*(math.cos(2*math.pi*x1)+math.cos(2*math.pi*x2))) + math.e + 20
#beale obj = (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2
#goldstein obj = ((1 + (x[0] + x[1] + 1) ** 2 * (19 - 14 * x[0] + 3 * (x[0] ** 2) - 14 * x[1] + 6 * x[0] * x[1] + 3 * (x[1] ** 2))) * (30 + (2 * x[0] - 3 * x[1]) ** 2 * (18 - 32 * x[0] + 12 * (x[0] ** 2) + 48 * x[1] - 36 * x[0] * x[1] + 27 * (x[1] ** 2))))
#levi obj = (math.sin(3*x[0]*math.pi))**2 + ((x[0] - 1)**2)*(1 + math.sin(3*x[1]*math.pi)**2) + ((x[1] - 1)**2)*(1 + math.sin(2*x[1]*math.pi)**2)
# Decision-variable names and their box bounds: -10 <= x1, x2 <= 10
# (the search domain of the Levi N.13 benchmark).
x=["x1","x2"]
xmin=[-10,-10]
xmax=[10,10]
# Run the Duelist Algorithm on f for 100 iterations
# ('iterasyon' is Turkish for "iteration", 'baslangic' for "start").
DA = Duelist_Algorithm(f,x,xmin,xmax,iterasyon=100)
DA.baslangic()
| StarcoderdataPython |
1707332 | <reponame>acrenwelge/python-stuff<filename>simple-scripts/create-tar.py
import tarfile
import glob
def create_tarfile(tar_name="mytarfile.tar", pattern="./test/*.txt"):
    """Archive every file matching *pattern* into the tar archive *tar_name*.

    The defaults reproduce the original behavior (all .txt files under
    ./test collected into mytarfile.tar in the working directory).

    Args:
        tar_name: path of the tar archive to create (overwritten if present).
        pattern: glob pattern selecting the files to add.
    """
    # 'with' guarantees the archive is closed even if add() raises,
    # fixing the leaked file handle in the original version.
    with tarfile.open(tar_name, "w") as tfile:
        for file in glob.glob(pathname=pattern):
            tfile.add(file)


if __name__ == "__main__":
    # Run only when executed as a script, so importing this module no longer
    # has the side effect of creating an archive.
    create_tarfile()
| StarcoderdataPython |
11271684 | <gh_stars>1-10
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates saved models used for testing.
This executable should be run with an argument pointing to the testdata/ folder
in this directory. It will re-generate the saved models that are used for
testing.
"""
import os
from absl import app
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import saved_model
def _gen_uninitialized_variable(base_dir):
  """Generates a saved model with an uninitialized variable.

  The model is written to <base_dir>/UninitializedVariable and deliberately
  contains two variables named "uninitialized_variable" (int64 in the
  sub-module, float32 in the root) to exercise name/type disambiguation.
  """

  class SubModule(module.Module):
    """A module with an UninitializedVariable."""

    def __init__(self):
      self.uninitialized_variable = resource_variable_ops.UninitializedVariable(
          name="uninitialized_variable", dtype=dtypes.int64)

  class Module(module.Module):
    """A module with an UninitializedVariable."""

    def __init__(self):
      super(Module, self).__init__()
      self.sub_module = SubModule()
      self.initialized_variable = variables.Variable(
          1.0, name="initialized_variable")
      # An UninitializedVariable with the same name as the variable in the
      # SubModule, but with a different type.
      self.uninitialized_variable = resource_variable_ops.UninitializedVariable(
          name="uninitialized_variable", dtype=dtypes.float32)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
    def compute(self, value):
      # Only the initialized variable participates in the exported signature.
      return self.initialized_variable + value

  to_save = Module()
  saved_model.save(
      to_save, export_dir=os.path.join(base_dir, "UninitializedVariable"))
def _gen_simple_while_loop(base_dir):
  """Generates a saved model with a while loop.

  The exported compute(value) sums the integers value, value-1, ..., 1 via a
  tf.while_loop; the model is written to <base_dir>/SimpleWhileLoop.
  """

  class Module(module.Module):
    """A module with a while loop."""

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
    def compute(self, value):
      # Loop state is (accumulator, counter); iterate while the counter i > 0.
      acc, _ = control_flow_ops.while_loop(
          cond=lambda acc, i: i > 0,
          body=lambda acc, i: (acc + i, i - 1),
          loop_vars=(constant_op.constant(0.0), value))
      return acc

  to_save = Module()
  saved_model.save(
      to_save, export_dir=os.path.join(base_dir, "SimpleWhileLoop"))
def main(args):
  """Regenerate the test saved models under base_dir.

  Args:
    args: argv-style list; args[1] must be the output base directory.

  Raises:
    app.UsageError: if exactly one argument was not supplied.
  """
  if len(args) != 2:
    raise app.UsageError("Expected one argument (base_dir).")
  base_dir = args[1]
  # Emit each test model into its own subdirectory of base_dir.
  for generator in (_gen_uninitialized_variable, _gen_simple_while_loop):
    generator(base_dir)
if __name__ == "__main__":
  # The models must be generated with TF2 semantics enabled; absl's app.run
  # parses flags and then invokes main(argv).
  v2_compat.enable_v2_behavior()
  app.run(main)
| StarcoderdataPython |
6499675 | <gh_stars>0
# Producer -- a task, i.e. a plain function
# 1. The function must be decorated with the Celery instance's task decorator.
# 2. Celery must be configured to auto-detect tasks in the designated package.
from libs.yuntongxun.sms import CCP
from celery_tasks.main import app
@app.task
def celery_send_sms_code(mobile, code):
    """Celery task that sends the SMS verification code to a phone number.

    Args:
        mobile: recipient phone number.
        code: the verification code to deliver; sent with template id 1 and
            the extra parameter 5 (presumably the expiry in minutes --
            confirm against the CCP template definition).
    """
    sms_gateway = CCP()
    sms_gateway.send_template_sms(mobile, [code, 5], 1)
| StarcoderdataPython |
8017376 | <reponame>dgarrett622/EXOSIMS
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
class ZodiacalLight(object):
    """Zodiacal Light class template

    This class contains all variables and methods necessary to perform
    Zodiacal Light Module calculations in exoplanet mission simulation.

    Args:
        \*\*specs:
            user specified values

    Attributes:
        magZ (float):
            1 zodi brightness magnitude (per arcsec2)
        magEZ (float):
            1 exo-zodi brightness magnitude (per arcsec2)
        varEZ (float):
            exo-zodiacal light variation (variance of log-normal distribution)
        nEZ (float):
            exo-zodiacal light level in zodi
    """

    _modtype = 'ZodiacalLight'
    # NOTE(review): _outspec is a class-level dict mutated in __init__, so it
    # is shared by every instance -- confirm this is the intended design.
    _outspec = {}

    def __init__(self, magZ=23, magEZ=22, varEZ=0, nEZ=1.5, **specs):
        self.magZ = float(magZ) # 1 zodi brightness (per arcsec2)
        self.magEZ = float(magEZ) # 1 exo-zodi brightness (per arcsec2)
        self.varEZ = float(varEZ) # exo-zodi variation (variance of log-normal dist)
        self.nEZ = float(nEZ) # exo-zodi level in zodi
        assert self.varEZ >= 0, "Exozodi variation must be >= 0"
        # populate outspec with plain values (strip astropy units)
        for att in self.__dict__.keys():
            dat = self.__dict__[att]
            self._outspec[att] = dat.value if isinstance(dat,u.Quantity) else dat

    def __str__(self):
        """String representation of the Zodiacal Light object

        When the command 'print' is used on the Zodiacal Light object, this
        method will return the values contained in the object"""
        # Python 2 print statement; __str__ deliberately prints each attribute
        # as a side effect before returning a generic label.
        for att in self.__dict__.keys():
            print '%s: %r' % (att, getattr(self, att))
        return 'Zodiacal Light class object attributes'

    def fZ(self, TL, sInds, lam, r_sc):
        """Returns surface brightness of local zodiacal light

        Args:
            TL (object):
                TargetList class object
            sInds (integer ndarray):
                Integer indices of the stars of interest, with the length of
                the number of planets of interest
            lam (astropy Quantity):
                Central wavelength in units of nm
            r_sc (astropy Quantity 1x3 array):
                Observatory (spacecraft) position vector in units of km
        Returns:
            fZ (astropy Quantity array):
                Surface brightness of zodiacal light in units of 1/arcsec2
        """
        # check type of sInds: promote a scalar index to a 1-element array
        sInds = np.array(sInds)
        if not sInds.shape:
            sInds = np.array([sInds])
        # Prototype uses a flat 1-zodi level for every target (lam and r_sc
        # are unused here; subclasses presumably refine this -- confirm).
        nZ = np.ones(len(sInds))
        fZ = nZ*10**(-0.4*self.magZ)/u.arcsec**2
        return fZ

    def fEZ(self, TL, sInds, I):
        """Returns surface brightness of exo-zodiacal light

        Args:
            TL (object):
                TargetList class object
            sInds (integer ndarray):
                Numpy ndarray containing integer indices of the stars of interest,
                with the length of the number of planets of interest.
            I (astropy Quantity array):
                Inclination of the planets of interest in units of deg
        Returns:
            fEZ (astropy Quantity array):
                Surface brightness of exo-zodiacal light in units of 1/arcsec2
        """
        # check type of sInds: promote a scalar index to a 1-element array
        sInds = np.array(sInds)
        if not sInds.shape:
            sInds = np.array([sInds])
        # assume log-normal distribution of variance
        if self.varEZ == 0:
            nEZ = np.array([self.nEZ]*len(sInds))
        else:
            # mu/v are the parameters of the log-normal with mean nEZ and
            # variance varEZ.
            mu = np.log(self.nEZ) - 0.5*np.log(1. + self.varEZ/self.nEZ**2)
            v = np.sqrt(np.log(self.varEZ/self.nEZ**2 + 1.))
            nEZ = np.random.lognormal(mean=mu, sigma=v, size=len(sInds))
        # supplementary angle for inclination > 90 degrees
        # NOTE(review): this writes through I.value and therefore mutates the
        # caller's Quantity in place -- confirm callers do not reuse I.
        mask = np.where(I.value > 90)[0]
        I.value[mask] = 180 - I.value[mask]
        beta = I.value
        # quadratic fit of zodi brightness vs. ecliptic latitude beta
        fbeta = 2.44 - 0.0403*beta + 0.000269*beta**2
        # absolute V-band magnitude of the star
        MV = TL.MV[sInds]
        # absolute V-band magnitude of the Sun
        MVsun = 4.83
        # scale the 1-exozodi surface brightness by star luminosity vs. the Sun
        fEZ = nEZ*10**(-0.4*self.magEZ) * 2*fbeta * 10.**(-0.4*(MV-MVsun))/u.arcsec**2
        return fEZ
1772594 | import unittest
import tornado.httputil
from imbi import errors
class DefaultFunctionalityTests(unittest.TestCase):
    """Exercises the generic problem-document behavior of errors.ApplicationError."""

    def test_that_error_url_can_be_configured(self):
        # Save and restore the module-level ERROR_URL so other tests see the
        # default configuration again.
        saved_error_url = errors.ERROR_URL
        try:
            errors.set_canonical_server('server.example.com')
            err = errors.ApplicationError(500, 'error-fragment', '')
            self.assertEqual('https://server.example.com/#error-fragment',
                             err.document['type'])
        finally:
            errors.ERROR_URL = saved_error_url

    def test_that_reason_is_set_from_status_code(self):
        # With no explicit title, the HTTP reason phrase for the code is used.
        err = errors.ApplicationError(500, 'fragment', '')
        self.assertEqual(tornado.httputil.responses[500], err.reason)

    def test_that_reason_is_title_if_set(self):
        err = errors.ApplicationError(500, 'fragment', '', title='title')
        self.assertEqual('title'.title(), err.reason)

    def test_that_unknown_status_codes_are_handled(self):
        # Codes outside tornado's table fall back to a fixed reason phrase.
        err = errors.ApplicationError(1, 'fragment', '')
        self.assertEqual('Unknown Status Code', err.reason)
        err = errors.ApplicationError(600, 'fragment', '')
        self.assertEqual('Unknown Status Code', err.reason)

    def test_that_type_can_be_overridden(self):
        err = errors.ApplicationError(
            500, 'error-fragment', '',
            type='https://example.com/troubleshooting')
        self.assertEqual('https://example.com/troubleshooting',
                         err.document['type'])

    def test_that_title_defaults_to_reason(self):
        err = errors.ApplicationError(500, 'fragment', '')
        self.assertEqual(tornado.httputil.responses[500],
                         err.document['title'])

    def test_that_detail_defaults_to_formatted_log_message(self):
        # The log-message template plus args becomes the document detail.
        err = errors.ApplicationError(500, 'fragment', '1+1=%s', 1 + 1)
        self.assertEqual('1+1=2', err.document['detail'])

    def test_with_missing_log_args(self):
        # A template with no args is passed through unformatted.
        err = errors.ApplicationError(500, 'fragment', '%s')
        self.assertEqual('%s', err.document['detail'])

    def test_with_unused_log_args(self):
        # Extra args with no placeholders are silently ignored.
        err = errors.ApplicationError(500, 'fragment', 'No args', 'arg')
        self.assertEqual('No args', err.document['detail'])
class SpecificErrorBehaviorTests(unittest.TestCase):
    """Verifies defaults and messages of the concrete error subclasses."""

    def test_item_not_found_default_title(self):
        err = errors.ItemNotFound()
        self.assertEqual('Item not found', err.document['title'])

    def test_item_not_found_log_message(self):
        err = errors.ItemNotFound()
        self.assertEqual('Item not found', err.log_message)

    def test_method_not_allowed_log_message(self):
        # The method name is upper-cased in the detail text.
        err = errors.MethodNotAllowed('post')
        self.assertEqual('POST is not a supported HTTP method',
                         err.document['detail'])

    def test_unsupported_media_type_log_message(self):
        err = errors.UnsupportedMediaType('application/xml')
        self.assertEqual('application/xml is not a supported media type',
                         err.document['detail'])

    def test_database_error_defaults(self):
        err = errors.DatabaseError()
        self.assertEqual('Database Error', err.document['title'])
        self.assertEqual('Database Error', err.reason)
        self.assertEqual('Database failure', err.log_message)

    def test_database_error_with_exception(self):
        # The wrapped exception is kept in args and reflected in the template.
        failure = RuntimeError('whatever')
        err = errors.DatabaseError(error=failure)
        self.assertEqual('Database Error', err.document['title'])
        self.assertEqual('Database Error', err.reason)
        self.assertEqual('Database failure: %s', err.log_message)
        self.assertEqual((failure, ), err.args)

    def test_database_error_with_explicit_title(self):
        err = errors.DatabaseError(title='No rows returned')
        self.assertEqual('No rows returned', err.document['title'])
        self.assertEqual('No rows returned'.title(), err.reason)
        self.assertEqual('Database failure', err.log_message)
3551116 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Timer, RisingEdge
import logging
from cocotb.wavedrom import trace
import wavedrom
from cocotb.binary import BinaryRepresentation, BinaryValue
@cocotb.test()
async def gcd_Test(dut):
    """Drive the DUT: start a 10 ns clock, pulse reset for one cycle while
    presenting operands 2 and 4, then let the design run for 50 cycles."""
    clk = Clock(dut.clk,10,"ns")
    # NOTE(review): cocotb.fork is deprecated in newer cocotb releases in
    # favour of cocotb.start_soon -- confirm the targeted cocotb version.
    cocotb.fork(clk.start())
    await RisingEdge(dut.clk)
    # Assert reset and apply the input operands for one clock edge.
    dut.rst.value = 1
    dut.num1.value = 2
    dut.num2.value = 4
    await RisingEdge(dut.clk)
    dut.rst.value = 0
    # Give the design 50 cycles to compute (no output checking here).
    for j in range(50):await RisingEdge(dut.clk)
3295066 | <reponame>loremipsumdolor/STEVE
'''
S.T.E.V.E. Console
Interactive command-line console
A software component of S.T.E.V.E. (Super Traversing Enigmatic Voice-commanded Engine)
Code and device by <NAME>; code released under the MIT license
'''
import threading
import cmdparser
class console(threading.Thread):
    """Interactive command-line console thread for S.T.E.V.E. (Python 2).

    Reads commands with raw_input in a loop; 'help' dumps consolehelp.txt,
    'exit' stops the loop, anything else is handed to the command parser.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # Project command parser used for everything except help/exit.
        self.msgparse = cmdparser.msgparse()

    def run(self):
        print
        print "Type help for a list of commands."
        while True:
            con = raw_input("S.T.E.V.E. > ")
            if con == "help":
                # Print the help file sorted line-by-line.
                with open('consolehelp.txt') as consolehelp:
                    for line in sorted(consolehelp):
                        print line.rstrip('\n')
            elif con == "exit":
                break
            else:
                interpret = self.msgparse.interpret(con, "console")
                # NOTE(review): interpret[1] is passed for both of the last two
                # arguments -- confirm this is intentional and not a typo.
                parse = self.msgparse.parse(interpret[0], interpret[1], interpret[1])
                # parse() may return a list of lines, a string, or an int.
                if type(parse) is list:
                    for x in range(len(parse)):
                        print parse[x]
                elif type(parse) is str:
                    print parse
                elif type(parse) is int:
                    print str(parse)
                elif interpret[0] == "":
                    # Empty command: silently ignore.
                    pass
                else:
                    print "Error: Invalid results."
if __name__ == '__main__':
    # This module only defines the console thread; it is not meant to be run
    # standalone (Python 2 print statement).
    print "Not to be called directly."
6649811 | """
Defines the S3Model ontology in Python 3.7
Copyright, 2009 - 2022, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dataclasses import dataclass
@dataclass
class CMC:
    """
    Core Model Component - A component model contained in a reference model.
    A CMC represents a specific core type of component that further contains
    elements with base datatypes and other CMCs to define its structure.
    """
    # Placeholder dataclass: no fields yet; presumably to be fleshed out as
    # the S3Model ontology implementation grows -- confirm.
@dataclass
class CMS:
    """
    Core Model Symbol - A CMS represents a CMC in instance data.
    In practice, it is usually substituted for by a Replaceable Model Symbol (RMS).
    This substitution is because constraints are expressed in a Replaceable
    Model Component (RMC) which is then represented by an RMS.
    """
    # Placeholder dataclass: no fields yet (see CMC above for the pattern).
| StarcoderdataPython |
1642984 | <filename>python/smap/ops/test/test_meter.py<gh_stars>10-100
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from twisted.trial import unittest
from smap.ops.meter import _meter
class TestMeter(unittest.TestCase):
    """Unit tests for smap.ops.meter._meter (Python 2 / Twisted trial).

    From the expectations below, _meter(d, reset_threshold) appears to total
    the consumption of a cumulative meter series, treating decreases as meter
    resets -- confirm against the _meter implementation.
    """

    def test_increasing(self):
        # Monotone series 0..19: consumption is last - first = 19.
        d = np.ones(20)
        for i in xrange(0, 20):
            d[i] = i
        rv = _meter(d, 0)
        self.assertEquals(rv, 19)

    def test_one_reset(self):
        # 0..9 then resets to 0..9 again: 9 before + 9 after = 18.
        d = np.ones(20)
        for i in xrange(0, 20):
            if i < 10: d[i] = i
            else: d[i] = i - 10
        rv = _meter(d, 0)
        self.assertEquals(rv, 18)

    def test_reset_before(self):
        # 0..19 followed by a drop to 3: 18 + 3 = 21.
        d = np.ones(20)
        for i in xrange(0, 20):
            d[i] = i
        d[-1] = 3
        rv = _meter(d, 0)
        self.assertEquals(rv, 21)

    def test_reset_first(self):
        # Starts at 10, drops to 1, then climbs to 19: 10 + 19 = 29.
        d = np.ones(20)
        for i in xrange(1, 20):
            d[i] = i
        d[0] = 10
        rv = _meter(d, 0)
        self.assertEquals(rv, 29)

    def test_decreasing_simple(self):
        # Strictly decreasing: every step is a reset, so everything sums.
        d = np.array([3, 2, 1])
        rv = _meter(d, 0)
        self.assertEquals(rv, np.sum(d))

    def test_decreasing(self):
        d = np.ones(20)
        for i in xrange(0, 20):
            d[i] = 20 - i
        rv = _meter(d, reset_threshold=0)
        self.assertEquals(rv, np.sum(d))

    def test_starting_offset(self):
        # Constant offset does not change consumption: still 19.
        d = np.ones(20)
        for i in xrange(0, 20):
            d[i] = i + 10
        rv = _meter(d, reset_threshold=0)
        self.assertEquals(rv, 19)
| StarcoderdataPython |
9654173 | <reponame>jrderek/Data-science-master-resources
import mysql.connector

# Connect to the local MySQL server.
# NOTE(review): credentials are hard-coded; move them to configuration or
# environment variables in real deployments.
db = mysql.connector.connect(
    host="127.0.0.1",
    user="root",
    password="<PASSWORD>",
    database="employee_data",
)
cursor = db.cursor()

# Parameterized UPDATE: the %s placeholders are bound by the driver,
# preventing SQL injection.
sql = "UPDATE customers SET name=%s, address=%s WHERE customer_id=%s"
val = ("ShakibAL", "Dhaka", 2)
cursor.execute(sql, val)
db.commit()

print("{} data changed".format(cursor.rowcount))

# Release database resources explicitly (the original leaked the open
# cursor and connection).
cursor.close()
db.close()
9668368 | from car.camera import Camera
from car.car_status import CarStatus
from car.motor import Motor
class Car:
    """The four-motor Raspberry Pi car with an onboard camera.

    Each drive command updates the car's status and then applies the same
    command to all four motors in a fixed order.
    """

    def __init__(self, m1_forward, m1_backward, m2_forward, m2_backward, m3_forward, m3_backward, m4_forward,
                 m4_backward, resolution_x, resolution_y, rotation, status=CarStatus.STOPPED):
        self._camera = Camera(resolution_x, resolution_y, rotation)
        self._motor1 = Motor(m1_forward, m1_backward)
        self._motor2 = Motor(m2_forward, m2_backward)
        self._motor3 = Motor(m3_forward, m3_backward)
        self._motor4 = Motor(m4_forward, m4_backward)
        self._status = status

    def _motors(self):
        """All four drive motors, in fixed order."""
        return (self._motor1, self._motor2, self._motor3, self._motor4)

    def move_forward(self) -> None:
        """Mark the car as moving forward and drive every motor forward."""
        self._status = CarStatus.MOVING_FORWARD
        for motor in self._motors():
            motor.move_forward()

    def move_backward(self) -> None:
        """Mark the car as moving backward and drive every motor backward."""
        self._status = CarStatus.MOVING_BACKWARD
        for motor in self._motors():
            motor.move_backward()

    def stop(self) -> None:
        """Mark the car as stopped and halt every motor."""
        self._status = CarStatus.STOPPED
        for motor in self._motors():
            motor.stop()

    def take_picture(self, image_filename) -> None:
        """Capture a still image to *image_filename*, tracking status around it."""
        self._status = CarStatus.TAKING_IMAGE
        self._camera.take_picture(image_filename)
        self._status = CarStatus.IMAGE_TAKEN

    def update_status(self, status: CarStatus) -> None:
        """Explicitly set the car's status (kept alongside the setter below)."""
        self._status = status

    @property
    def status(self) -> CarStatus:
        """Current CarStatus of the car."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status
| StarcoderdataPython |
1632993 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco plugin db cleanup part II
Revision ID: 263772d65691
Revises: <PASSWORD>
Create Date: 2013-07-29 02:31:26.646343
"""
# revision identifiers, used by Alembic.
revision = '263772d65691'
down_revision = '35c7c198ddea'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Prefix the Cisco plugin tables with cisco_ and drop cisco_vlan_ids."""
    # Only run when the Cisco plugin is among the active plugins.
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.rename_table('credentials', 'cisco_credentials')
    op.rename_table('nexusport_bindings', 'cisco_nexusport_bindings')
    op.rename_table('qoss', 'cisco_qos_policies')
    op.drop_table('cisco_vlan_ids')
def downgrade(active_plugins=None, options=None):
    """Reverse upgrade(): recreate cisco_vlan_ids and restore the old names."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    # Recreate the table dropped by upgrade(); its prior contents are lost.
    op.create_table(
        'cisco_vlan_ids',
        sa.Column('vlan_id', sa.Integer, nullable=False),
        sa.Column('vlan_used', sa.Boolean),
        sa.PrimaryKeyConstraint('vlan_id'),
    )
    op.rename_table('cisco_credentials', 'credentials')
    op.rename_table('cisco_nexusport_bindings', 'nexusport_bindings')
    op.rename_table('cisco_qos_policies', 'qoss')
| StarcoderdataPython |
282154 | from bill.views import InvoiceList, InvoiceDetail
from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import path
# REST routes for invoices: list/create at /invoices, retrieve/update/delete
# at /invoices/<pk>.
urlpatterns = [
    path('invoices', InvoiceList.as_view(), name='invoice-list'),
    path('invoices/<int:pk>', InvoiceDetail.as_view(), name='invoice-detail'),
]
# Allow format suffixes (e.g. .json, .api) on the routes above.
urlpatterns = format_suffix_patterns(urlpatterns)
52998 | <reponame>JSchwalb11/OpenCV_Practical
import numpy as np
import argparse
import imutils
import cv2
# Parse the required -i/--image path from the command line.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
cv2.imshow("Original", image)

# Rotate about the image centre (integer centre via floor division).
(h,w) = image.shape[:2]
center = (w // 2, h //2)

# 45-degree rotation, scale 1.0, via an explicit affine transform.
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w,h))
cv2.imshow("Rotated by 45 Degrees", rotated)

M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by -90 Degrees", rotated)

# imutils.rotate wraps the matrix/warpAffine steps above in one call.
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)
cv2.waitKey(0)
| StarcoderdataPython |
89803 | <gh_stars>100-1000
from __future__ import division
import sys
import time
import re
import threading
from ..runtime import min_version, runtime_info, read_vm_size
from ..utils import timestamp
from ..metric import Metric
from ..metric import Breakdown
if min_version(3, 4):
import tracemalloc
class AllocationProfiler(object):
    """Samples Python memory allocations with tracemalloc and folds them into
    a call-graph Breakdown profile reported by the agent."""

    MAX_TRACEBACK_SIZE = 25 # number of frames
    MAX_MEMORY_OVERHEAD = 10 * 1e6 # 10MB
    MAX_PROFILED_ALLOCATIONS = 25

    def __init__(self, agent):
        self.agent = agent
        self.ready = False
        self.profile = None
        # Guards self.profile against concurrent build/stop access.
        self.profile_lock = threading.Lock()
        self.overhead_monitor = None
        self.start_ts = None

    def setup(self):
        """Check platform/version prerequisites; sets self.ready on success."""
        if self.agent.get_option('allocation_profiler_disabled'):
            return

        if not runtime_info.OS_LINUX and not runtime_info.OS_DARWIN:
            # NOTE(review): message says 'CPU profiler' inside the allocation
            # profiler -- looks like a copy-paste slip; confirm before fixing.
            self.agent.log('CPU profiler is only supported on Linux and OS X.')
            return

        if not min_version(3, 4):
            self.agent.log('Memory allocation profiling is available for Python 3.4 or higher')
            return

        self.ready = True

    def reset(self):
        """Start a fresh call-graph profile, discarding any previous one."""
        self.profile = Breakdown('Allocation call graph', Breakdown.TYPE_CALLGRAPH)

    def start_profiler(self):
        """Begin tracemalloc tracing and watch its own memory overhead."""
        self.agent.log('Activating memory allocation profiler.')

        def start():
            tracemalloc.start(self.MAX_TRACEBACK_SIZE)
        # tracemalloc must be started from the main thread.
        self.agent.run_in_main_thread(start)

        self.start_ts = time.time()

        def monitor_overhead():
            # Stop profiling if tracemalloc itself uses too much memory.
            if tracemalloc.is_tracing() and tracemalloc.get_tracemalloc_memory() > self.MAX_MEMORY_OVERHEAD:
                self.agent.log('Allocation profiler memory overhead limit exceeded: {0} bytes'.format(tracemalloc.get_tracemalloc_memory()))
                self.stop_profiler()

        # Poll the overhead every 0.5s after an initial 0.5s delay.
        self.overhead_monitor = self.agent.schedule(0.5, 0.5, monitor_overhead)

    def stop_profiler(self):
        """Stop tracing, take a final snapshot and fold it into the profile."""
        self.agent.log('Deactivating memory allocation profiler.')

        with self.profile_lock:
            if self.overhead_monitor:
                self.overhead_monitor.cancel()
                self.overhead_monitor = None

            if tracemalloc.is_tracing():
                snapshot = tracemalloc.take_snapshot()
                self.agent.log('Allocation profiler memory overhead {0} bytes'.format(tracemalloc.get_tracemalloc_memory()))
                tracemalloc.stop()
                self.process_snapshot(snapshot, time.time() - self.start_ts)

    def build_profile(self, duration):
        """Normalize the collected call graph over *duration* and wrap it in a
        metric record for reporting."""
        with self.profile_lock:
            self.profile.normalize(duration)
            self.profile.propagate()
            self.profile.floor()
            # Keep nodes between depth 2 and 1000 with any positive weight.
            self.profile.filter(2, 1000, float("inf"))

            return [{
                'category': Metric.CATEGORY_MEMORY_PROFILE,
                'name': Metric.NAME_UNCOLLECTED_ALLOCATIONS,
                'unit': Metric.UNIT_BYTE,
                'unit_interval': 1,
                'profile': self.profile
            }]

    def destroy(self):
        # No persistent resources to release.
        pass

    def process_snapshot(self, snapshot, duration):
        """Add the top allocation tracebacks of *snapshot* to the call graph,
        skipping stacks that pass through the agent's own frames."""
        stats = snapshot.statistics('traceback')

        for stat in stats[:self.MAX_PROFILED_ALLOCATIONS]:
            if stat.traceback:
                skip_stack = False
                for frame in stat.traceback:
                    if self.agent.frame_cache.is_agent_frame(frame.filename):
                        skip_stack = True
                        break
                if skip_stack:
                    continue

                current_node = self.profile
                # Walk root->leaf (traceback is leaf-first, hence reversed).
                for frame in reversed(stat.traceback):
                    if frame.filename == '<unknown>':
                        continue
                    frame_name = '{0}:{1}'.format(frame.filename, frame.lineno)
                    current_node = current_node.find_or_add_child(frame_name)
                current_node.set_type(Breakdown.TYPE_CALLSITE)
                current_node.increment(stat.size, stat.count)
| StarcoderdataPython |
3448674 | <gh_stars>10-100
from setuptools import find_packages, setup
setup(
    name='homely',
    description=('Automate the installation of your personal config files and'
                 ' favourite tools using Python. https://homely.readthedocs.io/'),
    url='https://homely.readthedocs.io/',
    author='<NAME>',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Topic :: Utilities',
    ],
    keywords='dotfiles environment configuration tools utilities automation',
    # Include the top-level package plus every sub-package found under homely/.
    packages=['homely'] + ['homely.{}'.format(p)
                           for p in find_packages('homely')],
    install_requires=['simplejson', 'click', 'requests', 'python-daemon'],
    # Installs the `homely` console command.
    entry_points={
        'console_scripts': ['homely=homely._cli:main'],
    },
    # automatic version number using setuptools_scm
    setup_requires=['setuptools_scm'],
    use_scm_version={
        # setuptools_scm writes the derived version into the package itself.
        "write_to": 'homely/__init__.py',
    },
)
| StarcoderdataPython |
9655725 | <filename>example_snippets/multimenus_snippets/Snippets/SciPy/Optimization and root-finding routines/General-purpose optimization/Nelder-Mead Simplex algorithm.py<gh_stars>0
def rosen(x):
    """Rosenbrock banana function of a 1-D array x:
    sum over i of 100*(x[i+1] - x[i]^2)^2 + (1 - x[i])^2.
    Global minimum 0 at x = (1, ..., 1)."""
    head, tail = x[:-1], x[1:]
    return sum(100.0 * (tail - head ** 2.0) ** 2.0 + (1 - head) ** 2.0)
# Starting point away from the optimum at (1, 1, 1, 1, 1).
x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
# Nelder-Mead is derivative-free; xtol controls the simplex convergence
# tolerance and disp prints convergence messages.
res = optimize.minimize(rosen, x0, method='nelder-mead',
                        options={'xtol': 1e-8, 'disp': True})
print(res.x)
5085524 | <gh_stars>0
# import numpy as np
# import networkx as nx
# import matplotlib.pyplot as plt
# import matplotlib as mpl
# import matplotlib.colors as colors
# from .paths import paths_prob_to_edges_flux
| StarcoderdataPython |
1692090 | <reponame>nicholascar/comp7230-training<filename>lecture_resources/lecture_02_SQLite.py
import csv
import os
import sqlite3


def create_dwellings_table(conn):
    """(Re)create the 'dwellings' table on *conn*, dropping any existing copy."""
    conn.execute("DROP TABLE IF EXISTS dwellings;")
    conn.execute(
        """
        CREATE TABLE dwellings
        (ID INT PRIMARY KEY NOT NULL,
        NoPeople INT NOT NULL,
        ContainingMB TEXT NOT NULL,
        CensusYear INT NOT NULL);
        """
    )


def count_rows(conn):
    """Return the number of rows currently in the dwellings table."""
    cur = conn.cursor()
    cur.execute("SELECT COUNT(*) FROM dwellings;")
    return cur.fetchall()[0][0]


def load_csv_into_table(conn, csv_path):
    """Insert every data row of *csv_path* (header skipped) into dwellings."""
    with open(csv_path) as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        for field in reader:
            conn.execute("INSERT INTO dwellings VALUES (?,?,?,?);", field)
    # BUG FIX: the original never committed, so the inserts would have been
    # rolled back on close.
    conn.commit()


def average_people_per_meshblock(conn, census_year):
    """Return (ContainingMB, avg NoPeople) rows for *census_year*."""
    cur = conn.cursor()
    cur.execute(
        """
        SELECT ContainingMB, AVG(NoPeople)
        FROM dwellings
        WHERE CensusYear = ?
        GROUP BY ContainingMB
        """,
        (census_year,),
    )
    return cur.fetchall()


def main():
    """Original lecture flow: build DB, load CSV, query 2021, clean up."""
    conn = sqlite3.connect('test.db')
    print("Opened database successfully")

    create_dwellings_table(conn)
    print("Table created successfully")

    # check it's empty
    print(f"Rows in table: {count_rows(conn)}")

    load_csv_into_table(conn, "dwellings_data.csv")
    print("Read CSV data into table")

    # check there are 30 entries in table
    print(f"Rows in table: {count_rows(conn)}")

    # select only 2021 data, aggregated by containing Mesh Block
    rows = average_people_per_meshblock(conn, 2021)
    print("Average number of people per dwelling per Mesh Block in 2021:")
    for row in rows:
        print(row)

    conn.close()
    os.unlink("test.db")


if __name__ == "__main__":
    # Guarded so the lecture steps run only when executed as a script
    # (the original ran everything, including file I/O, at import time).
    main()
1970025 |
"""Utilities for finding overlap or missing items in arrays."""
from .._ffi.function import _init_api
from .. import backend as F
class Filter(object):
    """Class used to either find the subset of IDs that are in this
    filter, or the subset of IDs that are not in this filter
    given a second set of IDs.

    Examples
    --------
    >>> import torch as th
    >>> from dgl.utils import Filter
    >>> f = Filter(th.tensor([3,2,9], device=th.device('cuda')))
    >>> f.find_included_indices(th.tensor([0,2,8,9], device=th.device('cuda')))
    tensor([1,3])
    >>> f.find_excluded_indices(th.tensor([0,2,8,9], device=th.device('cuda')))
    tensor([0,2], device='cuda')
    """

    def __init__(self, ids):
        """Create a new filter from a given set of IDs. This currently is only
        implemented for the GPU.

        Parameters
        ----------
        ids : IdArray
            The unique set of IDs to keep in the filter.
        """
        # Opaque handle to the backing C++ filter object; F.zerocopy_* moves
        # framework tensors across the FFI boundary without copying.
        self._filter = _CAPI_DGLFilterCreateFromSet(
            F.zerocopy_to_dgl_ndarray(ids))

    def find_included_indices(self, test):
        """Find the index of the IDs in `test` that are in this filter.

        Parameters
        ----------
        test : IdArray
            The set of IDs to to test with.

        Returns
        -------
        IdArray
            The index of IDs in `test` that are also in this filter.
        """
        return F.zerocopy_from_dgl_ndarray( \
            _CAPI_DGLFilterFindIncludedIndices( \
                self._filter, F.zerocopy_to_dgl_ndarray(test)))

    def find_excluded_indices(self, test):
        """Find the index of the IDs in `test` that are not in this filter.

        Parameters
        ----------
        test : IdArray
            The set of IDs to to test with.

        Returns
        -------
        IdArray
            The index of IDs in `test` that are not in this filter.
        """
        return F.zerocopy_from_dgl_ndarray( \
            _CAPI_DGLFilterFindExcludedIndices( \
                self._filter, F.zerocopy_to_dgl_ndarray(test)))
# register the _CAPI_DGLFilter* functions from the DGL C library
# into this module's namespace
_init_api("dgl.utils.filter")
| StarcoderdataPython |
3552915 | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : __init__.py
# Abstract :
# Current Version: 1.0.0
# Date : 2020-05-31
##################################################################################################
"""
from .davar_common import *
from .davar_det import *
from .davar_rcg import *
from .davar_spotting import *
from .davar_ie import *
from .davar_videotext import *
from .davar_table import *
from .mmcv import *
from .version import __version__
# only the version string is declared explicitly; the wildcard imports
# above re-export the public names of each davarocr submodule
__all__ = ['__version__']
| StarcoderdataPython |
4827853 | <reponame>hulecom/read-GRACE-harmonics
#!/usr/bin/env python
u"""
calc_sensitivity_kernel.py
Written by <NAME> (06/2021)
Calculates spatial sensitivity kernels through a least-squares mascon procedure
COMMAND LINE OPTIONS:
--help: list the command line options
-O X, --output-directory X: output directory for mascon files
--lmin X: minimum spherical harmonic degree
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-R X, --radius X: Gaussian smoothing radius (km)
-n X, --love X: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
--reference X: Reference frame for load love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
-F X, --format X: input and output data format
ascii
netCDF4
HDF5
--mask X: Land-sea mask for redistributing mascon mass and land water flux
--mascon-file X: index file of mascons spherical harmonics
--redistribute-mascons: redistribute mascon mass over the ocean
--fit-method X: method for fitting sensitivity kernel to harmonics
1: mass coefficients
2: geoid coefficients
--log: Output log of files created for each job
-V, --verbose: Verbose output of processing run
-M X, --mode X: Permissions mode of the files created
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format.
https://www.h5py.org/
future: Compatibility layer between Python 2 and Python 3
https://python-future.org/
PROGRAM DEPENDENCIES:
read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995)
plm_holmes.py: Computes fully normalized associated Legendre polynomials
gauss_weights.py: Computes the Gaussian weights as a function of degree
ocean_stokes.py: reads a land-sea mask and converts to spherical harmonics
gen_stokes.py: converts a spatial field into spherical harmonic coefficients
harmonic_summation.py: calculates a spatial field from spherical harmonics
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
spatial.py: spatial data class for reading, writing and processing data
ncdf_read.py: reads input spatial data from netCDF4 files
hdf5_read.py: reads input spatial data from HDF5 files
ncdf_write.py: writes output spatial data to netCDF4
hdf5_write.py: writes output spatial data to HDF5
units.py: class for converting GRACE/GRACE-FO Level-2 data to specific units
utilities.py: download and management utilities for files
REFERENCES:
<NAME>, <NAME> and <NAME>. "Regional acceleration
in ice mass loss from Greenland and Antarctica using GRACE
time-variable gravity data". Geophysical Research Letters,
41(22):8130-8137, 2014. https://doi.org/10.1002/2014GL061052
<NAME>, <NAME>, <NAME>, and <NAME> Swenson "Recent contributions of
glaciers and ice caps to sea level rise". Nature, 482, 514-518 (2012).
https://doi.org/10.1038/nature10847
<NAME>, <NAME>, <NAME> Swenson, "Dwindling groundwater resources in
northern India, from satellite gravity observations",
Geophysical Research Letters, 36(18), L18401, (2009).
https://doi.org/10.1029/2009GL039401
UPDATE HISTORY:
Updated 06/2021: switch from parameter files to argparse arguments
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 04/2021: add parser object for removing commented or empty lines
Updated 01/2021: harmonics object output from gen_stokes.py/ocean_stokes.py
Updated 12/2020: added more love number options
Updated 10/2020: use argparse to set command line parameters
Updated 08/2020: use utilities to define path to load love numbers file
Updated 04/2020: using the harmonics class for spherical harmonic operations
updated load love numbers read function
Updated 10/2019: changing Y/N flags to True/False
Updated 10/2018: verify integers for python3 compatibility
Updated 06/2018: using python3 compatible octal and input
Updated 03/2018: added extrapolation of load love numbers if LMAX > 696
Updated 09/2017: use a different land-sea mask for calculating ocean_Ylms
use rcond=-1 in numpy least-squares algorithm
Updated 05/2016: using __future__ print function
Updated 02/2016: direct calculation of number of harmonics n_harm
use getopt parameters to set number of PROCESSES to run in parallel,
whether or not to output a log file, added new help module
Updated 11/2015: create unique log filenames
Updated 07/2015: added output of the sensitivity kernel Ylms in addition
to the spatial fields (rather than just the spatial fields)
will output logs with parameters and output_files
added multiprocessing error handling with traceback
Updated 05/2015: added parameter MMAX for LMAX != MMAX
added portion to redistribute mascon mass uniformly over the ocean
Updated 10/2014: distributed computing with the multiprocessing module
added INTERVAL parameter for (degree spacing)/2
input/output file type (ascii, netCDF4, HDF5)
Updated 05/2014: added import functions
Updated 02/2014: updated comments and added os.path.joins for connecting
directories and files (generalizing code)
some general updates to the program code
Updated 08/2013: general updates to inputting data
Updated 03/2012: edited to use new gen_stokes time-series option
Written 02/2012
"""
from __future__ import print_function, division
import sys
import os
import re
import time
import argparse
import numpy as np
import traceback
import gravity_toolkit.utilities as utilities
from gravity_toolkit.harmonics import harmonics
from gravity_toolkit.spatial import spatial
from gravity_toolkit.units import units
from gravity_toolkit.read_love_numbers import read_love_numbers
from gravity_toolkit.plm_holmes import plm_holmes
from gravity_toolkit.gauss_weights import gauss_weights
from gravity_toolkit.ocean_stokes import ocean_stokes
from gravity_toolkit.harmonic_summation import harmonic_summation
#-- PURPOSE: keep track of threads
def info(args):
    """Print run information: script name, argument namespace and process IDs."""
    print(os.path.basename(sys.argv[0]))
    print(args)
    print('module name: {0}'.format(__name__))
    #-- os.getppid is not available on every platform
    if hasattr(os, 'getppid'):
        print('parent process: {0:d}'.format(os.getppid()))
    print('process id: {0:d}'.format(os.getpid()))
#-- PURPOSE: read load love numbers for the range of spherical harmonic degrees
def load_love_numbers(LMAX, LOVE_NUMBERS=0, REFERENCE='CF'):
    """
    Reads PREM load Love numbers for the range of spherical harmonic degrees
    and applies isomorphic parameters

    Arguments
    ---------
    LMAX: maximum spherical harmonic degree

    Keyword arguments
    -----------------
    LOVE_NUMBERS: Load Love numbers dataset
        0: Han and Wahr (1995) values from PREM
        1: Gegout (2005) values from PREM
        2: Wang et al. (2012) values from PREM
    REFERENCE: Reference frame for calculating degree 1 love numbers
        CF: Center of Surface Figure (default)
        CM: Center of Mass of Earth System
        CE: Center of Mass of Solid Earth

    Returns
    -------
    hl: Love number of Vertical Displacement
    kl: Love number of Gravitational Potential
    ll: Love number of Horizontal Displacement

    Raises
    ------
    ValueError: if LOVE_NUMBERS is not one of the supported datasets
    """
    #-- load love numbers file
    if (LOVE_NUMBERS == 0):
        #-- PREM outputs from Han and Wahr (1995)
        #-- https://doi.org/10.1111/j.1365-246X.1995.tb01819.x
        love_numbers_file = utilities.get_data_path(
            ['data','love_numbers'])
        header = 2
        columns = ['l','hl','kl','ll']
    elif (LOVE_NUMBERS == 1):
        #-- PREM outputs from Gegout (2005)
        #-- http://gemini.gsfc.nasa.gov/aplo/
        love_numbers_file = utilities.get_data_path(
            ['data','Load_Love2_CE.dat'])
        header = 3
        columns = ['l','hl','ll','kl']
    elif (LOVE_NUMBERS == 2):
        #-- PREM outputs from Wang et al. (2012)
        #-- https://doi.org/10.1016/j.cageo.2012.06.022
        love_numbers_file = utilities.get_data_path(
            ['data','PREM-LLNs-truncated.dat'])
        header = 1
        columns = ['l','hl','ll','kl','nl','nk']
    else:
        #-- previously an unknown dataset fell through with unbound local
        #-- variables (NameError); fail early with a clear message instead
        raise ValueError('Unknown Load Love numbers dataset: '
            '{0}'.format(LOVE_NUMBERS))
    #-- LMAX of load love numbers from Han and Wahr (1995) is 696.
    #-- from Wahr (2007) linearly interpolating kl works
    #-- however, as we are linearly extrapolating out, do not make
    #-- LMAX too much larger than 696
    #-- read arrays of kl, hl, and ll Love Numbers
    hl,kl,ll = read_love_numbers(love_numbers_file, LMAX=LMAX, HEADER=header,
        COLUMNS=columns, REFERENCE=REFERENCE, FORMAT='tuple')
    #-- return a tuple of load love numbers
    return (hl,kl,ll)
#-- PURPOSE: calculate a regional time-series through a least
#-- squares mascon process
def calc_sensitivity_kernel(LMAX, RAD,
    LMIN=None,
    MMAX=None,
    LOVE_NUMBERS=0,
    REFERENCE=None,
    DATAFORM=None,
    MASCON_FILE=None,
    REDISTRIBUTE_MASCONS=False,
    FIT_METHOD=0,
    LANDMASK=None,
    DDEG=None,
    INTERVAL=None,
    OUTPUT_DIRECTORY=None,
    MODE=0o775):
    """
    Calculates spatial sensitivity kernels through a least-squares
    mascon procedure

    Arguments
    ---------
    LMAX: maximum spherical harmonic degree
    RAD: Gaussian smoothing radius (km), 0 for no smoothing

    Keyword arguments
    -----------------
    LMIN: minimum spherical harmonic degree
    MMAX: maximum spherical harmonic order (default: LMAX)
    LOVE_NUMBERS: Load Love numbers dataset (see load_love_numbers)
    REFERENCE: reference frame for the load Love numbers
    DATAFORM: input/output data format (ascii, netCDF4 or HDF5)
    MASCON_FILE: index file of mascon spherical harmonic files
    REDISTRIBUTE_MASCONS: redistribute mascon mass over the ocean
    FIT_METHOD: 1 fits the kernel as mass coefficients; any other value
        fits as geoid coefficients (the command line restricts this to 1 or 2)
    LANDMASK: land-sea mask for redistributing mascon mass
    DDEG: output degree spacing, [ddeg] or [dlon,dlat]
    INTERVAL: output degree interval
        1: (-180:180,90:-90), 2: (degree spacing)/2
    OUTPUT_DIRECTORY: output directory for the mascon files
    MODE: permissions mode of the output files

    Returns
    -------
    output_files: list with the full paths of all files created
    """
    #-- file information
    suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')
    #-- file parser for reading index files
    #-- removes commented lines (can comment out files in the index)
    #-- removes empty lines (if there are extra empty lines)
    parser = re.compile(r'^(?!\#|\%|$)', re.VERBOSE)
    #-- Create output Directory if not currently existing
    if (not os.access(OUTPUT_DIRECTORY,os.F_OK)):
        os.mkdir(OUTPUT_DIRECTORY)
    #-- list object of output files for file logs (full path)
    output_files = []
    #-- read arrays of kl, hl, and ll Love Numbers
    hl,kl,ll = load_love_numbers(LMAX, LOVE_NUMBERS=LOVE_NUMBERS,
        REFERENCE=REFERENCE)
    #-- Earth Parameters
    factors = units(lmax=LMAX).harmonic(hl,kl,ll)
    #-- Average Density of the Earth [g/cm^3]
    rho_e = factors.rho_e
    #-- Average Radius of the Earth [cm]
    rad_e = factors.rad_e
    #-- input/output string for both LMAX==MMAX and LMAX != MMAX cases
    MMAX = np.copy(LMAX) if not MMAX else MMAX
    order_str = 'M{0:d}'.format(MMAX) if (MMAX != LMAX) else ''
    #-- Calculating the Gaussian smoothing for radius RAD
    if (RAD != 0):
        wt = 2.0*np.pi*gauss_weights(RAD,LMAX)
        gw_str = '_r{0:0.0f}km'.format(RAD)
    else:
        #-- else = 1
        wt = np.ones((LMAX+1))
        gw_str = ''
    #-- Read Ocean function and convert to Ylms for redistribution
    if REDISTRIBUTE_MASCONS:
        #-- read Land-Sea Mask and convert to spherical harmonics
        ocean_Ylms = ocean_stokes(LANDMASK, LMAX, MMAX=MMAX, LOVE=(hl,kl,ll))
        ocean_str = '_OCN'
    else:
        #-- not distributing uniformly over ocean
        ocean_str = ''
    #-- input mascon spherical harmonic datafiles
    with open(MASCON_FILE,'r') as f:
        mascon_files = [l for l in f.read().splitlines() if parser.match(l)]
    #-- number of mascons
    n_mas = len(mascon_files)
    #-- spatial area of the mascon
    total_area = np.zeros((n_mas))
    #-- name of each mascon
    mascon_name = []
    #-- for each valid file in the index (iterate over mascons)
    mascon_list = []
    for k,fi in enumerate(mascon_files):
        #-- read mascon spherical harmonics
        if (DATAFORM == 'ascii'):
            #-- ascii (.txt)
            Ylms = harmonics().from_ascii(os.path.expanduser(fi),date=False)
        elif (DATAFORM == 'netCDF4'):
            #-- netcdf (.nc)
            Ylms = harmonics().from_netCDF4(os.path.expanduser(fi),date=False)
        elif (DATAFORM == 'HDF5'):
            #-- HDF5 (.H5)
            Ylms = harmonics().from_HDF5(os.path.expanduser(fi),date=False)
        #-- Calculating the total mass of each mascon (1 cmwe uniform)
        total_area[k] = 4.0*np.pi*(rad_e**3)*rho_e*Ylms.clm[0,0]/3.0
        #-- distribute mascon mass uniformly over the ocean
        if REDISTRIBUTE_MASCONS:
            #-- calculate ratio between total mascon mass and
            #-- a uniformly distributed cm of water over the ocean
            ratio = Ylms.clm[0,0]/ocean_Ylms.clm[0,0]
            #-- for each spherical harmonic
            for m in range(0,MMAX+1):#-- MMAX+1 to include MMAX
                for l in range(m,LMAX+1):#-- LMAX+1 to include LMAX
                    #-- remove ratio*ocean Ylms from mascon Ylms
                    #-- note: x -= y is equivalent to x = x - y
                    Ylms.clm[l,m] -= ratio*ocean_Ylms.clm[l,m]
                    Ylms.slm[l,m] -= ratio*ocean_Ylms.slm[l,m]
        #-- truncate mascon spherical harmonics to d/o LMAX/MMAX and add to list
        mascon_list.append(Ylms.truncate(lmax=LMAX, mmax=MMAX))
        #-- mascon base is the file without directory or suffix
        mascon_base = os.path.basename(mascon_files[k])
        mascon_base = os.path.splitext(mascon_base)[0]
        #-- if lower case, will capitalize
        mascon_base = mascon_base.upper()
        #-- if mascon name contains degree and order info, remove
        mascon_name.append(mascon_base.replace('_L{0:d}'.format(LMAX),''))
    #-- create single harmonics object from list
    mascon_Ylms = harmonics().from_list(mascon_list, date=False)
    #-- Output spatial data object
    grid = spatial()
    #-- Output Degree Spacing
    dlon,dlat = (DDEG[0],DDEG[0]) if (len(DDEG) == 1) else (DDEG[0],DDEG[1])
    #-- Output Degree Interval
    if (INTERVAL == 1):
        #-- (-180:180,90:-90)
        n_lon = np.int64((360.0/dlon)+1.0)
        n_lat = np.int64((180.0/dlat)+1.0)
        grid.lon = -180 + dlon*np.arange(0,n_lon)
        grid.lat = 90.0 - dlat*np.arange(0,n_lat)
    elif (INTERVAL == 2):
        #-- (Degree spacing)/2
        grid.lon = np.arange(-180+dlon/2.0,180+dlon/2.0,dlon)
        grid.lat = np.arange(90.0-dlat/2.0,-90.0-dlat/2.0,-dlat)
        n_lon = len(grid.lon)
        n_lat = len(grid.lat)
    #-- Computing plms for converting to spatial domain
    theta = (90.0-grid.lat)*np.pi/180.0
    PLM,dPLM = plm_holmes(LMAX,np.cos(theta))
    #-- Calculating the number of cos and sin harmonics between LMIN and LMAX
    #-- taking into account MMAX (if MMAX == LMAX then LMAX-MMAX=0)
    n_harm=np.int64(LMAX**2 - LMIN**2 + 2*LMAX + 1 - (LMAX-MMAX)**2 - (LMAX-MMAX))
    #-- Initializing harmonics for least squares fitting
    #-- mascon kernel
    M_lm = np.zeros((n_harm,n_mas))
    #-- mascon kernel converted to output unit
    MA_lm = np.zeros((n_harm,n_mas))
    #-- sensitivity kernel
    A_lm = np.zeros((n_harm,n_mas))
    #-- Initializing conversion factors
    #-- factor for converting to smoothed coefficients of mass
    fact = np.zeros((n_harm))
    #-- factor for converting back into geoid coefficients
    fact_inv = np.zeros((n_harm))
    #-- smoothing factor
    wt_lm = np.zeros((n_harm))
    #-- ii is a counter variable for building the mascon column array
    ii = 0
    #-- Creating column array of clm/slm coefficients
    #-- Order is [C00...C6060,S11...S6060]
    #-- Calculating factor to convert geoid spherical harmonic coefficients
    #-- to coefficients of mass (Wahr, 1998)
    coeff = rho_e*rad_e/3.0
    coeff_inv = 0.75/(np.pi*rho_e*rad_e**3)
    #-- Switching between Cosine and Sine Stokes
    for cs,csharm in enumerate(['clm','slm']):
        #-- copy cosine and sin harmonics
        mascon_harm = getattr(mascon_Ylms, csharm)
        #-- for each spherical harmonic degree
        #-- +1 to include LMAX
        for l in range(LMIN,LMAX+1):
            #-- for each spherical harmonic order
            #-- Sine Stokes for (m=0) = 0
            mm = np.min([MMAX,l])
            #-- +1 to include l or MMAX (whichever is smaller)
            for m in range(cs,mm+1):
                #-- Mascon Spherical Harmonics
                M_lm[ii,:] = np.copy(mascon_harm[l,m,:])
                #-- degree dependent factor to convert to mass
                fact[ii] = (2.0*l + 1.0)/(1.0 + kl[l])
                #-- degree dependent factor to convert from mass
                fact_inv[ii] = coeff_inv*(1.0 + kl[l])/(2.0*l+1.0)
                #-- degree dependent smoothing
                wt_lm[ii] = np.copy(wt[l])
                #-- add 1 to counter
                ii += 1
    #-- Converting mascon coefficients to fit method
    if (FIT_METHOD == 1):
        #-- Fitting Sensitivity Kernel as mass coefficients
        #-- converting M_lm to mass coefficients of the kernel
        for i in range(n_harm):
            MA_lm[i,:] = M_lm[i,:]*wt_lm[i]*fact[i]
        fit_factor = wt_lm*fact
        inv_fit_factor = np.copy(fact_inv)
    else:
        #-- Fitting Sensitivity Kernel as geoid coefficients
        #-- bug fix: fill only row i (was MA_lm[:,:], which broadcast each
        #-- row over the whole matrix and left every row equal to the last)
        for i in range(n_harm):
            MA_lm[i,:] = M_lm[i,:]*wt_lm[i]
        fit_factor = wt_lm*np.ones((n_harm))
        inv_fit_factor = np.ones((n_harm))
    #-- Fitting the sensitivity kernel from the input kernel
    for i in range(n_harm):
        #-- setting kern_i equal to 1 for d/o
        kern_i = np.zeros((n_harm))
        #-- converting to mass coefficients if specified
        kern_i[i] = 1.0*fit_factor[i]
        #-- spherical harmonics solution for the
        #-- mascon sensitivity kernels
        #-- Least Squares Solutions: Inv(X'.X).(X'.Y)
        kern_lm = np.linalg.lstsq(MA_lm,kern_i,rcond=-1)[0]
        for k in range(n_mas):
            A_lm[i,k] = kern_lm[k]*total_area[k]
    #-- for each mascon
    for k in range(n_mas):
        #-- reshaping harmonics of sensitivity kernel to LMAX+1,MMAX+1
        #-- calculating the spatial sensitivity kernel of each mascon
        #-- kernel calculated as outlined in Tiwari (2009) and Jacobs (2012)
        #-- Initializing output sensitivity kernel (both spatial and Ylms)
        kern_Ylms = harmonics(lmax=LMAX, mmax=MMAX)
        kern_Ylms.clm = np.zeros((LMAX+1,MMAX+1))
        kern_Ylms.slm = np.zeros((LMAX+1,MMAX+1))
        kern_Ylms.time = total_area[k]
        #-- counter variable for deconstructing the mascon column arrays
        ii = 0
        #-- Switching between Cosine and Sine Stokes
        for cs,csharm in enumerate(['clm','slm']):
            #-- for each spherical harmonic degree
            #-- +1 to include LMAX
            for l in range(LMIN,LMAX+1):
                #-- for each spherical harmonic order
                #-- Sine Stokes for (m=0) = 0
                mm = np.min([MMAX,l])
                #-- +1 to include l or MMAX (whichever is smaller)
                for m in range(cs,mm+1):
                    #-- inv_fit_factor: normalize from mass harmonics
                    temp = getattr(kern_Ylms, csharm)
                    temp[l,m] = inv_fit_factor[ii]*A_lm[ii,k]
                    #-- add 1 to counter
                    ii += 1
        #-- convert spherical harmonics to output spatial grid
        grid.data = harmonic_summation(kern_Ylms.clm, kern_Ylms.slm,
            grid.lon, grid.lat, LMAX=LMAX, MMAX=MMAX, PLM=PLM).T
        grid.time = total_area[k]
        #-- output names for sensitivity kernel Ylm and spatial files
        #-- for both LMAX==MMAX and LMAX != MMAX cases
        args = (mascon_name[k],ocean_str,LMAX,order_str,gw_str,suffix[DATAFORM])
        FILE1 = '{0}_SKERNEL_CLM{1}_L{2:d}{3}{4}.{5}'.format(*args)
        FILE2 = '{0}_SKERNEL{1}_L{2:d}{3}{4}.{5}'.format(*args)
        #-- output sensitivity kernel to file
        if (DATAFORM == 'ascii'):
            #-- ascii (.txt)
            kern_Ylms.to_ascii(os.path.join(OUTPUT_DIRECTORY,FILE1),date=False)
            grid.to_ascii(os.path.join(OUTPUT_DIRECTORY,FILE2),date=False,
                units='unitless',longname='Sensitivity_Kernel')
        elif (DATAFORM == 'netCDF4'):
            #-- netCDF4 (.nc)
            kern_Ylms.to_netCDF4(os.path.join(OUTPUT_DIRECTORY,FILE1),date=False)
            grid.to_netCDF4(os.path.join(OUTPUT_DIRECTORY,FILE2),date=False,
                units='unitless',longname='Sensitivity_Kernel')
        elif (DATAFORM == 'HDF5'):
            #-- netcdf (.H5)
            kern_Ylms.to_HDF5(os.path.join(OUTPUT_DIRECTORY,FILE1),date=False)
            grid.to_HDF5(os.path.join(OUTPUT_DIRECTORY,FILE2),date=False,
                units='unitless',longname='Sensitivity_Kernel')
        #-- change the permissions mode
        os.chmod(os.path.join(OUTPUT_DIRECTORY,FILE1),MODE)
        os.chmod(os.path.join(OUTPUT_DIRECTORY,FILE2),MODE)
        #-- add output files to list object
        output_files.append(os.path.join(OUTPUT_DIRECTORY,FILE1))
        output_files.append(os.path.join(OUTPUT_DIRECTORY,FILE2))
    #-- return the list of output files
    return output_files
#-- PURPOSE: print a file log for the mascon sensitivity kernel analysis
def output_log_file(arguments,output_files):
    """Write a log file listing the run arguments and every file created."""
    #-- log filename pattern: calc_skernel_run_2002-04-01_PID-70335.log
    run_date = time.strftime('%Y-%m-%d',time.localtime())
    LOGFILE = 'calc_skernel_run_{0}_PID-{1:d}.log'.format(run_date, os.getpid())
    #-- open a uniquely-named log file within the output directory
    DIRECTORY = os.path.expanduser(arguments.output_directory)
    fid = utilities.create_unique_file(os.path.join(DIRECTORY,LOGFILE))
    #-- write the argument values, sorted alphabetically by argument name
    print('ARGUMENTS:', file=fid)
    for arg, value in sorted(vars(arguments).items()):
        print('{0}: {1}'.format(arg, value), file=fid)
    #-- write the full path of every output file created by the run
    print('\n\nOUTPUT FILES:', file=fid)
    for output_file in output_files:
        print('{0}'.format(output_file), file=fid)
    #-- done writing: close the log file
    fid.close()
#-- PURPOSE: print a error file log for the mascon sensitivity kernel analysis
def output_error_log_file(arguments):
    """Write a log file recording the arguments and traceback of a failed run."""
    #-- log filename pattern: calc_skernel_failed_run_2002-04-01_PID-70335.log
    run_date = time.strftime('%Y-%m-%d',time.localtime())
    LOGFILE = 'calc_skernel_failed_run_{0}_PID-{1:d}.log'.format(run_date, os.getpid())
    #-- open a uniquely-named log file within the output directory
    log_path = os.path.join(os.path.expanduser(arguments.output_directory), LOGFILE)
    fid = utilities.create_unique_file(log_path)
    #-- write the argument values, sorted alphabetically by argument name
    print('ARGUMENTS:', file=fid)
    for arg, value in sorted(vars(arguments).items()):
        print('{0}: {1}'.format(arg, value), file=fid)
    #-- write the traceback of the exception currently being handled
    print('\n\nTRACEBACK ERROR:', file=fid)
    traceback.print_exc(file=fid)
    #-- done writing: close the log file
    fid.close()
#-- This is the main part of the program that calls the individual modules
def main():
    """Parse the command line options and run the sensitivity kernel
    analysis, optionally writing a success or failure log file."""
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Calculates spatial sensitivity kernels through a
            least-squares mascon procedure
            """,
        fromfile_prefix_chars="@"
    )
    parser.convert_arg_line_to_args = utilities.convert_arg_line_to_args
    #-- command line parameters
    parser.add_argument('--output-directory','-O',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        default=os.getcwd(),
        help='Output directory for mascon files')
    #-- minimum spherical harmonic degree
    parser.add_argument('--lmin',
        type=int, default=1,
        help='Minimum spherical harmonic degree')
    #-- maximum spherical harmonic degree and order
    parser.add_argument('--lmax','-l',
        type=int, default=60,
        help='Maximum spherical harmonic degree')
    parser.add_argument('--mmax','-m',
        type=int, default=None,
        help='Maximum spherical harmonic order')
    #-- different treatments of the load Love numbers
    #-- 0: Han and Wahr (1995) values from PREM
    #-- 1: Gegout (2005) values from PREM
    #-- 2: Wang et al. (2012) values from PREM
    parser.add_argument('--love','-n',
        type=int, default=0, choices=[0,1,2],
        help='Treatment of the Load Love numbers')
    #-- option for setting reference frame for gravitational load love number
    #-- reference frame options (CF, CM, CE)
    parser.add_argument('--reference',
        type=str.upper, default='CF', choices=['CF','CM','CE'],
        help='Reference frame for load Love numbers')
    #-- Gaussian smoothing radius (km)
    parser.add_argument('--radius','-R',
        type=float, default=0,
        help='Gaussian smoothing radius (km)')
    #-- input data format (ascii, netCDF4, HDF5)
    parser.add_argument('--format','-F',
        type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5'],
        help='Input data format for auxiliary files')
    #-- mascon index file and parameters
    parser.add_argument('--mascon-file',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        help='Index file of mascons spherical harmonics')
    parser.add_argument('--redistribute-mascons',
        default=False, action='store_true',
        help='Redistribute mascon mass over the ocean')
    #-- 1: mass coefficients
    #-- 2: geoid coefficients
    parser.add_argument('--fit-method',
        type=int, default=1, choices=(1,2),
        help='Method for fitting sensitivity kernel to harmonics')
    #-- land-sea mask for redistributing mascon mass
    parser.add_argument('--mask',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        help='Land-sea mask for redistributing mascon mass')
    #-- output grid parameters
    parser.add_argument('--spacing','-S',
        type=float, nargs='+', default=[0.5,0.5], metavar=('dlon','dlat'),
        help='Spatial resolution of output data')
    parser.add_argument('--interval','-I',
        type=int, default=2, choices=[1,2,3],
        help=('Output grid interval '
            '(1: global, 2: centered global, 3: non-global)'))
    #-- Output log file for each job in forms
    #-- calc_skernel_run_2002-04-01_PID-00000.log
    #-- calc_skernel_failed_run_2002-04-01_PID-00000.log
    parser.add_argument('--log',
        default=False, action='store_true',
        help='Output log file for each job')
    #-- print information about processing run
    parser.add_argument('--verbose','-V',
        default=False, action='store_true',
        help='Verbose output of processing run')
    #-- permissions mode of the local directories and files (number in octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='permissions mode of output files')
    args,_ = parser.parse_known_args()

    #-- try to run the analysis with listed parameters
    try:
        info(args) if args.verbose else None
        #-- run calc_sensitivity_kernel algorithm with parameters
        output_files = calc_sensitivity_kernel(
            args.lmax,
            args.radius,
            LMIN=args.lmin,
            MMAX=args.mmax,
            LOVE_NUMBERS=args.love,
            REFERENCE=args.reference,
            DATAFORM=args.format,
            MASCON_FILE=args.mascon_file,
            REDISTRIBUTE_MASCONS=args.redistribute_mascons,
            FIT_METHOD=args.fit_method,
            LANDMASK=args.mask,
            DDEG=args.spacing,
            INTERVAL=args.interval,
            OUTPUT_DIRECTORY=args.output_directory,
            MODE=args.mode)
    except:
        #-- if there has been an error exception
        #-- print the type, value, and stack trace of the
        #-- current exception being handled
        print('process id {0:d} failed'.format(os.getpid()))
        traceback.print_exc()
        if args.log:#-- write failed job completion log file
            output_error_log_file(args)
    else:
        if args.log:#-- write successful job completion log file
            output_log_file(args,output_files)
#-- run main program when executed as a script
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6575805 | <reponame>liziwenzzzz/cv_template<filename>network/AOD/Model.py
import pdb
import numpy as np
import torch
import os
from .aod import AODnet
from options import opt
from optimizer import get_optimizer
from scheduler import get_scheduler
from network.base_model import BaseModel
from mscv import ExponentialMovingAverage, print_network, load_checkpoint, save_checkpoint
# from mscv.cnn import normal_init
from loss import get_default_loss
import misc_utils as utils
class Model(BaseModel):
    """AOD-Net dehazing model wrapper.

    Builds the network, optimizer and scheduler, and implements the
    training-step / forward / checkpoint interface expected by the trainer.
    """
    def __init__(self, opt):
        super(Model, self).__init__()
        self.opt = opt
        # the dehazing network itself
        self.cleaner = AODnet().to(device=opt.device)

        #####################
        # Init weights
        #####################
        # normal_init(self.cleaner)

        print_network(self.cleaner)

        self.optimizer = get_optimizer(opt, self.cleaner)
        self.scheduler = get_scheduler(opt, self.optimizer)

        # exponentially smoothed training metrics
        self.avg_meters = ExponentialMovingAverage(0.95)
        self.save_dir = os.path.join(opt.checkpoint_dir, opt.tag)

    def update(self, sample):
        """Run one optimization step on a training sample."""
        target = sample['label'].to(opt.device)
        prediction = self.forward(sample)
        loss = get_default_loss(prediction, target, self.avg_meters)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'output': prediction}

    def forward(self, sample):
        """Run the cleaner network on the input batch."""
        batch = sample['input'].to(opt.device)
        return self.cleaner(batch)

    def write_train_summary(self, update_return):
        """No-op: this model writes no extra training summaries."""
        pass

    def step_scheduler(self):
        """Advance the learning-rate scheduler by one epoch."""
        self.scheduler.step()

    def get_lr(self):
        """Return the current learning rate of the first parameter group."""
        return self.scheduler.get_lr()[0]

    def load(self, ckpt_path):
        """Restore network weights (and optimizer/scheduler state when
        resuming) from a checkpoint; return the stored epoch number."""
        load_dict = {'cleaner': self.cleaner}
        if opt.resume:
            # resuming training also restores the optimizer/scheduler state
            load_dict['optimizer'] = self.optimizer
            load_dict['scheduler'] = self.scheduler
            utils.color_print('Load checkpoint from %s, resume training.' % ckpt_path, 3)
        else:
            utils.color_print('Load checkpoint from %s.' % ckpt_path, 3)
        ckpt_info = load_checkpoint(load_dict, ckpt_path, map_location=opt.device)
        return ckpt_info.get('epoch', 0)

    def save(self, which_epoch):
        """Persist network, optimizer and scheduler state to a checkpoint."""
        save_path = os.path.join(self.save_dir, f'{which_epoch}_{opt.model}.pt')
        save_checkpoint({
            'cleaner': self.cleaner,
            'optimizer': self.optimizer,
            'scheduler': self.scheduler,
            'epoch': which_epoch,
        }, save_path)
        utils.color_print(f'Save checkpoint "{save_path}".', 3)
| StarcoderdataPython |
4813562 | <filename>ffmpeg_sample/test04_pydub_mp3_join.py<gh_stars>0
print ('pydubでmp3を連結する')
# pydub requires ffmpeg to be installed (used for decoding/encoding)
from pydub import AudioSegment
# load the three input mp3 files
audio1 = AudioSegment.from_file("./test_data/test1.mp3", "mp3")
audio2 = AudioSegment.from_file("./test_data/test2.mp3", "mp3")
audio3 = AudioSegment.from_file("./test_data/test3.mp3", "mp3")
# concatenate the audio segments back to back
audio = audio1 + audio2 + audio3
# export the combined audio as a single mp3
audio.export("./test_data/output04.mp3", format="mp3")
print('Success!')
| StarcoderdataPython |
4961205 | """Stockanalysis.com/etf Model"""
__docformat__ = "numpy"
import argparse
from typing import List
import webbrowser
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
)
# Run this when called to get all available etfs and names
# NOTE(review): this HTTP request runs at import time, so importing the
# module requires network access -- consider loading lazily
r = requests.get("https://stockanalysis.com/etf/")
soup = bs(r.text, "html.parser").findAll("ul", {"class": "no-spacing"})
all_links = soup[0].findAll("li")
etf_symbols = []
etf_names = []
# each listing item reads "SYMBOL - Fund Name"; split on the first dash
# into the parallel symbol/name lists used by the functions below
for link in all_links:
    etf_symbols.append(link.text.split("-")[0].strip(" "))
    etf_names.append(link.text.split("-")[1].strip(" "))
def limit_number_of_holdings(num: str) -> int:
    """Argparse type callable: parse *num* as an int, rejecting values above 200."""
    holdings = int(num)
    if holdings > 200:
        raise argparse.ArgumentTypeError("Asking for too many holdings")
    return holdings
def open_web(other_args: List[str]):
    """Opens webbrowser to the website page

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="web",
        description="Opens webbrowser to the website page",
    )
    parser.add_argument(
        "-n", "--name", type=str, dest="name", help="Symbol to look for", required=False
    )

    try:
        # allow the symbol to be given positionally (without the -n flag)
        if other_args:
            if "-" not in other_args[0]:
                other_args.insert(0, "-n")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        # with no symbol given, open the etf listing page itself
        if not ns_parser.name:
            webbrowser.open("https://stockanalysis.com/etf/")
            return
        # etf_symbols is the module-level list scraped at import time
        if ns_parser.name.upper() in etf_symbols:
            webbrowser.open(f"https://stockanalysis.com/etf/{ns_parser.name.lower()}")
        else:
            print("ETF symbol not available")

    except Exception as e:
        print(e, "\n")
def name_search(other_args: List[str]):
    """Search all available etfs for matching input

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="search",
        description="Search all available etfs for matching input",
    )
    parser.add_argument(
        "-n", "--name", type=str, dest="name", help="Name to search for", required=True
    )

    try:
        # allow the search term to be given positionally (without the -n flag)
        if other_args:
            if "-" not in other_args[0]:
                other_args.insert(0, "-n")
        # NOTE(review): only the first two tokens are parsed here; the parse
        # is used for validation/help while the search term itself is rebuilt
        # from the raw arguments below -- confirm this is intentional
        ns_parser = parse_known_args_and_warn(parser, other_args[:2])
        if not ns_parser:
            return

        # case-insensitive substring match against the scraped etf names;
        # the search string is everything after the flag, joined with spaces
        matching_etfs = [
            etf_symbols[idx] + " - " + etf
            for idx, etf in enumerate(etf_names)
            if " ".join(other_args[1:]).lower() in etf.lower()
        ]

        print(*matching_etfs, sep="\n")

        if len(matching_etfs) == 0:
            print("No matches found")
        print("")

    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def etf_overview(other_args: List[str]):
    """Print an overview table (last price plus key stats) for one ETF.

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="overview",
        description="Get overview data for selected etf",
    )
    parser.add_argument(
        "-n", "--name", type=str, dest="name", help="Symbol to look for", required=True
    )
    try:
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-n")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        if ns_parser.name.upper() not in etf_symbols:
            print("ETF symbol not available")
            return
        response = requests.get(f"https://stockanalysis.com/etf/{ns_parser.name}")
        page = bs(response.text, "html.parser")
        cells = page.find("div", {"class": "info"}).findAll("td")
        # Seed the table with the live quote before the info-box stats.
        column = ["Last Price"]
        value = [page.find("div", {"class": "quote"}).find("td", {"id": "qLast"}).text]
        # The info box alternates label/value cells; the last four are skipped.
        for label in cells[:-4:2]:
            column.append(label.text)
        for stat in cells[1:-4:2]:
            value.append(stat.text)
        df = pd.DataFrame(value, index=column, columns=[ns_parser.name.upper()])
        print(tabulate(df, headers=df.columns, tablefmt="fancy_grid"))
        print("")
        return
    except Exception as e:
        print(e, "\n")
def etf_holdings(other_args: List[str]):
    """Display the top holdings (ticker, weight, shares) of an ETF.

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="holdings",
        description="Look at ETF holdings",
    )
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        dest="name",
        help="ETF to get holdings for",
        required=True,
    )
    parser.add_argument(
        "-l",
        "--limit",
        type=limit_number_of_holdings,
        dest="limit",
        help="Number of holdings to get",
        default=20,
    )
    try:
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-n")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        response = requests.get(
            f"https://stockanalysis.com/etf/{ns_parser.name}/holdings"
        )
        table_body = (
            bs(response.text, "html.parser")
            .find("table", {"class": "fullholdings"})
            .find("tbody")
        )
        tick, percent, shares = [], [], []
        # Rows are 1-indexed so the limit counts holdings, not offsets.
        for position, row in enumerate(table_body.findAll("tr"), 1):
            cells = row.findAll("td")
            tick.append(cells[1].text)
            percent.append(cells[3].text)
            shares.append(cells[4].text)
            if position >= ns_parser.limit:
                break
        df = pd.DataFrame(data=[tick, percent, shares]).T
        print(
            tabulate(
                df, headers=["Ticker", "% of ETF", "Shares"], tablefmt="fancy_grid"
            )
        )
        print("")
    except Exception as e:
        print(e, "\n")
def compare_etfs(other_args: List[str]):
    """Compare overview statistics of several ETFs side by side.

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments (the -n value is a comma-separated symbol list)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="compare",
        description="Compare selected ETFs",
    )
    parser.add_argument(
        "-n",
        "--names",
        type=str,
        dest="names",
        help="Symbols to compare",
        required=True,
    )
    try:
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-n")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        to_compare = [name.upper() for name in ns_parser.names.split(",")]
        df = pd.DataFrame(columns=to_compare)
        # Defined up-front: previously this name was only bound inside the
        # loop, so `df.index = column` raised an UnboundLocalError (masked by
        # the broad except below) when every requested symbol was unknown.
        column = []
        for etf in to_compare:
            if etf not in etf_symbols:
                print(f"{etf} not found")
                df = df.drop(etf, axis=1)
                continue
            r1 = requests.get(f"https://stockanalysis.com/etf/{etf}")
            soup1 = (
                bs(r1.text, "html.parser")
                .find("div", {"class": "info"})
                .findAll("td")
            )
            # Seed with the live quote, then the alternating label/value
            # cells of the info box (skipping the last four cells).
            column = ["Last Price"]
            value = [
                bs(r1.text, "html.parser")
                .find("div", {"class": "quote"})
                .find("td", {"id": "qLast"})
                .text
            ]
            for row in soup1[:-4:2]:
                column.append(row.text)
            for row in soup1[1:-4:2]:
                value.append(row.text)
            df[etf] = value
        if df.columns.empty:
            # Nothing valid to show; bail out instead of printing an empty grid.
            print("")
            return
        df.index = column
        print(tabulate(df, headers=df.columns, tablefmt="fancy_grid"))
        print("")
    except Exception as e:
        print(e, "\n")
| StarcoderdataPython |
4977557 | # Generated by Django 2.2.8 on 2019-12-14 06:31
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a digits-only ``account_number`` to Service."""

    dependencies = [
        ('service', '0019_rent_is_paid'),
    ]
    operations = [
        migrations.AddField(
            model_name='service',
            name='account_number',
            # Optional 16-char field; the RegexValidator rejects any
            # non-digit characters at validation time.
            field=models.CharField(blank=True, max_length=16, null=True, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Only digits are allowed.')], verbose_name='Account number'),
        ),
    ]
| StarcoderdataPython |
3240830 | <gh_stars>10-100
"""
Utilities to implement Single Sign On for Discourse with a Python managed
authentication DB
https://meta.discourse.org/t/official-single-sign-on-for-discourse/13045
Thanks to <NAME> for the heavy lifting, detailed at
https://meta.discourse.org/t/sso-example-for-django/14258
A SSO request handler might look something like
@login_required
def discourse_sso_view(request):
payload = request.GET.get('sso')
signature = request.GET.get('sig')
try:
nonce = sso_validate(payload, signature, SECRET)
except DiscourseError as e:
return HTTP400(e.args[0])
url = sso_redirect_url(nonce, SECRET, request.user.email,
request.user.id, request.user.username)
return redirect('http://discuss.example.com' + url)
"""
from base64 import b64encode, b64decode
import hmac
import hashlib
try: # py3
from urllib.parse import unquote, urlencode, parse_qs
except ImportError:
from urllib import unquote, urlencode
from urlparse import parse_qs
from pydiscourse.exceptions import DiscourseError
def sso_validate(payload, signature, secret):
    """Check a Discourse SSO request and extract its nonce.

    payload: provided by Discourse HTTP call to your SSO endpoint as sso GET param
    signature: provided by Discourse HTTP call to your SSO endpoint as sig GET param
    secret: the secret key you entered into Discourse sso secret

    return value: The nonce used by discourse to validate the redirect URL
    """
    if payload is None or signature is None:
        raise DiscourseError("No SSO payload or signature.")
    if not secret:
        raise DiscourseError("Invalid secret..")

    payload = unquote(payload)
    if not payload:
        raise DiscourseError("Invalid payload..")

    decoded = b64decode(payload.encode("utf-8")).decode("utf-8")
    if "nonce" not in decoded:
        raise DiscourseError("Invalid payload..")

    # Recompute the HMAC over the (url-unquoted, still base64) payload and
    # compare against the signature Discourse sent.
    expected = hmac.new(
        secret.encode("utf-8"), payload.encode("utf-8"), digestmod=hashlib.sha256
    ).hexdigest()
    if expected != signature:
        raise DiscourseError("Payload does not match signature.")

    # The decoded payload is a querystring; only `nonce` matters here.
    return parse_qs(decoded)["nonce"][0]
def sso_payload(secret, **kwargs):
    """Build the signed ``sso``/``sig`` querystring for the given fields."""
    packed = b64encode(urlencode(kwargs).encode("utf-8"))
    digest = hmac.new(secret.encode("utf-8"), packed, digestmod=hashlib.sha256)
    return urlencode({"sso": packed, "sig": digest.hexdigest()})
def sso_redirect_url(nonce, secret, email, external_id, username, **kwargs):
    """
    nonce: returned by sso_validate()
    secret: the secret key you entered into Discourse sso secret
    email: email address of the user who logged in
    external_id: the internal id of the logged in user
    username: username of the logged in user

    return value: URL to redirect users back to discourse, now logged in as username
    """
    fields = dict(kwargs)
    fields["nonce"] = nonce
    fields["email"] = email
    fields["external_id"] = external_id
    fields["username"] = username
    return "/session/sso_login?%s" % sso_payload(secret, **fields)
| StarcoderdataPython |
8076537 | # python -m pip install matplotlib
import matplotlib.pyplot as plt
# python -m pip install numpy
import numpy as np
# python -m pip install scipy
from scipy.stats import norm
from scipy import stats
def variavel_aleatoria(nome: str, media: int, desvio_padrao: int, qtd: int):
    """Draw a sorted normal random sample and persist it to disk.

    Parameters
    ----------
    nome : str
        Label used in the output file name ("Respostas/VA <nome>.txt").
    media : int
        Mean of the normal distribution.
    desvio_padrao : int
        Standard deviation of the normal distribution.
    qtd : int
        Number of samples to draw.

    Returns
    -------
    numpy.ndarray
        The sorted sample.
    """
    va = np.sort(np.random.normal(media, desvio_padrao, qtd))
    # Context manager guarantees the file is closed even if savetxt fails
    # (the original opened/closed the file manually).
    with open(f'Respostas/VA {nome}.txt', 'w') as arquivo:
        np.savetxt(arquivo, va)
    return va
def histograma(dados: list, titulo: str = 'Histograma'):
    """Plot a histogram of *dados*, save it as a PNG under Respostas/, and display it."""
    plt.hist(dados)
    plt.title(titulo)
    plt.xlabel('Valor')
    plt.ylabel('fa')
    plt.grid(True)
    plt.draw()
    # Persist the figure before displaying it.
    plt.savefig(f'Respostas/{titulo}.png')
    plt.show()
    plt.close()
def fdc(dados: list, titulo: str = 'FDC'):
    """Plot the empirical cumulative distribution of *dados*, save and display it.

    NOTE(review): assumes *dados* is already sorted ascending — confirm callers.
    """
    # y grows linearly from 0 to 1 across the sample, giving each point
    # its cumulative proportion.
    valores_proporcionais = 1. * np.arange(len(dados)) / (len(dados) - 1)
    plt.plot(dados, valores_proporcionais, scaley=True)
    plt.title(titulo)
    plt.xlabel('Valor')
    plt.ylabel('Percentual')
    # Fixed axes/ticks so every saved figure uses the same frame.
    plt.xlim(0, 10)
    plt.ylim(0, 1)
    plt.grid(True)
    plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    plt.yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    plt.draw()
    plt.savefig(f'Respostas/{titulo}.png')
    plt.show()
    plt.close()
def pr_maior(dados: list, nome: str, valor: int):
    """Print and persist the empirical probability Pr[nome > valor]."""
    acima = (dados > valor).sum()
    probabilidade = acima / len(dados)
    print(f'Pr[{nome}>{valor}] = {probabilidade * 100}%')
    with open(f'Respostas/Pr[{nome} gt {valor}].txt', 'w') as arquivo:
        arquivo.write(f'{probabilidade * 100}%')
def pr_igual(dados: list, nome: str, valor: int):
    """Print and persist the empirical probability Pr[nome == valor]."""
    iguais = (dados == valor).sum()
    probabilidade = iguais / len(dados)
    print(f'Pr[{nome}={valor}] = {probabilidade * 100}%')
    with open(f'Respostas/Pr[{nome} eq {valor}].txt', 'w') as arquivo:
        arquivo.write(f'{probabilidade * 100}%')
# Checando o valor crítico do teste de Kolmogorov-Smirnov
def kolmogorov_smirnov_critico(n):
    """Return the one-sample Kolmogorov-Smirnov critical value at alpha=0.05.

    Parameters
    ----------
    n : int
        Sample size (must be >= 1).

    Returns
    -------
    float
        Critical D statistic for a 95% confidence level.
    """
    # Fonte: https://www.soest.hawaii.edu/GG/FACULTY/ITO/GG413/K_S_Table_one_Sample.pdf
    # Fonte: http://www.real-statistics.com/statistics-tables/kolmogorov-smirnov-table/
    # alpha = 0.05 (95% confidence level)
    if n <= 40:
        # Tabulated critical values for n = 1..40. The original dead
        # `else: pass` branch (which left the result unbound) was removed.
        kolmogorov_critico = [0.97500, 0.84189, 0.70760, 0.62394, 0.56328, 0.51926, 0.48342, 0.45427, 0.43001, 0.40925,
                              0.39122, 0.37543, 0.36143, 0.34890, 0.33760, 0.32733, 0.31796, 0.30936, 0.30143, 0.29408,
                              0.28724, 0.28087, 0.27490, 0.26931, 0.26404, 0.25907, 0.25438, 0.24993, 0.24571, 0.24170,
                              0.23788, 0.23424, 0.23076, 0.22743, 0.22425, 0.22119, 0.21826, 0.21544, 0.21273, 0.21012]
        return kolmogorov_critico[n - 1]
    # Large-sample approximation for n > 40.
    return 1.36 / (np.sqrt(n))
def kolmogorov_smirnov(dados_x: list):
    """Run a KS normality test on dados_x and print the 95%-confidence verdict."""
    media = np.mean(dados_x)
    # Sample standard deviation (ddof=1).
    std = np.std(dados_x, ddof=1)
    ks_critico = kolmogorov_smirnov_critico(len(dados_x))
    # Normality test parameterised with the sample's own mean/std.
    ks_stat, ks_p_valor = stats.kstest(dados_x, cdf='norm', args=(media, std), N=len(dados_x))
    print("\nValor de estatistica")
    print(ks_stat)
    print("\nValor de KS critico")
    print(ks_critico)
    if ks_stat <= ks_critico:
        print(f'\nCom 95% de confianca, aceitamos a hipotese de normalidade dos dados, segundo o teste de Kolmogorov-Smirnov, que indica que o valor da estatistica e menor ou igual ao valor de KS critico')
    else:
        print(f'\nCom 95% de confianca, rejeitamos a hipotese de normalidade dos dados, segundo o teste de Kolmogorov-Smirnov, que indica que o valor da estatistica e maior que o valor de KS critico')
def teste_t(dados_x: list, dados_y:list):
# Cálculo do Teste T usando os dados de X e Y
valor_t, p = stats.ttest_ind(dados_x,dados_y)
if p>=0.05:
print(f'\nCom 95% de confianca, aceitamos a hipotese de medias iguais, considerando o valor p=[{p}]')
else:
print(f'\nCom 95% de confianca, rejeitamos a hipotese de medias iguais, considerando o valor p=[{p}]')
if __name__ == '__main__':
    # Draw two 1000-sample normal variables: X ~ N(5,1), Y ~ N(6,1).
    x = variavel_aleatoria('X', 5, 1, 1000)
    y = variavel_aleatoria('Y', 6, 1, 1000)
    # Exploratory plots (saved under Respostas/).
    histograma(x, 'Histograma de X')
    histograma(y, 'Histograma de Y')
    fdc(x, 'FDC de X')
    fdc(y, 'FDC de Y')
    # Empirical probabilities.
    pr_maior(x, 'X', 6)
    pr_igual(y, 'Y', 0)
    # Hypothesis tests: normality of X; equality of means of X and Y.
    kolmogorov_smirnov(x)
    teste_t(x,y)
| StarcoderdataPython |
6503315 | <filename>Code/Scripts/plot_for_month.py
from Code.Scripts.popularity_calculator import calculate_popularity,freq_of_popularity
import xlrd
import matplotlib.pyplot as plt
# Spreadsheet input: column 1 holds a month number; column 0 holds the value
# grouped per month (presumably a sentiment label — confirm against the
# annotation pipeline).
file_name = 'D:\\Users\\yashk\\Campaign-Assistant\\Data\\Annotated\\graph_month_input.xls'
workbook = xlrd.open_workbook(file_name)
sheet = workbook.sheet_by_index(0)
rows = sheet.nrows
print(rows)
# Group the column-0 values by month, skipping the header row.
month = {}
for i in range(1,rows):
    month_value = sheet.cell_value(i,1)
    if month_value not in month.keys():
        month[month_value] = [sheet.cell_value(i,0)]
    else:
        month[month_value].append(sheet.cell_value(i,0))
print(month[1])
print(month[2])
print(month[3])
# Popularity score per month, plotted as a line chart (scaled x100 below).
x_axis = [1,2,3]
month1 = freq_of_popularity(month[1])
month2 = freq_of_popularity(month[2])
month3 = freq_of_popularity(month[3])
# NOTE(review): the 'nut' count is passed twice (2nd and 4th argument) to
# calculate_popularity for every month — confirm this is intended.
y_axis = [calculate_popularity(month1['pos'],month1['nut'],month1['neg'],month1['nut']),
calculate_popularity(month2['pos'],month2['nut'],month2['neg'],month2['nut']),
calculate_popularity(month3['pos'],month3['nut'],month3['neg'],month3['nut'])
]
print(x_axis)
print(y_axis)
plt.plot(x_axis,[item*100 for item in y_axis])
plt.show() | StarcoderdataPython |
11361306 | # Base class for a CLI ThreadedConnection
#
# Copyright (c) 2018 Ensoft Ltd
import re
from entrance.connection.threaded import ThreadedConnection
class ThreadedCLIConnection(ThreadedConnection):
    """
    Base class for a ThreadedConnection whose worker thread
    maintains a CLI session
    """

    async def send(self, data, override=False):
        """Push data into the CLI session."""
        return await self._request("send", override, data)

    async def recv(self, nbytes=0, override=False):
        """Await data from the CLI session.

        This blocks the worker thread until some data is available;
        nbytes == 0 means take everything available in one shot.
        """
        return await self._request("recv", override, nbytes)

    async def settimeout(self, timeout, override=False):
        """Apply a timeout to send/recv operations.

        On expiry, recv yields a shorter or empty response and sends are
        silently dropped.
        """
        return await self._request("settimeout", override, timeout)

    # Prompt-matching regexps used by expect_prompt below
    _prompt = re.compile(r"(.*)RP/0/(RP)?0/CPU0:[^\r\n]*?#", re.DOTALL)
    _interesting = re.compile(r"[^\n]*\n[^\n]* UTC\r\n(.*)", re.DOTALL)

    async def expect_prompt(self, strip_top=False, override=False):
        """Accumulate output until a prompt appears; return what preceded it.

        With strip_top=True, an initial line plus timestamp line is removed
        from the returned text.
        """
        collected = bytes()
        while True:
            collected += await self.recv(override=override)
            match = self._prompt.match(collected.decode())
            if not match:
                continue
            text = match.group(1)
            if strip_top:
                inner = self._interesting.match(text)
                if inner:
                    text = inner.group(1)
            return text
| StarcoderdataPython |
1899095 | import sys
import subprocess
from moban import constants, exceptions
from moban.externals import reporter, file_system
def git_clone(requires):
    """Clone or refresh the git repositories named in *requires*.

    Each requirement is cloned into the moban cache directory on first use;
    on later runs the existing checkout is pulled and, if requested, a
    reference/branch is checked out and submodules are updated.

    :param requires: iterable of requirement objects exposing ``git_url``,
        ``reference``, ``branch``, ``submodule`` and ``clone_params()``
    """
    from git import Repo

    if sys.platform != "win32":
        # Unfortunately for windows user, the following function
        # needs shell=True, which expose security risk. I would
        # rather not to trade it with its marginal benefit
        make_sure_git_is_available()
    moban_home = get_moban_home()
    file_system.mkdir_p(moban_home)
    for require in requires:
        repo_name = get_repo_name(require.git_url)
        local_repo_folder = file_system.path_join(moban_home, repo_name)
        if file_system.exists(local_repo_folder):
            # Existing checkout: refresh it in place.
            reporter.report_git_pull(repo_name)
            repo = Repo(local_repo_folder)
            repo.git.pull()
            # A pinned reference takes priority over a branch name.
            if require.reference:
                repo.git.checkout(require.reference)
            elif require.branch:
                repo.git.checkout(require.branch)
            if require.submodule:
                reporter.report_info_message("updating submodule")
                repo.git.submodule("update")
        else:
            # First use: clone afresh with the requirement's extra options.
            reporter.report_git_clone(require.git_url)
            repo = Repo.clone_from(
                require.git_url, local_repo_folder, **require.clone_params()
            )
            if require.submodule:
                reporter.report_info_message("checking out submodule")
                repo.git.submodule("update", "--init")
def get_repo_name(repo_url):
    """Derive the repository name from a git URL, reporting invalid URLs."""
    import giturlparse
    from giturlparse.parser import ParserError

    try:
        parsed = giturlparse.parse(repo_url.rstrip("/"))
        return parsed.name
    except ParserError:
        # Surface a user-facing message before re-raising for the caller.
        reporter.report_error_message(
            constants.MESSAGE_INVALID_GIT_URL % repo_url
        )
        raise
def get_moban_home():
    """Return the directory under which moban caches cloned repositories."""
    from appdirs import user_cache_dir

    cache_root = user_cache_dir(appname=constants.PROGRAM_NAME)
    return file_system.path_join(cache_root, constants.MOBAN_REPOS_DIR_NAME)
def make_sure_git_is_available():
    """Raise ``NoGitCommand`` if the git executable cannot be run.

    Raises
    ------
    exceptions.NoGitCommand
        When invoking ``git --help`` fails for any reason.
    """
    try:
        subprocess.check_output(["git", "--help"])
    except Exception as e:
        # Chain the original failure so the root cause (missing binary,
        # permission error, ...) stays visible in the traceback.
        raise exceptions.NoGitCommand("Please install git command") from e
| StarcoderdataPython |
3313764 | import pathlib
from graphysio.dialogs import askOpenFilePath
from .csv import CsvReader
from .edf import EdfReader
from .parquet import ParquetReader
# Map file extension -> reader class for the formats this build supports.
file_readers = {'csv': CsvReader, 'parquet': ParquetReader, 'edf': EdfReader}
# Keep only readers flagged available (presumably depends on optional
# backends — confirm in the reader modules).
file_readers = {k: mod for k, mod in file_readers.items() if mod.is_available}
class FileReader:
    """Prompts for a data file and dispatches to the matching format reader."""

    def __init__(self):
        super().__init__()
        self.reader = None
        # Build the file-dialog filter string from the available readers.
        per_ext = [
            f'{ext.upper()} files (*.{ext})(*.{ext})' for ext in file_readers
        ]
        supported = ' '.join(f'*.{ext}' for ext in file_readers)
        self.file_filters = f'All supported ({supported});;' + ';;'.join(per_ext)

    def askFile(self, folder='') -> 'pathlib.PurePath':
        """Ask the user for a file, configure the matching reader and return
        the chosen file's parent folder (or *folder* when cancelled)."""
        filepath, ext = askOpenFilePath(
            'Open File', folder=folder, filter=self.file_filters
        )
        if not filepath:
            return folder
        self.reader = file_readers[ext]()
        self.reader.set_data({'filepath': filepath})
        self.reader.askUserInput()
        # Return the parent folder for caching
        return filepath.parent

    # Meant to be executed in seperate thread
    def get_plotdata(self) -> 'PlotData':
        """Run the configured reader; None when no file was chosen yet."""
        return self.reader() if self.reader else None
| StarcoderdataPython |
1658756 | <reponame>Blddwkb/awesome-DeepLearning
#!/usr/bin/env python
# coding: utf-8
# In[37]:
# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原
# View dataset directory.
# This directory will be recovered automatically after resetting environment.
get_ipython().system('ls /home/aistudio/data')
# In[38]:
# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.
# View personal work directory.
# All changes under this directory will be kept even after reset.
# Please clean unnecessary files in time to speed up environment loading.
get_ipython().system('ls /home/aistudio/work')
# In[39]:
# 如果需要进行持久化安装, 需要使用持久化路径, 如下方代码示例:
# If a persistence installation is required,
# you need to use the persistence path as the following:
get_ipython().system('mkdir /home/aistudio/external-libraries')
get_ipython().system('pip install beautifulsoup4 -t /home/aistudio/external-libraries')
# In[40]:
# 同时添加如下代码, 这样每次环境(kernel)启动的时候只要运行下方代码即可:
# Also add the following code,
# so that every time the environment (kernel) starts,
# just run the following code:
import sys
sys.path.append('/home/aistudio/external-libraries')
# 请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法. <br>
# Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions.
# In[41]:
#导入相关的库
import paddle
from paddle.nn import Linear
import paddle.nn.functional as F
import numpy as np
import os
import random
import matplotlib.pyplot as plt
# In[42]:
# 编写加载数据的函数
def load_data():
    """Load the Boston housing data, normalize it and split train/test.

    Also records the normalization parameters in module-level globals so
    predictions can be de-normalized later.
    """
    # Import the data from the file
    datafile='./work/housing.data'
    data=np.fromfile(datafile,sep=' ',dtype=np.float32)
    # Each record has 14 items: the first 13 are features, the 14th is the
    # median house price (MEDV)
    feature_names = [ 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV' ]
    feature_num = len(feature_names)
    # Reshape the raw 1-D data into shape [N, 14]
    data = data.reshape([data.shape[0] // feature_num, feature_num])
    # Split the original dataset into training and test sets:
    # 80% for training, 20% for testing.
    # The two sets must not overlap.
    ratio=0.8
    offset=int(data.shape[0]*ratio)
    training_data=data[:offset]
    # Max / min / mean of the training set (statistics computed on the
    # training split only)
    maximums,minimums,avgs=training_data.max(axis=0),training_data.min(axis=0),training_data.sum(axis=0)/training_data.shape[0]
    # Record the normalization parameters for de-normalizing predictions
    global max_values
    global min_values
    global avg_values
    max_values=maximums
    min_values=minimums
    avg_values=avgs
    # Normalize every column of the whole dataset
    for i in range(feature_num):
        data[:,i]=(data[:,i]-avgs[i])/(maximums[i]-minimums[i])
    # Final train/test split (same 80/20 ratio, now on normalized data)
    training_data=data[:offset]
    test_data=data[offset:]
    return training_data,test_data
# In[43]:
# 定义网络结构为两层,全连接层+激活函数+全连接层
class Net(paddle.nn.Layer):
    """Two-layer fully connected regression network: 13 -> 128 -> ReLU -> 1."""

    def __init__(self):
        # Initialize parent-class state
        super(Net,self).__init__()
        self.fc1=Linear(in_features=13,out_features=128)
        self.fc2=Linear(in_features=128,out_features=1)

    def forward(self,inputs):
        """Forward pass: linear -> ReLU -> linear."""
        x=self.fc1(inputs)
        x=F.relu(x)
        x=self.fc2(x)
        return x
# In[44]:
# Create an instance of the network
model=Net()
model.train()
# The model is now in training mode
training_data, test_data = load_data()
opt = paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters())
# Adam is used as the optimizer
# In[45]:
EPOCH_NUM = 100 # number of training epochs
BATCH_SIZE = 10 # mini-batch size
# Outer loop: one pass over the training set per epoch
losses=[]
for epoch_id in range(1,EPOCH_NUM+1):
    # Shuffle the training data before every epoch
    np.random.shuffle(training_data)
    # Split the training data into batches of 10 records each
    mini_batches = [training_data[k:k+BATCH_SIZE] for k in range(0, len(training_data), BATCH_SIZE)]
    # Inner loop: iterate over the mini-batches
    for iter_id, mini_batch in enumerate(mini_batches):
        x = np.array(mini_batch[:, :-1]) # features of the current batch
        y = np.array(mini_batch[:, -1:]) # labels of the current batch (true house prices)
        # Convert the numpy data into Paddle dynamic-graph tensors
        house_features = paddle.to_tensor(x)
        prices = paddle.to_tensor(y)
        # Forward pass
        predicts = model(house_features)
        # Compute the (squared-error) loss
        loss = F.square_error_cost(predicts, label=prices)
        avg_loss = paddle.mean(loss)
        losses.append(avg_loss.numpy())
        if iter_id % 20==0:
            print("epoch: {}, iter: {}, loss is: {}".format(epoch_id, iter_id, avg_loss.numpy()))
        # Backpropagation
        avg_loss.backward()
        # Minimize the loss: update the parameters
        opt.step()
        # Clear the gradients
        opt.clear_grad()
# In[46]:
# Plot the recorded loss curve over all iterations
plot_x = np.arange(len(losses))
plot_y = np.array(losses)
plt.plot(plot_x, plot_y)
plt.show()
# In[47]:
# Save the model parameters to the file LR_model.pdparams
paddle.save(model.state_dict(), 'LR_model.pdparams')
print("模型保存成功,模型参数保存在LR_model.pdparams中")
# In[48]:
# The argument is the path of the saved model-parameter file
model_dict = paddle.load('LR_model.pdparams')
model.load_dict(model_dict)
model.eval()
# Pick one random record from the loaded test set as the test sample
idx = np.random.randint(0, test_data.shape[0])
one_data, label = test_data[idx, :-1], test_data[idx, -1]
# Reshape this record to [1,13]
one_data = one_data.reshape([1,-1])
# Convert the data into dynamic-graph tensor format
one_data = paddle.to_tensor(one_data)
predict = model(one_data)
# De-normalize the prediction back to the original price scale
predict = predict * (max_values[-1] - min_values[-1]) + avg_values[-1]
# De-normalize the label as well
label = label * (max_values[-1] - min_values[-1]) + avg_values[-1]
print("Inference result is {}, the corresponding label is {}".format(predict.numpy(), label))
| StarcoderdataPython |
5171582 | import unittest
import collections
from Core.Rate import Rate
from Core.Structure import StructureAgent
from Core.Atomic import AtomicAgent
from Core.Complex import Complex
from Core.Rule import Rule
from Core.Side import Side
from Core.Reaction import Reaction
from Parsing.ParseBCSL import Parser
class TestRule(unittest.TestCase):
    """Unit tests for Core.Rule: printing, reaction generation, parsing,
    compatibility and context reduction."""

    def setUp(self):
        """Build the shared agents, complexes, rules and reactions used below."""
        self.a1 = AtomicAgent("S", "u")
        self.a2 = AtomicAgent("S", "p")
        self.a3 = AtomicAgent("B", "_")
        self.a4 = AtomicAgent("B", "-")
        self.a5 = AtomicAgent("B", "+")
        self.s1 = StructureAgent("K", {self.a1})
        self.s2 = StructureAgent("B", set())
        self.s3 = StructureAgent("K", {self.a2})
        self.s4 = StructureAgent("B", set())
        self.s5 = StructureAgent("D", {self.a3})
        self.s6 = StructureAgent("K", {self.a4})
        self.s7 = StructureAgent("K", {self.a5})
        self.c1 = Complex([self.s1, self.s2], "cyt")
        self.c2 = Complex([self.s3], "cyt")
        self.c3 = Complex([self.s2], "cyt")
        self.c4 = Complex([self.s5], "cell")
        # rules
        sequence_1 = (self.s1, self.s2, self.s3, self.s4)
        mid_1 = 2
        compartments_1 = ["cyt"] * 4
        complexes_1 = [(0, 1), (2, 2), (3, 3)]
        pairs_1 = [(0, 2), (1, 3)]
        rate_1 = Rate("3.0*[K()::cyt]/2.0*v_1")
        self.r1 = Rule(sequence_1, mid_1, compartments_1, complexes_1, pairs_1, rate_1)
        sequence_2 = (self.s1, self.s2, self.s3, self.s4, self.s5)
        mid_2 = 2
        compartments_2 = ["cyt"] * 4 + ["cell"]
        complexes_2 = [(0, 1), (2, 2), (3, 3), (4, 4)]
        pairs_2 = [(0, 2), (1, 3), (None, 4)]
        rate_2 = Rate("3.0*[K()::cyt]/2.0*v_1")
        self.r2 = Rule(sequence_2, mid_2, compartments_2, complexes_2, pairs_2, rate_2)
        sequence_3 = (self.s6, self.s2, self.s5, self.s7, self.s4)
        mid_3 = 3
        compartments_3 = ["cyt"] * 2 + ["cell"] + ["cyt"] * 2
        complexes_3 = [(0, 1), (2, 2), (3, 3), (4, 4)]
        pairs_3 = [(0, 3), (1, 4), (2, None)]
        rate_3 = Rate("3.0*[K(T{3+})::cyt]/2.0*v_1")
        self.r3 = Rule(sequence_3, mid_3, compartments_3, complexes_3, pairs_3, rate_3)
        # special cases (degradation r4 and creation r5 rules)
        self.s1_s = StructureAgent("X", set())
        self.s2_s = StructureAgent("Y", set())
        self.s3_s = StructureAgent("Z", set())
        sequence_4 = (self.s1_s, )
        mid_4 = 1
        compartments_4 = ["rep"]
        complexes_4 = [(0, 0)]
        pairs_4 = [(0, None)]
        rate_4 = Rate("k1*[X()::rep]")
        self.r4 = Rule(sequence_4, mid_4, compartments_4, complexes_4, pairs_4, rate_4)
        sequence_5 = (self.s2_s, )
        mid_5 = 0
        compartments_5 = ["rep"]
        complexes_5 = [(0, 0)]
        pairs_5 = [(None, 0)]
        rate_5 = Rate("1.0/(1.0+([X()::rep])**4.0)")
        self.r5 = Rule(sequence_5, mid_5, compartments_5, complexes_5, pairs_5, rate_5)
        # reactions
        lhs = Side([self.c1])
        rhs = Side([self.c2, self.c3, self.c4])
        self.reaction1 = Reaction(lhs, rhs, rate_2)
        # create (fixtures for create_reactions over the full signature)
        self.t_i = AtomicAgent("T", "i")
        self.t_a = AtomicAgent("T", "a")
        self.a4_p = AtomicAgent("C", "p")
        self.a4_u = AtomicAgent("C", "u")
        self.u2_c1_p = AtomicAgent("U", "p")
        self.u2_c1_u = AtomicAgent("U", "u")
        self.s6 = StructureAgent("D", set())
        self.s6_c1_p = StructureAgent("D", {self.a4_p})
        self.s6_c1_u = StructureAgent("D", {self.a4_u})
        self.s2_c1_p = StructureAgent("B", {self.u2_c1_p})
        self.s2_c1_u = StructureAgent("B", {self.u2_c1_u})
        self.s1_c1_a = StructureAgent("K", {self.a1, self.t_a})
        self.s1_c1_i = StructureAgent("K", {self.a1, self.t_i})
        self.s3_c1_a = StructureAgent("K", {self.a2, self.t_a})
        self.s3_c1_i = StructureAgent("K", {self.a2, self.t_i})
        sequence_c1 = (self.s1, self.s2, self.s3, self.s4, self.s6)
        mid_c1 = 2
        compartments_c1 = ["cyt"] * 5
        complexes_c1 = [(0, 0), (1, 1), (2, 3), (4, 4)]
        pairs_c1 = [(0, 2), (1, 3), (None, 4)]
        rate_c1 = Rate("3*[K()::cyt]/2*v_1")
        self.c1_c1 = Complex([self.s2_c1_u], "cyt")  # B(U{u})::cyt
        self.c1_c2 = Complex([self.s2_c1_p], "cyt")  # B(U{p})::cyt
        self.c1_c3 = Complex([self.s1_c1_a], "cyt")  # K(S{u},T{a})::cyt
        self.c1_c4 = Complex([self.s1_c1_i], "cyt")  # K(S{u},T{i})::cyt
        self.c1_c5 = Complex([self.s3_c1_a, self.s2_c1_u], "cyt")  # K(S{p},T{a}).B(U{u})::c
        self.c1_c6 = Complex([self.s3_c1_i, self.s2_c1_u], "cyt")  # K(S{p},T{i}).B(U{u})::c
        self.c1_c7 = Complex([self.s3_c1_i, self.s2_c1_p], "cyt")  # K(S{p},T{i}).B(U{p})::c
        self.c1_c8 = Complex([self.s3_c1_a, self.s2_c1_p], "cyt")  # K(S{p},T{a}).B(U{p})::c
        self.c1_c9 = Complex([self.s6_c1_p], "cyt")  # D(C{p})::cyt
        self.c1_c10 = Complex([self.s6_c1_u], "cyt")  # D(C{u})::cyt
        self.rule_c1 = Rule(sequence_c1, mid_c1, compartments_c1, complexes_c1, pairs_c1, rate_c1)
        self.reaction_c1_1 = Reaction(Side([self.c1_c1, self.c1_c3]),
                                      Side([self.c1_c5, self.c1_c9]), rate_c1)
        self.reaction_c1_2 = Reaction(Side([self.c1_c1, self.c1_c3]),
                                      Side([self.c1_c5, self.c1_c10]), rate_c1)
        self.reaction_c1_3 = Reaction(Side([self.c1_c2, self.c1_c4]),
                                      Side([self.c1_c7, self.c1_c10]), rate_c1)
        self.reaction_c1_4 = Reaction(Side([self.c1_c1, self.c1_c4]),
                                      Side([self.c1_c6, self.c1_c9]), rate_c1)
        self.reaction_c1_5 = Reaction(Side([self.c1_c2, self.c1_c3]),
                                      Side([self.c1_c8, self.c1_c9]), rate_c1)
        self.reaction_c1_6 = Reaction(Side([self.c1_c2, self.c1_c3]),
                                      Side([self.c1_c8, self.c1_c10]), rate_c1)
        self.reaction_c1_7 = Reaction(Side([self.c1_c1, self.c1_c4]),
                                      Side([self.c1_c6, self.c1_c10]), rate_c1)
        self.reaction_c1_8 = Reaction(Side([self.c1_c2, self.c1_c4]),
                                      Side([self.c1_c7, self.c1_c9]), rate_c1)
        self.reactions_c1 = {self.reaction_c1_1, self.reaction_c1_2, self.reaction_c1_3, self.reaction_c1_4,
                             self.reaction_c1_5, self.reaction_c1_6, self.reaction_c1_7, self.reaction_c1_8}
        # context no change (rule whose agents are already fully specified)
        sequence_no_change = (self.s1_c1_a, self.s2_c1_u, self.s3_c1_a, self.s2_c1_u, self.s6_c1_p)
        self.rule_no_change = Rule(sequence_no_change, mid_c1, compartments_c1, complexes_c1, pairs_c1, rate_c1)
        # parsing
        self.parser = Parser("rule")
        self.rule_no_rate = Rule(sequence_1, mid_1, compartments_1, complexes_1, pairs_1, None)

    def test_eq(self):
        """A rule equals itself."""
        self.assertEqual(self.r1, self.r1)

    def test_print(self):
        """Rules render in BCSL syntax with their rate attached."""
        self.assertEqual(str(self.r1), "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt @ 3.0*[K()::cyt]/2.0*v_1")
        self.assertEqual(str(self.r2),
                         "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt + D(B{_})::cell @ 3.0*[K()::cyt]/2.0*v_1")

    def test_create_complexes(self):
        """A rule decomposes into its left- and right-hand complexes."""
        lhs = Side([self.c1])
        rhs = Side([self.c2, self.c3, self.c4])
        self.assertEqual(self.r2.create_complexes(), (lhs, rhs))

    def test_to_reaction(self):
        """A rule converts to the equivalent Reaction object."""
        self.assertEqual(self.r2.to_reaction(), self.reaction1)

    def test_create_reactions(self):
        """Context elaboration generates every concrete reaction of a rule."""
        atomic_signature = {"T": {"a", "i"}, "U": {"p", "u"}, "C": {"p", "u"}, "S": {"p", "u"}}
        structure_signature = {"K": {"T", "S"}, "B": {"U"}, "D": {"C"}}
        self.assertEqual(self.rule_c1.create_reactions(atomic_signature, structure_signature),
                         self.reactions_c1)
        # A fully specified rule elaborates to exactly one reaction.
        self.assertEqual(self.rule_no_change.create_reactions(atomic_signature, structure_signature),
                         {self.reaction_c1_1})

        rule_exp = "K(T{a}).K().K()::cyt => K(T{i}).K().K()::cyt @ k1*[K(T{a}).K().K()::cyt]"
        rule = self.parser.parse(rule_exp).data
        result = rule.create_reactions(atomic_signature, structure_signature)

        # Expected reactions for the rule above are stored one per line.
        reactions = set()
        with open("Testing/reactions.txt") as file:
            for complex in file.readlines():
                rule = self.parser.parse(complex).data
                reactions.add(rule.to_reaction())
        self.assertEqual(result, reactions)

    def test_parser(self):
        """Parsing BCSL rule expressions yields the expected Rule objects."""
        rule_expr = "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt + D(B{_})::cell @ 3*[K()::cyt]/2*v_1"
        self.assertEqual(self.parser.parse(rule_expr).data, self.r2)

        rule_expr = "K(B{-}).B()::cyt + D(B{_})::cell => K(B{+})::cyt + B()::cyt @ 3*[K(T{3+})::cyt]/2*v_1"
        self.assertEqual(self.parser.parse(rule_expr).data, self.r3)

        # Degradation rule (empty right-hand side).
        rule_expr = "X()::rep => @ k1*[X()::rep]"
        self.assertEqual(self.parser.parse(rule_expr).data, self.r4)

        # Creation rule (empty left-hand side).
        rule_expr = "=> Y()::rep @ 1/(1+([X()::rep])**4)"
        self.assertEqual(self.parser.parse(rule_expr).data, self.r5)

        # Rule without a rate.
        rule_expr = "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt"
        self.assertEqual(self.parser.parse(rule_expr).data, self.rule_no_rate)

    def test_compatible(self):
        """Compatibility is directional: more specific rules match less specific."""
        self.assertTrue(self.r1.compatible(self.r2))
        self.assertFalse(self.r2.compatible(self.r1))

        rule_expr_1 = "K(S{u}).B()::cyt => K(S{p})::cyt + B()::cyt + D(B{_})::cell @ 3*[K()::cyt]/2*v_1"
        rule_expr_2 = "K().B()::cyt => K()::cyt + B()::cyt + D(B{_})::cell @ 3*[K()::cyt]/2*v_1"

        rule1 = self.parser.parse(rule_expr_1).data
        rule2 = self.parser.parse(rule_expr_2).data

        self.assertFalse(rule1.compatible(rule2))
        self.assertTrue(rule2.compatible(rule1))

    def test_reduce_context(self):
        """Context reduction strips agent states, keeping the rule skeleton."""
        rule_expr_1 = "K(S{u}).B{i}::cyt => K(S{p})::cyt + B{a}::cyt + D(B{_})::cell @ 3*[K(S{u}).B{i}::cyt]/2*v_1"
        rule1 = self.parser.parse(rule_expr_1).data
        rule_expr_2 = "K().B{_}::cyt => K()::cyt + B{_}::cyt + D()::cell @ 3*[K().B{_}::cyt]/2*v_1"
        rule2 = self.parser.parse(rule_expr_2).data
        self.assertEqual(rule1.reduce_context(), rule2)

        # next case
        rule_expr_1 = "K(S{u})::cyt => K(S{p})::cyt + D(B{_})::cell @ 3*[K(S{u})::cyt]/2*v_1"
        rule1 = self.parser.parse(rule_expr_1).data
        rule_expr_2 = "K()::cyt => K()::cyt + D()::cell @ 3*[K()::cyt]/2*v_1"
        rule2 = self.parser.parse(rule_expr_2).data
        self.assertEqual(rule1.reduce_context(), rule2)

        # next case - covering replication
        rule_expr_1 = "K(S{u})::cyt => 2 K(S{u})::cyt @ 3*[K(S{u})::cyt]/2*v_1"
        rule1 = self.parser.parse(rule_expr_1).data
        rule_expr_2 = "K()::cyt => 2 K()::cyt @ 3*[K()::cyt]/2*v_1"
        rule2 = self.parser.parse(rule_expr_2).data
        self.assertEqual(rule1.reduce_context(), rule2)

        # next case - covering replication
        rule_expr_1 = "K(S{u})::cyt => 3 K(S{u})::cyt @ 3*[K(S{u})::cyt]/2*v_1"
        rule1 = self.parser.parse(rule_expr_1).data
        rule_expr_2 = "K()::cyt => 3 K()::cyt @ 3*[K()::cyt]/2*v_1"
        rule2 = self.parser.parse(rule_expr_2).data
        self.assertEqual(rule1.reduce_context(), rule2)

    def test_exists_compatible_agent(self):
        """A rule detects whether a given complex has a compatible agent."""
        complex_parser = Parser("rate_complex")
        agent = "K(S{a}).A{a}::cyt"
        complex = complex_parser.parse(agent).data.children[0]

        rule_expr = "K().A{i}::cyt => K().A{a}::cyt"
        rule = self.parser.parse(rule_expr).data

        self.assertTrue(rule.exists_compatible_agent(complex))
| StarcoderdataPython |
63451 | import yaml
from util import AttrDict
class SchemaOrField(object):
    """Common base for schema nodes and leaf fields: optionality + key paths."""

    def __init__(self, optional=False, default=None):
        self.optional = optional
        self.default = default

    def is_optional(self):
        """Return whether this node may be absent from validated data."""
        return self.optional

    def keyify(self, parents, key=None):
        """Join the ancestor keys (plus *key*, when given) into a dotted path."""
        path = parents if key is None else parents + [key]
        return ".".join(path)
class Schema(SchemaOrField):
    """This is a general purpose class to allow enforcing of schemas
    from python dicts.

    A schema is a dict mapping keys to nested ``Schema`` or ``Field``
    instances.  A key starting with ``_`` (e.g. ``_name``) is a "variable"
    key: the validated dict may contain arbitrary keys at that position,
    each validated against the associated sub-schema.
    """

    def __init__(self, schema_dict, **kwargs):
        SchemaOrField.__init__(self, **kwargs)
        self.schema_dict = schema_dict

    def __str__(self):
        return str({key: str(val) for (key, val) in self.schema_dict.items()})

    def get_missing_required_keys(self, _dict):
        """Return the names of required (non-optional) keys absent from _dict."""
        # NOTE: is_optional must be *called* -- the previous code referenced the
        # bound method (always truthy), so no key was ever reported as required.
        required = [key for (key, schema_or_field) in self.schema_dict.items()
                    if not schema_or_field.is_optional()]
        return [key for key in required if key not in _dict]

    def get_variable_key(self, key):
        """Return the variable name for a '_'-prefixed key, else None."""
        if key and key[0] == '_':
            return key[1:]
        return None

    def validate(self, value, parents=None):
        """Recursively validate ``value`` against this schema.

        Returns:
            tuple: (failures, result) where failures is a list of error
            strings and result is an AttrDict of validated values (None when
            the structure is unusable).
        """
        # Avoid the shared mutable-default-argument pitfall (was ``parents=[]``).
        parents = [] if parents is None else parents
        # Schemas are recursively enforced
        if not isinstance(value, dict):
            err_str = "key {} expected to be schema {} but is real value {}".format(self.keyify(parents), self, value)
            return [err_str], None
        failed = []
        succeeded = {}
        variables = [(self.get_variable_key(k), v) for (k, v) in self.schema_dict.items() if self.get_variable_key(k)]
        if variables and len(variables) != 1:
            err = "schema has mixed variable and fixed settings here {}".format(self.keyify(parents))
            return [err], None
        if variables:  # for variables, we iterate on each (key, value) in received dict
            variable_name, schema = variables[0]
            if not value:
                failed.append("{} missing dict for variable \"{}\" and schema {}".format(self.keyify(parents), variable_name, schema))
            for (key, subval) in value.items():
                _failed, _succeeded = schema.validate(subval, parents + [key])
                failed.extend(_failed)
                if _succeeded:
                    # Record which concrete key matched the variable slot.
                    _succeeded[variable_name] = key
                    succeeded[key] = _succeeded
        else:  # for non-variables, we enforce the schema key by key
            for (key, schema_or_field) in self.schema_dict.items():
                subval = value.get(key)
                if subval is not None:  # optional case is checked after
                    try:
                        _failed, _succeeded = schema_or_field.validate(subval, parents + [key])
                        succeeded[key] = _succeeded
                        failed.extend(_failed)
                    except AttributeError:
                        err_str = "value {} for key {} is not field or schema".format(schema_or_field, self.keyify(parents, key))
                        return [err_str], None
        # Fill defaults for optional keys and report missing required ones.
        for missing_key, missing_schema in self.schema_dict.items():
            if missing_key in succeeded:
                continue
            if missing_schema.is_optional():
                succeeded[missing_key] = missing_schema.default
            elif not self.get_variable_key(missing_key):  # variable keys handled above
                failed.append("required key {} missing".format(self.keyify(parents, missing_key)))
        return failed, AttrDict(succeeded)
class Field(SchemaOrField):
    """A leaf schema entry that validates (and, when possible, coerces)
    a single value to ``field_type``."""

    def __init__(self, field_type, **kwargs):
        SchemaOrField.__init__(self, **kwargs)
        self.field_type = field_type

    def __str__(self):
        return str(self.field_type)

    def validate(self, value, parents):
        """Return ([], validated_value) on success or ([error_message], None)."""
        coerced = value if isinstance(value, self.field_type) else None
        if coerced is None:
            # Supports validating e.g. int strings as ints.
            try:
                coerced = self.field_type(value)
            except (ValueError, TypeError):
                pass
        if coerced is None:
            return ["{} value({}) doesn't match desired type({})".format(self.keyify(parents), value, self.field_type)], None
        return [], coerced
class SchemaParser(object):
    """Validates schemas"""

    @staticmethod
    def from_yaml_file(path, schema):
        """Validate the YAML document at ``path`` against ``schema``.

        On success returns ``Schema(schema).validate(...)``, i.e. a
        ``(failures, result)`` tuple.  NOTE(review): the error branches return
        ``(None, message)`` -- the opposite order; kept for backward
        compatibility, but callers should confirm which convention they expect.
        """
        try:
            # Context manager closes the handle (previously the file leaked).
            # SECURITY: yaml.load can construct arbitrary Python objects from
            # tagged YAML; if these files are not fully trusted, switch to
            # yaml.safe_load.
            with open(path, 'r') as yaml_file:
                parsed = yaml.load(yaml_file)
            return Schema(schema).validate(parsed)
        except IOError:
            return None, "No such file: {}".format(path)
        except yaml.YAMLError as ye:
            return None, "YAML file invalid: {}".format(str(ye))
| StarcoderdataPython |
11350677 | <filename>ex010.py
# Read an amount in reais and convert it to US dollars at a fixed rate of R$3.27.
dinheiro = float(input('Quanto de dinheiro você tem? '))
print('Com {:.2f} reais você pode comprar {:.2f} dolares'.format(dinheiro, dinheiro / 3.27))
| StarcoderdataPython |
8105606 | # https://www.codewars.com/kata/5648b12ce68d9daa6b000099/train/python
# There is a bus moving in the city, and it takes and drop some people
# in each bus stop.
# You are provided with a list (or array) of integer arrays (or
# tuples). Each integer array has two items which represent the number
# of people who get on the bus (the first item) and the number of people
# who get off the bus (the second item) at a bus stop.
# Your task is to return number of people who are still in the
# bus after the last bus station (after the last array). Even
# though it is the last bus stop, the bus is not empty and some
# people are still in the bus, and they are probably sleeping
# there :D
# Take a look on the test cases.
# Please keep in mind that the test cases ensure that the number
# of people in the bus is always >= 0. So the return integer
# can't be negative.
# The second value in the first integer array is 0, since the
# bus is empty in the first bus stop.
def number_people_bus(bus_stops):
    """Return how many people remain on the bus after the final stop.

    Args:
        bus_stops: sequence of (people_on, people_off) pairs, one per stop.

    Returns:
        int: total boarded minus total alighted (the kata guarantees >= 0).
    """
    return sum(stop[0] - stop[1] for stop in bus_stops)
# Alternative
# def number(bus_stops):
# return sum([stop[0] - stop[1] for stop in bus_stops])
| StarcoderdataPython |
4996844 | from setuptools import find_packages, setup
# Read the runtime dependencies from the requirements file shipped inside the
# package so install_requires stays in sync with it.
with open("elasticdl/requirements.txt") as f:
    requirements = f.read().splitlines()

setup(
    name="elasticdl",
    version="0.0.1",
    description="A Kubernetes-native Deep Learning Framework",
    author="<NAME>",
    url="https://github.com/sql-machine-learning/elasticdl",
    install_requires=requirements,
    # Ship every package except tests.
    packages=find_packages(exclude=["*test*"]),
    # Non-Python assets bundled with the wheel (proto definition, docker files).
    package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]},
    entry_points={
        "console_scripts": ["elasticdl=elasticdl.python.elasticdl.client:main"]
    },
)
| StarcoderdataPython |
8080513 | import copy
from typing import *
from cognite.client.data_classes._base import *
# GenClass: relationshipResponse, relationship
class Relationship(CogniteResource):
    """Representation of a relationship in CDF; consists of a source, a target
    and some additional parameters.

    Args:
        source (Dict[str, Any]): Reference by external id to the source of the relationship. The
            referenced resource may or may not exist in CDF. For resource type ``threeD`` the external
            id must follow the pattern ``<nodeId>:<modelId>:<revisionId>``; for ``threeDRevision`` it
            must follow ``<revisionId>:<modelId>`` (internal ids identifying the resource uniquely).
        target (Dict[str, Any]): Reference by external id to the target of the relationship (same
            formatting rules as ``source``).
        start_time (float): Time when this relationship was established, in milliseconds since Jan 1, 1970.
        end_time (float): Time when this relationship ceased to exist, in milliseconds since Jan 1, 1970.
        confidence (float): Confidence value of the existence of this relationship. Humans should
            enter 1.0 usually; generated relationships should provide a realistic score below 1.0.
        data_set (str): String describing the source system storing or generating the relationship.
        external_id (str): Unique within the project; case sensitive; no leading/trailing whitespace.
        relationship_type (str): Type of the relationship (e.g. ``flowsTo``, ``isParentOf``,
            ``implements``) used to distinguish between different relationships.
        created_time (float): Time when this relationship was created in CDF, in milliseconds since Jan 1, 1970.
        last_updated_time (float): Time when this relationship was last updated in CDF, in milliseconds since Jan 1, 1970.
        cognite_client (CogniteClient): The client to associate with this object.
    """

    def __init__(
        self,
        source: Dict[str, Any] = None,
        target: Dict[str, Any] = None,
        start_time: float = None,
        end_time: float = None,
        confidence: float = None,
        data_set: str = None,
        external_id: str = None,
        relationship_type: str = None,
        created_time: float = None,
        last_updated_time: float = None,
        cognite_client=None,
    ):
        self.source = source
        self.target = target
        self.start_time = start_time
        self.end_time = end_time
        self.confidence = confidence
        self.data_set = data_set
        self.external_id = external_id
        self.relationship_type = relationship_type
        self.created_time = created_time
        self.last_updated_time = last_updated_time
        self._cognite_client = cognite_client

    # GenStop

    def _copy_resolve_targets(self):
        # Shallow-copy self and replace source/target resource objects with
        # plain {"resource": ..., "resourceId": ...} reference dicts.
        rel = copy.copy(self)
        rel.source = self._resolve_target(rel.source)
        rel.target = self._resolve_target(rel.target)
        return rel

    @staticmethod
    def _resolve_target(target):
        # Already a reference dict (or absent): pass through unchanged.
        if isinstance(target, dict) or target is None:
            return target

        # Deferred import -- presumably to avoid a circular import with the
        # data_classes package (TODO confirm).
        from cognite.client.data_classes import Asset, Event, FileMetadata, TimeSeries

        _TARGET_TYPES = {Asset: "Asset", TimeSeries: "TimeSeries", FileMetadata: "File", Event: "Event"}
        typestr = _TARGET_TYPES.get(target.__class__)
        if typestr:
            return {"resource": typestr, "resourceId": target.external_id}
        raise ValueError("Invalid source or target '{}' of type {} in relationship".format(target, target.__class__))
# GenClass: relationshipsAdvancedListRequest.filter
class RelationshipFilter(CogniteFilter):
    """Filter on relationships with exact match.

    Args:
        source_resource (str): Resource type of the source node.
        source_resource_id (str): Resource ID of the source node.
        target_resource (str): Resource type of the target node.
        target_resource_id (str): Resource ID of the target node.
        start_time (Dict[str, Any]): Range to filter the field for. (inclusive)
        end_time (Dict[str, Any]): Range to filter the field for. (inclusive)
        confidence (Dict[str, Any]): Range to filter the field for. (inclusive)
        last_updated_time (Dict[str, Any]): Range to filter the field for. (inclusive)
        created_time (Dict[str, Any]): Range to filter the field for. (inclusive)
        data_set (str): String describing the source system storing or generating the relationship.
        relationship_type (str): Type of the relationship used to distinguish between different
            relationships.
        cognite_client (CogniteClient): The client to associate with this object.
    """

    def __init__(
        self,
        source_resource: str = None,
        source_resource_id: str = None,
        target_resource: str = None,
        target_resource_id: str = None,
        start_time: Dict[str, Any] = None,
        end_time: Dict[str, Any] = None,
        confidence: Dict[str, Any] = None,
        last_updated_time: Dict[str, Any] = None,
        created_time: Dict[str, Any] = None,
        data_set: str = None,
        relationship_type: str = None,
        cognite_client=None,
    ):
        self.source_resource = source_resource
        self.source_resource_id = source_resource_id
        self.target_resource = target_resource
        self.target_resource_id = target_resource_id
        self.start_time = start_time
        self.end_time = end_time
        self.confidence = confidence
        self.last_updated_time = last_updated_time
        self.created_time = created_time
        self.data_set = data_set
        self.relationship_type = relationship_type
        self._cognite_client = cognite_client
# GenStop
class RelationshipUpdate(CogniteUpdate):
    # Intentionally empty: no updatable fields are defined here; the class
    # exists so RelationshipList can declare an _UPDATE type.
    pass
class RelationshipList(CogniteResourceList):
    # List container for Relationship resources.
    _RESOURCE = Relationship
    _UPDATE = RelationshipUpdate
| StarcoderdataPython |
3409600 | <gh_stars>0
import unittest
import yaml
import os
from bok_choy.web_app_test import WebAppTest
from pages.ec2_configuration_subpage import Ec2ConfigurationSubPage
class TestEc2ConfigurationSubPage(WebAppTest):
    """Verify the Jenkins EC2 plugin configuration against ec2_config.yml."""

    def setUp(self):
        super(TestEc2ConfigurationSubPage, self).setUp()
        config_path = os.getenv('CONFIG_PATH')
        try:
            # Context manager closes the file handle (previously it leaked).
            # A missing config file now fails the test with a clear message
            # instead of leaving yaml_contents unbound and raising a confusing
            # NameError below.
            with open("{}/ec2_config.yml".format(config_path), 'r') as config_file:
                yaml_contents = config_file.read()
        except IOError:
            self.fail("Could not read ec2_config.yml from CONFIG_PATH={}".format(config_path))
        self.ec2_config = yaml.safe_load(yaml_contents)
        self.ec2_cloud_config = self.ec2_config['CLOUDS']
        self.config_page = Ec2ConfigurationSubPage(self.browser)

    def test_ec2_config(self):
        """
        Verify a couple of the configuration options of the EC2 plugin from
        the Jenkins configuration console
        """
        self.config_page.visit()
        # Since there may be several clouds and amis, get a list
        # of the configurable settings and make sure content from
        # the yaml file exists in them.
        cloud_names = self.config_page.get_cloud_names()
        cloud_credential_id = self.config_page.get_cloud_credential_id()
        cloud_role_arns = self.config_page.get_cloud_role_arns()
        ami_descriptions = self.config_page.get_ami_descriptions()
        ami_ids = self.config_page.get_ami_ids()
        ami_zones = self.config_page.get_ami_zones()
        ssh_ports = self.config_page.get_ssh_ports()
        fs_roots = self.config_page.get_fs_roots()
        idle_termination_times = self.config_page.get_idle_termination_times()
        for cloud in self.ec2_cloud_config:
            assert cloud["NAME"] in cloud_names
            assert cloud["CREDENTIAL_ID"] in cloud_credential_id
            assert cloud["ROLE_ARN"] in cloud_role_arns
            for ami in cloud["AMIS"]:
                assert ami["AMI_ID"] in ami_ids
                assert ami["DESCRIPTION"] in ami_descriptions
                assert ami["AVAILABILITY_ZONE"] in ami_zones
                assert ami["AMI_TYPE"]["REMOTE_SSH_PORT"] in ssh_ports
                assert ami["REMOTE_FS_ROOT"] in fs_roots
                assert ami["IDLE_TERMINATION_MINUTES"] in idle_termination_times
| StarcoderdataPython |
9627581 | <reponame>gbd-consult/windrose
"""
Skript zum Erstellen von Windrose Plots aus CSV Dateien.
python3 fromcsv.py windrose pfad/zur/datei.csv ordner/fuer/ausgabe
"""
import csv
import sys
import os.path
from windrose import windrose, balken
# Command line: fromcsv.py <windrose|balken> <input.csv> <output_dir>
csv_file = sys.argv[2]
out_path = sys.argv[3]

# Read every station record; the CSV export is latin1-encoded.
with open(csv_file, encoding='latin1') as csvfile:
    stations = [dict(row) for row in csv.DictReader(csvfile, delimiter=',')]

for s in stations:
    # Draw a windrose plot
    if sys.argv[1].startswith('w'):
        # Frequencies per 30-degree sector (columns n000, n030, ..., n330).
        # The previous version also computed this list unconditionally at the
        # top of the loop; that result was always recomputed or overwritten, so
        # the dead statement was removed.
        values = [int(s.get('n' + str(x).rjust(3, '0'))) for x in range(0, 360, 30)]
        print('generating windrose plot: %s %s' % (s.get('net'), s.get('id')))
        windrose(values, int(s.get('ntotal')),
                 s.get('id'), s.get('station'), s.get('net'),
                 s.get('start'), s.get('enddate'), s.get('hasl'),
                 s.get('hagr'), float(s.get('avgff')),
                 int(s.get('calm')), os.path.join(out_path, '%s_%s.png' % (s.get('net'), s.get('id'))))
    # Draw a bar plot
    elif sys.argv[1].startswith('b'):
        # Frequencies per wind strength class (columns wgk1 ... wgk9).
        values = [int(s.get('wgk' + str(x))) for x in range(1, 10)]
        print('generating bar plot: %s %s' % (s.get('net'), s.get('id')))
        balken(values,
               s.get('id'), s.get('station'), s.get('net'),
               s.get('start'), s.get('enddate'), s.get('hasl'),
               s.get('hagr'), float(s.get('avgff')),
               output_file=os.path.join(out_path, '%s_%s_balken.png' % (s.get('net'), s.get('id'))))
    # unrecognized command
    else:
        print('unrecognized command: %s' % sys.argv[1])
| StarcoderdataPython |
11249152 | <reponame>JeronimoMendes/Tomatimer<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pref_win.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import json
from PyQt5.QtGui import QIcon
class Ui_pref_win(object):
    # Preferences window: lets the user edit the focus/short-break/long-break
    # durations stored in settings.json.  setupUi/retranslateUi follow the
    # pyuic5-generated layout (see the file header warning against manual
    # edits); __init__ and changeTime are hand-written additions.
    def __init__(self, window):
        self.window = window
        # Load the persisted settings once at construction time.
        with open ("settings.json", "r") as settings:
            self.data = json.load(settings)
        # NOTE(review): redundant -- the `with` block already closed the file.
        settings.close()
        self.main_time = self.data["main_time"]
        self.big_time = self.data["long_break"]
        self.small_time = self.data["short_break"]
    def setupUi(self, pref_win):
        # Build the window chrome and the three labeled spin boxes, pre-filled
        # with the values loaded from settings.json.
        pref_win.setObjectName("pref_win")
        pref_win.resize(390, 251)
        pref_win.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        pref_win.setWindowIcon(QIcon("material/images/tomato.png"))
        self.centralwidget = QtWidgets.QWidget(pref_win)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 30, 81, 17))
        self.label.setObjectName("label")
        # spinBox: focus time (minutes).
        self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBox.setGeometry(QtCore.QRect(150, 30, 48, 26))
        self.spinBox.setProperty("value", self.main_time)
        self.spinBox.setObjectName("spinBox")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(10, 80, 121, 17))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(10, 130, 121, 17))
        self.label_3.setObjectName("label_3")
        # spinBox_2: long break time; spinBox_3: short break time.
        self.spinBox_2 = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBox_2.setGeometry(QtCore.QRect(150, 130, 48, 26))
        self.spinBox_2.setProperty("value", self.big_time)
        self.spinBox_2.setObjectName("spinBox_2")
        self.spinBox_3 = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBox_3.setGeometry(QtCore.QRect(150, 80, 48, 26))
        self.spinBox_3.setProperty("value", self.small_time)
        self.spinBox_3.setObjectName("spinBox_3")
        # Cancel closes the window without saving; Apply persists the values.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(50, 190, 89, 25))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.pressed.connect(self.window.close)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(240, 190, 89, 25))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.pressed.connect(self.changeTime)
        pref_win.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(pref_win)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 390, 22))
        self.menubar.setObjectName("menubar")
        pref_win.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(pref_win)
        self.statusbar.setObjectName("statusbar")
        pref_win.setStatusBar(self.statusbar)
        self.retranslateUi(pref_win)
        QtCore.QMetaObject.connectSlotsByName(pref_win)
    def retranslateUi(self, pref_win):
        # Apply translatable UI strings (pyuic5-generated pattern).
        _translate = QtCore.QCoreApplication.translate
        pref_win.setWindowTitle(_translate("pref_win", "Tomatimer - Preferences"))
        self.label.setText(_translate("pref_win", "Focus time"))
        self.label_2.setText(_translate("pref_win", "Short break time"))
        self.label_3.setText(_translate("pref_win", "Long break time"))
        self.pushButton.setText(_translate("pref_win", "Cancel"))
        self.pushButton_2.setText(_translate("pref_win", "Apply"))
    def changeTime(self):
        # Apply handler: read the three spin boxes and persist settings.json.
        self.small_time = self.spinBox_3.value()
        self.big_time = self.spinBox_2.value()
        self.main_time = self.spinBox.value()
        print("main time value changed to", self.main_time)
        print("small time value changed to", self.small_time)
        print("big time value changed to", self.big_time)
        self.data["main_time"] = self.main_time
        self.data["short_break"] = self.small_time
        self.data["long_break"] = self.big_time
        # NOTE(review): self-assignment is a no-op; presumably meant to keep
        # the "subject" key untouched -- confirm intent.
        self.data["subject"] = self.data["subject"]
        # NOTE(review): consider a `with` block here so the file is closed even
        # if json.dumps raises.
        jsonFile = open("settings.json", "w+")
        jsonFile.write(json.dumps(self.data))
        jsonFile.close()
4940032 | <gh_stars>0
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Assembly module.
Module for assembly algorithms.
"""
# --- STRUCTURAL ASSEMBLY ---
from .assembly import *
from .structural_assembly import *
from .tools import *
| StarcoderdataPython |
11259826 | <gh_stars>1-10
#!/usr/bin/env python
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed from the stdlib in Python
# 3.12); consider migrating this to setuptools.
setup(name='qosy',
      version='1.0',
      description='Quantum Operators from SYmmetries',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/ClarkResearchGroup/qosy',
      packages=['qosy']
      )
| StarcoderdataPython |
1636555 | from model.contact import Contact
import re
import time
class ContactHelper:
    """Page-object style helper wrapping all contact-related actions of the
    addressbook web application (create, modify, delete, group membership,
    and scraping contact data from the list/edit/view pages).

    Results of get_contact_list() are cached in ``contact_cache``; every
    mutating action resets the cache to None so the next read re-scrapes.
    """
    def __init__(self, app):
        self.app = app
    def open_homepage(self):
        # Navigate to the contact list only if we are not already on it.
        wd = self.app.wd
        if not (wd.current_url.endswith("/addressbook/") and len(wd.find_elements_by_name("searchstring")) > 0):
            wd.find_element_by_link_text("home").click()
    def create(self, contact):
        # Create a contact filling all fields including avatar and dates.
        wd = self.app.wd
        self.open_homepage()
        # init contact creation
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        self.add_avatar(contact)
        self.change_birth_date(contact)
        self.change_anniversary_date(contact)
        # submit contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.contact_cache = None
    def create_light(self, contact):
        # creation of a contact without avatar and dates
        wd = self.app.wd
        self.open_homepage()
        # init contact creation
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # submit contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.open_homepage()
        self.contact_cache = None
    def fill_contact_form(self, contact):
        # Fill every plain text field of the add/edit form; None values are
        # skipped by change_field_value.
        wd = self.app.wd
        self.change_field_value("firstname", contact.first_name)
        self.change_field_value("middlename", contact.middle_name)
        self.change_field_value("lastname", contact.surname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company_name)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.land_line)
        self.change_field_value("mobile", contact.mobile)
        self.change_field_value("work", contact.work)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email", contact.email_1)
        self.change_field_value("email2", contact.email_2)
        self.change_field_value("email3", contact.email_3)
        self.change_field_value("homepage", contact.homepage)
        # self.change_birth_date(contact)
        self.change_field_value("byear", contact.birthday_year)
        # self.change_anniversary_date(contact)
        self.change_field_value("ayear", contact.anniversary_year)
        self.change_field_value("address2", contact.second_address)
        self.change_field_value("phone2", contact.land_line_2)
        self.change_field_value("notes", contact.notes)
    def change_anniversary_date(self, contact):
        # contact.anniversary_day/month are xpath locators of <option> elements.
        wd = self.app.wd
        if not wd.find_element_by_xpath(contact.anniversary_day).is_selected():
            wd.find_element_by_xpath(contact.anniversary_day).click()
        if not wd.find_element_by_xpath(contact.anniversary_month).is_selected():
            wd.find_element_by_xpath(contact.anniversary_month).click()
    def modify_first_contact_anniversary_date(self, contact):
        wd = self.app.wd
        self.select_first_contact()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        if not wd.find_element_by_xpath(contact.anniversary_day).is_selected():
            wd.find_element_by_xpath(contact.anniversary_day).click()
        if not wd.find_element_by_xpath(contact.anniversary_month).is_selected():
            wd.find_element_by_xpath(contact.anniversary_month).click()
        wd.find_element_by_name("ayear").click()
        wd.find_element_by_name("ayear").clear()
        wd.find_element_by_name("ayear").send_keys(contact.anniversary_year)
        wd.find_element_by_name("update").click()
    def change_birth_date(self, contact):
        wd = self.app.wd
        if not wd.find_element_by_xpath(contact.birthday_day).is_selected():
            wd.find_element_by_xpath(contact.birthday_day).click()
        if not wd.find_element_by_xpath(contact.birthday_month).is_selected():
            wd.find_element_by_xpath(contact.birthday_month).click()
    def modify_first_contact_birthday(self, contact):
        wd = self.app.wd
        self.select_first_contact()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        if not wd.find_element_by_xpath(contact.birthday_day).is_selected():
            wd.find_element_by_xpath(contact.birthday_day).click()
        if not wd.find_element_by_xpath(contact.birthday_month).is_selected():
            wd.find_element_by_xpath(contact.birthday_month).click()
        wd.find_element_by_name("byear").click()
        wd.find_element_by_name("byear").clear()
        wd.find_element_by_name("byear").send_keys(contact.birthday_year)
        wd.find_element_by_name("update").click()
    def add_avatar(self, contact):
        # contact.avatar is a filesystem path sent to the file input.
        wd = self.app.wd
        wd.find_element_by_name("photo").send_keys(contact.avatar)
    def modify_first_contact_avatar(self, contact):
        wd = self.app.wd
        self.select_first_contact()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        wd.find_element_by_name("photo").send_keys(contact.avatar)
        wd.find_element_by_name("update").click()
    def change_field_value(self, field_name, text):
        # Clear and retype a text field; None means "leave unchanged".
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def modify_first_contact(self, new_group_data):
        self.modify_contact_by_index(0, new_group_data)
        self.contact_cache = None
    def modify_contact_by_index(self, index, new_contact_data):
        wd = self.app.wd
        self.open_homepage()
        self.select_contact_by_index(index)
        # open modification form
        wd.find_elements_by_xpath("(//table[@id='maintable']//img[@alt='Edit'])")[index].click()
        self.fill_contact_form(new_contact_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.contact_cache = None
    def modify_contact_by_id(self, id, new_contact_data):
        wd = self.app.wd
        self.open_homepage()
        self.select_contact_by_id(id)
        # open modification form
        wd.find_element_by_xpath('//a[@href="edit.php?id=%s"]' %id).click()
        self.fill_contact_form(new_contact_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.contact_cache = None
    def select_first_contact(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()
    def delete_first_contact(self):
        self.delete_contact_by_index(0)
        self.contact_cache = None
    def delete_contact_by_index(self, index):
        wd = self.app.wd
        self.open_homepage()
        self.select_contact_by_index(index)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # NOTE(review): switch_to_alert() is deprecated in newer Selenium
        # (switch_to.alert) -- confirm the pinned selenium version.
        wd.switch_to_alert().accept()
        self.open_homepage()
        self.contact_cache = None
    def delete_contact_by_id(self, id):
        wd = self.app.wd
        self.open_homepage()
        self.select_contact_by_id(id)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        wd.switch_to_alert().accept()
        self.open_homepage()
        self.contact_cache = None
    def select_contact_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
    def select_contact_by_id(self, id):
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
    def count(self):
        # Number of contacts = number of row checkboxes on the home page.
        wd = self.app.wd
        self.open_homepage()
        return len(wd.find_elements_by_name("selected[]"))
    # Cached result of get_contact_list(); any mutating action resets it to
    # None so the next call re-reads the page.
    contact_cache=None
    def get_contact_list(self):
        if self.contact_cache is None:
            wd = self.app.wd
            self.open_homepage()
            self.contact_cache = []
            # Column layout of the list table: 0=checkbox(id), 1=surname,
            # 2=first name, 3=address, 4=all emails, 5=all phones.
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                first_name = cells[2].text
                surname = cells[1].text
                address = cells[3].text
                all_emails = cells[4].text
                id = cells[0].find_element_by_name("selected[]").get_attribute("value")
                all_phones = cells[5].text
                self.contact_cache.append(Contact(first_name=first_name, surname=surname, id=id,
                                                  all_phones_from_home_page=all_phones, address=address,
                                                  all_emails_from_home_page=all_emails))
        return list(self.contact_cache)
    def open_contact_to_edit_by_index(self, index):
        # Column 7 of the row holds the "edit" pencil link.
        wd = self.app.wd
        self.open_homepage()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
    def open_contact_view_by_index(self, index):
        # Column 6 of the row holds the "details" view link.
        wd = self.app.wd
        self.open_homepage()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()
    def get_contact_info_from_edit_page(self, index):
        # Scrape a Contact model from the values of the edit form inputs.
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        first_name = wd.find_element_by_name("firstname").get_attribute("value")
        surname = wd.find_element_by_name("lastname").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        land_line = wd.find_element_by_name("home").get_attribute("value")
        mobile = wd.find_element_by_name("mobile").get_attribute("value")
        work = wd.find_element_by_name("work").get_attribute("value")
        land_line_2 = wd.find_element_by_name("phone2").get_attribute("value")
        email_1 = wd.find_element_by_name("email").get_attribute("value")
        email_2 = wd.find_element_by_name("email2").get_attribute("value")
        email_3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(first_name=first_name, surname=surname, address=address, id=id,
                       land_line=land_line, mobile=mobile, work=work, land_line_2=land_line_2,
                       email_1=email_1, email_2=email_2, email_3=email_3 )
    def get_contact_from_view_page(self, index):
        # Parse the free-text details page; phones are prefixed "H:", "M:",
        # "W:" and "P:" respectively (absent prefix -> None).
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        if re.search("H:", text) is None:
            land_line = None
        else:
            land_line = re.search("H: (.*)", text).group(1)
        if re.search("M:", text) is None:
            mobile = None
        else:
            mobile = re.search("M: (.*)", text).group(1)
        if re.search("W:", text) is None:
            work = None
        else:
            work = re.search("W: (.*)", text).group(1)
        if re.search("P:", text) is None:
            land_line_2 = None
        else:
            land_line_2 = re.search("P: (.*)", text).group(1)
        return Contact(land_line=land_line, mobile=mobile, work=work, land_line_2=land_line_2)
    def add_contact_to_group(self, contact_id, group_id):
        wd = self.app.wd
        self.open_homepage()
        self.select_contact_by_id(contact_id)
        # select a group to add contact to
        wd.find_element_by_css_selector("select[name='to_group']>option[value='%s']" % group_id).click()
        wd.find_element_by_name("add").click()
        self.open_homepage()
        self.contact_cache = None
    def remove_contact_from_group(self, contact_id, group_id):
        # First filter the list by group, then remove the selected contact.
        wd = self.app.wd
        self.open_homepage()
        wd.find_element_by_css_selector("select[name='group']>option[value='%s']" % group_id).click()
        self.select_contact_by_id(contact_id)
        wd.find_element_by_name("remove").click()
        self.open_homepage()
        self.contact_cache = None
121746 | from virtool.hmm.fake import create_fake_hmms
async def test_fake_hmms(app, snapshot, tmp_path, dbi, example_path, pg):
    # Create the fake HMM records, then verify the database documents match
    # the stored snapshot and the generated profiles.hmm file is byte-identical
    # to the example fixture.
    # NOTE(review): assumes create_fake_hmms(app) writes profiles.hmm under
    # tmp_path/"hmm" (via the app's data path) -- TODO confirm.
    hmm_dir = tmp_path / "hmm"
    hmm_dir.mkdir()
    await create_fake_hmms(app)
    assert await dbi.hmm.find().to_list(None) == snapshot
    with open(hmm_dir / "profiles.hmm", "r") as f_result:
        with open(example_path / "hmms/profiles.hmm") as f_expected:
            assert f_result.read() == f_expected.read()
| StarcoderdataPython |
11301292 | # -*- coding: utf-8 -*-
# Copyright 2020 <NAME>, Modified by Trinhlq (@l4zyf9x)
# MIT License (https://opensource.org/licenses/MIT)
"""Train FastSpeech."""
import argparse
import logging
import os
import sys
import numpy as np
import tensorflow as tf
import yaml
import tensorflow_tts
from tqdm import tqdm
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from examples.aligntts.aligntts_dataset import LJSpeechDataset
from tensorflow_tts.losses.mdn import MixDensityLoss
from tensorflow_tts.configs import AlignTTSConfig
from tensorflow_tts.models import TFAlignTTS
from tensorflow_tts.models import Viterbi
from tensorflow_tts.optimizers import WarmUp
from tensorflow_tts.optimizers import AdamWeightDecay
from tensorflow_tts.utils import plot_utils
from tensorflow_tts.processor.ljspeech import _id_to_symbol
class AlignTTSTrainer(Seq2SeqBasedTrainer):
    """AlignTTS trainer based on ``Seq2SeqBasedTrainer``.

    This stage optimizes only the mixture-density-network (MDN) alignment
    loss (``mdn_loss``); the other names in ``list_metrics_name`` are
    declared but their updates are currently commented out below.
    """

    def __init__(self,
                 config,
                 steps=0,
                 epochs=0,
                 is_mixed_precision=False,
                 ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super().__init__(steps=steps,
                         epochs=epochs,
                         config=config,
                         is_mixed_precision=is_mixed_precision)
        # Metric names aggregated with tf.keras.metrics.Mean and written to
        # tf.summary; only "mdn_loss" is actually updated in this trainer.
        self.list_metrics_name = [
            "duration_loss",
            "energy_loss",
            "f0_loss",
            "mel_loss_after",
            "mel_loss_before",
            "attention_loss",
            "mdn_loss"
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        self.config = config

    def init_train_eval_metrics(self, list_metrics_name):
        """Init train and eval metrics to save it to tensorboard."""
        self.train_metrics = {}
        self.eval_metrics = {}
        for name in list_metrics_name:
            self.train_metrics.update(
                {name: tf.keras.metrics.Mean(name='train_' + name, dtype=tf.float32)}
            )
            self.eval_metrics.update(
                {name: tf.keras.metrics.Mean(name='eval_' + name, dtype=tf.float32)}
            )

    def reset_states_train(self):
        """Reset train metrics after save it to tensorboard."""
        for metric in self.train_metrics.keys():
            self.train_metrics[metric].reset_states()

    def reset_states_eval(self):
        """Reset eval metrics after save it to tensorboard."""
        for metric in self.eval_metrics.keys():
            self.eval_metrics[metric].reset_states()

    def compile(self, model, optimizer):
        """Attach model/optimizer and build the loss objects used below."""
        super().compile(model, optimizer)
        self.mse_log = tf.keras.losses.MeanSquaredLogarithmicError()
        self.mse = tf.keras.losses.MeanSquaredError()
        self.mae = tf.keras.losses.MeanAbsoluteError()
        # self.attn_measure = MaskAttentionLoss()
        self.mdn_loss = MixDensityLoss()
        self.viterbi = Viterbi()

    def _train_step(self, batch):
        """Train model one step."""
        # charactor, duration, mel, speaker_id = batch
        characters, mels, speaker_ids, character_lengths, mel_lengths = batch
        self._one_step_fastspeech(characters,
                                  mels,
                                  speaker_ids,
                                  character_lengths,
                                  mel_lengths)
        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()

    # input_signature avoids retracing the graph for every new batch shape.
    @tf.function(experimental_relax_shapes=True,
                 input_signature=[tf.TensorSpec([None, None], dtype=tf.int32),
                                  tf.TensorSpec([None, None, 80], dtype=tf.float32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  ])
    def _one_step_fastspeech(self,
                             characters,
                             mels,
                             speaker_ids,
                             character_lengths,
                             mel_lengths):
        """One optimization step on the MDN alignment loss."""
        with tf.GradientTape() as tape:
            masked_mel_before, masked_mel_after, \
                masked_duration_outputs, mu_sigma = self.model(
                    input_ids=characters,
                    speaker_ids=speaker_ids,
                    durations=None,
                    character_lengths=character_lengths,
                    mel_lengths=mel_lengths,
                    training=True)
            log_prob, mdn_loss, _ = self.mdn_loss((mels, mu_sigma, mel_lengths, character_lengths))
            loss = mdn_loss
            if self.is_mixed_precision:
                # Scale the loss inside the tape so gradients are scaled too.
                scaled_loss = self.optimizer.get_scaled_loss(loss)
        if self.is_mixed_precision:
            scaled_gradients = tape.gradient(scaled_loss, self.model.trainable_variables)
            gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
        else:
            gradients = tape.gradient(loss, self.model.trainable_variables)
        # Element-wise gradient clipping to [-1, 1].
        gradients = [(tf.clip_by_value(grad, -1.0, 1.0)) for grad in gradients]
        # NOTE(review): the positional 5.0 is presumably the clip_norm argument
        # of tensorflow_tts's AdamWeightDecay.apply_gradients -- confirm; a
        # stock Keras optimizer would not accept it.
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables), 5.0)
        self.train_metrics["mdn_loss"].update_state(mdn_loss)
        # accumulate loss into metrics
        # self.train_metrics["duration_loss"].update_state(duration_loss)
        # self.train_metrics["energy_loss"].update_state(energy_loss)
        # self.train_metrics["f0_loss"].update_state(f0_loss)
        # self.train_metrics["attention_loss"].update_state(attention_loss)
        # self.train_metrics["mel_loss_after"].update_state(mel_loss_after)
        # self.train_metrics["mel_loss_before"].update_state(mel_loss_before)

    def _eval_epoch(self):
        """Evaluate model one epoch."""
        logging.info(f"(Steps: {self.steps}) Start evaluation.")
        # calculate loss for each batch
        for eval_steps_per_epoch, batch in enumerate(tqdm(self.eval_data_loader, desc="[eval]"), 1):
            # eval one step
            characters, mels, speaker_ids, character_lengths, mel_lengths = batch
            self._eval_step(characters=characters,
                            mels=mels,
                            speaker_ids=speaker_ids,
                            character_lengths=character_lengths,
                            mel_lengths=mel_lengths)
            # Save plots only for the first few eval batches.
            if eval_steps_per_epoch <= self.config["num_save_intermediate_results"]:
                # save intermedia
                self.generate_and_save_intermediate_result(batch)
        logging.info(f"(Steps: {self.steps}) Finished evaluation "
                     f"({eval_steps_per_epoch} steps per epoch).")
        # average loss
        for key in self.eval_metrics.keys():
            logging.info(f"(Steps: {self.steps}) eval_{key} = {self.eval_metrics[key].result():.4f}.")
        # record
        self._write_to_tensorboard(self.eval_metrics, stage='eval')
        # reset
        self.reset_states_eval()

    @tf.function(experimental_relax_shapes=True,
                 input_signature=[tf.TensorSpec([None, None], dtype=tf.int32),
                                  tf.TensorSpec([None, None, 80], dtype=tf.float32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  ])
    def _eval_step(self,
                   characters,
                   mels,
                   speaker_ids,
                   character_lengths,
                   mel_lengths):
        """Evaluate model one step."""
        masked_mel_before, masked_mel_after, \
            masked_duration_outputs, mu_sigma = self.model(
                input_ids=characters,
                speaker_ids=speaker_ids,
                durations=None,
                character_lengths=character_lengths,
                mel_lengths=mel_lengths,
                training=False)
        log_prob, mdn_loss, _ = self.mdn_loss((mels, mu_sigma, mel_lengths, character_lengths))
        self.eval_metrics["mdn_loss"].update_state(mdn_loss)
        # self.eval_metrics["duration_loss"].update_state(duration_loss)
        # self.eval_metrics["energy_loss"].update_state(energy_loss)
        # self.eval_metrics["f0_loss"].update_state(f0_loss)
        # self.eval_metrics["mel_loss_before"].update_state(mel_loss_before)
        # self.eval_metrics["mel_loss_after"].update_state(mel_loss_after)
        # self.eval_metrics["attention_loss"].update_state(attention_loss)

    def _check_log_interval(self):
        """Log to tensorboard."""
        if self.steps % self.config["log_interval_steps"] == 0:
            for metric_name in self.list_metrics_name:
                logging.info(
                    f"(Step: {self.steps}) train_{metric_name} = {self.train_metrics[metric_name].result():.4f}.")
            self._write_to_tensorboard(self.train_metrics, stage="train")
            # reset
            self.reset_states_train()

    @tf.function(experimental_relax_shapes=True,
                 input_signature=[tf.TensorSpec([None, None], dtype=tf.int32),
                                  tf.TensorSpec([None, None, 80], dtype=tf.float32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32),
                                  tf.TensorSpec([None], dtype=tf.int32)
                                  ])
    def predict(self, characters, mels, speaker_ids, character_lengths, mel_lengths):
        """Predict mels, durations, MDN log-probabilities and Viterbi alignments."""
        masked_mel_before, masked_mel_after, duration_pred, mu_sigma = self.model(
            input_ids=characters,
            speaker_ids=speaker_ids,
            durations=None,
            character_lengths=character_lengths,
            mel_lengths=mel_lengths,
            training=False
        )
        log_prob, _, alphas = self.mdn_loss((mels, mu_sigma, mel_lengths, character_lengths))
        # Decode the most likely monotonic character/mel alignment.
        align_paths = self.viterbi((log_prob, mel_lengths, character_lengths))
        return masked_mel_before, masked_mel_after, duration_pred, log_prob, alphas, align_paths

    def generate_and_save_intermediate_result(self, batch):
        """Plot log-probabilities / forward variables / alignments for a batch
        and save the figures under ``<outdir>/predictions/<steps>steps``."""
        import matplotlib.pyplot as plt
        # unpack input.
        characters, mels, speaker_ids, character_lengths, mel_lengths = batch
        # predict with tf.function.
        masked_mel_before, masked_mel_after, duration_preds, log_probs, alphas, align_paths = self.predict(
            characters,
            mels,
            speaker_ids,
            character_lengths,
            mel_lengths)
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (log_prob, alpha, align_path, mel_length, character_length, character, mel) in enumerate(
                zip(log_probs, alphas, align_paths, mel_lengths, character_lengths, characters, mels),
                1):
            log_prob = log_prob.numpy()
            alpha = alpha.numpy()
            # Clamp -inf-like forward variables so the color map stays readable.
            alpha[alpha < -1e6] = -1e6
            mel_length = mel_length.numpy().astype(np.float32)
            character_length = character_length.numpy().astype(np.float32)
            # Map token ids back to symbols and cut at the real sequence length.
            character = [_id_to_symbol[c] for c in character.numpy()]
            character = character[:int(character_length)]
            mel = mel.numpy()
            align_path = align_path.numpy()
            # Plot predict and ground truth mel spectrogram
            figname = os.path.join(dirname, f"{idx}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            im = ax1.imshow(log_prob.T, aspect='auto', interpolation='none', origin="lower")
            # Red square marks the (mel_length, character_length) corner cell.
            rect = plt.Rectangle((mel_length - 1.5, character_length - 1.5), 1, 1, fill=False, color="red", linewidth=3)
            ax1.add_patch(rect)
            ax1.set_title(f'log_prob_{self.steps}')
            fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
            ax2 = fig.add_subplot(312)
            im = ax2.imshow(alpha.T, aspect='auto', interpolation='none', origin="lower")
            rect = plt.Rectangle((mel_length - 1.5, character_length - 1.5), 1, 1, fill=False, color="red", linewidth=3)
            ax2.add_patch(rect)
            ax2.set_title(f'alpha_{self.steps}')
            fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax2)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            plot_utils.plot_mel_and_alignment(
                save_folder=dirname, mel=mel, align_path=align_path,
                tokens=character, idx=idx, step=self.steps,
                mel_length=mel_length, character_length=character_length)

    def _check_train_finish(self):
        """Check training finished."""
        if self.steps >= self.config["train_max_steps"]:
            self.finish_train = True

    def fit(self, train_data_loader, valid_data_loader, saved_path, resume=None):
        """Wire up data loaders and checkpointing, optionally resume, then train."""
        self.set_train_data_loader(train_data_loader)
        self.set_eval_data_loader(valid_data_loader)
        self.create_checkpoint_manager(saved_path=saved_path, max_to_keep=10000)
        if resume is not None:
            self.load_checkpoint(resume)
            logging.info(f"Successfully resumed from {resume}.")
        self.run()

    def save_checkpoint(self):
        """Save checkpoint."""
        self.ckpt.steps.assign(self.steps)
        self.ckpt.epochs.assign(self.epochs)
        self.ckp_manager.save(checkpoint_number=self.steps)
        # Also export plain H5 weights next to the TF checkpoint.
        self.model.save_weights(self.saved_path + 'model-{}.h5'.format(self.steps))

    def load_checkpoint(self, pretrained_path):
        """Load checkpoint."""
        self.ckpt.restore(pretrained_path)
        self.steps = self.ckpt.steps.numpy()
        self.epochs = self.ckpt.epochs.numpy()
        self.optimizer = self.ckpt.optimizer
        # re-assign iterations (global steps) for optimizer.
        self.optimizer.iterations.assign(tf.cast(self.steps, tf.int64))
        # load weights.
        self.model.load_weights(self.saved_path + 'model-{}.h5'.format(self.steps))
def main():
    """Run the AlignTTS (MDN alignment stage) training process.

    Parses CLI arguments, loads and saves the YAML config, builds the
    train/valid datasets, model, optimizer and trainer, then trains
    (saving a checkpoint on KeyboardInterrupt).
    """
    # Enable memory growth on every visible GPU so TF does not grab all
    # device memory up front.  Bug fix: the original indexed
    # physical_devices[0] unconditionally, crashing on CPU-only hosts.
    physical_devices = tf.config.list_physical_devices('GPU')
    for device in physical_devices:
        tf.config.experimental.set_memory_growth(device, True)
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument("--train-dir", default=None, type=str,
                        help="directory including training data.")
    parser.add_argument("--valid-dir", default=None, type=str,
                        help="directory including validating data.")
    parser.add_argument("--use-norm", default=1, type=int,
                        help="usr norm-mels for train or raw.")
    parser.add_argument("--outdir", type=str, required=True,
                        help="directory to save checkpoints.")
    parser.add_argument("--config", type=str, required=True,
                        help="yaml format configuration file.")
    parser.add_argument("--resume", default=None, type=str, nargs="?",
                        help="checkpoint file path to resume training. (default=\"\")")
    parser.add_argument("--verbose", type=int, default=1,
                        help="logging level. higher is more logging. (default=1)")
    parser.add_argument("--mixed_precision", default=0, type=int,
                        help="using mixed precision for generator or not.")
    args = parser.parse_args()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger (verbosity controlled by --verbose)
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG, stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO, stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    else:
        logging.basicConfig(
            level=logging.WARN, stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config, merge CLI args into it, and save the merged config
    # alongside the checkpoints for reproducibility.
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # define train/valid dataset
    train_dataset = LJSpeechDataset(
        root_dir=args.train_dir,
        max_mel_length=800
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
    )
    valid_dataset = LJSpeechDataset(
        root_dir=args.valid_dir,
        max_mel_length=1000
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
    )

    fastspeech = TFAlignTTS(config=AlignTTSConfig(**config["fastspeech_params"]))
    fastspeech._build()
    fastspeech.summary()

    # define trainer.  Bug fix: the --mixed_precision flag was parsed and
    # stored in the config, but the trainer was always constructed with
    # is_mixed_precision=False; honor the flag here.
    trainer = AlignTTSTrainer(config=config,
                              steps=0,
                              epochs=0,
                              is_mixed_precision=args.mixed_precision)

    # AdamW with polynomial decay wrapped in a linear warmup schedule.
    learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
        decay_steps=config["optimizer_params"]["decay_steps"],
        end_learning_rate=config["optimizer_params"]["end_learning_rate"]
    )
    learning_rate_fn = WarmUp(
        initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
        decay_schedule_fn=learning_rate_fn,
        warmup_steps=int(config["train_max_steps"] * config["optimizer_params"]["warmup_proportion"])
    )
    optimizer = AdamWeightDecay(
        learning_rate=learning_rate_fn,
        weight_decay_rate=config["optimizer_params"]["weight_decay"],
        beta_1=0.9,
        beta_2=0.98,
        epsilon=1e-9,
        exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias']
    )

    # compile trainer
    trainer.compile(model=fastspeech,
                    optimizer=optimizer)

    # start training; Ctrl-C saves a checkpoint instead of losing progress.
    try:
        trainer.fit(train_dataset,
                    valid_dataset,
                    saved_path=os.path.join(config["outdir"], 'checkpoints/'),
                    resume=args.resume)
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6648117 | #!/usr/bin/env python3.5
import json
from json import JSONDecodeError
import sys
import argparse
import gzip
import re
# Grep a gzipped JSON-lines file: print every record whose `--field` value
# contains at least one of the given substring patterns.
parser = argparse.ArgumentParser(description='Match multiple patterns against a JSON field')
parser.add_argument('--field', help='Field to grep', default='url')
parser.add_argument('file', help='File to look in')
parser.add_argument('patterns', nargs=argparse.REMAINDER, help='Patterns to match')
args = parser.parse_args()

lineno = 0
with gzip.open(args.file, 'rt') as fp:
    for line in fp:
        # Bug fix: count every physical line (1-based) BEFORE parsing so
        # error messages point at the line that actually failed; the
        # original incremented only after a successful parse, making the
        # reported numbers wrong for bad lines.
        lineno += 1
        try:
            o = json.loads(line)
            if args.field in o:
                fval = o[args.field]
                # Emit the record if ANY pattern is a substring of the field.
                if any(p in fval for p in args.patterns):
                    print(json.dumps(o))
        except JSONDecodeError as err:
            print('{0}: JSON parse error: {1}'.format(lineno, err), file=sys.stderr)
        except KeyError as err:
            print('{0}: Missing field: {1}'.format(lineno, err), file=sys.stderr)
| StarcoderdataPython |
15101 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.
Inspired by the GLPK:s model color.mod
'''
COLOR, Graph Coloring Problem
Written in GNU MathProg by <NAME> <<EMAIL>>
Given an undirected loopless graph G = (V, E), where V is a set of
nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to
find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set
of colors whose cardinality is as small as possible, such that
F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must
be assigned different colors.
'''
This is a port of my old OR-tools CP solver coloring_ip.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tols models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
    """Solve the myciel3 graph-coloring instance as a 0/1 CP-SAT program,
    minimizing the number of colors used."""
    mdl = cp.CpModel()

    nc = 5   # upper bound on the number of colors (4 suffices for planar maps)
    n = 11   # number of nodes
    V = list(range(n))
    num_edges = 20

    # Edge list of instance myciel3.col from
    # http://mat.gsia.cmu.edu/COLOR/instances.html (1-based ids; shifted below).
    E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
         [3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
         [8, 11], [9, 11], [10, 11]]

    # x[v, c] == 1  <=>  node v is assigned color c
    x = {(v, c): mdl.NewIntVar(0, 1, 'v[%i,%i]' % (v, c))
         for v in V for c in range(nc)}

    # u[c] == 1  <=>  color c is used by at least one node
    u = [mdl.NewIntVar(0, 1, 'u[%i]' % c) for c in range(nc)]

    # objective variable: total number of colors in use
    num_colors = mdl.NewIntVar(0, nc, "num_colors")
    mdl.Add(num_colors == sum(u))

    # every node receives exactly one color
    for v in V:
        mdl.Add(sum(x[v, c] for c in range(nc)) == 1)

    # the endpoints of an edge may not share a color; the <= u[c] form also
    # forces u[c] on whenever color c appears on either endpoint
    for a, b in E:
        for c in range(nc):
            mdl.Add(x[a - 1, c] + x[b - 1, c] <= u[c])

    mdl.Minimize(num_colors)

    solver = cp.CpSolver()
    status = solver.Solve(mdl)
    if status == cp.OPTIMAL:
        print()
        print('number of colors:', solver.Value(num_colors))
        print('colors used:', [solver.Value(u[i]) for i in range(nc)])
        print()
        for v in V:
            print('v%i' % v, ' color ', end=' ')
            for c in range(nc):
                if solver.Value(x[v, c]) == 1:
                    print(c)
        print()
        print('NumConflicts:', solver.NumConflicts())
        print('NumBranches:', solver.NumBranches())
        print('WallTime:', solver.WallTime())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
79145 | <gh_stars>0
#!/usr/bin/env python
"""utils.py: Utility methods for AnalogMethod"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import xarray as xr
import pandas as pd
def calc_normalized_anomalies(ds_prep, window_size=21):
    """Return daily anomalies of ``ds_prep`` normalized by a centered
    day-of-year climatology.

    Mean and standard deviation are computed per day-of-year from a
    centered rolling window of ``window_size`` days around each target day.
    """
    # Collapse sub-daily data to daily means (coarser-than-daily input is
    # not supported here).
    if pd.infer_freq(ds_prep.time.data) not in 'D':
        ds_prep = ds_prep.resample(time='1D').mean()
    ds_prep = ds_prep.chunk({'time': -1})

    # Materialize the centered rolling window as an extra dimension.
    windowed = ds_prep.rolling(time=window_size, center=True).construct('window_dim')

    # Day-of-year climatology over target day +/- pool.  Edge windows with
    # missing steps are included on purpose (no dropna) so the first and
    # last days of the record still get a climatology.
    clim = windowed.groupby('time.dayofyear').mean(dim=['window_dim', 'time'])
    # Matching day-of-year standard deviation over all dimensions, shape (365,).
    std = windowed.groupby('time.dayofyear').std(dim=xr.ALL_DIMS)

    # Normalize: subtract climatology and divide by std, per day-of-year.
    anomalies = ds_prep.groupby('time.dayofyear') - clim
    anomalies = anomalies.groupby('time.dayofyear') / std
    # groupby fragments the chunking; restore a single time chunk.
    return anomalies.chunk({'time': -1})
| StarcoderdataPython |
3241085 | <filename>src/fairtest/modules/metrics/metric.py
"""
Abstract Fairness Metric.
"""
import abc
import numpy as np
class Metric(object):
    """
    An abstract fairness metric.

    Subclasses set ``dataType`` and the two ``approx_LIMIT_*`` thresholds and
    implement the abstract test/CI methods; ``compute`` then picks exact or
    approximate statistics based on the data size.
    """
    # NOTE(review): ``__metaclass__`` is Python-2 syntax; under Python 3 this
    # is an ordinary class attribute and @abc.abstractmethod is NOT enforced.
    # If this code runs on Python 3, the class should derive from abc.ABC.
    __metaclass__ = abc.ABCMeta

    # Types of metrics
    DATATYPE_CT = 'ct'          # Metrics over a contingency table
    DATATYPE_CORR = 'corr'      # Correlation metrics
    DATATYPE_REG = 'reg'        # Regression metrics

    # this Metric's data type (one of the DATATYPE_* constants above)
    dataType = None

    # max data size for approximate tests
    approx_LIMIT_P = None

    # max data size for approximate confidence intervals
    approx_LIMIT_CI = None

    def __init__(self):
        # [ci_low, ci_high, pval] once compute() has run; None before that.
        self.stats = None

    def get_size(self, data):
        """
        Returns the size of the data for this metric.

        Parameters
        ----------
        data :
            the data to be evaluated

        Returns
        -------
        size :
            the size of the data
        """
        if self.dataType == self.DATATYPE_CT:
            # Contingency table: size is the total count over all cells.
            size = np.array(data).sum()
        elif self.dataType == self.DATATYPE_CORR:
            # NOTE(review): shape (6,) appears to be a packed-statistics form
            # whose last element is the sample size -- confirm against the
            # concrete correlation metric implementations.
            if np.array(data).shape == (6,):
                size = data[5]
            else:
                size = len(data)
        else:
            size = len(data)
        return size

    def compute(self, data, conf, exact=True):
        """
        Computes a confidence interval and p-value for given data.

        Exact methods are used for confidence intervals and p-values when
        `exact' is set to `True' and the size of the data is smaller than
        respective class attributes `approx_LIMIT_CI' and `approx_LIMIT_P'

        Parameters
        ----------
        data :
            the data to be evaluated

        conf :
            the confidence level for confidence intervals

        exact :
            indicates whether exact methods should be used

        Returns
        -------
        self :
            a pointer to the current Metric object. The computed statistics
            are stored as an attribute `stats'
        """
        size = self.get_size(data)
        # Approximate pass runs unless exact stats will cover BOTH the
        # p-value and the CI (size below both limits); exact results computed
        # below override the approximate ones where applicable.
        if not exact or size > min(self.approx_LIMIT_P, self.approx_LIMIT_CI):
            try:
                ci_low, ci_high, pval = self.approx_stats(data, conf)
            except ValueError:
                # NOTE(review): 10*10 == 100 is a sentinel p-value (> 1)
                # marking a failed approximation -- confirm that downstream
                # consumers treat it as "not significant".
                ci_low, ci_high, pval = 0, 0, 10*10
        if exact and size <= self.approx_LIMIT_P:
            pval = self.exact_test(data)
        if exact and size <= self.approx_LIMIT_CI:
            ci_low, ci_high = self.exact_ci(data, conf)
        self.stats = [ci_low, ci_high, pval]
        return self

    @abc.abstractmethod
    def abs_effect(self):
        """
        Converts a confidence interval into an absolute effect size that can
        be compared over different contexts.

        Returns
        -------
        effect :
            the absolute effect of this Metric
        """
        return

    @staticmethod
    @abc.abstractmethod
    def exact_test(data):
        """
        Performs an exact test of independence.

        Parameters
        ----------
        data :
            the data to be evaluated

        Returns
        -------
        pval :
            the p-value
        """
        return

    @staticmethod
    @abc.abstractmethod
    def validate(sens, output, expl):
        """
        Validates the use of this metric for the current investigation.

        Parameters
        ----------
        sens :
            the sensitive feature

        output :
            the target feature

        expl :
            the explanatory feature
        """
        return

    @staticmethod
    @abc.abstractmethod
    def exact_ci(data, conf):
        """
        Computes an exact confidence interval.

        Parameters
        ----------
        data :
            the data to be evaluated

        conf :
            the confidence level

        Returns
        -------
        ci_low :
            the lower end of the confidence interval

        ci_high :
            the higher end of the confidence interval
        """
        return

    @staticmethod
    @abc.abstractmethod
    def approx_stats(data, conf):
        """
        Computes an approximate confidence interval and p-value.

        Parameters
        ----------
        data :
            the data to be evaluated

        conf :
            the confidence level

        Returns
        -------
        ci_low :
            the lower end of the confidence interval

        ci_high :
            the higher end of the confidence interval

        pval :
            the p-value
        """
        return
| StarcoderdataPython |
11359460 | <reponame>jrieke/feedback-nns
import torch
from torchvision import transforms, datasets
from PIL import Image
import random
import numpy as np
import matplotlib.pyplot as plt
import utils
def load_mnist(val_size=5000, seed=None):
    """Return (train, val, test) datasets for MNIST.

    The validation set (``val_size`` samples, 5k by default) is split off the
    60k-sample training set at random; the official 10k test set is returned
    unchanged.
    """
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    image_transform = transforms.Compose([transforms.ToTensor(), normalize])

    full_train = datasets.MNIST('data/mnist', train=True, download=True, transform=image_transform)
    test_dataset = datasets.MNIST('data/mnist', train=False, download=True, transform=image_transform)

    # Split off the validation samples reproducibly (similar to Sacramento et al. 2018).
    utils.seed_torch(seed)
    split_sizes = (len(full_train) - val_size, val_size)
    train_dataset, val_dataset = torch.utils.data.dataset.random_split(full_train, split_sizes)
    return train_dataset, val_dataset, test_dataset
def load_emnist(val_size=10000, seed=None):
    """Return (train, val, test) datasets for the EMNIST "letters" split.

    ``val_size`` samples are split off the training set for validation.
    Labels are shifted to start at 0, and the images (stored flipped and
    rotated by EMNIST) are restored to their natural orientation.
    """
    image_transform = transforms.Compose([
        # Undo EMNIST's default flip + rotation.
        transforms.RandomHorizontalFlip(1),
        transforms.RandomRotation((90, 90)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    target_transform = lambda x: x - 1  # EMNIST letter labels start at 1

    full_train = datasets.EMNIST('data/emnist', split='letters', train=True, download=True,
                                 transform=image_transform, target_transform=target_transform)
    test_dataset = datasets.EMNIST('data/emnist', split='letters', train=False, download=True,
                                   transform=image_transform, target_transform=target_transform)

    # Split off the validation samples reproducibly (similar to Sacramento et al. 2018).
    utils.seed_torch(seed)
    split_sizes = (len(full_train) - val_size, val_size)
    train_dataset, val_dataset = torch.utils.data.dataset.random_split(full_train, split_sizes)
    return train_dataset, val_dataset, test_dataset
class AddGaussianNoise():
    """An image transform that adds scaled, clipped Gaussian noise to a PIL image.

    Parameters
    ----------
    mean, std : parameters of the Gaussian noise distribution.
    scaling_factor : multiplier applied to the noise before adding it.
    """

    def __init__(self, mean=0, std=64, scaling_factor=0.5):
        self.mean = mean
        self.std = std
        self.scaling_factor = scaling_factor

    def __call__(self, img):
        """Return a copy of ``img`` with noise added, clipped to [0, 255]."""
        img_array = np.asarray(img)
        noise = np.random.normal(self.mean, self.std, img_array.shape)
        noisy_img_array = img_array + self.scaling_factor * noise
        # Stay within the valid 8-bit pixel range and restore the input dtype
        # so Image.fromarray round-trips correctly.
        noisy_img_array = np.clip(noisy_img_array, 0, 255)
        noisy_img_array = noisy_img_array.astype(img_array.dtype)
        return Image.fromarray(noisy_img_array)

    def __repr__(self):
        # Bug fix: the original formatted `self.intensity`, an attribute that
        # was never set, so repr() raised AttributeError.
        return self.__class__.__name__ + '(mean={}, std={}, scaling_factor={})'.format(
            self.mean, self.std, self.scaling_factor)
# TODO: Maybe use normal load methods from above here and exchange the transform.
def load_noisy_mnist(mean=0, std=64, scaling_factor=0.5):
    """Return the MNIST test set with Gaussian noise added to every image."""
    noisy_transform = transforms.Compose([
        AddGaussianNoise(mean=mean, std=std, scaling_factor=scaling_factor),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    return datasets.MNIST('data/mnist', train=False, download=True, transform=noisy_transform)
def load_noisy_emnist(mean=0, std=64, scaling_factor=0.5):
    """Return the EMNIST "letters" test set with Gaussian noise added."""
    noisy_transform = transforms.Compose([
        # Undo EMNIST's default flip + rotation before adding noise.
        transforms.RandomHorizontalFlip(1),
        transforms.RandomRotation((90, 90)),
        AddGaussianNoise(mean=mean, std=std, scaling_factor=scaling_factor),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    shift_labels = lambda x: x - 1  # EMNIST letter labels start at 1
    return datasets.EMNIST('data/emnist', split='letters', train=False, download=True,
                           transform=noisy_transform, target_transform=shift_labels)
class ImageSequenceDataset(torch.utils.data.Dataset):
    """A dataset whose samples are sequences of images plus their label sequence.

    Each sample is built by drawing, for every class id in one of the
    ``allowed_seqs``, a random image of that class; the images are
    concatenated along dim 0 and returned with the label sequence.
    """

    def __init__(self, allowed_seqs, image_dataset, num_classes, num_samples=10000, noisy_image_dataset=None):
        self.num_samples = num_samples
        self.allowed_seqs = allowed_seqs
        self.images_per_class = self._split_images_into_classes(image_dataset, num_classes)
        self.add_noise = noisy_image_dataset is not None
        if self.add_noise:
            self.noisy_images_per_class = self._split_images_into_classes(noisy_image_dataset, num_classes)

    def _split_images_into_classes(self, dataset, num_classes):
        """Group the images of ``dataset`` by their (tensor) class label."""
        by_class = {label: [] for label in range(num_classes)}
        for image, label in dataset:
            by_class[label.item()].append(image)
        return by_class

    def __len__(self):
        return self.num_samples

    def __getitem__(self, i):
        # Cycle through the allowed sequences as the index grows.
        seq = self.allowed_seqs[i % len(self.allowed_seqs)]
        if self.add_noise:
            # The first three images come from the clean pool, the rest
            # from the noisy one.
            clean = [random.choice(self.images_per_class[c]) for c in seq[:3]]
            noisy = [random.choice(self.noisy_images_per_class[c]) for c in seq[3:]]
            images = clean + noisy
        else:
            images = [random.choice(self.images_per_class[c]) for c in seq]
        return torch.cat(images), seq
def plot_sequence(images, targets, target_transform=None):
    """Plot a row of images with their (optionally transformed) labels as titles."""
    count = len(images)
    for idx in range(count):
        plt.subplot(1, count, idx + 1)
        plt.imshow(images[idx], cmap='Greys')
        plt.axis('off')
        label = targets[idx]
        plt.title(label if target_transform is None else target_transform(label))
# TODO: Delete this once the EMNIST dataset is available again.
# Temporary workaround to process the emnist dataset, because it cannot
# automatically be downloaded through pytorch (due to the shutdown).
def process_emnist():
    """Convert manually downloaded raw EMNIST idx files into the
    ``training_<split>.pt`` / ``test_<split>.pt`` files torchvision expects.

    Reads idx-format files from ``data/emnist/raw`` and writes processed
    tensors to ``data/emnist/processed`` for every EMNIST split.
    """
    import os
    import codecs
    # (cleanup: the original also imported urllib, gzip, shutil and zipfile
    # here, none of which were used)

    def get_int(b):
        # Big-endian bytes -> int (idx file headers are big-endian).
        return int(codecs.encode(b, 'hex'), 16)

    def read_label_file(path):
        """Read an idx1 label file into a 1-D long tensor."""
        with open(path, 'rb') as f:
            data = f.read()
        assert get_int(data[:4]) == 2049  # idx1 magic number
        length = get_int(data[4:8])
        parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
        return torch.from_numpy(parsed).view(length).long()

    def read_image_file(path):
        """Read an idx3 image file into a (length, rows, cols) uint8 tensor."""
        with open(path, 'rb') as f:
            data = f.read()
        assert get_int(data[:4]) == 2051  # idx3 magic number
        length = get_int(data[4:8])
        num_rows = get_int(data[8:12])
        num_cols = get_int(data[12:16])
        # (cleanup: removed an unused `images = []` local from the original)
        parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
        return torch.from_numpy(parsed).view(length, num_rows, num_cols)

    splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist')

    def _training_file(split):
        return 'training_{}.pt'.format(split)

    def _test_file(split):
        return 'test_{}.pt'.format(split)

    root = 'data/emnist'
    processed_folder = 'processed'
    raw_folder = 'data/emnist/raw'

    # process and save as torch files
    for split in splits:
        print('Processing ' + split)
        training_set = (
            read_image_file(os.path.join(raw_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(raw_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
        )
        test_set = (
            read_image_file(os.path.join(raw_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(raw_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
        )
        with open(os.path.join(root, processed_folder, _training_file(split)), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(root, processed_folder, _test_file(split)), 'wb') as f:
            torch.save(test_set, f)
    print('Done!')
| StarcoderdataPython |
11275391 | # Copyright 2022 The etils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.utils.gpath."""
from __future__ import annotations
import os
import pathlib
from etils import epath
from etils import epy
import pytest
# URL scheme prefix that the mocked backend rewrites to a local tmp dir.
_GCS_SCHEME = 'gs://'
# Test mock gpath on each backend
@pytest.fixture(params=[
    epath.backend.os_backend,
])
def gcs_mocked_path(tmp_path: pathlib.Path, request):
  """Patches the epath backend so that `gs://` calls hit a local tmp dir.

  Yields:
    The temporary directory backing the fake `gs://` namespace.
  """
  backend = request.param
  local_prefix = os.fspath(tmp_path) + '/'

  def _norm_path(path: str):
    # Rewrite the fake GCS scheme into the temporary directory.
    return path.replace(_GCS_SCHEME, local_prefix)

  def _single_path(fn):
    # Adapt a one-path backend function to the mocked method signature.
    def wrapper(self, path):
      del self
      return fn(_norm_path(path))
    return wrapper

  def _open(self, path, *args, **kwargs):
    del self
    return backend.open(_norm_path(path), *args, **kwargs)

  def _copy(self, src, dst, *args, **kwargs):
    del self, args
    return backend.copy(_norm_path(src), _norm_path(dst), **kwargs)

  def _rename(self, src, dst, *args, **kwargs):
    del self, args
    return backend.rename(_norm_path(src), _norm_path(dst), **kwargs)

  def _replace(self, src, dst, *args, **kwargs):
    del self, args
    return backend.replace(_norm_path(src), _norm_path(dst), **kwargs)

  with epath.testing.mock_epath(
      open=_open,
      copy=_copy,
      rename=_rename,
      replace=_replace,
      exists=_single_path(backend.exists),
      glob=_single_path(backend.glob),
      isdir=_single_path(backend.isdir),
      listdir=_single_path(backend.listdir),
      makedirs=_single_path(backend.makedirs),
      mkdir=_single_path(backend.mkdir),
      remove=_single_path(backend.remove),
      rmtree=_single_path(backend.rmtree),
  ):
    yield tmp_path
def test_repr_gcs():
  """GCS paths keep their scheme through repr/str/fspath and joining."""
  expected = f'{_GCS_SCHEME}bucket/dir'
  path = epath.Path('gs://bucket/dir')
  assert isinstance(path, epath.Path)
  assert repr(path) == f"PosixGPath('{expected}')"
  assert str(path) == expected
  assert os.fspath(path) == expected
  # Joining via `/` keeps the epath type and the scheme.
  path = path.parent / 'some/other/file.json'
  assert isinstance(path, epath.Path)
  assert os.fspath(path) == f'{_GCS_SCHEME}bucket/some/other/file.json'
  # So does the multi-argument constructor.
  path = epath.Path(path, 'other')
  assert isinstance(path, epath.Path)
  assert os.fspath(path) == f'{_GCS_SCHEME}bucket/some/other/file.json/other'
def test_repr_s3():
  """S3 paths behave like GCS ones: the scheme survives all conversions."""
  base = 's3://bucket/dir'
  path = epath.Path(base)
  assert isinstance(path, epath.Path)
  assert repr(path) == f"PosixGPath('{base}')"
  assert str(path) == base
  assert os.fspath(path) == base
  joined = path.parent / 'some/other/file.json'
  assert isinstance(joined, epath.Path)
  assert os.fspath(joined) == 's3://bucket/some/other/file.json'
  rejoined = epath.Path(joined, 'other')
  assert isinstance(rejoined, epath.Path)
  assert os.fspath(rejoined) == 's3://bucket/some/other/file.json/other'
def test_repr_windows():
  """Windows paths round-trip through str/fspath and support joining."""
  base = 'C:\\Program Files\\Directory'
  path = epath.gpath.WindowsGPath(base)
  assert isinstance(path, epath.gpath.WindowsGPath)
  assert str(path) == base
  assert os.fspath(path) == base
  # Joining uses backslash separators and keeps the Windows path type.
  child = path.parent / 'other/file.json'
  assert isinstance(child, epath.gpath.WindowsGPath)
  assert os.fspath(child) == 'C:\\Program Files\\other\\file.json'
@pytest.mark.parametrize(
    'parts',
    [
        (),  # No args
        ('.',),
        ('~',),
        ('relative/path',),
        ('/tmp/to/something',),
        (
            '/tmp/to',
            'something',
        ),
        (
            pathlib.Path('/tmp/to'),
            'something',
        ),
        ('~/to/something',),
    ],
)
def test_repr(parts):
  """epath.Path mirrors pathlib.Path for plain local paths."""
  std_path = pathlib.Path(*parts)
  gpath = epath.Path(*parts)
  assert isinstance(gpath, epath.Path)
  # String conversions and equality agree with pathlib.
  assert str(gpath) == str(std_path)
  assert os.fspath(gpath) == os.fspath(std_path)
  assert gpath == std_path
  # resolve()/expanduser() agree with pathlib and preserve the epath type.
  assert str(gpath.resolve()) == str(std_path.resolve())
  assert str(gpath.expanduser()) == str(std_path.expanduser())
  assert isinstance(gpath.resolve(), epath.Path)
  assert isinstance(gpath.expanduser(), epath.Path)
# pylint: disable=redefined-outer-name
def test_gcs(gcs_mocked_path: pathlib.Path):
  """End-to-end mkdir/touch/iterdir flow against the mocked GCS backend."""
  gpath = epath.Path(f'{_GCS_SCHEME}bucket/dir')
  local = gcs_mocked_path.joinpath('bucket/dir')
  # mkdir(): directory must not pre-exist, then shows up on both sides.
  assert not gpath.exists()
  gpath.mkdir(parents=True)
  assert gpath.exists()
  assert local.exists()
  assert gpath.is_dir()
  assert local.is_dir()
  # touch(): create an empty file inside the new directory.
  gpath /= 'some_file.txt'
  local /= 'some_file.txt'
  assert not gpath.exists()
  gpath.touch()
  assert gpath.exists()
  assert local.exists()
  assert gpath.is_file()
  assert local.is_file()
  # iterdir(): the directory lists exactly the file just created.
  gpath = gpath.parent
  local = local.parent
  assert list(gpath.iterdir()) == [
      epath.Path('gs://bucket/dir/some_file.txt'),
  ]
  assert isinstance(gpath, epath.Path)
  assert not isinstance(local, epath.Path)
def test_open(gcs_mocked_path: pathlib.Path):
  """open() supports writes/reads but rejects `+` modes and non-UTF-8."""
  filenames = [
      'foo.py', 'bar.py', 'foo_bar.py', 'dataset.json', 'dataset_info.json',
      'readme.md'
  ]
  dataset_path = epath.Path('gs://bucket/dataset')
  dataset_path.mkdir(parents=True)
  assert dataset_path.exists()
  with pytest.raises(ValueError, match='Only UTF-8 encoding supported.'):
    dataset_path.open('w', encoding='latin-1')
  # Create each file through the mocked backend.
  for filename in filenames:
    with dataset_path.joinpath(filename).open('w') as f:
      f.write(' ')
  # Explicit UTF-8 is accepted.
  with dataset_path.joinpath('foo.py').open('r', encoding='UTF-8') as f:
    f.read()
  # `+` mode broken in GFile.
  with pytest.raises(ValueError, match='mode='):
    dataset_path.joinpath('foo.py').open('r+')
  # Only utf8 encoding supported (like tf.io.gfile).
  with pytest.raises(ValueError, match='encoding'):
    dataset_path.joinpath('foo.py').open(encoding='ascii')
  # All six files are visible through the local mirror.
  assert len(list(gcs_mocked_path.joinpath('bucket/dataset').iterdir())) == 6
@pytest.mark.usefixtures('gcs_mocked_path')
def test_touch():
  """touch() creates empty files and is a no-op on existing content."""
  bucket = epath.Path('gs://bucket/')
  bucket.mkdir(parents=True)
  assert bucket.exists()
  target = bucket / 'test.txt'
  # The file doesn't exist yet; touch creates it empty.
  assert not target.exists()
  target.touch()
  assert target.exists()
  assert target.read_text() == ''  # pylint: disable=g-explicit-bool-comparison
  # Touching an existing file keeps its content.
  target.write_text('Some content')
  target.touch()
  assert target.read_text() == 'Some content'
  # exist_ok=False turns the no-op into an error.
  with pytest.raises(FileExistsError):
    target.touch(exist_ok=False)
@pytest.mark.usefixtures('gcs_mocked_path')
def test_read_write():
    """Text and binary round-trips through open()/read_*/write_*."""
    gpath = epath.Path('gs://file.txt')
    # Text mode round-trip.
    with gpath.open('w') as f:
        f.write('abcd')
    with gpath.open('r') as f:
        assert f.read() == 'abcd'
    gpath.write_text('def')
    assert gpath.read_text() == 'def'
    # Binary mode round-trip.
    with gpath.open('wb') as f:
        f.write(b'ghi')
    with gpath.open('rb') as f:
        assert f.read() == b'ghi'
    gpath.write_bytes(b'def')
    assert gpath.read_bytes() == b'def'
@pytest.mark.usefixtures('gcs_mocked_path')
def test_unlink():
  """unlink() raises on missing files unless missing_ok is set."""
  bucket = epath.Path('gs://bucket')
  bucket.mkdir()
  target = bucket / 'text.txt'
  with pytest.raises(FileNotFoundError):
    target.unlink()
  target.unlink(missing_ok=True)  # no-op if missing_ok=True
  target.touch()  # Path created
  assert target.exists()
  target.unlink()  # Path deleted
  assert not target.exists()
def test_mkdir(gcs_mocked_path: pathlib.Path):
  """mkdir() creates the bucket once and refuses to re-create it."""
  bucket = epath.Path('gs://bucket')
  assert not bucket.exists()
  bucket.mkdir()
  assert bucket.exists()
  with pytest.raises(FileExistsError, match='already exists'):
    bucket.mkdir()
  # The directory is mirrored in the local backing store.
  assert gcs_mocked_path.joinpath('bucket').exists()
def test_rename(gcs_mocked_path: pathlib.Path):
    """rename() moves the file and removes the source entry."""
    src_path = epath.Path('gs://foo.py')
    src_path.write_text(' ')
    assert gcs_mocked_path.joinpath('foo.py').exists()
    src_path.rename('gs://bar.py')
    assert not gcs_mocked_path.joinpath('foo.py').exists()
    assert gcs_mocked_path.joinpath('bar.py').exists()
    # Only the renamed file remains in the directory.
    # (Was a lambda assignment + sorted(list(map(...))); a generator
    # expression is the idiomatic equivalent.)
    assert sorted(p.name for p in gcs_mocked_path.iterdir()) == ['bar.py']
def test_replace(tmp_path: pathlib.Path):
    """replace() moves a file, accepting both str and Path destinations."""
    # str destination.
    file_path = epath.Path(os.path.join(tmp_path, 'tfds.py'))
    file_path.write_text('tfds')
    file_path.replace(os.path.join(tmp_path, 'tfds-dataset.py'))
    assert not tmp_path.joinpath('tfds.py').exists()
    assert tmp_path.joinpath('tfds-dataset.py').exists()
    assert tmp_path.joinpath('tfds-dataset.py').read_text() == 'tfds'
    # Path destination.
    mnist_path = epath.Path(tmp_path / 'mnist.py')
    mnist_path.write_text('mnist')
    mnist_path.replace(tmp_path / 'mnist-100.py')
    assert not tmp_path.joinpath('mnist.py').exists()
    assert tmp_path.joinpath('mnist-100.py').exists()
    assert tmp_path.joinpath('mnist-100.py').read_text() == 'mnist'
    # Only the two replaced targets remain.
    assert len(list(tmp_path.iterdir())) == 2
    assert sorted(epath.Path(tmp_path).iterdir()) == [
        tmp_path / 'mnist-100.py', tmp_path / 'tfds-dataset.py'
    ]
@pytest.mark.usefixtures('gcs_mocked_path')
def test_copy():
  """copy() accepts both str and Path destinations and keeps the source."""
  src_path = epath.Path('gs://foo.py')
  src_path.write_text('abc')
  parent = src_path.parent
  assert not parent.joinpath('bar.py').exists()
  assert not parent.joinpath('bar2.py').exists()
  src_path.copy('gs://bar.py')
  src_path.copy(epath.Path('gs://bar2.py'))
  # The source survives and both copies carry the content.
  assert src_path.exists()
  assert parent.joinpath('bar.py').read_text() == 'abc'
  assert parent.joinpath('bar2.py').read_text() == 'abc'
def test_format():
  """Path.format() substitutes template fields inside the path string."""
  rendered = epath.Path('/home/{user}/foo.py').format(user='adibou')
  assert rendered == epath.Path('/home/adibou/foo.py')
def test_default():
  """Path() with no args is '.', and multiple segments are joined."""
  empty = epath.Path()
  assert isinstance(empty, epath.Path)
  assert os.fspath(empty) == '.'
  assert empty == epath.Path('.')
  joined = epath.Path('a/x', 'y', 'z')
  assert isinstance(joined, epath.Path)
  assert os.fspath(joined) == 'a/x/y/z'
  assert joined == epath.Path('a/x/y/z')
@epy.testing.non_hermetic
def test_public_access():
    """Network test: a well-known public GCS bucket is reachable."""
    # Test a public bucket
    p = epath.Path('gs://tfds-data/datasets')
    assert p.exists()
| StarcoderdataPython |
3252013 |
import requests
from bs4 import BeautifulSoup

# Page to scrape. NOTE: requests does not execute JavaScript, so the
# trending page comes back mostly empty (see the count printed below).
YOUTUBE_TRENDING_URL='https://www.youtube.com/feed/trending'

response=requests.get(YOUTUBE_TRENDING_URL)
print('Status Code',response.status_code)

# with open('trending.html','w') as f:
#     f.write(response.text)

# Pass an explicit parser: omitting it raises bs4's GuessedAtParserWarning
# and may pick different parsers on different machines.
doc=BeautifulSoup(response.text, 'html.parser')
print('Page title :',doc.title.text)

# Find all the video divs
video_divs=doc.find_all('div',class_='ytd-video-renderer')
print(f'Found {len(video_divs)} videos')
import numpy as np

# Mileage (km) and manufacture year for five vehicles.
km2 = np.array([44410., 5712., 37123., 0., 25757.])
anos2 = np.array([2003, 1991, 1990, 2019, 2006])
# Vehicle age in years, relative to 2019.
idade = 2019 - anos2
# Average km per year. Guard against brand-new vehicles (age == 0):
# plain `km2 / idade` emits a RuntimeWarning and yields NaN there;
# report 0 km/year instead.
km_media = np.divide(km2, idade, out=np.zeros_like(km2), where=idade != 0)
3574807 | ##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__doc__='''Batch class, for iterating over a sequence in batches
'''
__docformat__ = 'restructuredtext'
class LazyPrevBatch:
    """Lazily computed ``previous`` attribute for Batch.

    Relies on Zope acquisition: ``__of__`` receives the owning Batch and
    builds the preceding window (shifted back by the batch size minus the
    configured overlap) on first access.
    """
    def __of__(self, parent):
        return Batch(parent._sequence, parent.size,
                     parent.first - parent._size + parent.overlap, 0,
                     parent.orphan, parent.overlap)
class LazyNextBatch:
    """Lazily computed ``next`` attribute for Batch.

    Probes one element past the current batch end; if that raises
    IndexError there is no next batch and ``None`` is returned.
    """
    def __of__(self, parent):
        try: parent._sequence[parent.end]
        except IndexError: return None
        return Batch(parent._sequence, parent.size,
                     parent.end - parent.overlap, 0,
                     parent.orphan, parent.overlap)
class LazySequenceLength:
    """Lazily computed ``sequence_length`` attribute for Batch.

    Caches the length on the parent instance so ``len()`` is taken at
    most once per Batch.
    """
    def __of__(self, parent):
        parent.sequence_length = l = len(parent._sequence)
        return l
class Batch:
    """Create a sequence batch"""

    __allow_access_to_unprotected_subobjects__ = 1

    # Lazy attributes resolved through Zope acquisition (see classes above).
    previous = LazyPrevBatch()
    next = LazyNextBatch()
    sequence_length = LazySequenceLength()

    def __init__(self, sequence, size, start=0, end=0,
                 orphan=0, overlap=0):
        '''Encapsulate "sequence" in batches of "size".
        Arguments: "start" and "end" are 0-based indexes into the
        sequence. If the next batch would contain no more than
        "orphan" elements, it is combined with the current batch.
        "overlap" is the number of elements shared by adjacent
        batches. If "size" is not specified, it is computed from
        "start" and "end". If "size" is 0, it is the length of
        the sequence. Failing that, it is 7.
        Attributes: Note that the "start" attribute, unlike the
        argument, is a 1-based index (I know, lame). "first" is the
        0-based index. "length" is the actual number of elements in
        the batch.
        "sequence_length" is the length of the original, unbatched, sequence
        Note: "_size" is the "actual" size used to perform batch calulcations,
        while "size" is the "representative" size. (ie. a "special value" of
        "size" used by the templates may translate to a different value for
        "_size" which is used internally for batch calculations).
        '''
        # opt() works with 1-based start positions.
        start = start + 1
        start,end,sz = opt(start,end,size,orphan,sequence)
        self._sequence = sequence
        self.size = size
        self._size = sz
        self.start = start
        self.end = end
        self.orphan = orphan
        self.overlap = overlap
        self.first = max(start - 1, 0)
        self.length = self.end - self.first
        if self.first == 0:
            # Shadow the lazy class attribute: there is no previous batch.
            self.previous = None

    def __getitem__(self, index):
        # Negative indexes count back from the batch end; positive ones
        # are offsets from the batch's first element.
        if index < 0:
            if index + self.end < self.first: raise IndexError(index)
            return self._sequence[index + self.end]
        if index >= self.length: raise IndexError(index)
        return self._sequence[index + self.first]

    def __len__(self):
        return self.length
def opt(start,end,size,orphan,sequence):
    """Normalize (start, end, size) for a batch window.

    ``start``/``end`` are 1-based here; 0 means "not specified". Boundary
    probing is done EAFP-style by indexing the sequence and catching
    IndexError, so it works for any indexable sequence.
    """
    # Derive a size when not given: whole sequence if 0, from the
    # start/end pair when both are set, else the historical default of 7.
    if size < 1:
        if size == 0:
            size=len(sequence)
        elif start > 0 and end > 0 and end >= start:
            size=end+1-start
        else: size=7
    if start > 0:
        # Clamp start to the sequence length if it points past the end.
        try: sequence[start-1]
        except IndexError: start=len(sequence)
        if end > 0:
            if end < start: end=start
        else:
            # Extend the window; absorb a trailing orphan batch if small.
            end=start+size-1
            try: sequence[end+orphan-1]
            except IndexError: end=len(sequence)
    elif end > 0:
        # Only the end was given: derive start, merging leading orphans.
        try: sequence[end-1]
        except IndexError: end=len(sequence)
        start=end+1-size
        if start - 1 < orphan: start=1
    else:
        # Neither bound given: first window from the sequence head.
        start=1
        end=start+size-1
        try: sequence[end+orphan-1]
        except IndexError: end=len(sequence)
    return start,end,size
| StarcoderdataPython |
5084868 | from loguru import logger
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
def test_database_with_sqlalchemy():
    """Exercise declare/create/insert/query on an in-memory SQLite DB."""
    # Declare tables https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_orm_declaring_mapping.htm
    Base = declarative_base()

    class Customers(Base):
        __tablename__ = 'customers'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        address = Column(String)
        email = Column(String)

    # Create engine https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_orm_creating_session.htm
    engine = create_engine('sqlite+pysqlite:///:memory:', echo=False, future=True)
    # Create tables
    Base.metadata.create_all(engine)

    # Start session
    with Session(engine, autocommit=False) as session:
        # Single insert https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_orm_adding_objects.htm
        session.add(Customers(name='<NAME>', address='Station Road Nanded', email='<EMAIL>'))
        # Bulk insert.
        session.add_all([
            Customers(name='<NAME>', address='Koti, Hyderabad', email='<EMAIL>'),
            Customers(name='<NAME>', address='Sector 40, Gurgaon', email='<EMAIL>'),
            Customers(name='S.M.Krishna', address='Budhwar Peth, Pune', email='<EMAIL>'),
        ])
        session.commit()

        # List all rows https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_orm_using_query.htm
        row: Customers
        for row in session.query(Customers).all():
            logger.info(f'SQLAlchemy: Name: {row.name}, Address: {row.address}, Email: {row.email}')

        # Filtered result
        for row in session.query(Customers).filter(Customers.name == '<NAME>'):
            logger.info(f'Filter result: Name: {row.name}, Address: {row.address}, Email: {row.email}')
| StarcoderdataPython |
309384 | <filename>couchbase_utils/index_utls/index_ready_functions.py
'''
Created on 07-May-2021
@author: riteshagarwal
'''
from global_vars import logger
import time
from membase.api.rest_client import RestConnection
class IndexUtils:
    """Helpers for creating, building, dropping and polling GSI indexes
    on a Couchbase cluster via N1QL.

    NOTE(review): several attributes used below (``assertTrue``, ``sleep``,
    ``fail``, ``number_of_indexes_per_coll``, ``indexes_to_build``) are not
    defined on this class and there is no base class providing them —
    presumably this code was lifted from a TestCase-style class; confirm
    how it is mixed in before using standalone.
    """
    def __init__(self, cluster, server_task, n1ql_node):
        self.cluster = cluster
        self.task = server_task
        self.task_manager = self.task.jython_task_manager
        self.n1ql_node = n1ql_node
        self.log = logger.get("test")
    def build_deferred_indexes(self, indexes_to_build):
        """
        Build secondary indexes that were deferred
        """
        self.log.info("Building indexes")
        for bucket, bucket_data in indexes_to_build.items():
            for scope, collection_data in bucket_data.items():
                for collection, gsi_index_names in collection_data.items():
                    build_query = "BUILD INDEX on `%s`.`%s`.`%s`(%s) " \
                                  "USING GSI" \
                                  % (bucket, scope, collection, gsi_index_names)
                    result = self.run_cbq_query(build_query)
                    # NOTE(review): assertTrue/fail/sleep are TestCase APIs
                    # not defined on this class (see class docstring).
                    self.assertTrue(result['status'] == "success", "Build query %s failed." % build_query)
                    self.wait_for_indexes_to_go_online(gsi_index_names)
        # Log how many indexes are still deferred vs. already online.
        query = "select state from system:indexes where state='deferred'"
        result = self.run_cbq_query(query)
        self.log.info("deferred indexes remaining: {0}".format(len(result['results'])))
        query = "select state from system:indexes where state='online'"
        result = self.run_cbq_query(query)
        self.log.info("online indexes count: {0}".format(len(result['results'])))
        self.sleep(60, "Wait after building indexes")
    def create_indexes(self, buckets, gsi_base_name="gsi",
                       replica=0, defer=True):
        """
        Create gsi indexes on collections - according to number_of_indexes_per_coll
        """
        self.log.info("Creating indexes with defer:{} build".format(defer))
        indexes_to_build = dict()
        count = 0
        couchbase_buckets = [bucket for bucket in buckets
                             if bucket.bucketType == "couchbase"]
        for bucket in couchbase_buckets:
            indexes_to_build[bucket.name] = dict()
            for _, scope in bucket.scopes.items():
                indexes_to_build[bucket.name][scope.name] = dict()
                for _, collection in scope.collections.items():
                    # NOTE(review): self.number_of_indexes_per_coll is not set
                    # anywhere in this class.
                    for _ in range(self.number_of_indexes_per_coll):
                        gsi_index_name = gsi_base_name + str(count)
                        create_index_query = "CREATE INDEX `%s` " \
                                             "ON `%s`.`%s`.`%s`(`age`) " \
                                             "WITH { 'defer_build': %s, 'num_replica': %s }" \
                                             % (gsi_index_name, bucket.name,
                                                scope.name, collection.name,
                                                defer, replica)
                        result = self.run_cbq_query(create_index_query)
                        # self.assertTrue(result['status'] == "success", "Defer build Query %s failed." % create_index_query)
                        if collection.name not in indexes_to_build[bucket.name][scope.name]:
                            indexes_to_build[bucket.name][scope.name][collection.name] = list()
                        indexes_to_build[bucket.name][scope.name][collection.name].append(gsi_index_name)
                        count += 1
        return indexes_to_build
    def recreate_dropped_indexes(self, indexes_dropped):
        """
        Recreate dropped indexes given indexes_dropped dict
        """
        self.log.info("Recreating dropped indexes")
        for bucket, bucket_data in indexes_dropped.items():
            for scope, collection_data in bucket_data.items():
                for collection, gsi_index_names in collection_data.items():
                    for gsi_index_name in gsi_index_names:
                        create_index_query = "CREATE INDEX `%s` " \
                                             "ON `%s`.`%s`.`%s`(`age`)" \
                                             "WITH { 'defer_build': true, 'num_replica': 0 }" \
                                             % (gsi_index_name, bucket, scope, collection)
                        result = self.run_cbq_query(create_index_query)
        self.build_deferred_indexes(indexes_dropped)
    def drop_indexes(self, num_indexes_to_drop=15):
        """
        Drop gsi indexes
        Returns dropped indexes dict
        """
        # NOTE(review): if fewer than num_indexes_to_drop indexes exist,
        # this falls off the loops and implicitly returns None.
        self.log.info("Dropping {0} indexes".format(num_indexes_to_drop))
        indexes_dropped = dict()
        count = 0
        # NOTE(review): self.indexes_to_build is never assigned in this class.
        for bucket, bucket_data in self.indexes_to_build.items():
            indexes_dropped[bucket] = dict()
            for scope, collection_data in bucket_data.items():
                indexes_dropped[bucket][scope] = dict()
                for collection, gsi_index_names in collection_data.items():
                    for gsi_index_name in gsi_index_names:
                        drop_index_query = "DROP INDEX `%s` ON " \
                                           "`%s`.`%s`.`%s`" \
                                           "USING GSI" \
                                           % (gsi_index_name, bucket, scope, collection)
                        result = self.run_cbq_query(drop_index_query)
                        if collection not in indexes_dropped[bucket][scope]:
                            indexes_dropped[bucket][scope][collection] = list()
                        indexes_dropped[bucket][scope][collection].append(gsi_index_name)
                        count = count + 1
                        if count >= num_indexes_to_drop:
                            return indexes_dropped
    def run_cbq_query(self, query, n1ql_node=None, timeout=1300):
        """
        To run cbq queries
        Note: Do not run this in parallel
        """
        n1ql_node = n1ql_node or self.n1ql_node
        conn = RestConnection(n1ql_node)
        result = conn.query_tool(query, timeout)
        return result
    def wait_for_indexes_to_go_online(self, gsi_index_names, timeout=300):
        """
        Wait for indexes to go online after building the deferred indexes
        """
        self.log.info("Waiting for indexes to go online")
        start_time = time.time()
        stop_time = start_time + timeout
        for gsi_index_name in gsi_index_names:
            # Poll system:indexes until the index reports 'online' or the
            # shared deadline is reached.
            while True:
                check_state_query = "SELECT state FROM system:indexes WHERE name='%s'" % gsi_index_name
                result = self.run_cbq_query(check_state_query)
                if result['results'][0]['state'] == "online":
                    break
                if time.time() > stop_time:
                    self.fail("Index availability timeout of index: {0}".format(gsi_index_name))
| StarcoderdataPython |
5018111 | <reponame>basepipe/developer_onboarding<filename>resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QIconEngine.py
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QIconEngine(__Shiboken.Object):
    # Auto-generated IDE stub for PySide.QtGui.QIconEngine; the real
    # implementations live in the compiled extension module. Signatures
    # are unknown to the generator, hence *args/**kwargs throughout.
    def actualSize(self, *args, **kwargs): # real signature unknown
        pass

    def addFile(self, *args, **kwargs): # real signature unknown
        pass

    def addPixmap(self, *args, **kwargs): # real signature unknown
        pass

    def paint(self, *args, **kwargs): # real signature unknown
        pass

    def pixmap(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
| StarcoderdataPython |
6704062 | <reponame>m3at/chainer-mask-rcnn<gh_stars>10-100
import copy
import chainer
from chainer import reporter
from chainercv.utils import apply_to_iterator
import numpy as np
import tqdm
from .. import utils
class InstanceSegmentationCOCOEvaluator(chainer.training.extensions.Evaluator):
    """Chainer trainer extension reporting COCO instance-segmentation mAP.

    Runs ``target.predict`` over the validation iterator and reports
    ``map``, ``map@0.5``, ``map@0.75`` and optional per-class APs.
    """
    name = 'validation'
    def __init__(self, iterator, target, device=None, label_names=None,
                 show_progress=False):
        super(InstanceSegmentationCOCOEvaluator, self).__init__(
            iterator=iterator, target=target, device=device)
        self.label_names = label_names
        self._show_progress = show_progress
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']
        # Reset the iterator when supported; otherwise iterate over a
        # shallow copy so the original iterator state is untouched.
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)
        if self._show_progress:
            it = tqdm.tqdm(it, total=len(it.dataset))
        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        del in_values
        pred_bboxes, pred_masks, pred_labels, pred_scores = out_values
        # Ground truth may or may not carry crowded/area annotations.
        if len(rest_values) == 5:
            gt_bboxes, gt_labels, gt_masks, gt_crowdeds, gt_areas = rest_values
        elif len(rest_values) == 3:
            gt_bboxes, gt_labels, gt_masks = rest_values
            gt_crowdeds = None
            gt_areas = None
        # evaluate
        # NOTE(review): pred_bboxes and gt_bboxes are unpacked but never
        # passed to eval_instseg_coco below.
        result = utils.eval_instseg_coco(
            pred_masks, pred_labels, pred_scores,
            gt_masks, gt_labels, gt_crowdeds, gt_areas)
        report = {
            'map': result['map/iou=0.50:0.95/area=all/maxDets=100'],
            'map@0.5': result['map/iou=0.50/area=all/maxDets=100'],
            'map@0.75': result['map/iou=0.75/area=all/maxDets=100'],
        }
        if self.label_names is not None:
            # Per-class AP; NaN when a class is absent from the results.
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = \
                        result['ap/iou=0.50:0.95/area=all/maxDets=100'][l]
                except IndexError:
                    report['ap/{:s}'.format(label_name)] = np.nan
        observation = dict()
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
| StarcoderdataPython |
163610 | <reponame>zhuzhenping/Wt4ElegantRL<filename>run_toy.py
from runner import entry, Runner
class SimpleRunner(Runner):
    """Minimal Runner subclass used to smoke-test the entry() dispatcher."""
    def test(self):
        print('test1')
if __name__ == '__main__':
    # Hand the runner instance to the shared CLI entry point.
    entry(obj=SimpleRunner())
| StarcoderdataPython |
9708916 | <filename>util/contact_info.py
def str_mj_arr(arr):
    """Format a 1-D MuJoCo array as space-separated values, 3 decimals each."""
    formatted = ["%0.3f" % arr[idx] for idx in range(arr.shape[0])]
    return " ".join(formatted)
def print_contact_info(sim):
if sim.data.ncon == 0:
print("No contacts/collisions")
return
# Print contact metadata
for coni in range(sim.data.ncon):
print(" Contact %d:" % (coni,))
con = sim.data.contact[coni]
print(" dist = %0.3f" % (con.dist,))
print(" pos = %s" % (str_mj_arr(con.pos),))
print(" frame = %s" % (str_mj_arr(con.frame),))
print(" friction = %s" % (str_mj_arr(con.friction),))
print(" dim = %d" % (con.dim,))
print(" geom1 = %d, %s" % (con.geom1, sim.model.geom_id2name(con.geom1)))
print(" geom2 = %d, %s" % (con.geom2, sim.model.geom_id2name(con.geom2)))
| StarcoderdataPython |
1989388 | from django.db import models
from django.utils import timezone
import datetime
class Vehicle(models.Model):
    """A registered vehicle and its identifying attributes."""

    def __str__(self):
        # e.g. "Toyota Corolla(AB-123)"
        return f"{self.manufacturer} {self.model}({self.vehicle_number})"

    manufacturer = models.CharField(max_length=128)
    model = models.CharField(max_length=128)
    color = models.CharField(max_length=128)
    vehicle_number = models.CharField(max_length=128)
class Person(models.Model):
    """A person who may own vehicles (linked through Ownership)."""

    def __str__(self):
        return f"{self.name} {self.second_name}"

    # Many-to-many through the explicit Ownership join model below.
    vehicles = models.ManyToManyField(Vehicle, through='Ownership')
    name = models.CharField(max_length=128)
    second_name = models.CharField(max_length=128)
    date = models.DateField()
class AdditionalData(models.Model):
    """One-to-one extension of Person with identity/contact details."""
    # The Person FK doubles as this table's primary key.
    owner = models.OneToOneField(Person, on_delete=models.CASCADE, primary_key=True)
    passport_num = models.CharField(max_length=128)
    home_address = models.CharField(max_length=128)
    nationality = models.CharField(max_length=128)
class Ownership(models.Model):
    """Join model linking a Person to a Vehicle over a date interval."""
    owner = models.ForeignKey(
        'Person',
        on_delete=models.CASCADE,
    )
    vehicle = models.ForeignKey(
        'Vehicle',
        on_delete=models.CASCADE,
    )
    start_date = models.DateField()
    end_date = models.DateField()
class DriverLicence(models.Model):
    """A driving licence held by a Person."""
    def __str__(self):
        return self.number
    person = models.ForeignKey(
        'Person',
        on_delete=models.CASCADE,
    )
    # Licence category stored as a small int, displayed as a letter.
    TYPE = (
        (1, 'A'),
        (2, 'B'),
        (3, 'C'),
        (4, 'D'),
    )
    number = models.CharField(max_length=128)
    date = models.DateField()
    type = models.PositiveSmallIntegerField(
        choices=TYPE,
        default=1
    )
5119960 | from flask import Flask, render_template, jsonify, request
import urllib
import recipe_api
from recipe_api import *
import transformations.transformations
from transformations.transformations import *
import scraper
from pprint import pprint
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the single-page UI."""
    return render_template('index.html')
@app.route('/_recipe_scraper/<path:url>')
def get_recipe(url):
    """Scrape a recipe URL and return its parsed data plus category flags."""
    output = parse_recipe_from_url(url)
    recipe = scraper.get_recipe(url)
    ingredients = recipe['ingredients']
    title = recipe['title']
    # Classify the recipe for each supported dietary/cuisine category.
    # Output keys use underscores; is_category names use hyphens.
    for key, category in (('vegetarian', 'vegetarian'),
                          ('vegan', 'vegan'),
                          ('low_carb', 'low-carb'),
                          ('low_sodium', 'low-sodium'),
                          ('chinese', 'chinese'),
                          ('italian', 'italian')):
        output[key] = is_category(category, ingredients, title)
    # Vegan implies vegetarian.
    if output['vegan']:
        output['vegetarian'] = True
    return jsonify(output)
@app.route('/_transform/<path:url>/<to_or_from>/<category>')
def transform(url, to_or_from, category):
    """Transform a recipe to/from a category and return the re-parsed result.

    Args:
        url: recipe URL to scrape.
        to_or_from: 'to' or 'from' — direction of the transformation.
        category: category name, e.g. 'vegetarian' or 'low-carb'.
    """
    recipe = scraper.get_recipe(url)
    tr = transformations.transformations.transform(recipe, category, to_or_from)
    # (Removed an unused `results = {}` local.)
    output = parse_recipe(tr)
    ingredients = tr['ingredients']
    title = tr['title']
    # Re-classify the transformed recipe for every supported category.
    for key, cat in (('vegetarian', 'vegetarian'),
                     ('vegan', 'vegan'),
                     ('low_carb', 'low-carb'),
                     ('low_sodium', 'low-sodium'),
                     ('chinese', 'chinese'),
                     ('italian', 'italian')):
        output[key] = is_category(cat, ingredients, title)
    # NOTE(review): `category` comes from the URL (hyphenated, e.g.
    # 'low-carb') while the computed keys use underscores ('low_carb'),
    # so this can add a second spelling of the same flag — confirm what
    # the frontend expects.
    output[category] = to_or_from == 'to'
    if output['vegan']:
        output['vegetarian'] = True
    return jsonify(output)
if __name__ == "__main__":
    # Run the Flask development server.
    app.run()
| StarcoderdataPython |
11212557 | import pytest
from api_object_schema._compat import with_metaclass # pylint: disable=no-name-in-module
from api_object_schema import Field, Fields, FieldsMeta
from sentinels import NOTHING
# pylint: disable=redefined-outer-name
class MyObj(object):
    # Dummy class used as the resolution target for 'module:Class' strings.
    pass
def test_field_string_types():
f = Field('name', type='{0}:MyObj'.format(__name__))
assert f.type.type is MyObj
@pytest.mark.parametrize('invalid_type', ['a', 'a.b.c', __name__, 'x:y:z', 'x:y', __name__+':NonExistClass'])
def test_field_invalid_string_types(invalid_type):
    """Malformed or unresolvable type strings raise ValueError on access."""
    f = Field('name', type=invalid_type)
    with pytest.raises(ValueError) as caught:
        f.type.type # pylint: disable=pointless-statement
    assert 'Invalid type string: ' in str(caught.value)
def test_field(field):
    """The `field` fixture exposes the name it was created with."""
    assert field.name == "field_name"
def test_fields_get_by_name(fields, field):
    """get() looks up by field name, not by API name."""
    assert fields.get("field_name") is field
    assert fields.get("field_api_name") is None
def test_fields_get_by_api_name(fields, field):
    """get_by_api_name() looks up by API name, not by field name."""
    assert fields.get_by_api_name("field_name") is None
    assert fields.get_by_api_name("field_api_name") is field
def test_get_or_fabricate(fields, field):
    """get_or_fabricate() returns existing fields or creates stand-ins."""
    assert fields.get_or_fabricate("field_name") is field
    assert fields.get_or_fabricate("fake_field").name == "fake_field"
def test_get_all_field_names(obj):
    """API names map back to field names; unknown keys pass through."""
    fields_json = {"field_a": "", "field_b_api_name": "", "fake_field": ""}
    expected = set(["field_a", "field_b", "fake_field"])
    assert obj.fields.get_all_field_names_or_fabricate(fields_json) == expected
def test_data_model_functions(fields):
    """Fields supports len/iter and attribute, item and get() access."""
    assert len(fields) == 1
    assert list(fields) == [fields['field_name']]
    # All three access styles return the very same Field object.
    aliases = [fields.field_name,
               fields['field_name'], fields.get('field_name')]
    assert len(set(aliases)) == 1
    with pytest.raises(AttributeError):
        fields.fake_name # pylint: disable=pointless-statement
def test_fields_fields(field):
    """update()/add_field() extend a Fields; identity fields are queryable."""
    fields = Fields.from_fields_list([field])
    fields.update([Field(name="field_a"), Field(name="field_b")])
    fields.add_field(Field(name="field_c", is_identity=True))
    assert fields.get_identity_fields() == [fields.field_c]
def test_generate_field_default():
    """Defaults: NOTHING sentinel, plain value, or factory callable."""
    assert Field(name="field_name").generate_default() is NOTHING
    assert Field(name="field_name", default=1).generate_default() == 1
    assert Field(name="field_name", default=lambda:
                 True).generate_default() is True
def test_set_field_default():
    """set_default() accepts both plain values and factory callables."""
    field = Field(name="field_name")
    assert field.generate_default() is NOTHING
    field.set_default(1)
    assert field.generate_default() == 1
    field.set_default(lambda: True)
    assert field.generate_default() is True
def test_get_is_visible():
    """is_visible can be omitted (True), a constant, or a callable."""
    obj = {'some_val': None}
    assert Field(name="field_name").get_is_visible(obj) is True
    assert Field(name="field_name",
                 is_visible=False).get_is_visible(obj) is False
    assert Field(name="field_name", is_visible=lambda obj:
                 obj['some_val']).get_is_visible(obj) is None
def test_fields_contains(field):
fields = Fields.from_fields_list([field])
assert field in fields
def test_from_fields_list_with_names_duplications():
    """Later same-name fields override earlier ones unless overrides are forbidden."""
    duplicates = [Field(name='field_name'), Field(name='field_name')]
    resolved = Fields.from_fields_list(duplicates)
    winner = resolved.get('field_name')
    assert winner is duplicates[-1]
    assert winner is not duplicates[0]
    with pytest.raises(AssertionError):
        Fields.from_fields_list(duplicates, forbid_name_overrides=True)
def test_from_fields_list_with_api_names_duplications():
    """Later fields with a duplicate api_name win unless overrides are forbidden."""
    duplicates = [Field(name='field_a', api_name='some_field'),
                  Field(name='field_b', api_name='some_field')]
    resolved = Fields.from_fields_list(duplicates)
    winner = resolved.get_by_api_name('some_field')
    assert winner is duplicates[-1]
    assert winner is not duplicates[0]
    with pytest.raises(AssertionError):
        Fields.from_fields_list(duplicates, forbid_api_name_overrides=True)
# Fixtures
@pytest.fixture
def field():
    """A single Field with distinct display and API names."""
    return Field(name="field_name", api_name="field_api_name")
@pytest.fixture
def fields(field):
    """A Fields collection holding exactly the `field` fixture."""
    collection = Fields()
    collection.add_field(field)
    return collection
@pytest.fixture
def obj():
    """An instance whose FIELDS are wired up through the FieldsMeta metaclass."""
    class ObjectWithFields(with_metaclass(FieldsMeta)):
        FIELDS = [
            Field(name="field_a"),
            Field(name="field_b", api_name="field_b_api_name"),
        ]
    return ObjectWithFields()
| StarcoderdataPython |
1708452 | <reponame>NathanKr/python-playground<gh_stars>0
# Tiny playground script: rebinding names of different types.
a = 1
b = "hello"
a += 1
b += " world"
print(a, b)
# Names are not typed: `a` can be rebound from int to str.
a = "hello !!!"
print(a)
5096059 | from arm.logicnode.arm_nodes import *
class RpMSAANode(ArmLogicTreeNode):
    """Sets the MSAA quality."""
    bl_idname = 'LNRpMSAANode'
    bl_label = 'Set MSAA Quality'
    arm_version = 1

    # Enum items are (identifier, name, description) triples; all three are
    # the sample count itself.
    property0: HaxeEnumProperty(
        'property0',
        items=[(level, level, level) for level in ('1', '2', '4', '8', '16')],
        name='', default='1')

    def arm_init(self, context):
        # Single action in/out pair: this node only passes control through.
        self.add_input('ArmNodeSocketAction', 'In')
        self.add_output('ArmNodeSocketAction', 'Out')

    def draw_buttons(self, context, layout):
        layout.prop(self, 'property0')
6573571 | <filename>notebooks/2022-02-15_01_downloading cordex data.py
#%%
# =============================================================================
# Dependencies
# =============================================================================
# Get the dependencies
from re import I  # NOTE(review): unused import -- kept as-is, consider removing
import cdsapi
import datetime as dt
import os.path
# BUG FIX: Path is used below but was never imported, which raised a
# NameError as soon as dir_root was assigned.
from pathlib import Path
#%%
# =============================================================================
# Run definitions
# =============================================================================
"""
Combo's used (only used combo's that have all scenario's available in one experiment)
EC-Earth:
    RACMO [r12i1p1] (1950 -- 2100)
    RCA [r12i1p1] (1970 -- 2100)
    HIRHAM [r3i1p1] (1951 -- 2100)
HadGEM2-ES:
    RACMO [r1i1p1] (1951 -- 2100)
    RCA [r1i1p1] (1970 -- 2100)
CNRM-CERFACS-CM5:
    RACMO [r1i1p1] (1951 -- 2100)
MPI-ESM-LR:
    RCA [r1i1p1] (1970 -- 2100)
NCC-NorESM1-M:
    REMO [r1i1p1] (1950 -- 2100)
    RCA [r1i1p1] (1970 -- 2100)
"""
# GCM/RCM combination and ensemble member to request from the CDS.
GCM_model = 'cnrm_cerfacs_cm5' # ichec_ec_earth, mohc_hadgem2_es, cnrm_cerfacs_cm5, mpi_esm_lr, ncc_noresm1_m
RCM_model = 'knmi_racmo22e' # smhi_rca4, knmi_racmo22e, dmi_hirham5, gerics_remo2015
ENSEMBLE_member = 'r1i1p1'
# define the storage location
dir_root = Path('/home/orage/Code/ai4er/phd/ph4/1_downloader_weather_data/')
dir_data = dir_root / 'data/CORDEX-EU/origin_CM_RACMO/'
# Specific sub-experiment
experiment = 'historical' # 'historical' 'rcp_2_6' 'rcp_4_5' 'rcp_8_5'
# The year definitions
# - range(N_0,N_I+1)
# years = [str(i) for i in range(1950,2101)] # full
years = [str(i) for i in range(2004, 2005)] # single year 2004
year = '2004'
# Make a shortcut for the CDS download client
c = cdsapi.Client()
# %%
print(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+' NOTIFY: Starting to retrieve '+year)
# %% Surface Solar Radiation
# BUG FIX: dir_data is a pathlib.Path, so string concatenation with '+'
# raised a TypeError; build the target path with the '/' operator instead.
file = dir_data / ('CORDEX-EU_' + experiment + '_ssrd_' + year + '.zip')
# Check if file exist to allow for easy redo
if os.path.isfile(file):
    # Tell us the file exist
    print('NOTIFY: this file was allready done! ' + str(file))
else:
    # if file doesn't exist, we download it
    c.retrieve(
        'projections-cordex-domains-single-levels',
        {
            'domain': 'europe',
            'experiment': experiment,
            'horizontal_resolution': '0_11_degree_x_0_11_degree',
            'temporal_resolution': '3_hours',
            'variable': 'surface_solar_radiation_downwards',
            'gcm_model': GCM_model,
            'rcm_model': RCM_model,
            'ensemble_member': ENSEMBLE_member,
            'start_year': year,
            'end_year': str(int(year)+1),
            'format': 'zip',
        },
        # cdsapi expects a string target path.
        str(file))
| StarcoderdataPython |
11346740 | from typing import Any, Dict, List, Optional
from typing_extensions import TypedDict
from graphql.error import format_error as format_graphql_error
from strawberry.types import ExecutionResult
class GraphQLHTTPResponse(TypedDict, total=False):
    """Shape of the JSON payload returned for a GraphQL-over-HTTP request.

    total=False: the "errors" key is present only when execution produced
    errors (see process_result below).
    """
    # Execution result data, or None when execution failed before producing any.
    data: Optional[Dict[str, Any]]
    # Formatted GraphQL errors; omitted entirely on success.
    errors: Optional[List[Any]]
def process_result(result: ExecutionResult) -> GraphQLHTTPResponse:
    """Convert an ExecutionResult into a JSON-serializable HTTP payload."""
    payload: GraphQLHTTPResponse = {"data": result.data}
    errors = result.errors
    if errors:
        payload["errors"] = [format_graphql_error(err) for err in errors]
    return payload
| StarcoderdataPython |
37886 | <reponame>rudra012/django_rest
from django.conf.urls import url
from api.snippets import snippets_api
urlpatterns = [
    # Collection endpoint: list snippets / create a snippet.
    url(r'^$', snippets_api.snippet_list),
    # Detail endpoint: operate on a single snippet selected by numeric pk.
    url(r'^(?P<pk>[0-9]+)/$', snippets_api.snippet_detail),
]
| StarcoderdataPython |
1875488 | import base64
from django.test import TestCase
from django.test import Client
from django.conf import settings
class test_image_controller(TestCase):
    """Integration tests for the image upload and listing endpoints."""

    def test_create_image(self):
        """POST a base64-encoded NIfTI image and expect HTTP 200."""
        data_path = settings.STORAGE_DIR+"/image/image_8.nii"
        # BUG FIX: the original `open(path, "rb").read()` leaked the file
        # handle; use a context manager so it is closed deterministically.
        with open(data_path, "rb") as image_file:
            data = image_file.read()
        encoded = base64.b64encode(data)
        c = Client()
        response = c.post('/app/images', data=encoded.decode(),
                          content_type='image/nii')
        self.assertTrue(response.status_code == 200)
        print('test create image_validate validate')

    def test_get_id(self):
        """GET the image listing and expect HTTP 200."""
        c = Client()
        response = c.get('/app/images')
        self.assertTrue(response.status_code == 200)
        print('test get_id for images validate')
6432682 | <reponame>MJochim/seahub
from django.core import mail
from django.conf import settings
from shibboleth import backends
from seahub.base.accounts import User
from seahub.auth import authenticate
from seahub.test_utils import BaseTestCase
import importlib
# Example Shibboleth SP attribute headers as the middleware would receive them
# after a successful IdP login. Values are representative sample data only.
SAMPLE_HEADERS = {
    "REMOTE_USER": '<EMAIL>',
    "Shib-Application-ID": "default",
    "Shib-Authentication-Method": "urn:oasis:names:tc:SAML:2.0:ac:classes:unspecified",
    "Shib-AuthnContext-Class": "urn:oasis:names:tc:SAML:2.0:ac:classes:unspecified",
    "Shib-Identity-Provider": "https://sso.college.edu/idp/shibboleth",
    "Shib-Session-ID": "1",
    "Shib-Session-Index": "12",
    "Shibboleth-affiliation": "<EMAIL>;<EMAIL>",
    "Shibboleth-schoolBarCode": "12345678",
    "Shibboleth-schoolNetId": "Sample_Developer",
    "Shibboleth-schoolStatus": "active",
    "Shibboleth-department": "University Library, Integrated Technology Services",
    "Shibboleth-displayName": "<NAME>",
    "Shibboleth-eppn": "<EMAIL>",
    "Shibboleth-givenName": "Sample",
    "Shibboleth-isMemberOf": "SCHOOL:COMMUNITY:EMPLOYEE:ADMINISTRATIVE:BASE;SCHOOL:COMMUNITY:EMPLOYEE:STAFF:SAC:P;COMMUNITY:ALL;SCHOOL:COMMUNITY:EMPLOYEE:STAFF:SAC:M;",
    "Shibboleth-mail": "<EMAIL>",
    "Shibboleth-persistent-id": "https://sso.college.edu/idp/shibboleth!https://server.college.edu/shibboleth-sp!sk1Z9qKruvXY7JXvsq4GRb8GCUk=",
    "Shibboleth-sn": "Developer",
    "Shibboleth-title": "Library Developer",
    "Shibboleth-unscoped-affiliation": "member;staff"
}
# Map incoming Shibboleth attributes onto user-profile fields.
# Tuple format appears to be (required, target_field_name) -- TODO confirm
# against the shibboleth backend's attribute-map handling.
settings.SHIBBOLETH_ATTRIBUTE_MAP = {
    # "eppn": (True, "username"),
    "givenname": (False, "givenname"),
    "surname": (False, "surname"),
    "emailaddress": (False, "contact_email"),
    "organization": (False, "institution"),
}
# Enable the Shibboleth remote-user backend and middleware for these tests.
# NOTE: mutating settings at import time affects the whole test process.
settings.AUTHENTICATION_BACKENDS += (
    'shibboleth.backends.ShibbolethRemoteUserBackend',
)
settings.MIDDLEWARE_CLASSES += (
    'shibboleth.middleware.ShibbolethRemoteUserMiddleware',
)
class ShibbolethRemoteUserBackendTest(BaseTestCase):
    """Tests auto-provisioning of accounts from Shibboleth remote-user data."""
    def setUp(self):
        # Ensure the test account does not exist before each test runs.
        self.remote_user = '<EMAIL>'
        self.remove_user(self.remote_user)
    def test_create_unknown_user(self):
        # Precondition: the remote user is unknown to the user store.
        with self.assertRaises(User.DoesNotExist):
            self.assertFalse(User.objects.get(self.remote_user))
        # First authentication auto-creates the account.
        user = authenticate(remote_user=self.remote_user,
                            shib_meta=SAMPLE_HEADERS)
        # New accounts are active by default (SHIB_ACTIVATE_AFTER_CREATION unset).
        assert user.is_active is True
        self.assertEqual(user.username, '<EMAIL>')
        self.assertEqual(User.objects.get(self.remote_user).username,
                         '<EMAIL>')
    def test_notify_admins_on_activate_request(self):
        self.assertEqual(len(mail.outbox), 0)
        with self.assertRaises(User.DoesNotExist):
            self.assertFalse(User.objects.get(self.remote_user))
        with self.settings(SHIB_ACTIVATE_AFTER_CREATION=False):
            # reload our shibboleth.backends module, so it picks up the settings change
            importlib.reload(backends)
            user = authenticate(remote_user=self.remote_user,
                                shib_meta=SAMPLE_HEADERS)
            self.assertEqual(user.username, '<EMAIL>')
            # With activation disabled, the account is created inactive and
            # an activation-request mail is sent to the admins.
            assert user.is_active is False
            assert len(mail.outbox) != 0
            assert 'a newly registered account need to be activated' in mail.outbox[0].body
        # now reload again, so it reverts to original settings
        importlib.reload(backends)
| StarcoderdataPython |
171103 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved.
import pytest
from subprocess import check_call
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.util.filesystem_utils import WAREHOUSE, IS_S3
TEST_DB = 'hidden_files_db'
TEST_TBL = 'hf'
class TestHiddenFiles(ImpalaTestSuite):
  """
  Tests that files with special prefixes/suffixes are considered 'hidden' when
  loading table metadata and running queries.
  """

  @classmethod
  def get_workload(self):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHiddenFiles, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
    # Only run in exhaustive mode on hdfs since this test takes a long time.
    if cls.exploration_strategy() != 'exhaustive' and not IS_S3:
      cls.TestMatrix.clear()

  def setup_method(self, method):
    # Recreate the test database/table with two empty partitions, then seed
    # the partition directories with visible and hidden files.
    self.cleanup_db(TEST_DB)
    self.client.execute("create database %s location '%s/%s'" % (TEST_DB, WAREHOUSE,
        TEST_DB))
    self.client.execute(
        "create table %s.%s like functional.alltypes" % (TEST_DB, TEST_TBL))
    self.client.execute(
        "alter table %s.%s add partition (year=2010, month=1)" % (TEST_DB, TEST_TBL))
    self.client.execute(
        "alter table %s.%s add partition (year=2010, month=2)" % (TEST_DB, TEST_TBL))
    self.__populate_test_table()

  def teardown_method(self, method):
    self.cleanup_db(TEST_DB)

  @staticmethod
  def __hdfs_copy(src, dst):
    """Copy a single file within HDFS, blocking until the copy completes."""
    check_call(["hadoop", "fs", "-cp", src, dst], shell=False)

  def __populate_test_table(self):
    """Copy files into the HDFS directories of two partitions of the table.
    The goal is to have both an empty and non-empty partition with hidden files."""
    ALLTYPES_LOC = "%s/alltypes" % WAREHOUSE
    TEST_TBL_LOC = "%s/%s/%s" % (WAREHOUSE, TEST_DB, TEST_TBL)
    # Copy a visible file into one of the partitions.
    self.__hdfs_copy("%s/year=2010/month=1/100101.txt" % ALLTYPES_LOC,
                     "%s/year=2010/month=1/100101.txt" % TEST_TBL_LOC)
    # Add hidden files to the non-empty partition. Use upper case hidden suffixes.
    for hidden_name in (".100101.txt", "_100101.txt",
                        "100101.txt.COPYING", "100101.txt.TMP"):
      self.__hdfs_copy("%s/year=2010/month=1/100101.txt" % ALLTYPES_LOC,
                       "%s/year=2010/month=1/%s" % (TEST_TBL_LOC, hidden_name))
    # Add hidden files to the empty partition. Use lower case hidden suffixes.
    for hidden_name in (".100201.txt", "_100201.txt",
                        "100201.txt.copying", "100201.txt.tmp"):
      self.__hdfs_copy("%s/year=2010/month=2/100201.txt" % ALLTYPES_LOC,
                       "%s/year=2010/month=2/%s" % (TEST_TBL_LOC, hidden_name))

  @pytest.mark.execute_serially
  def test_hidden_files_load(self, vector):
    """Tests that an incremental refresh ignores hidden files."""
    self.client.execute("invalidate metadata %s.%s" % (TEST_DB, TEST_TBL))
    self.run_test_case('QueryTest/hidden-files', vector)

  # This test runs on one dimension. Therefore, running in it parallel is safe, given no
  # other method in this test class is run.
  def test_hidden_files_refresh(self, vector):
    """Tests that an incremental refresh ignores hidden files."""
    self.client.execute("refresh %s.%s" % (TEST_DB, TEST_TBL))
    self.run_test_case('QueryTest/hidden-files', vector)
6634619 | # <NAME>
# Read a 1-D minefield and report, for each cell, how many of the cell and
# its immediate neighbours contain a mine (value 1).
field_size = int(input())
mines = [int(input()) for _ in range(field_size)]
response = []
for i in range(field_size):
    # Clamp the i-1..i+1 window to the field bounds, then count mines.
    window = [j for j in (i - 1, i, i + 1) if 0 <= j < field_size]
    response.append(sum(1 for j in window if mines[j] == 1))
for value in response:
    print(value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.