blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91aaaa9397546c8f92ae2fd813674cf87646d94c | fe09c822f90bddbead6ed2b9f2023707148f13fc | /day10/Person.py | 92ccb3ba2f20c11b4de210b922b8c7621736ad1d | [] | no_license | Bobdfm0126/Python20210713_Bob | 89bc67ac1e545283da9dd161692731ac07907640 | c4da6abd74d10b20099f28571cdc84d7d4e4cea5 | refs/heads/master | 2023-07-15T19:24:44.753589 | 2021-08-26T11:59:28 | 2021-08-26T11:59:28 | 385,594,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | class Human:
def __init__(self, name, age, sex) -> None:
self.name = name
self.age = age
self.sex = sex
def __str__(self) -> str:
return "name: %s age: %d sex: %s" % (self.name, self.age, self.sex)
class Student(Human):
    """A Human enrolled in school, carrying a student number and a grade."""

    def __init__(self, name, age, sex, number, grade) -> None:
        super().__init__(name, age, sex)
        self.number = number
        self.grade = grade

    def __str__(self) -> str:
        # Reuse the Human summary and append the student-specific fields.
        student_part = " number: %d grade: %s" % (self.number, self.grade)
        return super().__str__() + student_part
if __name__ == '__main__':
    # Demo: build three Student instances (sex and grade labels are Chinese,
    # e.g. '一年級' = "first grade") and print each one's name and age.
    student1 = Student('John', 18, '男', 1, '一年級')
    student2 = Student('Mary', 19, '女', 2, '二年級')
    student3 = Student('Bobo', 20, '女', 3, '三年級')
    students = [student1, student2, student3]
    for student in students:
        print(student.name, student.age)
"bob940126bob@gmail.com"
] | bob940126bob@gmail.com |
1ebeccd76c77fb7295b05092f26a7ad953d07807 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2330/60796/280647.py | 2ebf0db21d495fb1321ec9d1115f099b73a5cb61 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | import math
# Reads N, then N points as "x,y" lines, and prints the smallest rectangle
# area formed by any 4 of the points, shown with exactly 4 decimal places
# (padded with trailing zeros, extra digits truncated). Prints "0.0000"
# when no rectangle exists.
N=int(input())
ls=[]
for i in range(N):
    ls.append(input().split(","))
    ls[i]=[int(x) for x in ls[i]]
r=[]
# Try every unordered 4-point combination; a, b, c, d are in index order.
for i1 in range(len(ls)-3):
    for i2 in range(i1+1,len(ls)-2):
        for i3 in range(i2+1,len(ls)-1):
            for i4 in range(i3+1,len(ls)):
                a=ls[i1]
                b=ls[i2]
                c=ls[i3]
                d=ls[i4]
                # Axis-aligned cases: two point pairs share an x coordinate
                # and two pairs share a y coordinate.
                # BUGFIX: the second test used to be the tautology
                # d[0]==d[0]; it must pair b with d, matching the five
                # symmetric branches below.
                if (a[0]==c[0] and b[0]==d[0] and (a[1]==b[1] and c[1]==d[1])):
                    r.append(abs(a[1]-c[1])*abs(b[0]-a[0]))
                elif (a[0]==b[0] and c[0]==d[0] and (d[1]==b[1] and a[1]==c[1])):
                    r.append(abs(a[1]-b[1])*abs(c[0]-a[0]))
                elif (a[0]==d[0] and c[0]==b[0] and (d[1]==c[1] and a[1]==b[1])):
                    r.append(abs(a[0]-b[0])*abs(d[1]-a[1]))
                elif (a[0]==b[0] and c[0]==d[0] and (c[1]==b[1] and a[1]==d[1])):
                    r.append(abs(a[0]-d[0])*abs(b[1]-a[1]))
                elif (a[0]==c[0] and b[0]==d[0] and (c[1]==b[1] and a[1]==d[1])):
                    r.append(abs(a[0]-d[0])*abs(c[1]-a[1]))
                elif (a[0]==d[0] and c[0]==b[0] and (d[1]==b[1] and a[1]==c[1])):
                    r.append(abs(a[0]-c[0])*abs(d[1]-a[1]))
                # Tilted cases: compare |slopes| of opposite sides, then use
                # the two side lengths as width and height.
                # NOTE(review): equal |slope| alone does not guarantee the
                # sides are perpendicular; verify against the judge data.
                elif (a[0]-b[0])!=0 and (c[0]-d[0])!=0 and (a[0]-c[0])!=0 and (d[0]-b[0])!=0:
                    if abs((a[1]-b[1])/(a[0]-b[0]))==abs((c[1]-d[1])/(c[0]-d[0])) and abs((a[1]-c[1])/(a[0]-c[0]))==abs((d[1]-b[1])/(d[0]-b[0])):
                        x=math.sqrt(pow(a[0]-b[0],2)+pow(a[1]-b[1],2))
                        y=math.sqrt(pow(a[0]-c[0],2)+pow(a[1]-c[1],2))
                        r.append(x*y)
                elif (a[0]-d[0])!=0 and (c[0]-b[0])!=0 and (a[0]-c[0])!=0 and (d[0]-b[0])!=0:
                    if abs((a[1]-d[1])/(a[0]-d[0]))==abs((b[1]-c[1])/(b[0]-c[0])) and abs((a[1]-c[1])/(a[0]-c[0]))==abs((d[1]-b[1])/(d[0]-b[0])):
                        x=math.sqrt(pow(a[0]-d[0],2)+pow(a[1]-d[1],2))
                        y=math.sqrt(pow(a[0]-c[0],2)+pow(a[1]-c[1],2))
                        r.append(x*y)
                elif (a[0] - d[0]) != 0 and (c[0] - b[0]) != 0 and (a[0] - b[0]) != 0 and (d[0] - c[0]) != 0:
                    if abs((a[1] - d[1]) / (a[0] - d[0])) == abs((b[1] - c[1]) / (b[0] - c[0])) and abs(
                            (a[1] - b[1]) / (a[0] - b[0])) == abs((d[1] - c[1]) / (d[0] - c[0])):
                        x = math.sqrt(pow(a[0] - d[0], 2) + pow(a[1] - d[1], 2))
                        y = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
                        r.append(x * y)
if len(r)==0:
    print("0.0000")
else:
    s=str(min(r))
    if not s.__contains__("."):
        s=s+".0000"
    else:
        i=s.index(".")
        t=s[i+1:]
        # BUGFIX: pad the decimal part on the RIGHT (the old code prepended
        # "0"+t) and write the result back into s (the old code dropped it).
        while len(t)<4:
            t=t+"0"
        # Truncate to exactly four decimals (no-op when len(t) == 4).
        s=s[:i+1]+t[:4]
    # BUGFIX: print unconditionally; integer-valued minima previously fell
    # into the first branch, which built s but never printed it.
    print(s)
"1069583789@qq.com"
] | 1069583789@qq.com |
69f7ccb625eee40b8adaaf35cde841ca47c70bdb | 2ebe9a9972228e5b0b2c316c503b8eb85ddb5b3e | /script.py | caef3e62b3a59cc5fbe8413ee4f4f8723562d17c | [] | no_license | HoloGenesis/BlenderAnimation | e0a40f2fea203f26fa307e01cfed70ec8f373de8 | 8f5f949dfb1975dd1870ae1315271eb56eb909ea | refs/heads/master | 2021-01-19T03:22:09.820674 | 2013-02-24T17:51:11 | 2013-02-24T17:51:11 | 8,789,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | #----------------------------------------------
# Pythonscript voor de animatie van vogels die
# een bepaald object volgen en hindernissen
# ontwijken.
#
# Wouter Pinnoo en Eveline Hoogstoel
#----------------------------------------------
import bpy
import random
# Maken van hindernissen op willekeurige plaatsen
for i in range(20):
bpy.ops.mesh.primitive_cylinder_add(radius = 1, depth = 100, location=(random.random()*100, random.random()*100, 0))
bpy.ops.object.modifier_add(type='COLLISION')
# Camera
layers = 20*[False]
layers[0] = True
cam = Camera.New('ortho')
cam.location = (76.48082733154297, -48.86098861694336, 20.241960525512695)
cam.scale = (24.341266632080078, 24.341266632080078, 24.341266632080078)
scn = Scene.GetCurrent()
ob = scn.objects.new(cam)
scn.setCurrentCamera(ob)
# Import van een vogel-object
bpy.ops.wm.link_append(link=False,instance_groups=False, filename="Bird.blend")
bird = bpy.context.object
# Leeg object dat fungeert als doel dat de vogels zullen volgen
bpy.ops.object.add()
target = bpy.context.object
# De plain met de particles
bpy.ops.mesh.primitive_plane_add(view_align=False, enter_editmode=False);
bpy.context.scene.objects.active = emitter
bpy.ops.object.particle_system_add()
particleSystem = emitter.particle_systems[-1]
particleSystem.name = 'BirdsPartSystem'
particleSettings = particleSystem.settings
particleSystem.name = 'BirdsPartSettings'
particleSettings.count = 77
particleSettings.frame_start = 1
particleSettings.frame_end = 1
particleSettings.lifetime = 300000.000
particleSettings.emit_from = 'FACE'
particleSettings.use_render_emitter = True
particleSettings.physics_type = 'BOIDS'
particleSettings.particle_size = 0.859
particleSettings.draw_percentage = 1
particleSettings.draw_method = 'RENDER'
particleSettings.dupli_object = cube
particleSettings.material = 1
particleSettings.render_type = 'OBJECT'
bpy.ops.boid.rule_add(type='GOAL')
particleSettings.active_boid_rule.object="target"
goal.object = target
| [
"pinnoo.wouter@gmail.com"
] | pinnoo.wouter@gmail.com |
7e1126158e18b8d4e8fd1a814f0c7b1db2c31236 | 35d9d8bc51755df93f7c2a11fe07ea502dd316f6 | /babyshare/catalog/migrations/0003_auto_20200301_0203.py | c3c4087ececda2e58174d762c77f8d00945a4d97 | [] | no_license | MEF4232/baby-share | f854333110a366d06fcf47a1ece89686b334abe8 | 38c630e584e2de9a8623eec209a4d8debc5c26b5 | refs/heads/master | 2021-02-07T12:25:14.943740 | 2020-03-01T17:18:08 | 2020-03-01T17:18:08 | 244,025,269 | 0 | 3 | null | 2020-03-01T17:18:09 | 2020-02-29T19:01:17 | HTML | UTF-8 | Python | false | false | 1,836 | py | # Generated by Django 3.0.3 on 2020-03-01 02:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0.3).

    Drops the single ``name`` field from ``Item`` and ``User`` and replaces
    it with structured fields (product name/id/price; user first/last name,
    email, password).  Auto-generated migrations should normally not be
    edited by hand once applied.

    NOTE(review): ``User.password`` is a plain ``CharField(max_length=20)``;
    storing raw passwords is unsafe — Django's auth hashers / AbstractUser
    should be used instead.
    """

    dependencies = [
        ('catalog', '0002_auto_20200229_1954'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='name',
        ),
        migrations.RemoveField(
            model_name='user',
            name='name',
        ),
        migrations.AddField(
            model_name='item',
            name='price',
            field=models.FloatField(default=0.0, help_text='price'),
        ),
        migrations.AddField(
            model_name='item',
            name='product_id',
            field=models.IntegerField(default=0, help_text='product id'),
        ),
        migrations.AddField(
            model_name='item',
            name='product_name',
            field=models.CharField(help_text='product name', max_length=200, null=True, verbose_name='Product Name'),
        ),
        migrations.AddField(
            model_name='user',
            name='email',
            field=models.EmailField(help_text='Email', max_length=100, null=True, verbose_name='Email ID'),
        ),
        migrations.AddField(
            model_name='user',
            name='first_name',
            field=models.CharField(help_text='First Name', max_length=20, null=True, verbose_name='First Name'),
        ),
        migrations.AddField(
            model_name='user',
            name='last_name',
            field=models.CharField(help_text='Last Name', max_length=20, null=True, verbose_name='Last Name'),
        ),
        migrations.AddField(
            model_name='user',
            name='password',
            field=models.CharField(help_text='Password', max_length=20, null=True, verbose_name='Password'),
        ),
    ]
| [
"noreply@github.com"
] | MEF4232.noreply@github.com |
61c8435f832d61befe8894c8dbea7b181fd8b002 | b26c41926fa3a7c2c061132d80e91a2750f2f468 | /tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_adjoint.py | 69c51544cf53762799d4572987b1106ebd7474ad | [
"Apache-2.0"
] | permissive | tensorflow/probability | 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5 | refs/heads/main | 2023-09-04T02:06:08.174935 | 2023-08-31T20:30:00 | 2023-08-31T20:31:33 | 108,053,674 | 4,055 | 1,269 | Apache-2.0 | 2023-09-13T21:49:49 | 2017-10-23T23:50:54 | Jupyter Notebook | UTF-8 | Python | false | false | 9,797 | py | # Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# pylint: disable=g-inconsistent-quotes
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes the adjoint of a `LinearOperator`."""
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util
# from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorAdjoint"]
# @tf_export("linalg.LinearOperatorAdjoint")
# @linear_operator.make_composite_tensor
class LinearOperatorAdjoint(linear_operator.LinearOperator):
  """`LinearOperator` representing the adjoint of another operator.

  This operator represents the adjoint of another operator.

  ```python
  # Create a 2 x 2 linear operator.
  operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]])
  operator_adjoint = LinearOperatorAdjoint(operator)

  operator_adjoint.to_dense()
  ==> [[1. + i, 0.]
       [3., 1 - i]]

  tensor_shape.TensorShape(operator_adjoint.shape)
  ==> [2, 2]

  operator_adjoint.log_abs_determinant()
  ==> - log(2)

  x = ... Shape [2, 4] Tensor
  operator_adjoint.matmul(x)
  ==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True)
  ```

  #### Performance

  The performance of `LinearOperatorAdjoint` depends on the underlying
  operators performance.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               operator,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None):
    r"""Initialize a `LinearOperatorAdjoint`.

    `LinearOperatorAdjoint` is initialized with an operator `A`.  The `solve`
    and `matmul` methods effectively flip the `adjoint` argument.  E.g.

    ```
    A = MyLinearOperator(...)
    B = LinearOperatorAdjoint(A)
    x = [....]  # a vector

    assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False)
    ```

    Args:
      operator: `LinearOperator` object.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`. Default is `operator.name +
        "_adjoint"`.

    Raises:
      ValueError:  If `operator.is_non_singular` is False.
    """
    # Recorded so the operator can be re-created as a composite tensor.
    parameters = dict(
        operator=operator,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name,
    )
    self._operator = operator
    # The congruency of is_non_singular and is_self_adjoint was checked in the
    # base operator.
    # Each hint holds for A^H exactly when it holds for A, so merge any
    # user-provided hint with the base operator's, rejecting contradictions.
    combine_hint = (
        linear_operator_util.use_operator_or_provided_hint_unless_contradicting)
    is_square = combine_hint(
        operator, "is_square", is_square,
        "An operator is square if and only if its adjoint is square.")
    is_non_singular = combine_hint(
        operator, "is_non_singular", is_non_singular,
        "An operator is non-singular if and only if its adjoint is "
        "non-singular.")
    is_self_adjoint = combine_hint(
        operator, "is_self_adjoint", is_self_adjoint,
        "An operator is self-adjoint if and only if its adjoint is "
        "self-adjoint.")
    is_positive_definite = combine_hint(
        operator, "is_positive_definite", is_positive_definite,
        "An operator is positive-definite if and only if its adjoint is "
        "positive-definite.")
    # Initialization.
    if name is None:
      name = operator.name + "_adjoint"
    with ops.name_scope(name):
      super(LinearOperatorAdjoint, self).__init__(
          dtype=operator.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  @property
  def operator(self):
    """The operator before taking the adjoint."""
    return self._operator

  def _linop_adjoint(self) -> linear_operator.LinearOperator:
    # The adjoint of the adjoint is the original operator.
    return self.operator

  # Assertions hold for A iff they hold for A^H, so delegate directly.
  def _assert_non_singular(self):
    return self.operator.assert_non_singular()

  def _assert_positive_definite(self):
    return self.operator.assert_positive_definite()

  def _assert_self_adjoint(self):
    return self.operator.assert_self_adjoint()

  def _shape(self):
    # Swap the two rightmost (matrix) dimensions; batch dims are unchanged.
    # `tensor_shape` is bound at the bottom of this module.
    shape = tensor_shape.TensorShape(self.operator.shape)
    return shape[:-2].concatenate([shape[-1], shape[-2]])

  def _shape_tensor(self):
    # Dynamic-shape counterpart of _shape; `prefer_static` is bound at the
    # bottom of this module.
    shape = self.operator.shape_tensor()
    return prefer_static.concat([
        shape[:-2], [shape[-1], shape[-2]]], axis=-1)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # A^H @ x == A.matmul(x, adjoint=True): just flip the adjoint flag.
    return self.operator.matmul(
        x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)

  def _matvec(self, x, adjoint=False):
    return self.operator.matvec(x, adjoint=(not adjoint))

  def _determinant(self):
    # det(A^H) = conj(det(A)); the conj is a no-op for self-adjoint A.
    if self.is_self_adjoint:
      return self.operator.determinant()
    return math_ops.conj(self.operator.determinant())

  def _log_abs_determinant(self):
    # |det(A^H)| == |det(A)|, so no conjugation is needed.
    return self.operator.log_abs_determinant()

  def _trace(self):
    # tr(A^H) = conj(tr(A)).
    if self.is_self_adjoint:
      return self.operator.trace()
    return math_ops.conj(self.operator.trace())

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # Solving with A^H is solving with A and the adjoint flag flipped.
    return self.operator.solve(
        rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)

  def _solvevec(self, rhs, adjoint=False):
    return self.operator.solvevec(rhs, adjoint=(not adjoint))

  def _to_dense(self):
    if self.is_self_adjoint:
      return self.operator.to_dense()
    return linalg.adjoint(self.operator.to_dense())

  def _add_to_tensor(self, x):
    return self.to_dense() + x

  def _eigvals(self):
    # Eigenvalues of A^H are the complex conjugates of those of A.
    eigvals = self.operator.eigvals()
    if not self.operator.is_self_adjoint:
      eigvals = math_ops.conj(eigvals)
    return eigvals

  def _cond(self):
    # The condition number is invariant under taking the adjoint.
    return self.operator.cond()

  @property
  def _composite_tensor_fields(self):
    return ("operator",)

  @property
  def _experimental_parameter_ndims_to_matrix_ndims(self):
    return {"operator": 0}
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
# Lazily-resolved TFP-internal modules — presumably deferred to avoid import
# cycles between this numpy backend and tensorflow_probability.substrates;
# see `private.LazyLoader` for the exact semantics.
distribution_util = private.LazyLoader(
    "distribution_util", globals(),
    "tensorflow_probability.substrates.numpy.internal.distribution_util")
tensorshape_util = private.LazyLoader(
    "tensorshape_util", globals(),
    "tensorflow_probability.substrates.numpy.internal.tensorshape_util")
prefer_static = private.LazyLoader(
    "prefer_static", globals(),
    "tensorflow_probability.substrates.numpy.internal.prefer_static")
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
475a4838da27863cb4d1120b9af1b225db6ca765 | 8c1caeff0571245dea78b8d20367f4785754c8a8 | /ilmo_app/migrations/0005_auto_20190304_1057.py | 26a907cb95d0baf483daae329d89f057bc2b52f8 | [
"MIT"
] | permissive | osakunta/django-ilmo-app | 56f8bde38e34e8c7e68c336386d214c22390fe27 | f36dadd45b3be86b60ceeb7dfeea77f5e39dde5e | refs/heads/master | 2023-04-27T07:02:02.947258 | 2022-03-24T12:14:46 | 2022-03-24T12:14:46 | 63,781,597 | 6 | 3 | MIT | 2023-04-21T22:01:15 | 2016-07-20T12:58:33 | Python | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-04 08:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ilmo_app', '0004_auto_20190303_1627'),
]
operations = [
migrations.AlterField(
model_name='eventattendee',
name='attendee_details',
field=models.CharField(blank=True, max_length=10000),
),
migrations.AlterField(
model_name='eventattendee',
name='attendee_name',
field=models.CharField(max_length=100),
),
]
| [
"remes@iki.fi"
] | remes@iki.fi |
9b3edf70e10697d58b5502bfc9bcc0ca143c381a | 09e5611a79e24bbc7ac5cea96590dbc7504db13b | /bootstrap.py | ba2117c64dd7214a7e611ed9329e96789b9f14b3 | [] | no_license | UPCnet/supervisor | 07b9ebc42359e688e425abab93d48430c9cd4b75 | 6ebdcbe64a342dec24d73000ea0a3a43978fa5b6 | refs/heads/master | 2021-01-18T14:11:46.602638 | 2015-06-17T07:27:38 | 2015-06-17T07:27:38 | 27,213,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,830 | py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
# Scratch directory where setuptools/zc.buildout eggs are downloaded before
# buildout takes over; removed again at the very end of the script.
tmpeggs = tempfile.mkdtemp()

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

# Command-line options mirroring the upstream zc.buildout bootstrap script.
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))

options, args = parser.parse_args()
######################################################################
# Custom bootstraping step to generate extra .cfg files
def install_configfile(source, destination):
    """
    Install a config file from a template, only when *destination* does not
    already exist (so local edits are never clobbered).

    Generates an empty file if *source* is not defined (falsy).

    :param source: path of the template to copy, or a falsy value.
    :param destination: path of the config file to create.
    :return: True when a file was written, False when it already existed
        (previously an implicit None; no caller relies on the identity).
    """
    if os.path.exists(destination):
        return False
    if source:
        shutil.copyfile(source, destination)
    else:
        # No template given: create an empty placeholder the user can fill in.
        with open(destination, 'w') as fd:
            fd.write('')
    # Single-argument print() is valid on both Python 2 and 3; the previous
    # `print '...'` statement made this otherwise py2/py3-dual script py2-only.
    print('Generated config file {}'.format(os.path.realpath(destination)))
    return True
# Generate the local override file from its template (no-op when it exists).
install_configfile('templates/customizeme.cfg.in', 'customizeme.cfg')

######################################################################
# load/install setuptools

try:
    if options.allow_site_packages:
        import setuptools
        import pkg_resources
    # Python 3 location first; fall back to Python 2's urllib2 below.
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

ez = {}
# Download and execute ez_setup.py; it defines use_setuptools() inside `ez`.
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            sys.path[:] = [x for x in sys.path if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

# Run easy_install in a child process to install zc.buildout into tmpeggs
# (-mZqNxd: multi-version, always-unzip, quiet, no-deps, exclude-scripts,
# install into the directory given next).
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

# Optional alternate release index; the env var is an upstream test hook.
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # NOTE(review): assumes the legacy setuptools parse_version format
        # (an iterable of '*'-tagged string parts); newer setuptools returns
        # a packaging.version.Version — confirm the pinned setuptools.
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan every available dist and keep the highest *final* version.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# Default to the 'bootstrap' command when no command was given.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| [
"carlesba@gmail.com"
] | carlesba@gmail.com |
2136dc07ff505cbe7a9973d1a20d3ba5a0b7c1eb | 25d0364f4ac823c0cad2c7d4f2ced403389a162d | /panel/telemetry/hyperbole.py | 0269ea24dfc1fbf082fb0ca6521e6e705de1af30 | [
"MIT"
] | permissive | Snoo-py/ksp_panel | f1ae3730ccd084981e17b0a4f90d3439ea6379e3 | 4eee6784c49a6bc14be25275d983d60090795e63 | refs/heads/master | 2021-05-26T05:29:11.494906 | 2021-04-22T19:35:31 | 2021-04-22T19:37:46 | 127,621,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | import numpy as np
from panel.planet_data import PLANET_DATA
from panel.telemetry.telemetry import Telemetry, telemetry_cache
class HyperboleData(Telemetry):
    # Derived quantities for a hyperbolic trajectory, exposed as (partly
    # cached) properties. self.speed / self.radius / self.eccentricity /
    # self.ref_body_name / the periapsis sin/cos come from Telemetry.
    # Assumed units (inferred from the *10**9 and *1000 conversions below):
    # PLANET_DATA mu in km^3/s^2, soi in km — TODO confirm.

    @property
    def _c3(self):
        # Characteristic energy: C3 = v^2 - 2*mu/r.
        # https://en.wikipedia.org/wiki/Hyperbolic_trajectory
        return self.speed**2 - 2 * PLANET_DATA[self.ref_body_name]['mu'] * 10**9 / self.radius

    @property
    def _a(self):
        # Semi-major axis (negative for a hyperbola): a = -mu / C3.
        # https://en.wikipedia.org/wiki/Characteristic_energy
        return -PLANET_DATA[self.ref_body_name]['mu'] * 10**9 / self._c3

    @property
    def _l(self):
        # Semi-latus rectum: l = a * (1 - e^2).
        # https://en.wikipedia.org/wiki/Characteristic_energy
        return self._a * (1 - self.eccentricity**2)

    @property
    def _p(self):
        # Conventional alias: the semi-latus rectum is usually written p.
        return self._l

    @property
    @telemetry_cache('eccentricity', '_l')
    def _limit_soi(self):
        # True anomaly where r reaches the plotted boundary.
        # NOTE(review): soi*1000 converts km to m, but the extra factor 3
        # plots out to three times the sphere of influence — confirm intent.
        return np.arccos((self._l / (PLANET_DATA[self.ref_body_name]['soi']*1000*3) - 1) / self.eccentricity)

    @property
    @telemetry_cache('eccentricity')
    def _limit_asymp(self):
        # True anomaly of the asymptote: cos(theta) = -1/e.
        return np.arccos(-1 / self.eccentricity)

    @property
    def _limit(self):
        # Plot up to whichever bound is hit first (SOI edge or asymptote).
        if self._limit_soi < self._limit_asymp:
            return self._limit_soi
        return self._limit_asymp

    @property
    @telemetry_cache('_limit')
    def _t(self):
        # Sampled true-anomaly values, symmetric about periapsis.
        return np.linspace(-self._limit, self._limit, num=100)

    @property
    @telemetry_cache('eccentricity', '_l', '_t')
    def _r(self):
        # Conic equation: r = l / (1 + e*cos(theta)).
        # https://en.wikipedia.org/wiki/Characteristic_energy
        return self._l / (1 + self.eccentricity * np.cos(self._t))

    @property
    @telemetry_cache('_r', '_t')
    def _xx(self):
        # Periapsis-aligned x coordinates of the sampled trajectory.
        return self._r * np.cos(self._t)

    @property
    @telemetry_cache('_r', '_t')
    def _yy(self):
        # Periapsis-aligned y coordinates of the sampled trajectory.
        return self._r * np.sin(self._t)

    @property
    def hyperbole_x(self):
        # Rotate into the reference frame by the longitude of periapsis.
        return self._xx * self.cos_longitude_of_periapsis - self._yy * self.sin_longitude_of_periapsis

    @property
    def hyperbole_y(self):
        return self._xx * self.sin_longitude_of_periapsis + self._yy * self.cos_longitude_of_periapsis
| [
"37975133+Snoo-py@users.noreply.github.com"
] | 37975133+Snoo-py@users.noreply.github.com |
6a502db19189aa356da1848cb757a2b4f3f9cb10 | f41cf763f5302becfd945433d9e1677322a3207b | /covid_cases/covid_cases/spiders/postcode_pop.py | 3dcf85c73a8672cf0104b6a042a67b3df04b188a | [] | no_license | k-dhingra/NSW-Health | 61d64b04f911df6c37ca45b1a279507f6a1e6734 | 1ea1b8e860dcf060e8c274ae8bf7fb902510bb18 | refs/heads/master | 2022-04-27T11:09:32.960244 | 2020-05-02T09:33:45 | 2020-05-02T09:33:45 | 260,655,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
import scrapy, json
from covid_cases.items import CovidCasesItem
class PostcodePopSpider(scrapy.Spider):
    """Spider that fetches NSW postcode population figures from the COVID-19
    heat-map data endpoint and yields one item per postcode record."""

    name = 'postcode_pop'

    def start_requests(self):
        """Kick off the crawl with the single population JSON endpoint."""
        endpoint = 'https://nswdac-np-covid-19-postcode-heatmap.azurewebsites.net/datafiles/population.json'
        yield scrapy.Request(url=endpoint, callback=self.parse)

    def parse(self, response):
        """Turn each JSON record into a CovidCasesItem."""
        for record in json.loads(response.text):
            item = CovidCasesItem()
            item['Postcode'] = record['POA_NAME16']
            item['Population'] = record['Tot_p_p']
            yield item
| [
"36874541+k-dhingra@users.noreply.github.com"
] | 36874541+k-dhingra@users.noreply.github.com |
047b3cc504e6dca0064f248701233f28e807138b | f0cfe73cf2d634389480ea3d480b49196a03bf3f | /scripts/fix_barcode_kaya_okur.py | 5764f4eea128f18eb2ccd58ab3d51d85f4c559bc | [] | no_license | valavro/scCut-Tag_2020 | e2a1c738920fb92d8c4bffd0b52e5d243f16df44 | 949c19a44a6e98444d40e5b8cd253f17d2e54aad | refs/heads/master | 2023-05-26T04:14:57.784128 | 2021-02-10T15:24:08 | 2021-02-10T15:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | #!/usr/bin/env python3
import argparse
import pysam
import sys
import gzip
# Command-line interface: input BAM (-b) and output BAM (-o) paths.
# BUGFIX: the description was the copy-pasted argparse-docs example text
# ('Process some integers.'), which misdescribed this tool.
parser = argparse.ArgumentParser(
    description='Tag each BAM alignment with its original read name (RG tag) '
                'and append a unique suffix to make read names unique.')
parser.add_argument("--bam_file","-b",type=str,action="store")
parser.add_argument("--out","-o",type=str,action="store")
args = parser.parse_args()
def main(args):
    """Copy *args.bam_file* to *args.out*, rewriting every alignment.

    Each record gets an RG tag holding its original query name (for this
    Kaya-Okur-style dataset the query name presumably carries the cell
    barcode — assumption from the filename; confirm), and the query name
    itself is suffixed with a global running counter so that every output
    record's name is unique (mates of a pair therefore get different names).
    """
    N=1
    with pysam.AlignmentFile(args.bam_file,'r') as p:
        with pysam.AlignmentFile(args.out,'wb',template = p) as out:
            for line in p:
                # Preserve the original name in an RG tag, then uniquify it.
                line.tags += [("RG",line.query_name)]
                line.query_name = line.query_name + "_read_" + str(N)
                out.write(line)
                N += 1

# NOTE(review): runs on import — consider an `if __name__ == '__main__':` guard.
main(args)
"bartosovic.marek@gmail.com"
] | bartosovic.marek@gmail.com |
d42cbf0bf2afd62a2579620909e70dd12be67b67 | 935621ea08ac0bd2265e1935f6301fed595d9d3f | /Untitled Folder/tst3.py | f5101269ece9909201aa3e137ccb5fd84952613a | [] | no_license | itsmeadi/neural | 3d0b79979385951aa499efd9c89c5e490f2819fe | 551b1ee359807f23b0115648ae4ab5f301efcabf | refs/heads/master | 2021-01-21T00:11:54.722418 | 2018-01-15T17:54:40 | 2018-01-15T17:54:40 | 101,861,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | a=0
def function():
    """Rebind the module-level global ``a`` (demonstrates the ``global`` keyword)."""
    global a
    a = 9

function()
# BUGFIX/consistency: single-argument print() behaves identically on Python 2
# and 3; the old `print a` statement was py2-only while the rest of this file
# already uses the function form (see print_globvar below).
print(a)
# Minimal demonstration of writing vs. reading a module global from functions.
globvar = 0

def set_globvar_to_one():
    global globvar    # Needed to modify global copy of globvar
    globvar = 1

def print_globvar():
    print(globvar)     # No need for global declaration to read value of globvar

set_globvar_to_one()
print_globvar()        # prints 1
"itsmeadityaagarwal@gmail.com"
] | itsmeadityaagarwal@gmail.com |
5af09d318451d19bd9ea7816ce192d7223139fc5 | 44e91073035047a294b64dd4eccb7ee3c566ed2f | /jina/executors/segmenters/__init__.py | e4e56ccd21cf94d57514f5cb53bfdb21e0ae6715 | [
"Apache-2.0"
] | permissive | chunyuema/jina | 728131a352956b7de503ae6d6ff70c95834228ec | e01e57df00deda8ea7bbda1f0a26ba25c60782a6 | refs/heads/master | 2023-04-06T13:37:05.055330 | 2021-03-05T23:14:25 | 2021-03-05T23:14:25 | 344,590,097 | 0 | 1 | Apache-2.0 | 2023-04-01T18:18:29 | 2021-03-04T19:43:53 | Python | UTF-8 | Python | false | false | 809 | py | import inspect
from typing import Dict, List
from .. import BaseExecutor
from ...helper import typename
class BaseSegmenter(BaseExecutor):
    """:class:`BaseSegmenter` works on doc-level,
    it chunks Documents into set of Chunks.

    Subclasses implement :meth:`segment`; the (non-``self``) argument names
    of that method double as the document fields the segmenter requires.
    """

    def __init__(self, *args, **kwargs):
        """Constructor.

        Introspects the subclass's :meth:`segment` signature and records its
        argument names in ``self.required_keys``.
        """
        super().__init__(*args, **kwargs)
        # The required keys are exactly the named parameters of the
        # subclass's segment() implementation (minus 'self').
        self.required_keys = {
            k for k in inspect.getfullargspec(self.segment).args if k != 'self'
        }
        if not self.required_keys:
            # A segmenter with no declared keys is almost certainly a bug in
            # the subclass; warn rather than fail hard.
            self.logger.warning(
                f'{typename(self)} works on keys, but no keys are specified'
            )

    def segment(self, *args, **kwargs) -> List[Dict]:
        """Split one Document into chunk-level records.

        Must be overridden by subclasses.

        :return: a list of chunks-level info represented by a dict
        :raises NotImplementedError: always, in this base class
        """
        raise NotImplementedError
"noreply@github.com"
] | chunyuema.noreply@github.com |
c5971d938e49b66b654a919ac6e2e69b5337945b | a4a754bb5d2b92707c5b0a7a669246079ab73633 | /8_kyu/derive.py | 6efcdb1118f8b8cb017f87a2a9c1cd42ddd88128 | [] | no_license | halfendt/Codewars | f6e0d81d9b10eb5bc66615eeae082adb093c09b3 | 8fe4ce76824beece0168eb39776a2f9e078f0785 | refs/heads/master | 2023-07-11T13:58:18.069265 | 2021-08-15T18:40:49 | 2021-08-15T18:40:49 | 259,995,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | def derive(coefficient, exponent):
"""
Take the Derivative Kata
https://www.codewars.com/kata/5963c18ecb97be020b0000a2
"""
return str(coefficient*exponent)+'x^'+str(exponent - 1) | [
"36609861+halfendt@users.noreply.github.com"
] | 36609861+halfendt@users.noreply.github.com |
46276733c353e6830238baf3b508505a6efd2f47 | e1ad2b7a7aa64045bec467cecadd99c97c284868 | /results/guia3-04.py | eeead95922a376499a49680ca8e608a05ca67661 | [] | no_license | juanmzaragoza/ejercicios-simulacion | 503d44d966d6189dff38b87275e20e03189d4390 | c9a7688a5d7637f0d70f8b59ea077565d9be6fa1 | refs/heads/master | 2020-05-22T02:16:56.222903 | 2019-06-30T21:26:10 | 2019-06-30T21:26:10 | 186,196,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import numpy as np
from scipy import stats
from random import random
import matplotlib.pyplot as plt
# it starts working
checks = 40
states = [0]
for i in range(checks):
u = random()
if(states[i-1] == 1):
if(u < 0.95): #continue working
states.append(1)
else:
states.append(0)
else:
if(u < 0.4): # if was break
states.append(1)
else:
states.append(0)
plt.plot(states)
plt.ylabel('State')
plt.xlabel('Time')
plt.show() | [
"jzaragoza@fi.uba.ar"
] | jzaragoza@fi.uba.ar |
808ac04d8b0105120d3c78e0d0dac9ac656771c7 | bbdc377bfe1f94364de4f7edc1cb19942904cb24 | /Manifolds2D.py | 8e27ee58b4ff17cb0e88be881a86cd8e3c763290 | [] | no_license | ctralie/TwistyTakens | e166139f13b25b8a9885dee11b7267017f73dc28 | 9e1200a1ad9e10b31eb0a32b5073854cacdefcc9 | refs/heads/master | 2021-03-19T17:34:37.457455 | 2018-08-28T15:54:22 | 2018-08-28T15:54:22 | 93,522,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,285 | py | import numpy as np
from Utilities import *
def getSphereTimeSeries(theta, phi, u, geodesic = False):
N = phi.size
X = np.zeros((N, 3))
X[:, 0] = np.cos(theta)*np.cos(phi)
X[:, 1] = np.sin(theta)*np.cos(phi)
X[:, 2] = np.sin(phi)
d = X.dot(u)
d[d < -1] = -1
d[d > 1] = 1
x = d
if geodesic:
x = np.arccos(d)
return x.flatten()
def getRP2TimeSeries(theta, phi):
N = phi.size
X = np.zeros((N, 3))
X[:, 0] = np.cos(theta)*np.cos(phi)
X[:, 1] = np.sin(theta)*np.cos(phi)
X[:, 2] = np.sin(phi)
u = np.random.randn(3, 1)
u = u/np.sqrt(np.sum(u**2))
d = X.dot(u)
d[d < -1] = -1
d[d > 1] = 1
x = np.arccos(np.abs(d))
return x.flatten()
def getBulgingSphereTimeSeries(theta, phi, u = None):
    """Observation on a 'bulging sphere' whose radius in the xy-plane is
    0.5 + cos(phi).

    Returns (X, d): the embedded (N, 3) trajectory and the clipped dot
    product of each point with the observation direction.

    :param u: optional (3, 1) observation vector; if None (the default) a
        random unit vector is drawn, preserving the original behavior.
        Passing u makes the function deterministic, matching
        getSphereTimeSeries.
    """
    N = phi.size
    X = np.zeros((N, 3))
    X[:, 0] = np.cos(theta)*(0.5+np.cos(phi))
    X[:, 1] = np.sin(theta)*(0.5+np.cos(phi))
    X[:, 2] = np.sin(phi)
    if u is None:
        u = np.random.randn(3, 1)
        u = u/np.sqrt(np.sum(u**2))
    d = X.dot(u)
    # Clamp to [-1, 1] (kept from the original; note the dot product can
    # legitimately exceed 1 here because the surface radius reaches 1.5).
    d[d < -1] = -1
    d[d > 1] = 1
    return (X, d.flatten())
def getKleinTimeSeries(T1, slope, eps = 0.02):
    """
    Make a Klein bottle time series
    Parameters
    ----------
    T1 : int
        The number of samples per period on the circle part
    slope : float
        Slope of the trajectory along principal domain of the Klein bottle
    eps : float
        Fuzz close to the boundary in the y direction. Or if negative,
        the number of periods to complete
    Returns
    -------
    ndarray: the observation cos(2x) + cos(x)*sin(y) + cos(y) along the
    trajectory (boundary-adjacent samples dropped when eps > 0).
    """
    NPeriods = 1.0/slope
    N = T1*NPeriods
    print("NPeriods = %i, N = %i"%(NPeriods, N))
    if eps < 0:
        # Negative eps means "complete -eps periods" instead of fuzzing.
        print("Expanding period")
        N *= -eps
        # BUGFIX: np.linspace requires an integer sample count; N is a float
        # here, which raises TypeError on modern NumPy.  Truncating matches
        # the old implicit conversion.
        y = np.linspace(0, np.pi*(-eps), int(N))
    else:
        y = np.linspace(0, np.pi, int(N))
    x = np.arange(int(N))*2*np.pi/T1
    if eps > 0:
        idx = (y>eps)*(y<np.pi-eps) #Exclude points close to the boundary
        x = x[idx]
        y = y[idx]
    return np.cos(2*x) + np.cos(x)*np.sin(y) + np.cos(y)
def getTorusDistance(x, theta, phi, alpha_theta = 1.0, alpha_phi = 1.0, L1 = False):
    """
    Get a distance from points to an observation point x on the torus
    Parameters
    ----------
    x : ndarray (2)
        Position of observation point (theta, phi)
    theta : ndarray (N)
        Theta (x) coordinates of points on the flow
    phi : ndarray (N)
        Phi (y) coordinates of points on the flow
    alpha_theta : float
        Weight of metric along the x direction
    alpha_phi : float
        Weight of metric along the y direction
    L1 : bool
        If True use the weighted L1 metric, otherwise weighted L2
    """
    def circle_dist(center, angles):
        # Circular distance: compare against the point and its +/- 2*pi
        # translates and keep the smallest gap.
        gap = np.abs(center - angles)
        gap = np.minimum(gap, np.abs(center + 2*np.pi - angles))
        return np.minimum(gap, np.abs(center - 2*np.pi - angles))

    dtheta = alpha_theta * circle_dist(x[0], theta)
    dphi = alpha_phi * circle_dist(x[1], phi)
    if L1:
        return dtheta + dphi
    return np.sqrt(dtheta**2 + dphi**2)
def getKleinDistance(x1, theta, phi, alpha_theta = 1.0, alpha_phi = 1.0, L1 = False):
    """Distance from points on the flow to an observation point on the Klein
    bottle, where points are given on its torus double cover
    [0, 2*pi] x [0, 2*pi] with the identification [x, y] ~ [x + pi, -y].

    Parameters mirror getTorusDistance; the result is the pointwise minimum
    of the torus distances to the two representatives of x1.
    """
    # Image of x1 under the quotient map, wrapped back into [0, 2*pi).
    twin = np.mod([x1[0] + np.pi, -x1[1]], 2*np.pi)
    d_self = getTorusDistance(x1, theta, phi, alpha_theta, alpha_phi, L1)
    d_twin = getTorusDistance(twin, theta, phi, alpha_theta, alpha_phi, L1)
    return np.minimum(d_self, d_twin)
def intersectSegments2D(A, B, C, D, countEndpoints = True):
    """Intersect line segments AB and CD parametrically.

    Returns the intersection point as an ndarray, or an empty array when
    the segments are parallel or do not meet within both segments.  When
    ``countEndpoints`` is False, a meeting that is an endpoint of *both*
    segments is not reported.
    """
    no_hit = np.array([])
    det = (D[0] - C[0]) * (A[1] - B[1]) - (D[1] - C[1]) * (A[0] - B[0])
    if det == 0:
        # Parallel (or degenerate) segments: no unique intersection.
        return no_hit
    # Parameter t along CD and s along AB, by Cramer's rule.
    t = float((A[0] - C[0]) * (A[1] - B[1]) - (A[1] - C[1]) * (A[0] - B[0])) / float(det)
    s = float((D[0] - C[0]) * (A[1] - C[1]) - (D[1] - C[1]) * (A[0] - C[0])) / float(det)
    if not (0 <= s <= 1):
        return no_hit  # intersection outside segment AB
    if not (0 <= t <= 1):
        return no_hit  # intersection outside segment CD
    # Optionally ignore a touch that is an endpoint of both segments.
    if (t == 0 or t == 1) and (s == 0 or s == 1) and not countEndpoints:
        return no_hit
    return A + s * (B - A)
def get2HoledTorusTraj(x0, dx, NPoints):
    """
    Come up with a trajectory on the unit octagon representation
    of the 2-holed torus
    Parameters
    ----------
    x0 : ndarray (2, 1)
        Initial position on the 2-holed torus
    dx : ndarray (2, 1)
        Vector between adjacent points on the trajectory
    NPoints : int
        Number of points on the trajectory
    Returns
    -------
    dict with keys 'X' (ndarray (NPoints, 2), the trajectory) and
    'endpts' (ndarray (9, 2), the octagon boundary vertices, first
    vertex repeated at the end).
    """
    x0 = np.array(x0)
    dx = np.array(dx)
    # Octagon vertices; the linspace endpoint duplicates vertex 0 so each
    # consecutive pair (endpts[k], endpts[k+1]) is one boundary edge.
    thetas = np.linspace(0, 2*np.pi, 9) - np.pi/8
    endpts = np.zeros((9, 2))
    endpts[:, 0] = np.cos(thetas)
    endpts[:, 1] = np.sin(thetas)
    # Unit normals of the edges (edge vectors rotated 90 degrees).  The
    # tuple swap is safe: the unary minus on the right-hand side makes a
    # copy before either column is overwritten.
    normals = endpts[1::, :] - endpts[0:-1, :]
    normals[:, 0], normals[:, 1] = normals[:, 1], -normals[:, 0]
    normals = normals/np.sqrt(np.sum(normals**2, 1))[:, None]
    # Distance between a pair of opposite (identified) edges.
    width = endpts[0, 0] - endpts[5, 0]
    X = [x0]
    for i in range(1, NPoints):
        x1 = X[i-1]
        x2 = x1 + dx
        # Check if out of bounds of torus
        k = 0
        while k < 8:
            res = intersectSegments2D(x1, x2, endpts[k, :], endpts[k+1, :])
            if res.size > 0:
                # The step crosses edge k: carry the crossing point to the
                # identified opposite edge and keep the leftover step.
                x1 = res - width*normals[k, :] #Go to other side of octagon
                x2 = x1 + (x2 - res)
                # Nudge off the boundary so the same crossing is not
                # immediately re-detected.
                x1 = x1+1e-10*normals[k, :]
                k = 0  # restart the edge scan from the new position
                continue
            k += 1
        X.append(x2)
    X = np.array(X)
    return {'X':X, 'endpts':endpts}
def get2HoledTorusDist(X, x0, endpts):
    """Squared flat (octagon) distance from each row of X to the point x0 on
    the 2-holed torus.

    Parameters
    ----------
    X: ndarray (N, 2)
        A set of points inside of the octagon
    x0: ndarray (2)
        A point to which to measure distances
    endpts: ndarray (9, 2)
        Endpoints on the octagon model

    Note: the returned values are *squared* Euclidean distances, each taken
    as the minimum over the nine representatives of x0.
    """
    # Representatives of x0: the point itself, plus one translate across
    # each octagon edge (the offset is the sum of the edge's endpoints).
    reps = np.concatenate((x0[None, :], x0 + (endpts[1:9, :] + endpts[0:8, :])), 0)
    # Pairwise squared distances via the |a|^2 + |b|^2 - 2 a.b expansion.
    sqX = np.sum(X**2, 1)
    sqR = np.sum(reps**2, 1)
    pair = sqX[:, None] + sqR[None, :] - 2*X.dot(reps.T)
    nearest = np.min(pair, 1)
    nearest[nearest < 0] = 0  # clamp roundoff-negative values
    return nearest
def doSphereExample():
    """Example: observe a winding trajectory on the unit sphere and embed it
    with a sliding window; saves a plot (SphereTimeSeries.svg) and a point
    cloud (Sphere.off).

    Depends on helpers from Utilities (getSlidingWindowNoInterp,
    plotSlidingWindowResults, savePCOff) and matplotlib.
    """
    np.random.seed(100)
    N = 6000
    NPeriods = 50
    theta = np.linspace(0, 2*np.pi*NPeriods, N)
    phi = np.pi*np.linspace(-0.5, 0.5, N)
    # BUGFIX: getSphereTimeSeries requires an observation direction u; the
    # original call omitted it (TypeError).  Draw a random unit vector, as
    # the other observation functions in this module do (seeded above, so
    # the example is reproducible).
    u = np.random.randn(3, 1)
    u = u/np.sqrt(np.sum(u**2))
    #Observation function
    x = getSphereTimeSeries(theta, phi, u)
    #x = getRP2TimeSeries(theta, phi)
    #Sliding window
    X = getSlidingWindowNoInterp(x, int(N/NPeriods))
    Y = plotSlidingWindowResults(x, X)
    plt.savefig("SphereTimeSeries.svg", bbox_inches='tight')
    # NOTE(review): Z (XYZ + observation value) is assembled but never
    # written out -- savePCOff below saves Y; confirm whether Z was intended.
    Z = np.zeros((Y.shape[0], 4))
    Z[:, 0:3] = Y[:, 0:3]
    Z[:, 3] = x[0:Z.shape[0]]
    savePCOff(Y, "Sphere.off")
savePCOff(Y, "Sphere.off")
def doKleinExample():
    """Example: plot a Klein-bottle observation time series (40 samples per
    period, slope 0.05)."""
    x = getKleinTimeSeries(40, 0.05)
    plt.plot(x)
    plt.show()
if __name__ == '__main__':
    # Demo: trace a trajectory with a golden-ratio-based (irrational) slope
    # on the octagon model of the 2-holed torus and plot the distance
    # observation alongside the trajectory.
    x0 = [0.1, 0.1]
    dx = 3*np.array([0.02*(1+np.sqrt(5))/2, 0.04])
    res = get2HoledTorusTraj(x0, dx, 1000)
    endpts, X = res['endpts'], res['X']
    # Color the trajectory points by time.
    c = plt.get_cmap('Spectral')
    C = c(np.array(np.round(np.linspace(0, 255, X.shape[0])), dtype=np.int32))
    C = C[:, 0:3]
    x0 = np.array([0.1, 0.1])
    # y holds the *squared* distances from each trajectory point to x0.
    y = get2HoledTorusDist(X, x0, endpts)
    plt.subplot(121)
    plt.plot(endpts[:, 0], endpts[:, 1])
    plt.scatter(x0[0], x0[1], 80, 'k')
    plt.scatter(X[:, 0], X[:, 1], 20, c=C)
    plt.axis('equal')
    plt.subplot(122)
    plt.plot(y)
    plt.show()
"chris.tralie@gmail.com"
] | chris.tralie@gmail.com |
669156f6db99c3a12296788632039ebd7151b2cc | 558e577e91fae58b6eb2fd263ef385854d6f71d9 | /bookphone/migrations/0009_auto_20180905_1529.py | 9bc7997384c42691a4afd18b32caffd73b26ffa5 | [] | no_license | NatalyAlvarez8655176/SISTEMA-DE-CONTACTOS | 526a426f0e924fbad2abcdbcc96e0e7abd213dcc | c6feb55ff51f691bae926bc1ab0b09ec74086d5d | refs/heads/master | 2020-03-29T03:46:54.091966 | 2018-09-19T19:13:42 | 2018-09-19T19:13:42 | 149,500,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 2.0.7 on 2018-09-05 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bookphone', '0008_auto_20180905_1459'),
]
operations = [
migrations.AlterField(
model_name='personal_secretaria',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='fotos', verbose_name='FOTO'),
),
]
| [
"naty7sex@gmail.com"
] | naty7sex@gmail.com |
884a88e01e635965ec82e70474a4a475242084e0 | 67ba46673606c3dcc824d3ba50eeee1e63c96810 | /visual.py | f7901850c9364404953732d1fc8f449e63a4d1a2 | [] | no_license | garrrikkotua/WikiParser_bot | 26650c719b195e957c670b1fa11f962ba0a61a9d | d95c2030e40ef7521521429591e381b3799354d7 | refs/heads/master | 2021-06-20T00:52:59.386961 | 2019-11-18T15:08:28 | 2019-11-18T15:08:28 | 133,263,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | from wordcloud import WordCloud
from matplotlib import pyplot as plt
from io import BytesIO
"""
This class performs operations
to make graphs, wordcloud for the bot
"""
class Visual:
    """Rendering helpers for the bot: a word cloud and several frequency /
    length / rank plots, each returned as an in-memory PNG (a BytesIO
    buffer seeked back to position 0).

    NOTE(review): every method below is declared @staticmethod yet still
    takes `self` as its first parameter, so callers must pass a dummy first
    argument (e.g. Visual.cloud(None, freq, cmap)).  Dropping `self` would
    change the call signature for existing callers, so it is only flagged
    here rather than changed.
    """

    @staticmethod
    def cloud(self, freq, colormap):
        # Build a word cloud from a {word: frequency} mapping and return it
        # as a PNG in a BytesIO buffer.
        wc = WordCloud(
            colormap=colormap).generate_from_frequencies(freq)
        image = wc.to_image()
        bio = BytesIO()
        image.save(bio, 'PNG')
        bio.seek(0)
        return bio

    @staticmethod
    def word_dist(self, c):
        # Zipf-style rank/frequency plot on log-log axes; `c` is a
        # Counter-like {word: count} mapping.
        # NOTE(review): `basex` was renamed to `base` in Matplotlib 3.3, so
        # this call may fail on newer Matplotlib; plt.legend() is also
        # called with no labeled artists here.
        freq = sorted(list(c.values()), reverse=True)
        ranks = list(range(1, len(freq) + 1))
        plt.title("Word Frequencies")
        plt.ylabel("Total Number of Occurrences")
        plt.xlabel("Rank of words")
        plt.loglog(ranks, freq, basex=10)
        plt.grid()
        plt.legend()
        image = BytesIO()
        plt.savefig(image, format='PNG')
        plt.close()
        image.seek(0)
        return image

    @staticmethod
    def word_lens(self, c):
        # Word lengths sorted descending, plotted against rank on log-log
        # axes (same basex/legend caveats as word_dist).
        lens = [len(i) for i in c.keys()]
        lens.sort(reverse=True)
        ranks = list(range(1, len(lens) + 1))
        plt.title("Word Lengths")
        plt.ylabel("Length")
        plt.xlabel("Rank of words")
        plt.loglog(ranks, lens, basex=10)
        plt.grid()
        plt.legend()
        image = BytesIO()
        plt.savefig(image, format='PNG')
        plt.close()
        image.seek(0)
        return image

    @staticmethod
    def word_rank(self, c, word):
        # Rank/frequency plot with one word highlighted by a red star and an
        # explanatory legend label.  Raises KeyError if `word` is not in `c`.
        freq = sorted(list(c.values()), reverse=True)
        ranks = list(range(1, len(freq) + 1))
        word_freq = c[word]
        # Rank of the first occurrence of this frequency value.
        word_rank = ranks[freq.index(word_freq)]
        plt.title("Info about {}".format(word))
        plt.ylabel("Total Number of Occurrences")
        plt.xlabel("Rank of words")
        plt.loglog(ranks, freq, basex=10)
        plt.plot(word_rank, word_freq, 'r*',
                 label='{}\'s frequency is {}\n its rank is {}'.format(
                     word, word_freq, word_rank
                 ))
        plt.grid()
        plt.legend()
        image = BytesIO()
        plt.savefig(image, format='PNG')
        plt.close()
        image.seek(0)
        return image

    @staticmethod
    def dist_by_sentence(self, d, word):
        # Plot occurrences per sentence position; `d` maps position -> count.
        plt.title('Distribution of {}\'s'
                  ' positions in sentences'.format(word))
        plt.ylabel("Number of Occurrences")
        plt.xlabel("Position in sentence")
        pos = sorted(d.keys())
        occur = [d[i] for i in pos]
        plt.plot(pos, occur)
        plt.plot(pos, occur, 'r*')
        plt.grid()
        plt.legend()
        image = BytesIO()
        plt.savefig(image, format='PNG')
        plt.close()
        image.seek(0)
        return image
"noreply@github.com"
] | garrrikkotua.noreply@github.com |
f16f23b6b8819d380f4d6acb22d3f3dd867dbb81 | 42a5b53a4c18b6d637be763e6eb3b43a1eca9ed8 | /hw3/mydecode.py | a11ba563a719ee18729b14b45f484caca9eb0a93 | [] | no_license | lelouchmatlab/sp2015.11-731 | 877ef0e5d5e069b3be2ce2b696a608ee5aabba37 | 73981e68ac128ae9b54864aea0dcb899b018d0d1 | refs/heads/master | 2020-12-13T21:46:17.087027 | 2015-04-23T10:27:08 | 2015-04-23T10:27:08 | 29,413,990 | 0 | 0 | null | 2015-01-18T03:06:33 | 2015-01-18T03:06:32 | null | UTF-8 | Python | false | false | 8,000 | py | #!/usr/bin/env python
import argparse
import sys
import models
import heapq
from collections import namedtuple
# NOTE: this is a Python 2 script (sys.maxint, print statements below).
# Decoder configuration: data paths, stack (histogram-pruning) size, number
# of sentences to decode, and verbosity.
parser = argparse.ArgumentParser(description='Simple phrase based decoder.')
parser.add_argument('-i', '--input', dest='input', default='data/input', help='File containing sentences to translate (default=data/input)')
parser.add_argument('-t', '--translation-model', dest='tm', default='data/tm', help='File containing translation model (default=data/tm)')
parser.add_argument('-s', '--stack-size', dest='s', default=1, type=int, help='Maximum stack size (default=1)')
parser.add_argument('-n', '--num_sentences', dest='num_sents', default=sys.maxint, type=int, help='Number of sentences to decode (default=no limit)')
parser.add_argument('-l', '--language-model', dest='lm', default='data/lm', help='File containing ARPA-format language model (default=data/lm)')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Verbose mode (default=off)')
opts = parser.parse_args()

tm = models.TM(opts.tm, sys.maxint)
lm = models.LM(opts.lm)
sys.stderr.write('Decoding %s...\n' % (opts.input,))
# Each sentence becomes a tuple of tokens (tuples are hashable, so source
# subspans can be looked up directly in the translation model).
input_sents = [tuple(line.strip().split()) for line in open(opts.input).readlines()[:opts.num_sents]]

# Search state for the decoder:
#   logprob          -- total score so far (TM + LM)
#   lm_state         -- language-model context, used for recombination
#   predecessor      -- previous hypothesis (None for the initial one)
#   predecessor_end  -- last source index covered before the current phrase
#   current_start/current_end -- source span of the current phrase
#   phrase           -- the phrase translated in this step
hypothesis = namedtuple('hypothesis', 'logprob, lm_state, predecessor,predecessor_end, current_start, current_end, phrase')
for f in input_sents:
# The following code implements a DP monotone decoding
# algorithm (one that doesn't permute the target phrases).
# Hence all hypotheses in stacks[i] represent translations of
# the first i words of the input sentence.
# HINT: Generalize this so that stacks[i] contains translations
# of any i words (remember to keep track of which words those
# are, and to estimate future costs)
initial_hypothesis = hypothesis(0.0, lm.begin(), None, -1, 0, 0, None)
#print "Source sentence: ",f
stacks = [{} for _ in f] + [{}]
stacks[0][lm.begin()] = initial_hypothesis
for i, stack in enumerate(stacks[:-1]):
# extend the top s hypotheses in the current stack
#print "\n",i, " --------------------------------\n\n"
#print "/////////////////////////////////////////////"
#for h in heapq.nlargest(opts.s, stack.itervalues(), key=lambda h:h.logprob):
#print h.predecessor_end, h.current_start, h.current_end,h.lm_state,h.logprob
#print "////////////////////////////////////////////"
#print h.predecessor_end, h.current_start, h.current_end,h.lm_state,h.logprob, '**********'
for h in heapq.nlargest(opts.s, stack.itervalues(), key=lambda h: h.logprob):
if h.predecessor_end + 1 == h.current_start:
for j in xrange(i+1,len(f)+1):
if f[i:j] in tm:
#print f[i:j], '######## MONO ########'
for phrase in tm[f[i:j]]:
#print phrase.logprob,
logprob = h.logprob + phrase.logprob
lm_state = h.lm_state
for word in phrase.english.split():
(lm_state, word_logprob) = lm.score(lm_state, word)
#print word, word_logprob
logprob += word_logprob
logprob += lm.end(lm_state) if j == len(f) else 0.0
#print logprob
new_hypothesis = hypothesis(logprob,lm_state, h, i-1, i, j-1, phrase)
#print j, new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start,new_hypothesis.current_end
if lm_state not in stacks[j] or stacks[j][lm_state].logprob < logprob: # second case is recombination
stacks[j][lm_state] = new_hypothesis
#print j, new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start, new_hypothesis.current_end
for k in xrange(j+1,len(f)+1):
if f[j:k] in tm:
#print f[j:k], '$$$$$$$$ SWAP $$$$$$$$'
for phrase in tm[f[j:k]]:
logprob = h.logprob + phrase.logprob
lm_state = h.lm_state
for word in phrase.english.split():
(lm_state, word_logprob) = lm.score(lm_state,word)
logprob += word_logprob
#logprob += lm.end(lm_state) if k == len(f) else 0.0
new_hypothesis = hypothesis(logprob,lm_state, h,i-1,j, k-1, phrase)
#print i+k-j,new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start,new_hypothesis.current_end
if lm_state not in stacks[i+k-j] or stacks[i+k-j][lm_state].logprob < logprob:
stacks[i+k-j][lm_state] = new_hypothesis
#print i+k-j, new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start,new_hypothesis.current_end
else:
jump_back = f[h.predecessor_end+1:h.current_start]
#print h.lm_state, h.predecessor_end+1,h.current_start,jump_back,'******** jump_back ********'
for phrase in tm[jump_back]:
logprob = h.logprob + phrase.logprob
lm_state = h.lm_state
for word in phrase.english.split():
(lm_state, word_logprob) = lm.score(lm_state, word)
logprob += word_logprob
logprob += lm.end(lm_state) if h.current_end == len(f)-1 else 0.0
new_hypothesis = hypothesis(logprob,lm_state, h, h.predecessor_end, h.predecessor_end+1,h.current_end, phrase)
#print h.current_end+1,new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start,new_hypothesis.current_end
if lm_state not in stacks[h.current_end+1] or stacks[h.current_end+1][lm_state].logprob < logprob:
stacks[h.current_end+1][lm_state] = new_hypothesis
#print h.current_end+1, new_hypothesis.logprob,new_hypothesis.lm_state,new_hypothesis.predecessor_end,new_hypothesis.current_start,new_hypothesis.current_end
# find best translation by looking at the best scoring hypothesis
# on the last stack
winner = max(stacks[-1].itervalues(), key=lambda h: h.logprob)
def extract_english_recursive(h):
return '' if h.predecessor is None else '%s%s ' % (extract_english_recursive(h.predecessor), h.phrase.english)
print extract_english_recursive(winner)
def extract_tm_logprob(h):
return 0.0 if h.predecessor is None else h.phrase.logprob + extract_tm_logprob(h.predecessor)
for ele in stacks[-1].itervalues():
tm_logprob = extract_tm_logprob(ele)
#print extract_english_recursive(ele),tm_logprob, ele.logprob - tm_logprob, ele.logprob
if opts.verbose:
#def extract_tm_logprob(h):
# return 0.0 if h.predecessor is None else h.phrase.logprob + extract_tm_logprob(h.predecessor)
tm_logprob = extract_tm_logprob(winner)
sys.stderr.write('LM = %f, TM = %f, Total = %f\n' %
(winner.logprob - tm_logprob, tm_logprob, winner.logprob))
#for ele in stacks[-1].itervalues():
# tm_logprob = extract_tm_logprob(ele)
# sys.stderr.write('LM = %f, TM = %f, Total = %f\n' %(ele.logprob -
# tm_logprob, tm_logprob, ele.logprob))
| [
"qhlegendzh@gmail.com"
] | qhlegendzh@gmail.com |
4283a1e957fe7ad7a2df4be6c3c809f47be77b75 | d37fb3b727a6d438764559e9965d4c640ecc35ef | /Project 3/roipoly.py | a385780e12e6ec63643fd97efe23d0a8c4f16df1 | [] | no_license | govindak-umd/ENPM673 | ab65a890881682a309b85bb1884663985be5ce82 | e39744a4fb062d00eac6cbe3d455e9788aa85bb3 | refs/heads/master | 2021-01-09T00:46:37.425744 | 2020-05-11T22:15:38 | 2020-05-11T22:15:38 | 242,192,147 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,688 | py | '''Draw polygon regions of interest (ROIs) in matplotlib images,
similar to Matlab's roipoly function.
See the file example.py for an application.
Created by Joerg Doepfert 2014 based on code posted by Daniel
Kornhauser.
'''
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.path as mplPath
class roipoly:
    """Interactively draw a polygonal region of interest (ROI) on a
    matplotlib image, similar to MATLAB's ``roipoly``.

    Left-click adds vertices; a double left-click (or a right-click) closes
    the polygon.  Vertices accumulate in ``allxpoints`` / ``allypoints`` and
    can be converted to a boolean mask with :meth:`getMask`.
    """

    def __init__(self, fig=None, ax=None, roicolor='b'):
        """Connect mouse handlers to *fig*/*ax* and show the figure.

        :param fig: figure to draw in; defaults to ``plt.gcf()``.
            (BUGFIX: the defaults were mutable ``[]`` sentinels — a classic
            Python anti-pattern; ``None`` is the idiomatic sentinel.)
        :param ax: axes to draw in; defaults to ``plt.gca()``.
        :param roicolor: matplotlib color for the ROI outline.
        """
        if fig is None:
            fig = plt.gcf()
        if ax is None:
            ax = plt.gca()

        self.previous_point = []
        self.allxpoints = []
        self.allypoints = []
        self.start_point = []
        self.end_point = []
        self.line = None  # the in-progress rubber-band line
        self.roicolor = roicolor
        self.fig = fig
        self.ax = ax

        self.__ID1 = self.fig.canvas.mpl_connect(
            'motion_notify_event', self.__motion_notify_callback)
        self.__ID2 = self.fig.canvas.mpl_connect(
            'button_press_event', self.__button_press_callback)

        # In a non-interactive session, show() blocks until the figure is
        # closed, which happens when the polygon is completed.
        if sys.flags.interactive:
            plt.show(block=False)
        else:
            plt.show()

    def getMask(self, currentImage):
        """Return ``(mask, poly_verts)`` for *currentImage*.

        ``mask`` is a boolean (ny, nx) array, True for pixels inside the
        drawn polygon; ``poly_verts`` is the list of (x, y) vertices used.
        """
        ny, nx = np.shape(currentImage)
        # Close the polygon: start at vertex 0, then walk all vertices in
        # reverse order back to vertex 0.
        poly_verts = [(self.allxpoints[0], self.allypoints[0])]
        for i in range(len(self.allxpoints)-1, -1, -1):
            poly_verts.append((self.allxpoints[i], self.allypoints[i]))

        # Test every pixel coordinate against the polygon path.
        # (<0,0> is at the top left of the grid in this system)
        x, y = np.meshgrid(np.arange(nx), np.arange(ny))
        x, y = x.flatten(), y.flatten()
        points = np.vstack((x, y)).T

        ROIpath = mplPath.Path(poly_verts)
        grid = ROIpath.contains_points(points).reshape((ny, nx))
        return grid, poly_verts

    def displayROI(self, **linekwargs):
        """Draw the closed ROI polygon on the current axes."""
        l = plt.Line2D(self.allxpoints + [self.allxpoints[0]],
                       self.allypoints + [self.allypoints[0]],
                       color=self.roicolor, **linekwargs)
        ax = plt.gca()
        ax.add_line(l)
        plt.draw()

    def displayMean(self, currentImage, **textkwargs):
        """Annotate the ROI with 'mean +- std' of the pixels inside it."""
        # BUGFIX: getMask was previously called twice, the second result
        # being unused; one call suffices.
        mask, _ = self.getMask(currentImage)
        meanval = np.mean(np.extract(mask, currentImage))
        stdval = np.std(np.extract(mask, currentImage))
        string = "%.3f +- %.3f" % (meanval, stdval)
        plt.text(self.allxpoints[0], self.allypoints[0],
                 string, color=self.roicolor,
                 bbox=dict(facecolor='w', alpha=0.6), **textkwargs)

    def get_vertices(self, currentImage):
        """Return just the polygon vertices (see :meth:`getMask`)."""
        _, vertices = self.getMask(currentImage)
        return vertices

    def __motion_notify_callback(self, event):
        """Rubber-band the pending segment while the mouse moves."""
        if event.inaxes:
            x, y = event.xdata, event.ydata
            if (event.button is None or event.button == 1) and self.line is not None:
                # Move the free end of the current segment with the cursor.
                self.line.set_data([self.previous_point[0], x],
                                   [self.previous_point[1], y])
                self.fig.canvas.draw()

    def __button_press_callback(self, event):
        """Add a vertex on left-click; close the ROI on double/right click."""
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        ax = event.inaxes
        if event.button == 1 and not event.dblclick:
            if self.line is None:
                # First click: start the polygon.
                self.line = plt.Line2D([x, x], [y, y],
                                       marker='o', color=self.roicolor)
                self.start_point = [x, y]
                self.previous_point = self.start_point
                self.allxpoints = [x]
                self.allypoints = [y]
                ax.add_line(self.line)
                self.fig.canvas.draw()
            else:
                # Subsequent clicks: append a segment.
                self.line = plt.Line2D([self.previous_point[0], x],
                                       [self.previous_point[1], y],
                                       marker='o', color=self.roicolor)
                self.previous_point = [x, y]
                self.allxpoints.append(x)
                self.allypoints.append(y)
                event.inaxes.add_line(self.line)
                self.fig.canvas.draw()
        elif ((event.button == 1 and event.dblclick) or
              (event.button == 3 and not event.dblclick)) and self.line is not None:
            # Close the loop: disconnect handlers and join back to the start.
            self.fig.canvas.mpl_disconnect(self.__ID1)
            self.fig.canvas.mpl_disconnect(self.__ID2)
            self.line.set_data([self.previous_point[0], self.start_point[0]],
                               [self.previous_point[1], self.start_point[1]])
            ax.add_line(self.line)
            self.fig.canvas.draw()
            self.line = None
            if not sys.flags.interactive:
                # The figure has to be closed so that blocking show() returns.
                plt.close(self.fig)
"noreply@github.com"
] | govindak-umd.noreply@github.com |
4392d1adcce1c93371a6728ecfff29e616948c28 | ec78f8ab63aec0753b9360715a4276a971b78a82 | /py/data_analysis/np/matrix.py | 2df729d2746c2ba43d2ec102e1595d3cf8c1e176 | [] | no_license | anderscui/ml | 4ace7e7b8cf248042d224bd54e81b691963b2e0e | 39238ba6d802df7e8bf1089ef3605cfc83b333ac | refs/heads/master | 2021-06-03T16:09:55.207202 | 2018-11-01T18:50:49 | 2018-11-01T18:50:49 | 23,989,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import numpy as np
arr = np.arange(15).reshape((3, 5))
print(arr.T)
print(arr.transpose())
print(np.dot(arr.T, arr))
# also swapaxes()
| [
"anderscui@gmail.com"
] | anderscui@gmail.com |
9eb67ffe2ea826e54b02aeea668cce466c27be07 | 172bbf2a6cc463bcc8d94d5782d0c05a6468d7aa | /tennis_court/scripts/__init__.py | 3128316586114ef2784109afd30f45b077d9c135 | [] | no_license | TilletJ/TennisBallCollector | 4638203f1377b1ab61e2683fc37bd23805248632 | a2539de240e067bea05408a1a2f06cbe45cab655 | refs/heads/master | 2023-03-05T12:34:19.709932 | 2021-02-19T13:32:46 | 2021-02-19T13:32:46 | 333,339,012 | 1 | 1 | null | 2021-02-17T09:00:26 | 2021-01-27T07:29:56 | Python | UTF-8 | Python | false | false | 75 | py | from .gazebo_ros_paths import GazeboRosPaths
__all__ = ['GazeboRosPaths']
| [
"remi.rigal@ensta-bretagne.org"
] | remi.rigal@ensta-bretagne.org |
18d455255174074fa25245eff853e14ddcd5ecfb | 565c0a7b3916aa9ac322fe444914e7bd3b589b9a | /tests.py | 13461a07720164100dd85feb1208384d7318bbc6 | [] | no_license | megh2493/zendesk-assignment | 66acb761e91638b115049c19f475f0fabc77d6d7 | bcb5598edcf5f4f7c8a2a924ec734b647d0ada48 | refs/heads/master | 2021-01-25T13:41:12.410705 | 2018-03-07T08:27:41 | 2018-03-07T08:27:41 | 123,604,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | import unittest
from unittest.mock import patch
from app import app
# unit tests with api end points mocked to simulate situations
class TestCase(unittest.TestCase):
    """Flask-app unit tests.  `requests.Session.get` (the Zendesk API call
    made inside app.models) is patched per-test, so no network access
    happens; each test fakes a different API response/status code."""

    def setUp(self):
        """Create a Flask test client in testing mode before each test."""
        app.testing = True
        self.app = app.test_client()

    def post(self, path, data=None):
        """POST helper: sends form data, follows redirects."""
        return self.app.post(path, data=data, follow_redirects=True)

    def get(self, path, data=None):
        """GET helper: `data` is sent as the query string, redirects followed."""
        return self.app.get(path, query_string=data, follow_redirects=True)

    # tests successful login and logout
    @patch('app.models.requests.Session.get')
    def test_login_logout(self, mock_get):
        # Fake a successful Zendesk "current user" response.
        json_data = {'user': {'id': 10, 'name': 'Alice', 'photo': {
            'thumbnails': {'content_url': 'https://dummyurl.com/dummycontent.png'}}}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        r = self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        self.assertEqual(r.status_code, 200)
        r = self.get('/logout')
        self.assertNotIn(b'id="user-dropdown"', r.data)

    # tests login with invalid credentials
    @patch('app.models.requests.Session.get')
    def test_invalid_credentials(self, mock_get):
        # API responds 200 but without a user id -> bad credentials.
        json_data = {'user': {'id': None}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        r = self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        self.assertEqual(r.status_code, 401)
        self.assertIn(b'*Invalid Credentials', r.data)

    # test login with invalid zendesk domain
    @patch('app.models.requests.Session.get')
    def test_invalid_domain(self, mock_get):
        mock_get.return_value.status_code = 404
        r = self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        self.assertEqual(r.status_code, 404)
        self.assertIn(b'*Invalid Domain', r.data)

    # tests for api being unavailable
    @patch('app.models.requests.Session.get')
    def test_api_unavailable(self, mock_get):
        mock_get.return_value.status_code = 503
        r = self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        self.assertGreaterEqual(r.status_code, 500)
        self.assertIn(b'Sorry, the API is unavailable.', r.data)

    # tests for successful search of ticket by ID
    @patch('app.models.requests.Session.get')
    def test_search_ticket(self, mock_get):
        # Log in first with a valid user response ...
        json_data = {'user': {'id': 10, 'name': 'Alice', 'photo': {
            'thumbnails': {'content_url': 'https://dummyurl.com/dummycontent.png'}}}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        # ... then swap the mock to return a single-ticket payload.
        json_data = {'users': [{'id': 10, 'name': 'Alice'}],
                     'ticket': {'id': 1, 'subject': 'Dummy Ticket', 'requester_id': 10,
                                'created_at': '1990-01-01T00:00:00Z', 'description': 'Dummy description',
                                'status': 'open'}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        r = self.get('/search', dict(id=1))
        self.assertEqual(r.status_code, 200)
        self.assertIn(b'Dummy Ticket', r.data)

    # tests for search of ticket for invalid ticket ID
    @patch('app.models.requests.Session.get')
    def test_search_ticket_not_exists(self, mock_get):
        json_data = {'user': {'id': 10, 'name': 'Alice', 'photo': {
            'thumbnails': {'content_url': 'https://dummyurl.com/dummycontent.png'}}}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        # Ticket lookup returns 404 after a successful login.
        mock_get.return_value.status_code = 404
        r = self.get('/search', dict(id=100))
        self.assertEqual(r.status_code, 404)
        self.assertIn(b'The Ticket ID specified was not found.', r.data)

    # tests for successful retrieval of all tickets in the account
    @patch('app.models.requests.Session.get')
    def test_all_tickets(self, mock_get):
        json_data = {'user': {'id': 10, 'name': 'Alice', 'photo': {
            'thumbnails': {'content_url': 'https://dummyurl.com/dummycontent.png'}}}}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        self.post('/login', dict(email='alice@abc.com', password='password', domain='abc'))
        # Two-ticket payload with no pagination (next_page=None).
        json_data = {'users': [{'id': 10, 'name': 'Alice'}],
                     'tickets': [{'id': 1, 'subject': 'Dummy Ticket 1', 'requester_id': 10,
                                  'created_at': '1990-01-01T00:00:00Z', 'description': 'Dummy description',
                                  'status': 'open'},
                                 {'id': 2, 'subject': 'Dummy Ticket 2', 'requester_id': 10,
                                  'created_at': '1990-01-01T00:00:01Z', 'description': 'Dummy Description',
                                  'status': 'pending'}],
                     'next_page': None}
        mock_get.return_value.json = lambda: json_data
        mock_get.return_value.status_code = 200
        r = self.get('/tickets')
        self.assertEqual(r.status_code, 200)
        self.assertIn(b'Dummy Ticket 1', r.data)
        self.assertIn(b'Dummy Ticket 2', r.data)
self.assertIn(b'Dummy Ticket 2', r.data)
if __name__ == '__main__':
    # Allow running this module directly: `python tests.py`
    unittest.main()
| [
"mmadhusu@usc.edu"
] | mmadhusu@usc.edu |
0f84e55319bb87c08589d43b0fb86bcb244e0049 | 85efec09cae6a8017d2b0a8f69883a8f6e2a988a | /index.py | 959c67dfa25d2a5e742d2f2891ea2dd9104df13f | [] | no_license | paulluka/Testgit | 04c5c44799c2eccdbc9f302b39c2474cf62e76f8 | 73c20ee43acbfd6c9611ba736d9ebc3e2f52dc0e | refs/heads/main | 2023-05-13T02:45:56.671222 | 2021-06-02T14:22:12 | 2021-06-02T14:22:12 | 373,193,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
'''
# This is the document title
This is some _markdown_.
'''
print("hello world !!!")
st.color_picker('Pick a color')
| [
"paul.luka@hotmail.fr"
] | paul.luka@hotmail.fr |
9e90f539311fbb9565c36deea3caf03511eeacda | 1c1c72937fff02bcec56f8d7877d76cdfb339b42 | /SemiPy/Documentation/ScientificPaper.py | 28cc06d0e6beca0be338e4cd16bf3bc8bce348f6 | [] | no_license | aravindhk/SemiPy | 0fd7372569dcd121e8ddb7536a3100327405cae5 | 729c69d4910a43627edaa609127ce673ce4b2821 | refs/heads/master | 2021-03-23T05:27:15.567487 | 2020-03-14T00:27:34 | 2020-03-14T00:27:34 | 247,426,030 | 0 | 0 | null | 2020-03-15T08:10:33 | 2020-03-15T08:10:33 | null | UTF-8 | Python | false | false | 1,060 | py | """
Module for Scientific Papers
"""
import warnings
list_of_publishers = ['Journal of Applied Physics']
class ScientificPaper(object):
    """Base record describing a published scientific paper.

    Subclasses override the class attributes below; the classmethod helpers
    render DOI links and short citations from them.
    """

    name = ''
    publisher = ''
    authors = []
    date = None
    doi = ''
    synopsis = ''

    def __init__(self, *args, **kwargs):
        # Warn (rather than fail) when the venue is not in the known list.
        if self.publisher not in list_of_publishers:
            warnings.warn(
                'The publisher {0} for paper {1} is not in the list of publishers.'.format(
                    self.publisher, self.name
                )
            )

    @classmethod
    def doi_link(cls):
        """Resolvable URL for the paper's DOI."""
        return 'https://www.doi.org/' + cls.doi

    @classmethod
    def reference_short(cls):
        """Abbreviated citation: first author, last author, venue, year."""
        first_author = cls.authors[0]
        last_author = cls.authors[-1]
        return '{0}, {1}. {2}; {3}'.format(first_author, last_author, cls.publisher, cls.date.year)

    @classmethod
    def doc_string_ref(cls):
        """reStructuredText hyperlink suitable for embedding in docstrings."""
        return '`{0} <{1}>`_'.format(cls.reference_short(), cls.doi_link())

    def __str__(self):
        return self.reference_short()
def citation_decorator(citation):
    """Decorator factory: replace the '<citation>' placeholder in the
    decorated object's docstring with the paper's reST reference link."""
    def _apply(obj):
        obj.__doc__ = obj.__doc__.replace('<citation>', citation.doc_string_ref())
        return obj
    return _apply
| [
"cmcclellan1993@gmail.com"
] | cmcclellan1993@gmail.com |
b2d0fc494e361edacb2c59246242262a3668aa8e | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc098/a.py | aeece8b578c7feb1792ec03018003d9edab1c62b | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | a, b = map(int, input().split())
print(max(a + b, a - b, a * b)) | [
"silphire@gmail.com"
] | silphire@gmail.com |
eb8ca0533b3576c10c7673e10928c10f18803fac | a1ea4bb213801a2f49e9b3d178f402f108d8a803 | /AI(BE)/bullseyes/bullseyes/settings.py | 0cd8bb3f9a00a7080b4c0388a0e00b09b89ddf1f | [
"MIT"
] | permissive | osamhack2021/AI_WEB_Bullseyes_Bullseyes | 537df4c35550917b963442538926c0b4bbef3cd6 | ec6aa6ce093e93b5666a0fd5ede28585c27a3590 | refs/heads/master | 2023-08-18T10:42:24.212460 | 2021-10-20T02:49:35 | 2021-10-20T02:49:35 | 407,145,903 | 4 | 2 | MIT | 2021-10-17T05:23:18 | 2021-09-16T11:54:11 | JavaScript | UTF-8 | Python | false | false | 4,101 | py | """
Django settings for bullseyes project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '_&*#@x)6_c7#1y4e65x)+!*75if7gyn4kz469&v2h6aw$om&m3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Wildcard host header acceptance — acceptable for development only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'bullseyes_server',
    'rest_framework.authtoken',
    'django_filters',
    'corsheaders',
]
# CorsMiddleware must come before any middleware that can generate responses.
MIDDLEWARE = [
    "corsheaders.middleware.CorsMiddleware",
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'corsheaders.middleware.CorsPostCsrfMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# NOTE(review): the regex r".*" matches every origin, i.e. CORS is effectively
# wide open — tighten before production.
CORS_ALLOWED_ORIGIN_REGEXES = [
    r".*",
]
#CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 100,
    'DEFAULT_FILTER_BACKENDS':['django_filters.rest_framework.DjangoFilterBackend'],
#    'DATE_INPUT_FORMATS': ['iso-8601', '%Y-%m-%dT%H:%M:%S.%fZ'],
}
ROOT_URLCONF = 'bullseyes.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bullseyes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Host 'db' suggests a docker-compose service; credentials are defaults.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'db',
        'PORT': 5432,
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"noreply@github.com"
] | osamhack2021.noreply@github.com |
583af2270b80a23b7861eecdbe9975aab0e9f02a | 61a35093e28e3d626370e06ddc09c6795ed8b1b9 | /plots_for_paper.py | 90a4a47a1a88b07e54b8df37cfbfa6ee99eab06f | [] | no_license | malelu92/ucnstudy_analysis | fa73227db4961b2a0d66e99f1beb3b7ddf542360 | e8c23c497dda31a8a1bdde4bd4c195befda6e52a | refs/heads/master | 2021-01-20T15:38:58.645733 | 2017-06-14T18:23:35 | 2017-06-14T18:23:35 | 82,828,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import defaultdict
from model.Base import Base
from model.Device import Device
from model.DnsReq import DnsReq
from model.HttpReq import HttpReq
from model.User import User
from sqlalchemy import create_engine, text, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from IPython.display import display
# Connection string for the local ucnstudy Postgres database.
DB='postgresql+psycopg2:///ucnstudy'
engine = create_engine(DB, echo=False, poolclass=NullPool)  # NullPool: no connection reuse
Base.metadata.bind = engine
Session = sessionmaker(bind=engine)
def get_dns_http():
    """Count DNS and HTTP requests per device and plot them as a histogram.

    For every Device row, runs two COUNT(*) queries and appends the results
    as [dns_count, http_count] keyed by the device login, then hands the
    data to plot_histogram() (defined below).
    """
    ses = Session()
    devices = ses.query(Device)
    sql_dns = """select count(*) from dnsreqs where devid = :d_id"""
    sql_http = """select count(*) from httpreqs2 where devid = :d_id;"""
    dns_http_dict = defaultdict(list)
    for device in devices:
        print ('user: ' + device.login)
        for row in ses.execute(text(sql_dns).bindparams(d_id = device.id)):
            dns_http_dict[device.login].append(row[0])
        for row in ses.execute(text(sql_http).bindparams(d_id = device.id)):
            dns_http_dict[device.login].append(row[0])
    plot_histogram(dns_http_dict, devices)
def plot_histogram(dns_http_dict, devices):
    """Render a grouped bar chart of DNS vs HTTP request counts per device.

    dns_http_dict maps device login -> [dns_count, http_count]; devices is
    the iterable of Device rows the counts were computed for.  Saves the
    figure to figs_paper/dns_http_histogram.png.
    """
    sns.set_style('whitegrid')
    x = []
    y_dns = []
    y_http = []
    cont = 0
    for device in devices:
        dns_http_elem = dns_http_dict[device.login]
        x.append(cont)
        y_dns.append(dns_http_elem[0])
        y_http.append(dns_http_elem[1])
        cont += 1
    fig = plt.figure(figsize=(15, 10), dpi=100)
    ax = fig.add_subplot(111)
    # Bug fix: N was hard-coded to 47 (one specific dataset); derive it from
    # the data so the bar positions always match y_dns/y_http lengths.
    N = len(x)
    ind = np.arange(N)  # the x locations for the groups
    width = 0.35  # the width of the bars
    dns = ax.bar(ind, y_dns, width, color='blue')
    http = ax.bar(ind+width, y_http, width, color='red')
    ax.set_title('Http and dns traces', fontsize=30)
    ax.set_ylabel('Number of traces', fontsize=20)
    # NOTE(review): tick positions are still tuned for ~47 devices, and some
    # matplotlib versions reject the fontsize kwarg here — confirm.
    ax.set_xticks([0, 15, 30, 46], fontsize = 15)
    ax.set_xlim(0, len(x))
    ax.set_xlabel('Devices', fontsize=20)
    ax.legend( (dns[0], http[0]), ('Dns requests', 'Http requests'), fontsize = 15 )
    plt.savefig('figs_paper/dns_http_histogram.png')
    plt.close()
# Script entry point: query the DB and produce the histogram figure.
if __name__ == '__main__':
    get_dns_http()
| [
"mleaoluc@ucn.inria.fr"
] | mleaoluc@ucn.inria.fr |
c5867b1bf92f95bbfef2af61fe39616f12e7e28c | 7174dccdc3995c06f11ba0a27917fbc0aa0dd689 | /2019-08/test_error.py | 7b323d115cefc94d7e203876476e2d6a5f6c1c96 | [] | no_license | tang2606/Python_Learn | ca6b27fc7a2ace6758209da82c4f637099bcf45d | 7178675b2462fe1e91d2a320d75a274be6262a43 | refs/heads/master | 2021-07-12T00:42:33.846562 | 2020-07-01T10:27:48 | 2020-07-01T10:27:48 | 162,948,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # -*- coding: utf-8 -*-
# @Time : 2019/8/25 18:32
# @Author : wadedong
# @Site :
# @File : test_error.py
# @Software: PyCharm
class wadeError(Exception):
    """Custom demo exception carrying a human-readable message."""

    def __init__(self, msg):
        # Chain through Exception so args/str(e)/pickling behave like a
        # standard exception instead of relying on BaseException.__new__.
        super().__init__(msg)
        self.err_msg = msg
# Demo of custom-exception handling: the assert fails first, so the
# AssertionError branch prints '123' and the wadeError raise is never reached.
a = 1
try:
    assert a == 2, '123'
    raise wadeError('这他妈就不合理')
except wadeError as e:
    print(e)
except AssertionError as e :
    print(e)
"418953115@qq.com"
] | 418953115@qq.com |
9f2ee51ec4611d5e3aadd1092d033ed0c2af04bf | cfac9b2c72762c9abdb1e9b05a33ab6ff0d58bd6 | /tasks/__init__.py | db92059da5d7f4b7c80eb9befeef1a8a52ede756 | [] | no_license | ar0ne/task_adventure | 48437aa7faf79066166643234f5fb54cd8c68756 | 3bc16e18a8dcdf4ce21e5e85684d4d8f7955046e | refs/heads/master | 2021-01-10T13:54:41.271356 | 2015-06-06T15:46:22 | 2015-06-06T15:46:22 | 36,503,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from task_controller import TaskController | [
"CAMTbITAKOU@yandex.ru"
] | CAMTbITAKOU@yandex.ru |
aa244cde75c122a711974787f5c86bc7b61311a9 | ca09e068739d473d29061961b4c3f7124a5e2ff1 | /lists/forms.py | ad375874a6f30e502ce7510cc5bf200678539248 | [] | no_license | edujtm/tdd-django-book | a0d12603261e099d8a01471cfe3dbc7975ea4306 | 65ecb6317f7483c5f6b7341f7f5323995d2bc24a | refs/heads/master | 2021-09-25T04:49:40.858429 | 2020-12-01T22:49:50 | 2020-12-01T22:49:50 | 252,591,425 | 0 | 0 | null | 2021-09-22T18:50:58 | 2020-04-03T00:01:18 | JavaScript | UTF-8 | Python | false | false | 1,412 | py | from django import forms
from django.core.exceptions import ValidationError
from lists.models import Item, List
# User-visible validation messages (also referenced by the test suite).
EMPTY_ITEM_ERROR = "You can't have an empty list item"
DUPLICATE_ITEM_ERROR = "You've already got this in your list"
class ItemForm(forms.models.ModelForm):
    """ModelForm for a single to-do Item's text, with Bootstrap styling."""
    class Meta:
        model = Item
        fields = ('text',)
        widgets = {
            'text': forms.fields.TextInput(attrs={
                'placeholder': 'Enter a to-do item',
                'class': 'form-control input-lg',
            })
        }
        error_messages = {
            'text': {'required': EMPTY_ITEM_ERROR}
        }
class NewListForm(ItemForm):
    """Form that creates a brand-new List from its first item's text."""

    def save(self, owner=None, commit=True):
        """Create the List (and first Item); commit=False defers to the
        normal ModelForm path without touching the database."""
        if not commit:
            return super().save(commit=False)
        first_text = self.cleaned_data['text']
        if owner is None or not owner.is_authenticated:
            return List.create_new(first_item_text=first_text)
        return List.create_new(first_item_text=first_text, owner=owner)
class ExistingListItemForm(ItemForm):
    """ItemForm bound to an existing List, enforcing per-list uniqueness."""
    def __init__(self, for_list, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Attach the target list before validation so the model-level
        # uniqueness check on (list, text) can run.
        self.instance.list = for_list
    def validate_unique(self):
        try:
            self.instance.validate_unique()
        except ValidationError as e:
            # Re-home the model-level uniqueness error onto the 'text'
            # field with a friendlier message.
            e.error_dict = {'text': [DUPLICATE_ITEM_ERROR]}
            self._update_errors(e)
| [
"eduzemacedo@hotmail.com"
] | eduzemacedo@hotmail.com |
391e199af1fa6be6a64f00ab28750cf11324aad2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02660/s614607751.py | 34c48997173930d6f69893b65345505f7e034156 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | N = int(input())
def solve(n):
    """Return the max number of operations dividing n by distinct prime
    powers: for each prime with exponent e, exponents 1, 2, 3, ... are
    spent greedily while they still fit, each yielding one operation.
    """
    ans = 0
    for i in range(2, n):
        if i * i > n:
            break  # remaining n is prime (or 1); handled below
        e = 0
        while n % i == 0:
            e += 1
            n //= i
        if e > 0:
            # Greedily spend the exponent on 1, 2, 3, ...; j < 10 suffices
            # because 1+2+...+9 = 45 exceeds any exponent of a 64-bit n.
            for j in range(1, 10):
                if e >= j:
                    e -= j
                    ans += 1
                else:
                    break
    if n > 1:
        ans += 1  # leftover prime factor contributes one operation
    return ans


if __name__ == '__main__':
    # N is read from stdin at the top of the script; the guard keeps the
    # original script behavior while leaving solve() importable/testable.
    print(solve(N))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
33e87fe2280b584f9fab54d1712d053ca31d4dcd | 4d8d542058f91bc2a1fede92a7ebc614b61aed22 | /environments/mujoco/rand_param_envs/gym/envs/debugging/__init__.py | ebdd5b7c14979e41c5700c705b69cfaddace0c6c | [
"MIT"
] | permissive | NagisaZj/varibad | 9ea940e168fea336457636e33f61400d48a18a27 | df7cda81588c62a2a3bee69e4173228701bd7000 | refs/heads/master | 2023-02-07T15:50:47.912644 | 2020-12-27T01:51:10 | 2020-12-27T01:51:10 | 270,474,411 | 0 | 0 | NOASSERTION | 2020-06-08T00:34:09 | 2020-06-08T00:34:08 | null | UTF-8 | Python | false | false | 552 | py | from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_deterministic_reward import \
OneRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_nondeterministic_reward import \
OneRoundNondeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_deterministic_reward import \
TwoRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_nondeterministic_reward import \
TwoRoundNondeterministicRewardEnv
| [
"lmzintgraf@gmail.com"
] | lmzintgraf@gmail.com |
42ccabfd89b1d00cd7df7184e283bdbb70020766 | 41777d4d219ea97b4632f4a8a31ab6c82a60772c | /kubernetes-stubs/config/incluster_config.pyi | 79a0d4354db777d9f57fa699d05bd128f72a24d2 | [
"Apache-2.0"
] | permissive | gordonbondon/kubernetes-typed | 501d9c998c266386dc7f66f522f71ac3ba624d89 | 82995b008daf551a4fe11660018d9c08c69f9e6e | refs/heads/master | 2023-07-18T12:06:04.208540 | 2021-09-05T19:50:05 | 2021-09-05T19:50:05 | 319,183,135 | 24 | 2 | Apache-2.0 | 2021-09-05T19:50:06 | 2020-12-07T02:34:12 | Python | UTF-8 | Python | false | false | 635 | pyi | # Code generated by `stubgen`. DO NOT EDIT.
from .config_exception import ConfigException as ConfigException
from kubernetes.client import Configuration as Configuration
from typing import Any
SERVICE_HOST_ENV_NAME: str
SERVICE_PORT_ENV_NAME: str
SERVICE_TOKEN_FILENAME: str
SERVICE_CERT_FILENAME: str
class InClusterConfigLoader:
def __init__(self, token_filename, cert_filename, try_refresh_token: bool = ..., environ=...) -> None: ...
def load_and_set(self, client_configuration: Any | None = ...) -> None: ...
def load_incluster_config(client_configuration: Any | None = ..., try_refresh_token: bool = ...) -> None: ...
| [
"noreply@github.com"
] | gordonbondon.noreply@github.com |
f2227868f0e190a17b34fc2e26915c2d6ecde170 | 59ea50070942960aef4781402e748bdc1908c243 | /venv/Lib/site-packages/graphene_django/converter.py | 11df638bd8731837367323856b7d6013ad8b75e6 | [] | no_license | Addy209/ADC_DASHBOARD_BACKEND | 7d972610464310928ea2112cea549c1c97bb388d | 993b4edb04e9d7f7fbc322e31f711374d1eac32b | refs/heads/main | 2023-07-14T00:18:31.300162 | 2021-08-24T05:49:55 | 2021-08-24T05:49:55 | 389,420,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,883 | py | from collections import OrderedDict
from functools import singledispatch
from django.db import models
from django.utils.encoding import force_str
from django.utils.functional import Promise
from django.utils.module_loading import import_string
from graphene import (
ID,
UUID,
Boolean,
Date,
DateTime,
Dynamic,
Enum,
Field,
Float,
Int,
List,
NonNull,
String,
Time,
Decimal,
)
from graphene.types.json import JSONString
from graphene.utils.str_converters import to_camel_case
from graphql import GraphQLError, assert_valid_name
from graphql.pyutils import register_description
from .compat import ArrayField, HStoreField, JSONField, PGJSONField, RangeField
from .fields import DjangoListField, DjangoConnectionField
from .settings import graphene_settings
from .utils.str_converters import to_const
def convert_choice_name(name):
    """Turn a raw choice value into a valid GraphQL enum member name.

    Falls back to an 'A_' prefix when the constant-cased value is not a
    valid GraphQL name (e.g. it starts with a digit).
    """
    name = to_const(force_str(name))
    try:
        assert_valid_name(name)
    except GraphQLError:
        name = "A_%s" % name
    return name
def get_choices(choices):
    """Yield (enum_name, value, description) triples for Django choices.

    Grouped (nested) choices are flattened recursively; duplicate enum
    names are de-duplicated by appending a running counter suffix.
    """
    converted_names = []
    if isinstance(choices, OrderedDict):
        choices = choices.items()
    for value, help_text in choices:
        if isinstance(help_text, (tuple, list)):
            # Grouped choices: recurse into the nested (value, label) pairs.
            for choice in get_choices(help_text):
                yield choice
        else:
            name = convert_choice_name(value)
            while name in converted_names:
                name += "_" + str(len(converted_names))
            converted_names.append(name)
            description = str(
                help_text
            )  # TODO: translatable description: https://github.com/graphql-python/graphql-core-next/issues/58
            yield name, value, description
def convert_choices_to_named_enum_with_descriptions(name, choices):
    """Create a Graphene Enum whose members expose per-choice descriptions."""
    choices = list(get_choices(choices))
    named_choices = [(c[0], c[1]) for c in choices]
    named_choices_descriptions = {c[0]: c[2] for c in choices}
    class EnumWithDescriptionsType(object):
        @property
        def description(self):
            # Resolved at access time via the enum member's name.
            return str(named_choices_descriptions[self.name])
    return Enum(name, list(named_choices), type=EnumWithDescriptionsType)
def generate_enum_name(django_model_meta, field):
    """Derive the enum type name for a choice field, honoring settings.

    Precedence: custom naming function > legacy v2 naming > default
    '<AppLabel><ObjectName><FieldName>Choices' scheme.
    """
    if graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CUSTOM_NAME:
        # Try and import custom function
        custom_func = import_string(
            graphene_settings.DJANGO_CHOICE_FIELD_ENUM_CUSTOM_NAME
        )
        name = custom_func(field)
    elif graphene_settings.DJANGO_CHOICE_FIELD_ENUM_V2_NAMING is True:
        name = to_camel_case("{}_{}".format(django_model_meta.object_name, field.name))
    else:
        name = "{app_label}{object_name}{field_name}Choices".format(
            app_label=to_camel_case(django_model_meta.app_label.title()),
            object_name=django_model_meta.object_name,
            field_name=to_camel_case(field.name.title()),
        )
    return name
def convert_choice_field_to_enum(field, name=None):
    """Build a Graphene Enum type for a Django field's choices.

    When no explicit name is given, one is derived from the field's model
    meta via generate_enum_name().
    """
    enum_name = name if name is not None else generate_enum_name(field.model._meta, field)
    return convert_choices_to_named_enum_with_descriptions(enum_name, field.choices)
def convert_django_field_with_choices(
    field, registry=None, convert_choices_to_enum=True
):
    """Convert a Django field to Graphene, mapping choices to an Enum.

    Results are memoized in the registry so each field converts only once.
    """
    if registry is not None:
        converted = registry.get_converted_field(field)
        if converted:
            return converted
    choices = getattr(field, "choices", None)
    if choices and convert_choices_to_enum:
        enum = convert_choice_field_to_enum(field)
        # Required only when the field is neither blank nor null.
        required = not (field.blank or field.null)
        converted = enum(
            description=get_django_field_description(field), required=required
        )
    else:
        converted = convert_django_field(field, registry)
    if registry is not None:
        registry.register_converted_field(field, converted)
    return converted
def get_django_field_description(field):
    """Normalise a field's help_text to a plain string, or None when unset."""
    help_text = field.help_text
    if not help_text:
        return None
    return str(help_text)
@singledispatch
def convert_django_field(field, registry=None):
    # Dispatch base: reached only for field types with no registered converter.
    raise Exception(
        "Don't know how to convert the Django field %s (%s)" % (field, field.__class__)
    )
# Scalar converters: each maps a family of Django fields to the matching
# Graphene scalar; 'required=not field.null' mirrors the NOT NULL constraint.
@convert_django_field.register(models.CharField)
@convert_django_field.register(models.TextField)
@convert_django_field.register(models.EmailField)
@convert_django_field.register(models.SlugField)
@convert_django_field.register(models.URLField)
@convert_django_field.register(models.GenericIPAddressField)
@convert_django_field.register(models.FileField)
@convert_django_field.register(models.FilePathField)
def convert_field_to_string(field, registry=None):
    return String(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.AutoField)
def convert_field_to_id(field, registry=None):
    return ID(description=get_django_field_description(field), required=not field.null)
@convert_django_field.register(models.UUIDField)
def convert_field_to_uuid(field, registry=None):
    return UUID(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.PositiveIntegerField)
@convert_django_field.register(models.PositiveSmallIntegerField)
@convert_django_field.register(models.SmallIntegerField)
@convert_django_field.register(models.BigIntegerField)
@convert_django_field.register(models.IntegerField)
def convert_field_to_int(field, registry=None):
    return Int(description=get_django_field_description(field), required=not field.null)
@convert_django_field.register(models.NullBooleanField)
@convert_django_field.register(models.BooleanField)
def convert_field_to_boolean(field, registry=None):
    return Boolean(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.DecimalField)
def convert_field_to_decimal(field, registry=None):
    # Consistency fix: use the shared helper (str-or-None) like every other
    # converter, instead of passing the raw (possibly lazy) help_text through.
    return Decimal(
        description=get_django_field_description(field), required=not field.null
    )
# Float/duration and date/time fields map onto Graphene's Float and the
# ISO-8601 DateTime/Date/Time scalars respectively.
@convert_django_field.register(models.FloatField)
@convert_django_field.register(models.DurationField)
def convert_field_to_float(field, registry=None):
    return Float(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.DateTimeField)
def convert_datetime_to_string(field, registry=None):
    return DateTime(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.DateField)
def convert_date_to_string(field, registry=None):
    return Date(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(models.TimeField)
def convert_time_to_string(field, registry=None):
    return Time(
        description=get_django_field_description(field), required=not field.null
    )
# Relation converters: the target Graphene type may not be registered yet at
# conversion time, so each returns a Dynamic whose inner callable resolves
# the type lazily from the registry (returning None if it never registers).
@convert_django_field.register(models.OneToOneRel)
def convert_onetoone_field_to_djangomodel(field, registry=None):
    model = field.related_model
    def dynamic_type():
        _type = registry.get_type_for_model(model)
        if not _type:
            return
        # We do this for a bug in Django 1.8, where null attr
        # is not available in the OneToOneRel instance
        null = getattr(field, "null", True)
        return Field(_type, required=not null)
    return Dynamic(dynamic_type)
@convert_django_field.register(models.ManyToManyField)
@convert_django_field.register(models.ManyToManyRel)
@convert_django_field.register(models.ManyToOneRel)
def convert_field_to_list_or_connection(field, registry=None):
    model = field.related_model
    def dynamic_type():
        _type = registry.get_type_for_model(model)
        if not _type:
            return
        # Reverse relations carry their help_text on the forward field.
        if isinstance(field, models.ManyToManyField):
            description = get_django_field_description(field)
        else:
            description = get_django_field_description(field.field)
        # If there is a connection, we should transform the field
        # into a DjangoConnectionField
        if _type._meta.connection:
            # Use a DjangoFilterConnectionField if there are
            # defined filter_fields or a filterset_class in the
            # DjangoObjectType Meta
            if _type._meta.filter_fields or _type._meta.filterset_class:
                from .filter.fields import DjangoFilterConnectionField
                return DjangoFilterConnectionField(
                    _type, required=True, description=description
                )
            return DjangoConnectionField(_type, required=True, description=description)
        return DjangoListField(
            _type,
            required=True,  # A Set is always returned, never None.
            description=description,
        )
    return Dynamic(dynamic_type)
@convert_django_field.register(models.OneToOneField)
@convert_django_field.register(models.ForeignKey)
def convert_field_to_djangomodel(field, registry=None):
    # Forward FK/O2O: lazy single-object Field on the related model's type.
    model = field.related_model
    def dynamic_type():
        _type = registry.get_type_for_model(model)
        if not _type:
            return
        return Field(
            _type,
            description=get_django_field_description(field),
            required=not field.null,
        )
    return Dynamic(dynamic_type)
@convert_django_field.register(ArrayField)
def convert_postgres_array_to_list(field, registry=None):
    # Convert the element type, then wrap in NonNull when the element's
    # converted form was marked required, before wrapping in List.
    inner_type = convert_django_field(field.base_field)
    if not isinstance(inner_type, (List, NonNull)):
        inner_type = (
            NonNull(type(inner_type))
            if inner_type.kwargs["required"]
            else type(inner_type)
        )
    return List(
        inner_type,
        description=get_django_field_description(field),
        required=not field.null,
    )
@convert_django_field.register(HStoreField)
@convert_django_field.register(PGJSONField)
@convert_django_field.register(JSONField)
def convert_pg_and_json_field_to_string(field, registry=None):
    # Key/value and JSON columns are exposed as a JSON-serialized string.
    return JSONString(
        description=get_django_field_description(field), required=not field.null
    )
@convert_django_field.register(RangeField)
def convert_postgres_range_to_string(field, registry=None):
    # Same element-wrapping logic as ArrayField: a range is exposed as a
    # two-element list of the bound type.
    inner_type = convert_django_field(field.base_field)
    if not isinstance(inner_type, (List, NonNull)):
        inner_type = (
            NonNull(type(inner_type))
            if inner_type.kwargs["required"]
            else type(inner_type)
        )
    return List(
        inner_type,
        description=get_django_field_description(field),
        required=not field.null,
    )
# Register Django lazy()-wrapped values as GraphQL description/help_text.
# This is needed for using lazy translations, see https://github.com/graphql-python/graphql-core-next/issues/58.
register_description(Promise)
| [
"46429259+Addy209@users.noreply.github.com"
] | 46429259+Addy209@users.noreply.github.com |
815e520340df997b75851fb782aa353ffe68aa1c | 85667d8714a5939925f49067711f10c2d0227ad3 | /moon_orbit.py | 3553bee9d7ade2b70ed9489fd68354afaecf7c09 | [] | no_license | damjan-netizen/dynamics | 1a837ee6a950019372f14864334159d4d436d456 | 84c383ff2c24e8ca50167918f59bc5b1a8b6476b | refs/heads/master | 2023-06-24T15:48:04.647752 | 2021-07-28T09:09:08 | 2021-07-28T09:09:08 | 329,882,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 25 18:32:29 2021
@author: Gael
"""
# PROBLEM 1
#
# Modify the orbit function below to model
# one revolution of the moon around the earth,
# assuming that the orbit is circular.
#
# Use the math.cos(angle) and math.sin(angle)
# functions in order to accomplish this.
import math
import numpy as np
import matplotlib.pyplot as plt
moon_distance = 384e6  # mean Earth-Moon distance in m

def orbit(num_steps=200, radius=moon_distance):
    """Return a (num_steps + 1, 2) array of x/y positions on one full
    circular revolution.

    Generalized: the step count and orbit radius are now parameters; the
    defaults reproduce the original behavior (200 steps around the Moon's
    orbit, with the first and last points coinciding).
    """
    # Vectorized with NumPy instead of a per-step Python loop.
    angles = 2 * math.pi * np.arange(num_steps + 1) / num_steps
    x = np.empty((num_steps + 1, 2))
    x[:, 0] = radius * np.cos(angles)
    x[:, 1] = radius * np.sin(angles)
    return x
x = orbit()  # module-level trajectory consumed by plot_orbit() below
def plot_orbit():
    """Plot the precomputed module-level trajectory with equal axis scaling."""
    plt.axis('equal')
    plt.plot(x[:,0],x[:,1])
    axes = plt.gca()
    axes.set_xlabel('Longitudinal position in m')
    axes.set_ylabel('Lateral position in m')  # fixed label typo ('postion')
plot_orbit() | [
"dmustur@protonmail.com"
] | dmustur@protonmail.com |
415a1c56bce1ec6d3e7dd0e48149d9e4901900da | 3ed8c142eff79649620c014c8b575e976c17cc1a | /jogos/venv/Scripts/easy_install-3.6-script.py | 2288dc654f2823f2bad85f02f27ce6eeb1a626e3 | [] | no_license | thideoli/python | f7c74075a5663f2f3c5f609881a96208b886d403 | 8d67477bcc97764f4c15b26438f90a631f68be39 | refs/heads/master | 2020-03-21T18:26:06.123838 | 2018-06-28T12:23:57 | 2018-06-28T12:23:57 | 138,891,340 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!"C:\Thiago Oliveira\Estudo\Python\jogos\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"thiago.oliveira@mult-e.com.br"
] | thiago.oliveira@mult-e.com.br |
84ece81cf5a24f822792544c6bd65c49a80e6349 | 3c7a68285286f3ae9bf7e0a2802c16dcd66bcdf2 | /zipfile_workaround.py | daedcd7754dd1e76775ece7adcf831d841ed74cb | [] | no_license | sanderjo/zipfile_encoding_info | 05de5c4641787cba4eb2417c803e172f886fade0 | 38be6e5c7ce408ed103eaf2070dbf03a50df36a2 | refs/heads/master | 2019-01-21T21:40:07.629259 | 2016-05-16T20:37:27 | 2016-05-16T20:37:27 | 58,886,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import zipfile
basedir = u"blabla"
inputfile = sys.argv[1]
zip=zipfile.ZipFile(inputfile)
files = zip.namelist()
print files
for _file in files:
print "file is", _file
try:
print "join is", os.path.join(basedir, _file)
'''
The above can give this error with Windows zipped files with special characters in them:
print "join is", os.path.join(basedir, _file)
File "/usr/lib/python2.7/posixpath.py", line 80, in join
path += '/' + b
'''
except:
# the encoding was a problem, so let's assume it was a zip with special CP437 characters in the filename
print "join is", os.path.join(basedir, _file.decode('cp437'))
| [
"sander.jonkers+github@gmail.com"
] | sander.jonkers+github@gmail.com |
62d1ae2c46d48bc3d3e16bd9b81e2084798ac493 | aa9e71fa553295a9f0bd54b957833af307cf9357 | /imageex/algo/featureextraction/color.py | 7d97e6ee9bb5bb31aba28b6cf887283e70aa8a58 | [] | no_license | tehm28/image-extraction-server | 87b3c5355113004fb3b06d0949207cfbbde5a35a | 327abb630414b243b454ce91942eb90e0fef38dc | refs/heads/master | 2021-01-19T12:07:35.072418 | 2013-10-25T13:40:46 | 2013-10-25T13:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | import numpy as np
import cv2
import numpy.ma as ma
import imageex.util.imtransform as imtransform
def extract(img, mask):
    """Extract color features (hue histogram + Hu moments) from an image.

    img  : RGB image array; converted to HSV via imtransform.rgb2hsv.
    mask : mask array selecting the object region (numpy.ma convention:
           nonzero entries are masked OUT) -- TODO confirm polarity.
    Returns a flat list of 17 features: 10 normalized hue-histogram bins
    followed by 7 Hu moments.
    """
    hsv = imtransform.rgb2hsv(img)
    h = np.array(hsv[:,:,0])
    # (the saturation channel was previously extracted here but never used)
    maskedH = ma.array(h, mask=mask)
    objH = maskedH.compressed()  # 1-D array of hue values inside the object
    # histogram -- fix: the 'normed' kwarg was removed in NumPy 1.24; with
    # equal-width bins density=True produces the same values.
    histH, bin_edges_h = np.histogram(objH, bins=10, density=True)
    # color moments (computed on the full hue plane, not just the mask)
    moments = cv2.moments(h, binary = 0)
    huMoments = cv2.HuMoments(moments)
    # 1D moments -- computed but currently excluded from the feature list
    # (see the commented line below); kept for parity with the original.
    # mean
    moment_H_1 = objH.mean()
    # standard deviation
    moment_H_2 = objH.std()
    # skewness; NOTE(review): objmean == 0 would divide by zero here
    objHtemp = (objH - moment_H_1) ** 3
    objmean = objHtemp.mean()
    sign = objmean/abs(objmean)
    moment_H_3 = sign * (abs(objmean) ** (1./3))
    #normalize
    moment_H_1 = moment_H_1/255
    moment_H_2 = moment_H_2/255
    moment_H_3 = moment_H_3/255
    features = []
    features += list(histH) # 10
    features += list(huMoments) # 7
    #features += moment_H_1, moment_H_2, moment_H_3 # 3
    return features # 17 # 20
| [
"dekervit@gmail.com"
] | dekervit@gmail.com |
3fd824655ecc1554eef05797fb63038bf981a58b | c7f80bf70d51a0880975958d026b984fe3cf55c4 | /ml/nn/intro/ch4.py | 4bcbff5552208f94343c327035c07ed310229a59 | [] | no_license | mdaughtrey/personal-projects | a1c5ad723da411bea6b4f55b5d9c26b4feeccfcf | 6d082523dad32a52ae6e8c3f5a7335a5a8bd8c06 | refs/heads/master | 2023-07-24T22:50:51.100441 | 2023-07-20T02:18:05 | 2023-07-20T02:18:05 | 4,237,208 | 3 | 0 | null | 2023-07-06T23:33:31 | 2012-05-05T23:04:03 | C | UTF-8 | Python | false | false | 2,480 | py | #!/usr/bin/env python3
#https://python-course.eu/machine-learning/data-representation-and-visualization-data.php
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from sklearn.datasets import load_iris
import pandas as pd
import pdb
iris = load_iris()  # scikit-learn's bundled iris dataset (3 classes)
def histogram():
    """Per-class histogram of one iris feature (column index 3)."""
    fig, ax = plt.subplots()
    x_index = 3  # feature column to histogram
    colors = ['blue', 'red', 'green']
    for label, color in zip(range(len(iris.target_names)), colors):
        ax.hist(iris.data[iris.target==label, x_index],
                label=iris.target_names[label],
                color=color)
    ax.set_xlabel(iris.feature_names[x_index])
    ax.legend(loc='upper right')
    plt.show()
def scatterplot():
    """Per-class scatter of feature 3 (x) against feature 0 (y)."""
    fig, ax = plt.subplots()
    x_index = 3
    y_index = 0
    colors = ['blue', 'red', 'green']
    for label, color in zip(range(len(iris.target_names)), colors):
        ax.scatter(iris.data[iris.target==label, x_index],
                   iris.data[iris.target==label, y_index],
                   label=iris.target_names[label],
                   c=color)
    ax.set_xlabel(iris.feature_names[x_index])
    ax.set_ylabel(iris.feature_names[y_index])
    ax.legend(loc='upper left')
    plt.show()
def features():
    """Full n-by-n grid of pairwise feature scatter plots, one per subplot."""
    n = len(iris.feature_names)
    fig, ax = plt.subplots(n, n, figsize=(16, 16))
    colors = ['blue', 'red', 'green']
    for x in range(n):
        for y in range(n):
            xname = iris.feature_names[x]
            yname = iris.feature_names[y]
            for color_ind in range(len(iris.target_names)):
                ax[x, y].scatter(iris.data[iris.target==color_ind, x],
                                 iris.data[iris.target==color_ind, y],
                                 label=iris.target_names[color_ind],
                                 c=colors[color_ind])
            ax[x, y].set_xlabel(xname)
            ax[x, y].set_ylabel(yname)
            ax[x, y].legend(loc='upper left')
    plt.show()
def panda():
    """Scatter-matrix of all feature pairs via pandas.

    NOTE(review): unlike the other helpers this never calls plt.show(),
    so the matrix may not appear when run as a script — confirm intent.
    """
    iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
    pd.plotting.scatter_matrix(iris_df, c=iris.target,
                               figsize=(8, 8))
def threeD():
    """3D scatter of the iris classes: sepal length, sepal width, and the
    sum of the last two feature columns as the third axis."""
    # X[class] holds three parallel coordinate lists for that class.
    X = []
    for iclass in range(3):
        X.append([[], [], []])
        for i in range(len(iris.data)):
            if iris.target[i] == iclass:
                X[iclass][0].append(iris.data[i][0])
                X[iclass][1].append(iris.data[i][1])
                X[iclass][2].append(sum(iris.data[i][2:]))  # sum of remaining feature columns
    colours = ("r", "g", "y")
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for iclass in range(3):
        ax.scatter(X[iclass][0], X[iclass][1], X[iclass][2], c=colours[iclass])
    plt.show()
# Demo driver: render each visualisation in turn when the module is executed.
histogram()
scatterplot()
features()
panda()
threeD()
| [
"mattd@pop-os.localdomain"
] | mattd@pop-os.localdomain |
e325857a904d0df6ed0627ab009f34fc96c74972 | 329cc042bb5829ab26a51d0b3a0bd310f05e0671 | /main.py | 60f84aa47980f4c797b50f2df6697f82314f4908 | [] | no_license | bkhoward/WLC-PSK-Change | 53afe64e767889ce967679d8aeb798745166fa72 | 1b92fd1d5afae4bc64bfc61bc4935c635cca12f0 | refs/heads/master | 2023-03-25T01:33:53.765751 | 2021-03-11T18:59:03 | 2021-03-11T18:59:03 | 345,891,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,263 | py | #!/usr/bin/env python
#
# Author: Brian Howard
# Date: 02Feb2021
# Version: 1.0
# Abstract: Create SSH connection to corporate Cisco WLCs and change the PSK used for ONEguest SSID
# - Find the WLAN_ID for the ONEguest SSID
# - Disable the WLAN_ID for ONEguest
# - Modify the existing PSK for the ONEguest SSID
# - Re-Enable the WLAN_ID for ONEguest
# - Save the config
# - Create logfiles for all SSH transactions
#
# Source Files:
# main.py - main python script
# credentials.py - file to store login credentials
# ios_wlan_id-finder.py - logs into an ios host and finds the WLAN_ID associated with the ONEguest SSID
# aireos_wlan_id-finder.py - logs into an aireos host and finds the WLAN_ID associated with the ONEguest SSID
# host_file.py - python list containing ip addresses of Cisco WLCs
# cmd_file.py - python list containing Cisco commands to run within the script
# Note: 'show run | include hostname' must be element 0
#
# Output Files:
# log.txt - log file containing all information from the SSH channel.
# This is an all inclusive file for all hosts connected to
# {hostname}.txt - each host connected to has an individual log file of commands only.
# this log is not as detailed as the log.txt file.
# ------------------------------------------------------------------------------------------------#
# ------------------------------------------------------------------------------------------------#
# Function definitions
# ------------------------------------------------------------------------------------------------#
import logging
import coloredlogs
from netmiko import ConnectHandler
from ntc_templates.parse import parse_output
from host_file import host
from credentials import credentials
from cmd_file import cmd
from pprint import pprint
##### Begin Logging section #####
# Basic logging allows Netmiko detailed logging of the ssh stream written to a file
logging.basicConfig(filename='log.txt', level=logging.DEBUG, datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger('Netmiko')
# Create a console handler object for the console Stream
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# Create a ColoredFormatter to use as formatter for the Console Handler
formatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
ch.setFormatter(formatter)
# assign console handler to logger (console output mirrors the file log)
logger.addHandler(ch)
# ##### End Logging section #####
if __name__ == '__main__':
    # Prompt once for the new pre-shared key, then push it to every controller.
    # Capture new PSK
    print()
    PSK = input("Please enter New PSK: ")
    print()
    for wlc in host:
        # One session log file per controller hostname.
        logfile = wlc['hostname'] + '.log'
        # Netmiko SSH connection
        ssh_connect = ConnectHandler(ip=wlc['ip'], username=credentials['username'], password=credentials['password'],
                                     device_type=wlc['device_type'], session_log=logfile)
        # Netmiko connection sends show command
        # use_textfsm automatically looks in the \venv\Lib\site-packages\ntc_templates\templates directory
        # for a template matching the device type + command name to convert the unstructured output of the show
        # command to structured data (list of dictionaries)
        # Note: ntc_templates and fsmtext are automatically installed with Netmiko
        show_wlan_raw = ssh_connect.send_command(cmd['get_wlan'])
        show_wlan = parse_output(platform=wlc['device_type'], command="show wlan sum", data=show_wlan_raw)
        for wlan in show_wlan:
            if wlan['ssid'] == 'ONEguest':
                print()
                print('*******************************************************************************')
                print()
                # Connect to host and Show current state of WLANs
                logger.critical('Connecting to ' + wlc['hostname'])
                logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                # Disable ONEguest WLAN and Show current state of WLANs
                # (AireOS uses one-line exec commands; IOS-XE needs config mode)
                logger.critical('Disabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_wlan_disable'] + ' ' + wlan['wlanid'])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                else:
                    # change to wlan profile sub menu for ONEguest SSID and shutdown SSID
                    # send_config_set automatically enters config mode, executes a list of commands,
                    # then exits config mode. Note if only one command is in the list it does not stay in config mode
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_shutdown']])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                # Change PSK
                logger.critical('Changing PSK on ' + wlc['hostname'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_psk'] + ' ' + PSK + ' ' + wlan['wlanid'])
                    logger.warning('New PSK is: ' + PSK)
                    print()
                else:
                    ssh_connect.enable()
                    # change to wlan profile sub menu for ONEguest SSID and change PSK
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_psk'] + ' ' + PSK])
                    logger.warning('New PSK is: ' + PSK)
                    print()
                # Enable ONEguest WLAN and Show current state of WLANs
                logger.critical('Enabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_wlan_enable'] + ' ' + wlan['wlanid'])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                else:
                    ssh_connect.enable()
                    # change to wlan profile sub menu for ONEguest SSID and enable it
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_no_shutdown']])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                # Save Config
                logger.critical('Saving Config on host: ' + wlc['hostname'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.save_config(cmd['aireos_save'], confirm_response='y')
                    print()
                    print('*******************************************************************************')
                    print()
                else:
                    ssh_connect.save_config()
                    print()
                    print('*******************************************************************************')
                    print()
        ssh_connect.disconnect()
| [
"bkhoward@live.com"
] | bkhoward@live.com |
102450eccb8fcad7b0362df30fb062da3054d97a | 779291cb83ec3cab36d8bb66ed46b3afd4907f95 | /migration/rnaseq-wf_cleanup.py | 7a26326b9cd9e55a223e034fa72a8b9827c72f1c | [] | no_license | Shengqian95/ncbi_remap | ac3258411fda8e9317f3cdf951cc909cc0f1946e | 3f2099058bce5d1670a672a69c13efd89d538cd1 | refs/heads/master | 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,369 | py | import os
import re
import shutil
from pathlib import Path
CLEAN_UP = os.environ.get("CLEAN_UP", False)
SRR_PATTERN = re.compile(r"^[SED]RR\d+$")
TARGETS = [
"../output/rnaseq-wf/aln_stats/{srx}.parquet",
"../output/rnaseq-wf/gene_counts/{srx}.parquet",
"../output/rnaseq-wf/junction_counts/{srx}.parquet",
"../output/rnaseq-wf/intergenic_counts/{srx}.parquet",
"../output/rnaseq-wf/segment_counts/{srx}.parquet",
"../output/rnaseq-wf/fusion_counts/{srx}.parquet",
"../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.first.bw",
"../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.second.bw",
"../output/rnaseq-wf/ucsc_bigwigs/{srx}.first.bw",
"../output/rnaseq-wf/ucsc_bigwigs/{srx}.second.bw",
"../output/rnaseq-wf/samples/{srx}/{srx}.bam",
"../output/rnaseq-wf/samples/{srx}/{srx}.bam.bai",
]
def main():
    """Sweep every sample folder: discard failed samples, and once all
    expected outputs exist, mark the sample done and delete intermediates."""
    for srx_path in Path("../output/rnaseq-wf/samples").iterdir():
        srx = srx_path.name  # assumes each entry is an SRX sample directory — TODO confirm
        remove_temp(srx)
        # Samples flagged bad at trimming (atropos) or alignment are dropped wholesale.
        if (
            Path(f"../output/rnaseq-wf/atropos_bad/{srx}").exists()
            or Path(f"../output/rnaseq-wf/alignment_bad/{srx}").exists()
        ):
            remove_srx_folder(srx)
            continue
        # Only clean up after every target artifact is confirmed present.
        if all(check_target(target.format(srx=srx)) for target in TARGETS):
            Path(f"../output/rnaseq-wf/done/{srx}").touch()
            remove_srr_folders(srx)
            remove_processed_files(srx)
            remove_misc_files(srx)
def remove_temp(srx: str):
    """Delete stale *.tmp files left inside the sample directory for *srx*."""
    sample_dir = Path(f"../output/rnaseq-wf/samples/{srx}")
    for stale in sample_dir.glob("*.tmp"):
        stale.unlink()
def remove_srx_folder(srx: str):
    """Delete the whole sample directory, or just report it in dry-run mode.

    NOTE(review): CLEAN_UP is read from the environment at import time; any
    non-empty string — including "False" — is truthy. Confirm that is intended.
    """
    pth = Path(f"../output/rnaseq-wf/samples/{srx}")
    if pth.exists() and CLEAN_UP:
        shutil.rmtree(pth)
    elif pth.exists():
        # Dry run: report what would have been removed.
        print("Removing SRX Folder:", pth, sep="\t")
def check_target(file_name: str):
    """Return True when *file_name* exists; otherwise report it and return False.

    Previously the missing-file path fell through and returned None implicitly;
    an explicit False makes the boolean contract clear for the all(...) caller.
    """
    if Path(file_name).exists():
        return True
    print("Missing Target:", file_name, sep="\t")
    return False
def remove_srr_folders(srx: str):
    """Remove per-run (SRR/ERR/DRR) sub-directories inside a sample folder.

    Directory names are matched against SRR_PATTERN; in dry-run mode
    (CLEAN_UP falsy) the folder is only reported, not deleted.
    """
    for pth in Path(f"../output/rnaseq-wf/samples/{srx}").iterdir():
        if pth.is_dir() and re.match(SRR_PATTERN, pth.name):
            if CLEAN_UP:
                shutil.rmtree(pth)
            else:
                print("Removing SRR Folder:", pth, sep="\t")
def remove_file(file_name: str):
    """Unlink *file_name* when cleanup is enabled; otherwise just report it.

    Missing files are ignored silently.
    """
    target = Path(file_name)
    if not target.exists():
        return
    if CLEAN_UP:
        target.unlink()
    else:
        print("Removing File:", target, sep="\t")
def remove_processed_files(srx: str):
    """Drop the intermediate BAM-derived stat/count files kept in the sample folder."""
    bam_suffixes = (
        "samtools.stats",
        "bamtools.stats",
        "counts",
        "counts.jcounts",
        "intergenic.counts",
        "exon_fusions.counts",
        "exon_segments.counts",
    )
    for suffix in bam_suffixes:
        remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}.bam.{suffix}")
def remove_misc_files(srx: str):
    """Drop miscellaneous per-sample logs, summaries and idxstats outputs."""
    suffixes = (
        ".trim.clean.tsv",
        ".hisat2.bam.tsv",
        ".bam.samtools.idxstats",
        ".counts",
        ".bam.counts.summary",
        ".bam.counts.log",
        ".bam.intergenic.counts.jcounts",
        ".bam.intergenic.counts.summary",
        ".bam.intergenic.counts.log",
        ".bam.exon_fusions.counts.jcounts",
        ".bam.exon_fusions.counts.summary",
        ".bam.exon_fusions.counts.log",
        ".bam.exon_segments.counts.jcounts",
        ".bam.exon_segments.counts.summary",
        ".bam.exon_segments.counts.log",
    )
    for suffix in suffixes:
        remove_file(f"../output/rnaseq-wf/samples/{srx}/{srx}{suffix}")
# Script entry point: run the cleanup sweep only when executed directly.
if __name__ == "__main__":
    main()
| [
"justin.m.fear@gmail.com"
] | justin.m.fear@gmail.com |
9b8e1d93b2f68bc34a67adea4f49a273d934c106 | a6b8263a42b96f317b818b3ba7e45bb8cb4458f6 | /shipsnake/__main__.py | e0db2bc14affd6ceb307dfe465e53a7f63042a48 | [
"MIT"
] | permissive | cole-wilson/test-ship | 5002add3b7f84162a064fcc4496f82a512fe4ff3 | 95f2ff585efd7564e60caad9a4806939923bc525 | refs/heads/master | 2023-01-30T01:52:55.111219 | 2020-12-07T05:18:12 | 2020-12-07T05:18:12 | 319,211,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | if __name__ != '__main__':
print("Please run shipsnake as script or command, not module.")
import toml
import os
import sys
import glob
import shutil
# mode = sys.argv[1]
# mode="upload"
version = ""
if len(sys.argv) == 1:
print("Provide a mode:\n\tshipsnake [wizard | build | dev | upload]")
sys.exit(0)
mode = sys.argv[1]
if len(sys.argv) < 3 and mode in ["upload",'build']:
print("Provide a version:\n\tshipsnake "+mode+" <version>")
sys.exit(0)
if len(sys.argv)>2:
version = sys.argv[2]
if mode=="dev" and version=="":
version = "dev_build"
if os.getenv('TEST_SNAKE')=="TRUE":
os.chdir('tester')
if mode == "wizard":
import wizard
wizard.main()
elif mode in ["build","dev","upload"]:
open('.'+os.sep+'.gitignore','w+').write('*'+os.sep+'__pychache__')
if not os.path.isfile('.'+os.sep+'shipsnake.toml'):
print('Please create a config file with `shipsnake wizard` first.')
sys.exit(0)
with open('.'+os.sep+'shipsnake.toml') as datafile:
data = toml.loads(datafile.read())
with open(prefix+os.sep+'setup.py.template') as datafile:
template = datafile.read()
setup = template.format(
**data,
version = version,
entry_points = [data["short_name"]+"="+data["short_name"]+".__main__"] if data["file"]!="" else [""]
)
open('setup.py','w+').write(setup)
source_dir = os.getcwd()
target_dir = data["short_name"]+os.sep
types = ('*.py',*data["data_files"])
file_names = []
for files in types:
file_names.extend(glob.glob(files))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
for file_name in file_names:
if file_name in ["setup.py","shipsnake.toml"]:
continue
shutil.move(os.path.join(source_dir, file_name), target_dir+os.sep+file_name)
open(target_dir+'__init__.py','w+').write('')
if data['file']!="" and not os.path.isfile(data['short_name']+os.sep+'__main__.py'):
try:
os.rename(data['short_name']+os.sep+data['file'],data['short_name']+os.sep+'__main__.py')
open(data['short_name']+os.sep+data['file'],'w+').write('# Please edit __main__.py for the main code. Thanks!\n(you can delete this file.)')
except FileNotFoundError:
pass
try:
shutil.rmtree('dist')
except:
pass
try:
os.mkdir('bin')
except:
pass
open("bin"+os.sep+data['short_name'],'w+').write(f"#!"+os.sep+"usr"+os.sep+"bin"+os.sep+f"env bash\npython3 -m {data['short_name']} $@ || echo 'Error. Please re-install shipsnake with:\\n`pip3 install shipsnake --upgrade`'")
if mode == "build" or mode=="upload":
os.system('python3 .'+os.sep+'setup.py sdist bdist_wheel')
try:
shutil.rmtree('build')
except:
pass
elif mode == "dev":
os.system('python3 .'+os.sep+'setup.py develop')
for x in glob.glob('*.egg-info'):
shutil.rmtree(x)
else:
print(f'Illegeal option `{mode}`')
sys.exit(0)
if mode=="upload":
print("Please make sure that you have a https://pypi.org/ account.")
try:
import twine
except:
input('Press enter to continue installing `twine`. Press ctrl+x to exit.')
os.system('python3 -m pip install --user --upgrade twine || python3 -m pip install --upgrade twine')
os.system('python3 -m twine upload dist'+os.sep+'*')
| [
"you@example.com"
] | you@example.com |
c55a8e2182d5d41cfdec8d784434d47f4767b32e | f1812503a3293037ac5a1f60012c4e803fe8f358 | /8-Kyuu/Convert_a_string_to_a_number.py | 86fa4dfd690ab0467f91abbda29c94892d617edb | [] | no_license | AgaK1/codewars_python | 197ef2a8d92ef058d0a4738756bdd79f9349c1bc | 36e6b9bfaa172b4b2434baa855d59f8839cf5f94 | refs/heads/master | 2023-04-04T13:46:09.262690 | 2021-04-24T10:50:15 | 2021-04-24T10:50:15 | 315,000,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # We need a function that can transform a string into a number. What ways of achieving this do you know?
def string_to_number(s):
    """Parse the decimal string *s* and return its integer value."""
    result = int(s)
    return result
"AgaK1@users.noreply.github.com"
] | AgaK1@users.noreply.github.com |
199390424fddb7b89b231c304224800f4fb4fb79 | 3a1c1373d8f1617485893dea46323c9d07dedc4d | /python_algo/프로그래머스/20210429_다리를 지나는 트럭.py | b1fd3a58b2856feedb64be0b86420485b12daf0c | [] | no_license | GaYoung87/Algorithm | 28b95c3eed054454a06a14d1a255ea1d57486b22 | 59abce98ff14879bc88b72ef2e562ce55dae5335 | refs/heads/master | 2023-08-31T07:52:31.487648 | 2023-08-29T15:09:04 | 2023-08-29T15:09:04 | 199,405,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | def solution(bridge_length, weight, truck_weights):
bridge_sum = 0
bridge_count = 0
time = 0
while truck_weights:
x = truck_weights[0]
if bridge_count <= bridge_length:
if bridge_sum + x <= weight:
bridge_count += 1
return time | [
"gyyoon4u@naver.com"
] | gyyoon4u@naver.com |
23c69e00ad46f9ee03768aacfc5cf00056b1c08b | 669cb5d54971a3167bb60cb2d5d301c7fd2e3ac5 | /keaw/studentclass.py | 1bb7c8fd45814eaf08ea1ae9365d4824a874ad01 | [
"MIT"
] | permissive | darklord40/keaw | bc98469a77d08c894a1e7ca1c65c65e27f3138fa | 1cf290e3cde45432f243eceec6fe49020294a8d3 | refs/heads/main | 2023-02-16T02:37:25.023478 | 2021-01-18T03:58:42 | 2021-01-18T03:58:42 | 330,548,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | class Student:
def __init__(self,name): #ฟังกชันพิเศษ เพื่อให้สามารถรันได้ self คำพิเศษ เพื่อใช้เเทนตัวมันเอง ใช้คำอืนเเทน self ได้
self.name=name
self.exp=0
self.lesson=0
self.AddEXP(10)
def Hello(self):
print('สวัสดี เราชื่อ {}'.format(self.name))
def Coding(self):
print('{} กำลังเขียนโปนเเกรม'.format(self.name))
self.exp +=5
self.lesson +=1
def ShowExp(self):
print('{} exp {}'.format(self.name, self.exp))
print('เรียนไป {} ครั้ง'.format(self.lesson))
def AddEXP(self,score):
self.lesson +=1
self.exp +=score
class SpecialStudent(Student):
    """A privileged student: mafia parentage grants a 100-EXP bonus and
    every score awarded is tripled (see the AddEXP override)."""

    def __init__(self, name, father):
        # Run the base initialiser; note its AddEXP(10) welcome bonus is
        # dispatched to *this* class's override, so it yields 30 EXP.
        super().__init__(name)
        self.father = father
        mafia = ['Gates', 'Tomas Edison']
        if father in mafia:
            self.exp += 100

    def AddEXP(self, score):
        """Count a lesson and grant triple the requested *score*."""
        self.lesson += 1
        self.exp += (score * 3)

    def AskEXP(self, score=10):
        """Beg the teacher for *score* EXP (then tripled by AddEXP).

        Bug fix: the requested amount was previously ignored and a
        hard-coded 10 was always granted regardless of *score*.
        """
        print('ครู ! ขอคะเเนนผมหน่อยสิสัก {} EXP'.format(score))
        self.AddEXP(score)
print(__name__)
if __name__=='__main__': # check whether we are running as the main module
    jack = Student('spadun') # create a Student instance (an object)
    print(jack.name)
    jack.Hello()
    student2 = Student('Steve')
    print(student2.name)
    student2.Hello()
    jack.AddEXP(10)
    print(jack.name, jack.exp)
    print(student2.name, student2.exp)
    # Five practice sessions for student2.
    for i in range(5):
        student2.Coding()
    jack.ShowExp()
    student2.ShowExp()
| [
"noreply@github.com"
] | darklord40.noreply@github.com |
e72616d937a919a0e6b04d52e831bc5ede17c1d7 | 75324a206e4369523ff807dae9d8c76e53897eb3 | /util/decorators.py | 2016522c194291d402be25702627f733df6eaafb | [] | no_license | 77dx/selenium_UIAutomation | 426300efdae1e7ebbf3875fc3090d0c70fef3095 | d6629014726952f2bb7a295abe536cbe1f8a536b | refs/heads/master | 2020-08-13T17:47:23.560125 | 2019-12-06T01:17:01 | 2019-12-06T01:17:01 | 215,010,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding:utf-8 -*-
class Decorators():
    """Namespace class collecting reusable function decorators."""

    def retry(times):
        """Build a decorator that re-invokes the wrapped callable up to
        *times* times, re-raising the last exception if every attempt fails."""
        def decorator(func):
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return func(*args, **kwargs)
                    except Exception as exc:
                        if (attempt + 1) < times:
                            # Announce the failed attempt (1-based) and retry.
                            print('第{}次执行失败,再次执行'.format(attempt + 1))
                        else:
                            raise exc
            return wrapper
        return decorator
"396321556@qq.com"
] | 396321556@qq.com |
b86b6586ed3da7fa83a3a45383ce369cf1633df0 | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch17_shortest_path/q39_2.py | 841ee2b4be8737f7003f3b85123dcedfc6c83627 | [] | no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | """
화성 탐사 - 3회차
"""
# 풀이 제한 시간 : 40분
# 2021/01/26 16:06 ~ 16:16
# 정답!
import heapq
import sys
# Rebind input to sys.stdin.readline for fast line reads on the judge.
input = sys.stdin.readline
# 4-neighbour offsets (up, down, left, right).
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
for tc in range(int(input())):
    n = int(input())
    graph = []
    for _ in range(n):
        graph.append(list(map(int, input().split())))
    INF = int(1e9)
    # distance[r][c]: cheapest accumulated terrain cost to reach cell (r, c).
    distance = [[INF] * n for _ in range(n)]
    distance[0][0] = graph[0][0]
    q = []
    heapq.heappush(q, (graph[0][0], 0, 0))
    # Dijkstra over grid cells; entering a cell costs its terrain value.
    while q:
        dist, r, c = heapq.heappop(q)
        if distance[r][c] < dist:
            continue # stale heap entry; a cheaper path was already found
        for i in range(4):
            nr = r + dr[i]
            nc = c + dc[i]
            if nr < 0 or nr >= n or nc < 0 or nc >= n:
                continue
            cost = dist + graph[nr][nc]
            if cost < distance[nr][nc]:
                distance[nr][nc] = cost
                heapq.heappush(q, (cost, nr, nc))
    print(distance[n-1][n-1])
"seok9376@gmail.com"
] | seok9376@gmail.com |
c45eb5f1c3777c3c501733e0224bf45deaa1c22e | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /waf-openapi-20190910/setup.py | ecd45e94dd0948313db72333337965dd00c423a0 | [
"Apache-2.0"
] | permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_waf-openapi20190910.
Created on 25/04/2021
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_waf_openapi20190910"
# NOTE(review): the `or` fallback is dead code — the first literal is always truthy.
NAME = "alibabacloud_waf-openapi20190910" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud waf-openapi (20190910) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
# Version is read from the package's own __version__ attribute at build time.
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
    "alibabacloud_tea_util>=0.3.3, <1.0.0",
    "alibabacloud_tea_openapi>=0.2.4, <1.0.0",
    "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
# Use README.md as the PyPI long description when present.
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
    with open("README.md", encoding='utf-8') as fp:
        LONG_DESCRIPTION = fp.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache License 2.0",
    url=URL,
    keywords=["alibabacloud","waf","openapi20190910"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=REQUIRES,
    python_requires=">=3.6",
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Software Development"
    )
)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
3f01fb995de680fe18acd66bcf3883a4dcc1cb2b | 74fb8d8e0bc821291773196e967820987ce7333f | /Script2-part 1-calculating-contour-plot-of-size(Figure-3a).py | 300af8340e723a5219fe042e049274f910b947a1 | [] | no_license | dahaj1897/Heritability | b567df4c38fa7c9ade647e993960a3a9027b55e8 | 9851eb9e3951a9505c8a743d38c2af8b0aa63696 | refs/heads/master | 2021-05-12T15:53:01.428552 | 2020-05-18T08:10:40 | 2020-05-18T08:10:40 | 116,995,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,853 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 06 10:26:07 2016
@author: wratcliff3
"""
import numpy as np
# NOTE(review): newer NumPy rejects threshold=np.nan (must be an int, e.g.
# sys.maxsize) — confirm the pinned NumPy version before running.
np.set_printoptions(threshold=np.nan)
import math
from scipy.stats import linregress
import matplotlib.pyplot as plt
import pandas as pd
# Simulation parameters: 10 genetic mean sizes, 32 cells per cluster.
cell_genos=np.linspace(1,2,10)
cluster_size=32
st_dev_cell=0.25
st_dev_group=0.25
timesteps_to_run=3 #this is equivalent to the number of generations the seed population of 10 clusters (each with a different genetic mean size) goes through.
replicates=1 #keep to 1 for now until an averaging function is implemented, otherwise it won't plot in 3d
granularity=32 #number of x and y values to calculate (higher=more fine grained). This will generate an n x n grid for plotting relative heritability (in the paper we use 32 x 32 = 1024 points).
group_sizes=[]
st_dev_cells=[]
st_dev_groups=[]
slope_cell=[]
slope_group_volume=[]
slope_group_radius=[]
slope_group_settling=[]
cell_stdev_plotting_list=[]
group_stdev_plotting_list=[]
relative_heritability=[]
"""So, here's how the population is initialized:
Col 1: Cell ID (#)
Col 2: Cell genetic size
Col 3: Cellular phenotype
Col 4: Cell parent
Col 5: Cluster ID (#)
Col 6: Cluster parent
Col 7: empty
Col 8: empty
Col 9: empty
Col 10: Cell parental phenotype"""
# Sweep the (cell-level, group-level) standard-deviation grid; each grid
# point runs an independent simulation and records one relative-heritability value.
for ii in np.linspace(0.0001,st_dev_cell,granularity):
    for a in np.linspace(0.0001,st_dev_group,granularity):
        for b in range(0,replicates):
            pop=np.zeros((len(cell_genos)*cluster_size,10))
            st_dev_cell=ii
            st_dev_group=a
            #initialize the population
            for i in range(0,np.shape(pop)[0]):
                pop[i][0]=i #Number each cell
                pop[i][1]=cell_genos[math.floor(i/cluster_size)]
                pop[i][2]=np.random.normal(pop[i][1],st_dev_cell)
                pop[i][4]=math.floor(i/cluster_size)
            timestep=1
            #run through a round of reproduction
            for j in range(0,timesteps_to_run):
                #print pop
                cell_max=int(max(pop[:,0]))+1
                cluster_max=int(max(pop[:,4]))+1
                cells_added=len(cell_genos)*cluster_size*2**(timestep-1)*2
                cells_added_first=len(cell_genos)*cluster_size*2**(timestep-1) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
                print("cell stdev", ii)
                print("group stdev", a)
                print("generation number", timestep)
                #first cluster produced
                cluster_variance_factor=np.random.normal(1,st_dev_group)
                for i in range(0,cells_added_first): #this loops through every additional cell for the first cluster offspring
                    if (cluster_max+math.floor(i/cluster_size))!=(cluster_max+math.floor((i-1)/cluster_size)): #if your cluster number does not equal the one lower down from you, then you get a new cluster-level variance factor.
                        cluster_variance_factor=np.random.normal(1,st_dev_group)
                        #print "cluster_variance_factor and cluster number is", cluster_variance_factor, (cluster_max+math.floor(i/cluster_size)) #I've confirmed that every cell in the group is getting the same environmental variance
                    pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added_first][1],np.random.normal(pop[(cell_max+i)-cells_added_first][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added_first][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added_first][4],0,0,0,pop[(cell_max+i)-cells_added_first][2]]])
                cell_max=int(max(pop[:,0]))+1
                cluster_max=int(max(pop[:,4]))+1
                #second cluster produced
                for i in range(0,cells_added_first):
                    pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added][1],np.random.normal(pop[(cell_max+i)-cells_added][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added][4],0,0,0,pop[(cell_max+i)-cells_added][2]]])
                timestep+=1
            #np.savetxt("full population.csv", pop, delimiter=",") #this will save a CSV of the whole run, useful for statistics or error checking
            cell_x=pop[:,9]
            cell_y=pop[:,2]
            cell_x=cell_x[len(cell_genos)*cluster_size:]
            cell_y=cell_y[len(cell_genos)*cluster_size:]
            #linear regression of parent on offspring phenotype
            #Pandas dataframe work isolating groups
            df=pd.DataFrame(pop)
            size_by_ID=df.groupby(4)[2].sum()
            parent_by_ID=df.groupby(4)[5].mean()
            joined=pd.concat([size_by_ID,parent_by_ID], axis=1, ignore_index=True)
            parent_size=[]
            for i in range(0,len(joined[0])):
                j=joined[1][i]
                parent_size.append(joined[0][j])
            offspring_size=joined[0]
            parent_size_cleaned=list(parent_size[len(cell_genos):])
            offspring_size_cleaned=list(offspring_size[len(cell_genos):])
            # Relative heritability = group-level regression slope / cell-level slope.
            tempratio=(linregress(parent_size_cleaned,offspring_size_cleaned)[0]) / (linregress(cell_x,cell_y)[0])
            cell_stdev_plotting_list.append(ii)
            group_stdev_plotting_list.append(a)
            relative_heritability.append(tempratio)
print("cell_stdev_plotting_list", cell_stdev_plotting_list)
print("group_stdev_plotting_list", group_stdev_plotting_list)
print("relative_heritability", relative_heritability)
np.savetxt("plot_me.csv", list(zip(cell_stdev_plotting_list, group_stdev_plotting_list, relative_heritability)), delimiter=",")
| [
"al.dahaj@gatech.edu"
] | al.dahaj@gatech.edu |
c43723518c38cd87ae4b9016e72799eb58bbd6dd | 607da6730ec5f53d89df43a558906b1ce5fd5aea | /tuples.py | 790ceb74f74e1f7195016be12f86606b458b5793 | [] | no_license | pavaniakhila01/python-lab | 9cbe62a3823b4a9b91cd4278c59c9b1b7ddfc36b | ac517b5911eeffea14768a6dfd1f7ca0826b1dbf | refs/heads/main | 2023-08-25T02:08:03.441812 | 2021-09-22T09:48:18 | 2021-09-22T09:48:18 | 400,031,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | myList = [(2, 5), (9, 4), (9, 0), (1, 4), (1, 5)]
print("Initially the list is : " + str(myList))
# joining tuples if similar initial element
joinList = []
for val in myList:
if joinList and joinList[-1][0] == val[0]:
joinList[-1].extend(val[1:])
else:
joinList.append([ele for ele in val])
joinList = list(map(tuple, joinList))
# Printing the joined List
print("Joined list : " + str(joinList))
| [
"noreply@github.com"
] | pavaniakhila01.noreply@github.com |
a9fe0490d5262902bf3cd11bf5e13fac1b292451 | 86f2fb885a152b2191fc7d6408981fe697d2e42d | /user_action_tracing/wizard/__init__.py | b834a94892082234db4b0a6d0d08e35c90614a6a | [] | no_license | falconsoft3d/user_action_tracing | efff9dacc3675bf58df15d37dd0aecdc4b9fc2b0 | 0f37fe2856247486f8b7a846c047cd847a99d3e6 | refs/heads/master | 2020-03-23T09:21:02.788518 | 2018-07-18T05:00:05 | 2018-07-18T05:00:05 | 141,382,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | # -*- coding: utf-8 -*-
import user_tracing_wizard
| [
"noreply@github.com"
] | falconsoft3d.noreply@github.com |
9df4c6c22dd5f1a6617e7d9785b27930315c61e8 | 09cd0ff99f571ba16da96051d1d6cca9cf224c88 | /qtim_tools/qtim_pipelines/deep_learning.py | aa54c7b4ffc8369f1d311625208c58f793526431 | [] | no_license | gdcohan/deepmedic-preprocess | eb3511a587b23f54dc1ee4c77653f98586b8bd68 | 7de3daf84fa7ff5c8b602ad587909f29374a397a | refs/heads/master | 2020-07-30T10:53:37.330951 | 2019-09-12T05:31:55 | 2019-09-12T05:31:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,014 | py | import numpy as np
import glob
import os
import yaml
from shutil import copy, rmtree
from ..qtim_preprocessing.motion_correction import motion_correction
from ..qtim_preprocessing.threshold import crop_with_mask
from ..qtim_preprocessing.resample import resample
from ..qtim_preprocessing.normalization import zero_mean_unit_variance
from ..qtim_preprocessing.bias_correction import bias_correction
from ..qtim_preprocessing.skull_strip import skull_strip
from ..qtim_utilities.file_util import nifti_splitext
def deep_learning_preprocess(study_name, base_directory, skull_strip_label='T2SPACE', skip_modalities=None):
    """ Preprocess a QTIM study's coregistered volumes for deep learning.

        Finds the study's ANALYSIS/COREGISTRATION folder (assumed to have
        been created earlier in the pipeline) and applies:
        1) N4 Bias Correction (selected modalities only)
        2) Skull-Stripping (one shared mask per visit, derived from
           skull_strip_label)
        3) Zero Mean / Unit Variance Normalization
        Isotropic resampling is currently disabled (see commented code).

        Parameters
        ----------
        study_name: str
            A QTIM study name code, usually three letters.
        base_directory: str
            The full path to the directory from which to search for studies.
            The study directory should be contained in this directory.
        skull_strip_label: str
            A text identifier (e.g. "FLAIR") for the volume that should be
            skull-stripped; all other volumes reuse the resulting mask.
        skip_modalities: str or list of str
            Any modalities that should not be processed. Defaults to none.
    """
    # None instead of a mutable default argument; behaves like the old [].
    if skip_modalities is None:
        skip_modalities = []
    # The nipype DataGrabber used here previously relied on `nio`, which was
    # never imported (NameError at runtime). Plain glob performs the same
    # recursive pattern match, and sorted() mirrors sort_filelist=True.
    results = sorted(glob.glob(os.path.join(base_directory, study_name, 'ANALYSIS', 'COREGISTRATION', study_name + '*', 'VISIT_*', '*.nii.gz')))
    # Only these modalities receive N4 bias correction below.
    bias_correct_vols = ['FLAIR', 'MPRAGE', 'T1', 'T2SPACE']
    # Remove modality codes that will not be processed for deep-learning.
    dl_volumes = [output for output in results
                  if not any(modality in output for modality in skip_modalities)]
    # We prefer the same skull-stripping in each case, so first derive the
    # masks from the volumes matching skull_strip_label.
    skull_strip_volumes = [volume for volume in dl_volumes if skull_strip_label in volume]
    for skull_strip_volume in skull_strip_volumes:
        # TODO: Make this easier to change in case directory structure changes.
        split_path = os.path.normpath(skull_strip_volume).split(os.sep)
        output_folder = os.path.join(base_directory, study_name, 'ANALYSIS', 'DEEPLEARNING', split_path[-3], split_path[-2])
        # Make the output directory if it does not exist.
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # If skull-stripping has not yet been performed, perform it.
        skull_strip_mask = os.path.join(output_folder, split_path[-3] + '-' + split_path[-2] + '-' + 'SKULL_STRIP_MASK.nii.gz')
        skull_strip_output = os.path.join(output_folder, nifti_splitext(os.path.basename(skull_strip_volume))[0] + '_ss' + nifti_splitext(skull_strip_volume)[-1])
        if not os.path.exists(skull_strip_mask):
            skull_strip(skull_strip_volume, skull_strip_output, skull_strip_mask)
    # Bias-correct, skull-strip and normalize every remaining volume.
    for dl_volume in dl_volumes:
        try:
            split_path = os.path.normpath(dl_volume).split(os.sep)
            output_folder = os.path.join(base_directory, study_name, 'ANALYSIS', 'DEEPLEARNING', split_path[-3], split_path[-2])
            deep_learning_output = os.path.join(output_folder, nifti_splitext(os.path.basename(dl_volume))[0] + '_DL' + nifti_splitext(dl_volume)[-1])
            # Already produced by a previous run.
            if os.path.exists(deep_learning_output):
                continue
            print(output_folder)
            # Make sure a mask was created in the previous step.
            skull_strip_mask = os.path.join(output_folder, split_path[-3] + '-' + split_path[-2] + '-' + 'SKULL_STRIP_MASK.nii.gz')
            if not os.path.exists(skull_strip_mask):
                print('No skull-stripping mask created, skipping volume ', dl_volume)
                continue
            # N4 bias correction for eligible modalities; anything else is
            # copied through unchanged so the rest of the chain is uniform.
            n4_bias_output = os.path.join(output_folder, nifti_splitext(os.path.basename(dl_volume))[0] + '_n4' + nifti_splitext(dl_volume)[-1])
            if any(bias_vol in n4_bias_output for bias_vol in bias_correct_vols):
                if not os.path.exists(n4_bias_output):
                    bias_correction(dl_volume, output_filename=n4_bias_output, mask_filename=skull_strip_mask)
            else:
                copy(dl_volume, n4_bias_output)
            # Use the shared mask to skull-strip, then drop the intermediate.
            skull_strip_output = os.path.join(output_folder, nifti_splitext(n4_bias_output)[0] + '_ss' + nifti_splitext(n4_bias_output)[-1])
            if not os.path.exists(skull_strip_output):
                crop_with_mask(n4_bias_output, skull_strip_mask, output_filename=skull_strip_output)
            os.remove(n4_bias_output)
            # Resampling intentionally disabled for now:
            # resample_output = os.path.join(output_folder, nifti_splitext(skull_strip_output)[0] + '_iso' + nifti_splitext(skull_strip_output)[-1])
            # if not os.path.exists(resample_output):
            #     resample(skull_strip_output, output_filename=resample_output)
            # os.remove(skull_strip_output)
            # Mean normalize and remove the previous intermediate file.
            if not os.path.exists(deep_learning_output):
                zero_mean_unit_variance(skull_strip_output, input_mask=skull_strip_mask, output_filename=deep_learning_output)
            os.remove(skull_strip_output)
        except Exception:
            # `except Exception` (not a bare except) so KeyboardInterrupt
            # still aborts; one bad volume should not kill the whole study.
            print('Error encountered on', os.path.basename(dl_volume))
    return
def _copy_visit_file(base_directory, output_folder, study, patient, visit, modality):
    """Copy the first file matching *modality* for one study visit into the
    experiment split folder; a visit without that modality is skipped."""
    try:
        target_file = glob.glob(os.path.join(base_directory, study, 'ANALYSIS', 'DEEPLEARNING', '_'.join([study, patient]), '_'.join(['VISIT', visit]), '*' + modality + '*'))[0]
        target_folder = os.path.join(output_folder, '_'.join([study, patient, visit]))
        if not os.path.exists(target_folder):
            os.mkdir(target_folder)
        copy(target_file, os.path.join(target_folder, os.path.basename(target_file)))
    except Exception:
        # Best effort, as before: a missing modality/visit is not fatal.
        pass


def deep_learning_experiment(base_directory, output_directory, config_file):
    """ Create a deep learning "experiment" from the available studies:
        sort patient visits into Train/Validation/Test folders with a fixed
        set of modalities.

        Parameters
        ----------
        base_directory: str
            The full path to the directory from which to search for studies.
            The study directory should be contained in this directory.
        output_directory: str
            Where the Train/Validation/Test folders are (re)created.
        config_file: str
            A configuration file that dictates how studies should be split
            and which modalities. May be None for the packaged default.
    """
    if config_file is None:
        config_file = os.path.abspath(os.path.join(os.path.realpath(__file__), '..', 'default_configs', 'deep_learning_experiment.yaml'))
    else:
        # Fixed typo: os.path.asbpath raised AttributeError here.
        config_file = os.path.abspath(config_file)
    with open(config_file, 'r') as stream:
        # safe_load: the experiment config is plain data, no need for
        # arbitrary Python object construction.
        config = yaml.safe_load(stream)
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    # Patients assigned to each split; visits and modalities are shared.
    split_patients = {'Train': ['01'], 'Validation': ['02'], 'Test': ['05']}
    visits = ['01', '02', '03', '04', '05']
    # Four MRI modalities followed by the PET SUV map, as before.
    modalities = ['MPRAGE_POST', 'FLAIR_r_T2', 'T2SPACE_DL', 'T1Pre', 'SUV']
    for train_test in ['Train', 'Test', 'Validation']:
        output_folder = os.path.join(os.path.abspath(output_directory), train_test)
        # Recreate the split folder from scratch. Guarding the rmtree fixes
        # a crash on the very first run, when the folder does not exist yet.
        if os.path.exists(output_folder):
            rmtree(output_folder)
        os.mkdir(output_folder)
        for study in ['FMS']:
            for patient in split_patients[train_test]:
                for visit in visits:
                    for modality in modalities:
                        _copy_visit_file(base_directory, output_folder, study, patient, visit, modality)
    # TODO: drive studies/patients/modalities from `config` (currently only
    # loaded, not consumed) instead of the hard-coded values above.
    return
def run_test():
    # Placeholder: no automated smoke test exists for this module yet.
    pass
# Running the module directly just invokes the (empty) test hook above.
if __name__ == '__main__':
run_test() | [
"robinwang08@gmail.com"
] | robinwang08@gmail.com |
4e130ef31f30b5217fd2709684e34f1155d12ac8 | ed6df1997c7ad0c2f68fce9417d172db8f5c343b | /venv/Scripts/easy_install-3.8-script.py | 06b717a8cb81939fff4861b2fdedb1827cd95725 | [] | no_license | fahadaliawan-nbs/cdkProject | 495a74e4dec878a241be8f02d8d5017c6624db27 | 9e8a43140b30e44d575c22b1707143e538f38e7a | refs/heads/master | 2020-09-23T04:41:41.958853 | 2019-12-02T16:12:45 | 2019-12-02T16:12:45 | 225,405,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!C:\Users\fahad.awan\PycharmProjects\test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
# Auto-generated console-script shim: setuptools rewrites this file on
# (re)installation, so do not edit it by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Exit with whatever the easy_install entry point returns.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
| [
"fahad.awan@northbaysolutions.net"
] | fahad.awan@northbaysolutions.net |
91c6529d48374f097f3ec3fa4d27be96c323c22e | c99b5f97ca44a7ff5584e1ca1d1d9f245199af21 | /face_id.py | 51a372be2619abf5b478c65b8141e03bbe91b056 | [] | no_license | aabdygaziev/facedetection | 7073b86e9efd9673502a841fca6b6944ea01d4c3 | 5b980ac8ccb04487815b743ecd1c1577c1e19d6c | refs/heads/master | 2022-11-15T12:37:42.910728 | 2020-06-28T22:56:54 | 2020-06-28T22:56:54 | 275,674,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import cv2
import pickle
# Haar cascades for face and eye detection (the XML files must sit next to
# this script).
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Capture from the default webcam at reduced resolution / frame rate.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 250)
cap.set(cv2.CAP_PROP_FPS, 25)
# LBPH face recognizer trained beforehand; trainer.yml holds the model.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer.yml')
# labels.pickle maps name -> numeric id; invert it to id -> name for display.
labels = {'person_name': 1}
with open('labels.pickle', 'rb') as f:
    og_labels = pickle.load(f)
    labels = {v: k for k, v in og_labels.items()}
while True:
    # capture frame-by-frame
    # NOTE(review): `ret` is never checked -- a failed grab leaves `frame`
    # as None and cvtColor would raise. Confirm the camera index is valid.
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=2,
        minNeighbors=5
    )
    for (x, y, w, h) in faces:
        # print(x, y, w, h)
        # roi stands for region of interest
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        # Predict the identity of the face region.
        # NOTE(review): for LBPH, `conf` is a distance (lower = closer
        # match); the 45..85 window below looks hand-tuned -- confirm.
        id_, conf = recognizer.predict(roi_gray)
        if 45 <= conf <= 85:
            print(id_)
            print(labels[id_])
            # Draw the recognized name at the face's top-left corner.
            font = cv2.FONT_HERSHEY_PLAIN
            name = labels[id_]
            color = (255, 255, 255)
            stroke = 2
            cv2.putText(
                frame, name,
                (x, y), font,
                1, color,
                stroke, cv2.LINE_AA)
        # Save the last detected face crop (overwritten every frame).
        img_item = 'my_image.png'
        cv2.imwrite(img_item, roi_gray)
        # Outline the detected face in blue (color tuples are BGR).
        color = (255, 0, 0)
        stroke = 2
        end_cord_x = x + w
        end_cord_y = y + h
        cv2.rectangle(
            frame,
            (x, y),
            (end_cord_x, end_cord_y),
            color,
            stroke
        )
        # Outline any eyes found inside the face region in green.
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color,
                          (ex, ey),
                          (ex+ew, ey+eh),
                          (0, 255, 0), 2)
    # Display the resulting frame; pressing 'q' quits.
    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"aabdygaziev@gmail.com"
] | aabdygaziev@gmail.com |
7c88f4fbc9319188f58c65097529d37b03a66cd0 | b3f361e363952584491e899ded6f06aba844d468 | /scripts/portal/fantasticPark.py | e9668cb3434a56e6f5410dfc9d84efd9b8216d9f | [] | no_license | AsuraRevenge/Swordie | b18dd6ad3563f94b0135c797ecb20f44233f5ac3 | c6af25557c9e5fd06a04592371a7d9fa9bdb917b | refs/heads/master | 2020-03-23T04:01:47.777093 | 2018-07-15T14:46:07 | 2018-07-15T14:46:07 | 141,061,559 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | def init():
fieldID = sm.getFieldID()
warp = True
if fieldID == 220000000:
map = 223000000
portal = 0
else:
sm.chat("(Portal) This script (fantasticPark.py) is not coded for this map. (ID: " + str(fieldID) + ")")
map = sm.getChr().getField().getReturnMap()
portal = 0
if warp:
sm.warp(map, portal)
sm.dispose() | [
"camotoad@mail.com"
] | camotoad@mail.com |
51d3e25a3f9393e840e9d469bede9f2e2c61a714 | ed6a0df62021113c56ef034f43e6a0fb1be1b58e | /venv/Scripts/easy_install-3.7-script.py | 6edf0e11b9b9c8436f598459d7a690eb7593e259 | [] | no_license | FernandaVaroli/chatbot-Python | cbe1d04499d2c7e45018611e2b6eb9a9f63617c6 | d6d5c3c24a7c48df0c46c43345850566b8aa992e | refs/heads/master | 2022-11-04T17:16:08.317443 | 2018-11-02T19:05:30 | 2018-11-02T19:05:30 | 155,909,849 | 0 | 1 | null | 2022-10-20T04:05:15 | 2018-11-02T18:59:42 | Python | UTF-8 | Python | false | false | 458 | py | #!C:\Users\ferna\PycharmProjects\Chatbot\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim: setuptools rewrites this file on
# (re)installation, so do not edit it by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Exit with whatever the easy_install entry point returns.
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"fernandavaroli@gmail.com"
] | fernandavaroli@gmail.com |
0444a1b717ec3efb266650f6c6d1c1864efa0795 | 1c317b0778540a181869765ec0a17a19cdde5244 | /Win7TrashClean.py | e1af080105ea89c5f3377096ece2f88efe398631 | [] | no_license | myselfyyl/Win7TrashClean | 253fcfe7b181ad66e91ecc7f5775ad2e9eba3e58 | 43237597f90c2b78224fc803d77091b079d71962 | refs/heads/master | 2021-01-20T09:01:39.232905 | 2014-12-25T07:57:55 | 2014-12-25T07:57:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #!/bin/python
import os, exceptions, logging
def removeFile(f):
    """Best-effort recursive delete of the file or directory tree *f*.

    Successfully unlinked files are recorded via logging; anything that
    cannot be removed (locked or system files) is silently skipped.
    """
    print(f)  # single-argument print() is valid on both Python 2 and 3
    try:
        if os.path.isdir(f):
            for entry in os.listdir(f):
                removeFile(f + os.sep + entry)
            # os.rmdir removes only this (now empty) directory. The previous
            # os.removedirs also pruned empty *parent* directories, so it
            # could delete the temp folder itself once its last child went.
            os.rmdir(f)
        else:
            os.unlink(f)
            logging.info(f)
    except EnvironmentError:
        # EnvironmentError (OSError/IOError) exists on Python 2 and 3,
        # unlike the Python-2-only `exceptions.StandardError`, and is the
        # narrow class file operations actually raise.
        pass
def removeFiles(files):
    """Delete every listed path, one at a time, via removeFile."""
    for path in files:
        removeFile(path)
# Script entry point: purge the contents of the Windows temp directory,
# logging every deleted file to CleanLog.txt in the current directory.
if '__main__' == __name__ :
    logname = os.path.join( os.getcwd(), "CleanLog.txt" )
    # Start each run with a fresh log file.
    if os.path.exists( logname ):
        os.unlink( logname )
    logging.basicConfig( filename = logname, level = logging.INFO )
    # Directories whose *contents* (not the directories themselves) go.
    dirs = ('C:\\windows\\temp', )
    for d in dirs:
        if os.path.exists( d ):
            files = os.listdir( d )
            # Python 2: map() returns a list, so deletion happens eagerly.
            # (Under Python 3 this lazy map object would delete nothing.)
            removeFiles( map( lambda f: d + os.sep + f , files ) )
    # raw_input (Python 2 only) keeps the console window open when done.
    raw_input('press any key to continue .')
| [
"myselfyyl@outlook.com"
] | myselfyyl@outlook.com |
113fbb344660632523fa0995cab1d0cf32e4824a | d1886c7ebf431149fc37ce12f01161793459209d | /tester/settings.py | 0d58b72bd71315f7db9188a27f6e09915fb38d38 | [] | no_license | mostley/rubberpencilgame | 303d91589f3e9b0e8086d218a71ce55c87fb0e57 | af04479c5fd1bb5eed2e65bb349d91c0fcb0dca1 | refs/heads/master | 2020-06-05T04:39:35.377715 | 2009-07-26T09:12:33 | 2009-07-26T09:12:33 | 32,443,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from pyglet.window import key
# Default game settings; entries parsed from rpg.conf (below) override these.
Settings = {
    "MapPath": "maps/",
    "PenetrationForce": 2.0,
    # Key bindings use pyglet.window.key constants.
    "Keyboard_Camera_Center": key.C,
    "Keyboard_Player_Attack": key.X,
    "Keyboard_Player_Left": key.LEFT,
    "Keyboard_Player_Right": key.RIGHT,
    "Keyboard_Player_Up": key.UP,
    "Keyboard_Player_Down": key.DOWN,
    }
try:
    # Each rpg.conf line is "Name = python_expression"; the with-statement
    # closes the handle (the old code leaked it and shadowed builtin `file`).
    with open("rpg.conf", 'r') as config_handle:
        for line in config_handle:
            data = line.split("=")
            # SECURITY: eval() executes arbitrary code from rpg.conf. Kept
            # for compatibility with existing configs; only load trusted
            # files here.
            Settings[data[0].strip()] = eval(data[1].strip())
except Exception:
    # Missing or malformed config: keep the defaults defined above.
    # (except Exception, not a bare except, so Ctrl-C still works;
    # print(...) with one argument is valid on Python 2 and 3.)
    print("Error parsing ConfigFile")
| [
"sven.hecht@35ccbd06-6266-11de-9e79-e94433246604"
] | sven.hecht@35ccbd06-6266-11de-9e79-e94433246604 |
c642bda474582d7a38bff7dcb5c49dbe6fc93d0c | 0b9470f9a839d87b21fd575421b5223afb4573c6 | /07day/01-捕获输入异常.py | 676dc64ff5bbe99306b430ca519aeb1cedf9871d | [] | no_license | ZiHaoYa/1808 | 351356b4fa920a5075899c8abdce24a61502097f | 891582547fef4c6fd4fd4132da033e48e069901f | refs/heads/master | 2020-03-30T06:20:46.898840 | 2018-09-29T08:56:53 | 2018-09-29T08:56:53 | 147,603,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | try:
    # First approach: convert directly and let a non-numeric entry raise
    # ValueError, which is reported instead of crashing the script.
    number = int(input("请输入一个"))
except Exception as ret:
    print("输入有误")
    print(ret)
# Second approach: validate with str.isdigit() before converting.
number = input("请输入一个")
if number.isdigit():
    print("出数字")
    number = int(number)
else:
    print("输入有误")
| [
"qingyuan@geekniu.com"
] | qingyuan@geekniu.com |
615ba348fbfdb06117a47ae2ec12a7560c51a640 | 825ea684bfb30d423a8af9c9faab3189a1fee350 | /bit/image/noise.py | 3bf043cad2e38133263ce0ab2754a8d893b53242 | [] | no_license | chbrandt/bit | af946cfb3c1aa7317b5d2b1a188c32cd83591b22 | 57d18b369d3cd3affe6e5fe03d48ff0e07a78d66 | refs/heads/master | 2021-01-10T18:53:49.618204 | 2018-02-03T23:30:29 | 2018-02-03T23:30:29 | 4,549,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | # -*- coding:utf-8 -*-
"""
Methods to simulate(add) different noise models on images
"""
import numpy as np
def gaussian(img, stdev=3.):
    """
    Return *img* plus zero-mean additive Gaussian noise (sigma = stdev).
    """
    perturbation = np.random.normal(0., stdev, img.shape)
    # 1.*img promotes integer inputs to float before the addition.
    return (1. * img) + perturbation
def poisson(img):
    """
    Return a Poisson-noised copy: each output pixel ~ Poisson(pixel value).
    """
    return np.random.poisson(img).astype(float)
def salt_n_pepper(img, perc=10):
    """
    Return a copy of *img* with salt-and-pepper noise on perc% of pixels:
    half of the selected positions become 0 (pepper), half 255 (salt).
    The input array is left untouched.
    """
    # Flatten into an independent copy. The original `img.ravel().copy`
    # was missing the call parentheses, binding the method itself and
    # crashing on len() below.
    flat = img.ravel().copy()
    # The total number of pixels
    total = flat.size
    # The number of pixels we need to modify
    nmod = int(total * (perc / 100.))
    # Random positions to modify (np.random.random_integers was removed
    # from NumPy; randint's exclusive upper bound gives the same range).
    indices = np.random.randint(0, total, size=nmod)
    # Integer half -- the old nmod/2 produced a float slice index, which
    # is a TypeError on Python 3.
    half = nmod // 2
    # Pepper the first half of the positions, salt the second half.
    flat[indices[:half]] = 0
    flat[indices[half:]] = 255
    return flat.reshape(img.shape)
| [
"carloshenriquebrandt@gmail.com"
] | carloshenriquebrandt@gmail.com |
c8d27c67f6cce1958bbca51c37ac39873692a82c | 9d443196fdb242d02275ed64574eb0f4e2bb3789 | /monty_hall/monty_hall.py | 3b8c5fbb9b86bdee4edf3f116d33e8d085a7e266 | [] | no_license | bkmy43/python_climbers | 31f67199486573746f8664c140eb338be678ecba | 436cc635917ea85f6adb083cb9c84ffe4101bce9 | refs/heads/master | 2021-05-15T02:36:33.663812 | 2017-01-19T16:26:27 | 2017-01-19T16:26:27 | 74,461,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | import random
__author__ = "Ilya Vladimirskiy"
__email__ = "bkmy43@googlemail.com"
# Number of simulated games. NOTE(review): name has a typo -- should be
# NUMBER_OF_TRIES -- kept because main() references this exact name.
NUMBER_OR_TRIES = 10000
# Classic Monty Hall setup: one prize among three boxes.
NUMBER_OF_BOXES = 3
# def new_try():
#     return True
def main():
    """Simulate the Monty Hall game and compare switch vs. stay strategies."""
    # How many rounds each strategy played, and how many it won.
    total_changed_count = total_unchanged_count = 0
    changed_success_count = unchanged_success_count = 0
    # One True (the prize) among NUMBER_OF_BOXES boxes.
    boxes = [True] + [False] * (NUMBER_OF_BOXES - 1)
    for i in range(NUMBER_OR_TRIES):
        random.shuffle(boxes)
        # Pick this round's strategy at random.
        strategy = random.choice(('CHANGED ', 'UNCHANGED'))
        guess1 = random.choice(range(NUMBER_OF_BOXES))
        guess2 = guess1
        success = False
        if strategy == 'CHANGED ':
            total_changed_count += 1
            # The host opens an empty box that is not the first guess.
            for j in range(len(boxes)):
                if j != guess1 and boxes[j] is False:
                    empty_box = j
                    break
            guess2 = guess1
            # Switch: re-pick until landing on a different, unopened box.
            while guess2 == guess1 or guess2 == empty_box:
                guess2 = random.choice(range(NUMBER_OF_BOXES))
            if boxes[guess2]:
                success = True
                changed_success_count += 1
            else:
                success = False
        else:
            total_unchanged_count +=1
            # Staying: the second guess equals the first.
            if boxes[guess2] is True:
                success = True
                unchanged_success_count += 1
            else:
                success = False
        print("Try {}: Boxes {}, Strategy {}\t\t guesses: {}, {}\t\t {}".format(i, boxes, strategy, guess1, guess2, success))
    # Summary: switching should win roughly twice as often as staying.
    print("---------------------------------------------------------------------------------------------\n" \
          "\t{} tests done\n\tCHANGED {}/{} success {}%\n\tUNCHANGED {}/{} success {}%".format(
        i + 1, changed_success_count, total_changed_count, round(100*changed_success_count/total_changed_count),
        unchanged_success_count, total_unchanged_count, round(100*unchanged_success_count/total_unchanged_count)))
# Run the full simulation when executed as a script.
if __name__ == "__main__":
    main()
"bkmy43@googlemail.com"
] | bkmy43@googlemail.com |
7e70145cfeb5e6dd6bdd34b22fcb495fefd4b772 | 1bcbf84572c0c5c6acee906c85809b008415416a | /Api/api.py | 749f4911df99567e91b7c7b5bc088d320bc932c0 | [] | no_license | s1ntaxe770r/Docker-UI | 54a98fa4b9fa8e44e3d839e0e688e5b59e32d5e5 | 757e1776c24b601dc548c4435e080faed70f38a6 | refs/heads/master | 2023-03-20T10:09:22.366903 | 2021-03-17T21:48:30 | 2021-03-17T21:48:30 | 304,615,416 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # Routes return pure json
from flask import Blueprint,request
from flask.json import jsonify
from containerman import ContainerMan as cm
api = Blueprint("api", __name__)
# @api.route("/containers", methods=["GET"])
# def index():
@api.route("/containers/running",methods=["GET"])
def running():
containers = cm.DockerInfo()
running_containers = containers['running_containers']
jsonresp = {"containers":running_containers}
return jsonify(jsonresp)
@api.route("/containers/all", methods=["GET"])
def all():
all_containers = cm.AllContainers()
jsonresp = {"all_containers":all_containers}
return jsonify(jsonresp)
# @api.route("images/pull",methods=["POST"])
# def pull():
# ImageName = request.data["image"]
| [
"cyberdev01@protonmail.com"
] | cyberdev01@protonmail.com |
1d034b6b06e94315ceda06e8a8cc67681b8b3e9e | 6a7d8b67aad59c51dafdfb8bcffd53864a3d65b0 | /LeetCode/toeplitzMatrix.py | ef8e74f26346b2369d234c7f7ba1f11b002541a5 | [] | no_license | dicao425/algorithmExercise | 8bba36c1a08a232678e5085d24bac1dbee7e5364 | 36cb33af758b1d01da35982481a8bbfbee5c2810 | refs/heads/master | 2021-10-07T08:56:18.030583 | 2018-12-04T05:59:17 | 2018-12-04T05:59:17 | 103,611,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python
import sys
class Solution(object):
    def isToeplitzMatrix(self, matrix):
        """
        A matrix is Toeplitz when every row, with its last element dropped,
        equals the next row with its first element dropped.
        :type matrix: List[List[int]]
        :rtype: bool
        """
        width = len(matrix[0])
        return all(upper[:width - 1] == lower[1:]
                   for upper, lower in zip(matrix, matrix[1:]))
def main():
    # Smoke entry point: constructs the solver and always reports success.
    aa = Solution()
    return 0
if __name__ == "__main__":
    sys.exit(main())
"di.cao425@gmail.com"
] | di.cao425@gmail.com |
eafcec2d83b6f74f3c5fb1a599b2afcc0e01c2a7 | 0ef02884177d4d0185be485dbb27c3194e888e49 | /Plotting/generateUpDownFake.py | 2be0488f98e9fc64b13937c309eb9aaeb5740b36 | [] | no_license | pietrzkowskimateusz/my_python_apps | 9bd03e2c81eb047bbe9b61c08c51221cb358a6ac | dd98742485041001885a03ba1994888c42e90586 | refs/heads/master | 2020-08-31T07:11:40.375165 | 2019-11-27T18:04:31 | 2019-11-27T18:04:31 | 218,630,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,070 | py | import sys
import random
# Start coordinates shared by all four generated tracks.
x = 250
y = 250
x0=x1=x2=x3=x
y0=y1=y2=y3=y
# Step sizes: delta for the ideal/RTLS/odometry tracks, delta3 for GPS.
delta = 10
delta3 = 100
# Half-widths of the uniform measurement noise per simulated sensor.
accuracy1 = 50
accuracy2 = 10
accuracy3 = 800
# Pre-drawn per-step drift values, filled in under __main__.
randomowa = []
if __name__ == '__main__':
if len(sys.argv) == 5:
try:
len1 = int(sys.argv[1])
len2 = int(sys.argv[2])
len3 = int(sys.argv[3])
len4 = int(sys.argv[4])
for i in range(2*len1):
randomowa.append(random.randint(0,accuracy2/5))
# Ścieżka
# # if mode == 0:
# file = open('UpDown/data0.txt','w')
# str = "{},{}\n".format(x,y)
# file.write(str)
# for j in range(4):
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x,y)
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x,y)
# file.write(str)
# for i in range(len1):
# x -= delta
# str = "{},{}\n".format(x,y)
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x,y)
# file.write(str)
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x,y)
# file.write(str)
# # RTLS
# if mode == 1:
# file = open('UpDown/data1.txt','w')
# str = "{},{}\n".format(x,y)
# file.write(str)
# for j in range(4):
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x + random.randint(-accuracy1,accuracy1),y + random.randint(-accuracy1,accuracy1))
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x + random.randint(-accuracy1,accuracy1),y + random.randint(-accuracy1,accuracy1))
# file.write(str)
# for i in range(len1):
# x -= delta
# str = "{},{}\n".format(x + random.randint(-accuracy1,accuracy1),y + random.randint(-accuracy1,accuracy1))
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x + random.randint(-accuracy1,accuracy1),y + random.randint(-accuracy1,accuracy1))
# file.write(str)
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x + random.randint(-accuracy1,accuracy1),y + random.randint(-accuracy1,accuracy1))
# file.write(str)
# # Odometria
# if mode == 2:
# file = open('UpDown/data2.txt','w')
# str = "{},{}\n".format(x,y)
# file.write(str)
# for j in range(4):
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x + random.randint(-accuracy2,accuracy2),y + random.randint(-accuracy2,accuracy2))
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x + random.randint(-accuracy2,accuracy2),y + random.randint(-accuracy2,accuracy2))
# file.write(str)
# for i in range(len1):
# x -= delta
# str = "{},{}\n".format(x + random.randint(-accuracy2,accuracy2),y + random.randint(-accuracy2,accuracy2))
# file.write(str)
# for i in range(len2):
# y += delta
# str = "{},{}\n".format(x + random.randint(-accuracy2,accuracy2),y + random.randint(-accuracy2,accuracy2))
# file.write(str)
# for i in range(len1):
# x += delta
# str = "{},{}\n".format(x + random.randint(-accuracy2,accuracy2),y + random.randint(-accuracy2,accuracy2))
# file.write(str)
# # GPS
# if mode == 3:
# file = open('UpDown/data3.txt','w')
# str = "{},{}\n".format(x,y)
# file.write(str)
# for j in range(4):
# for i in range(len1):
# x += delta3
# str = "{},{}\n".format(x + random.randint(-accuracy3,accuracy3),y + random.randint(-accuracy3,accuracy3))
# file.write(str)
# for i in range(len2):
# y += delta3
# str = "{},{}\n".format(x + random.randint(-accuracy3,accuracy3),y + random.randint(-accuracy3,accuracy3))
# file.write(str)
# for i in range(len1):
# x -= delta3
# str = "{},{}\n".format(x + random.randint(-accuracy3,accuracy3),y + random.randint(-accuracy3,accuracy3))
# file.write(str)
# for i in range(len2):
# y += delta3
# str = "{},{}\n".format(x + random.randint(-accuracy3,accuracy3),y + random.randint(-accuracy3,accuracy3))
# file.write(str)
# for i in range(len1):
# x += delta3
# str = "{},{}\n".format(x + random.randint(-accuracy3,accuracy3),y + random.randint(-accuracy3,accuracy3))
# file.write(str)
# Ścieżka
# if mode == 0:
file0 = open('UpDown2/data0.txt','w')
str0 = "{},{}\n".format(x,y)
file0.write(str0)
for j in range(4):
for i in range(len1):
x0 += delta
str0 = "{},{}\n".format(x0,y0)
file0.write(str0)
for i in range(len2):
y0 += delta
str0 = "{},{}\n".format(x0,y0)
file0.write(str0)
for i in range(len1):
x0 -= delta
str0 = "{},{}\n".format(x0,y0)
file0.write(str0)
for i in range(len2):
y0 += delta
str0 = "{},{}\n".format(x0,y0)
file0.write(str0)
for i in range(len1):
x0 += delta
str0 = "{},{}\n".format(x0,y0)
file0.write(str0)
# RTLS
# if mode == 1:
file1 = open('UpDown2/data1.txt','w')
str1 = "{},{}\n".format(x1,y1)
file1.write(str1)
for j in range(4):
for i in range(len1):
x1 += delta + randomowa[i]
y1 += randomowa[i+len1]
str1 = "{},{}\n".format(x1 + random.randint(-accuracy1,accuracy1),y1 + random.randint(-accuracy1,accuracy1))
file1.write(str1)
for i in range(len2):
x1 += randomowa[i]
y1 += delta + randomowa[i+len1]
str1 = "{},{}\n".format(x1 + random.randint(-accuracy1,accuracy1),y1 + random.randint(-accuracy1,accuracy1))
file1.write(str1)
for i in range(len1):
x1 -= delta + randomowa[i]
y1 += randomowa[i+len1]
str1 = "{},{}\n".format(x1 + random.randint(-accuracy1,accuracy1),y1 + random.randint(-accuracy1,accuracy1))
file1.write(str1)
for i in range(len2):
x1 += randomowa[i]
y1 += delta + randomowa[i+len1]
str1 = "{},{}\n".format(x1 + random.randint(-accuracy1,accuracy1),y1 + random.randint(-accuracy1,accuracy1))
file1.write(str1)
for i in range(len1):
x1 += delta + randomowa[i]
y1 += randomowa[i+len1]
str1 = "{},{}\n".format(x1 + random.randint(-accuracy1,accuracy1),y1 + random.randint(-accuracy1,accuracy1))
file1.write(str1)
# Odometria
# if mode == 2:
file2 = open('UpDown2/data2.txt','w')
str2 = "{},{}\n".format(x2,y2)
file2.write(str2)
for j in range(4):
for i in range(len1):
x2 += delta
str2 = "{},{}\n".format(x2 + random.randint(-accuracy2,accuracy2),y2 + random.randint(-accuracy2,accuracy2))
file2.write(str2)
for i in range(len2):
y2 += delta
str2 = "{},{}\n".format(x2 + random.randint(-accuracy2,accuracy2),y2 + random.randint(-accuracy2,accuracy2))
file2.write(str2)
for i in range(len1):
x2 -= delta
str2 = "{},{}\n".format(x2 + random.randint(-accuracy2,accuracy2),y2 + random.randint(-accuracy2,accuracy2))
file2.write(str2)
for i in range(len2):
y2 += delta
str2 = "{},{}\n".format(x2 + random.randint(-accuracy2,accuracy2),y2 + random.randint(-accuracy2,accuracy2))
file2.write(str2)
for i in range(len1):
x2 += delta
str2 = "{},{}\n".format(x2 + random.randint(-accuracy2,accuracy2),y2 + random.randint(-accuracy2,accuracy2))
file2.write(str2)
# GPS
# if mode == 3:
file3 = open('UpDown2/data3.txt','w')
str3 = "{},{}\n".format(x3,y3)
file3.write(str3)
for j in range(4):
for i in range(len3):
x3 += delta3 + 10*randomowa[i]
y3 += 10*randomowa[i+len1]
# str3 = "{},{}\n".format(x3,y3)
str3 = "{},{}\n".format(x3 + random.randint(-accuracy3,accuracy3),y3 + random.randint(-accuracy3,accuracy3))
file3.write(str3)
for i in range(len4):
x3 += 10*randomowa[i]
y3 += delta3 + 10*randomowa[i+len1]
# str3 = "{},{}\n".format(x3,y3)
str3 = "{},{}\n".format(x3 + random.randint(-accuracy3,accuracy3),y3 + random.randint(-accuracy3,accuracy3))
file3.write(str3)
for i in range(len3):
x3 -= delta3 + 10*randomowa[i]
y3 += 10*randomowa[i+len1]
# str3 = "{},{}\n".format(x3,y3)
str3 = "{},{}\n".format(x3 + random.randint(-accuracy3,accuracy3),y3 + random.randint(-accuracy3,accuracy3))
file3.write(str3)
for i in range(len4):
x3 += 10*randomowa[i]
y3 += delta3 + 10*randomowa[i+len1]
# str3 = "{},{}\n".format(x3,y3)
str3 = "{},{}\n".format(x3 + random.randint(-accuracy3,accuracy3),y3 + random.randint(-accuracy3,accuracy3))
file3.write(str3)
for i in range(len3):
x3 += delta3 + 10*randomowa[i]
y3 += 10*randomowa[i+len1]
# str3 = "{},{}\n".format(x3,y3)
str3 = "{},{}\n".format(x3 + random.randint(-accuracy3,accuracy3),y3 + random.randint(-accuracy3,accuracy3))
file3.write(str3)
except:
print("Error")
else:
print("More arguments")
| [
"pietrzkowskimateusz@gmail.com"
] | pietrzkowskimateusz@gmail.com |
e3e45a43ed66f41c9fb650e1e3fd424b815ac734 | 5d5312a0b42016ec918117472399512e6e48e3e3 | /pi_zero_w/off_parser.py | 50baf49a9e0a922aa5d911a784ddb86563277438 | [
"MIT"
] | permissive | jishii14/YezzusFrezzeUs | ac50e0cd16932864101d47fd1c74b332a54abdd5 | 18094093e6c1fbda97f6649cad7642941f608844 | refs/heads/master | 2022-01-08T07:06:44.195600 | 2019-05-01T10:01:13 | 2019-05-01T10:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | ################################################################################
## OFF_Parser contains utility methods for parsing out the JSON object
## obtained from Open Food Facts into our FoodItem class
################################################################################
import json
from off_food_item import FoodItem
def parse_food_item(raw_food_data):
if not 'product_name' in raw_food_data:
print('ERROR: Product not found')
return
food_item = FoodItem(raw_food_data['product_name'])
print('Scanned %s' % food_item.name)
#################################################
## Add data from Open Food Facts to instantiation
if 'allergens_from_ingredients' in raw_food_data:
food_item.add_allergies(raw_food_data['allergens_from_ingredients'])
if 'image_front_thumb_url' in raw_food_data:
food_item.add_image(raw_food_data['image_front_thumb_url'])
if 'ingredients_text' in raw_food_data:
food_item.add_ingredients(raw_food_data['ingredients_text'])
if 'categories' in raw_food_data:
food_item.add_categories(raw_food_data['categories'])
if 'nutriments' in raw_food_data:
food_item.add_nutriments(raw_food_data['nutriments'])
if 'nutrient_levels' in raw_food_data:
levels = raw_food_data['nutrient_levels']
# Verify all levels exist
salt = 'X'
sugars = 'X'
saturated_fat = 'X'
fat = 'X'
if 'salt' in levels:
salt = levels['salt']
if 'sugars' in levels:
sugars = levels['sugars']
if 'saturated_fat' in levels:
saturated_fat = levels['saturated_fat']
if 'fat' in levels:
fat = levels['fat']
food_item.set_nutrient_levels(salt, sugars, saturated_fat, fat)
return food_item | [
"jishii14@apu.edu"
] | jishii14@apu.edu |
302567e4bd9eb3e7ffba5d8c54980f1fe599c004 | e63b9e6ca4bd729bced324d372a95364839844f7 | /v/vdrnfofs-0.8/vdrnfofs/vdr.py | d28eca529ae02c0baeee511370f6f69143484a03 | [
"BSD-3-Clause"
] | permissive | annishoelzel/five | a724d5fe5682983fbb8a69b9019ad3775eb3a499 | b1db1f2fd78fae6bcdde24ea1bd2e2916adf9d10 | refs/heads/master | 2023-03-30T09:30:58.930532 | 2020-11-15T15:52:55 | 2020-11-15T15:52:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # -*- coding: utf-8 -*-
#
# VDR-NFO-FS creates a file system for VDR recordings, which maps each
# recording to a single mpg-file and nfo-file containing some meta data.
#
# Copyright (c) 2010 - 2011 by Tobias Grimm
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author/copyright holder nor the names of
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class InfoVdr:
def __init__(self, filename = None):
self.values = {'T' : 'Unknown', 'D': 'No Description'}
if filename:
isOldFormat = filename.endswith('summary.vdr')
with open(filename, 'r') as file:
if isOldFormat:
lines = file.readlines()
if len(lines) > 0:
self.values['T'] = lines[0].rstrip("\r\n")
self.values['D'] = ' '.join([l.rstrip("\r\n") for l in lines[1:]])
else:
for line in file:
line = line.rstrip("\r\n")
self.values[line[0]] = line[2:]
def __getitem__(self, key):
return self.values[key] if self.values.has_key(key) else ''
| [
"mango@mail.com"
] | mango@mail.com |
fad0b75b8fe6d36558d2a73428f62c8394b94e6e | c818ffc5613ec8f9918a0280bbb6df2642d83233 | /Semantic-differential-scales-generator.py | ff90831b8f2e63f300aa92dfc228d1f3b56178be | [] | no_license | SamSSLF/Semantic-differential-scales-generator | af1a21c4c738a251969201835e253b4e02363fb8 | ba7775c1f45522628324b71dd945946a564f9c8c | refs/heads/master | 2020-10-01T07:00:24.233661 | 2019-12-09T23:52:09 | 2019-12-09T23:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,206 | py | import matplotlib.pyplot as plt
import numpy as np
import random as r
import math as m
import os
import string
class Material():
"Class for a material"
def __init__(self, material):
"Instantiates a material."
self.material = material
def __repr__(self):
return str(self.material)
class Property():
"Class for a material property."
def __init__(self, name):
"Instantiates a property."
self.name = name
self.materials = {}
self.headers = ["Material", "Average", "Standard Deviation"]
def __repr__(self):
return str(self.name)
def add_a_material(self,material,avg,std_dev):
"Adds a material to the materials dictionary for that property. with average, and standard deviation values"
property_values = PropertyValues(avg,std_dev)
# Updates the materials dictionary with the material and its values.
self.materials.update( {str(material) : property_values.values() } )
def gen_array(self):
"Generates the array of data for numpy to make graphs for that property."
array_list = []
material_list = []
# Iterates over the materials in the materials list
for mat in self.materials.keys():
# Creates an array_row list and appends the material to it
array_row = []
material_list.append(mat)
# Iterates over the values for avg and std deviation for each material
for value in self.materials[mat]:
# Appends the values to the array row
array_row.append(value)
# Appends the row to the list, for each material.
array_list.append(array_row)
# Returns a generated array from the array list, and the header list.
return np.array(array_list), material_list
def make_graph(self,max_min):
"Makes an error bar graph based on the array and other parameters supplied."
data = self.gen_array()[0]
mat_names = self.gen_array()[1]
# Generates a range of x values for each value in first column of the array
x = np.arange(1,len(data[:,0])+1)
# Assigns y to equal the first column data array
y = data[:,0]
# Makes x-ticks for the graph from the mat_headers
plt.xticks(x, mat_names, rotation = 30)
# Adds error bars from the second coulmn of the data array
e = data[:,1]
plt.ylabel(str(self.name), fontweight="bold")
plt.xlabel("Materials", fontweight="bold")
plt.ylim(float(max_min[0]),float(max_min[1]))
plt.xlim(min(x)-0.2*len(x),max(x)+0.2*len(x))
plt.grid(linestyle='dashed')
return plt.errorbar(x, y, yerr=e, fmt = "o", capsize=2)
def gen_csv(self, file_path):
"Method to generate a CSV file for the property."
data = self.gen_array()[0]
mat_names = self.gen_array()[1]
no_headers = np.insert(data.astype(str), 0, mat_names, 1 )
csv_array = np.insert(no_headers, 0, self.headers, 0)
return np.savetxt(file_path, csv_array, fmt="%s", delimiter= ",")
class PropertyValues():
"Class for property values to generate lists on the fly."
def __init__(self,avg,std_dev):
"Instnatiates a property value."
self.property_values = [avg,std_dev]
def values(self):
return self.property_values
class FilePath():
"Class for generating safe file paths and names."
def __init__(self, new_folder_name, file_name):
self.safe_file_name = self.make_name_safe(str(file_name))
self.new_folder = os.path.join(os.getcwd(), new_folder_name)
def make_name_safe(self,file_name):
'Makes the file name safe for computer consumption by replacing all punctuation to be a "-"'
for char in file_name:
if char in string.punctuation:
file_name = file_name.replace(char, "-")
return file_name
def safe_file_path(self, file):
'''Returns the safe file path with the file'''
return str(os.path.join(self.new_folder, file))
def add_extras(self, file_extension, *extras):
'''Adds extra words and a file extension to the safe file name attribute.
The method will separate extras with a "-" and add a "." before the file extension.'''
extra_file_name = self.safe_file_name
for extra in extras:
extra_file_name += ( "-" + str(extra))
extra_file_name += ("." + str(file_extension))
return extra_file_name
def y_or_n(yn):
"Default method for answering yes or no to questions. Returns None when user makes a false input."
if yn == "y":
return True
elif yn == "n":
return False
else:
print("Please only enter y or n!")
return None
def y_n_loop(string):
"Yes no loop function for resolving conflicts with false y/n input."
y_n = True
while y_n:
print(string)
ans = input()
if y_or_n(ans) is None:
y_n = True
else:
if y_or_n(ans):
return True
elif not y_or_n(ans):
return False
y_n = False
def add_thing(thing_class,thing_list):
"adds things to their respected lists in the __main__ function"
print("Please enter a " + thing_class.__name__.lower() + ".")
#creates a thing of thing_class via the input
this_thing = thing_class(input())
#appends the thing to the list of things
thing_list.append(this_thing)
#asks if you want to make more things?
if y_n_loop("Do you want to add another " + thing_class.__name__.lower() + "? (y/n)"):
return False
else:
return True
def only_num(string):
"Checks whether a user input is a numeric value, if not, loops around the specified previous question."
only_num = True
while only_num:
print(string)
try:
thing = float(input())
only_num = False
return thing
except ValueError:
print("Please only enter numeric values!")
only_num = True
if __name__ == '__main__':
material_list = []
property_list = []
adding_materials = True
adding_properties = False
adding_values = False
draw_graphs = False
gen_random = 0
within_limits = True
the_headers = ["Material", "Average", "Standard Deviation"]
max_min = []
max_min.append(only_num("Please enter the minimum rating limit:"))
max_min.append(only_num("Please enter the maximum rating limit:"))
if y_n_loop("Do you want to generate totally random values for your material properties and standard deviations? (y/n)"):
if y_n_loop("WARNING!!! These values are completely and totally random. No guarantee can be made for their relevance to the materials specfied. Do you still want to proceed? (y/n)"):
gen_random = 2
else:
if y_n_loop("Would you like to add a small amount of randomness to inputted values? (y/n)"):
gen_random = 1
else:
if y_n_loop("Would you like to add a small amount of randomness to inputted values? (y/n)"):
gen_random = 1
while adding_materials:
#Uses the add_thing function to add Material instances to the material_list
if add_thing(Material, material_list):
adding_materials = False
adding_properties = True
while adding_properties:
#Uses the add_thing function to add property instances to the material_list
if add_thing(Property, property_list):
adding_properties = False
adding_values = True
#Adding values to the properties and the materials.
print("Your materials are " + str(material_list))
print("Your properties are " + str(property_list))
#Iterates over the list of properties
for prprty in property_list:
#iterates over the list of materials for each property
for mat in material_list:
#If total randomness was selected
if gen_random == 2:
#adds properties with random values based on maximum and minimum limits.
prprty.add_a_material(mat,r.uniform(float(max_min[0]),float(max_min[1])),r.uniform(float(max_min[0]), float(max_min[1])/4))
else:
while within_limits:
avg = only_num("Enter a value for average " + str(prprty) + "-ness, of " + str(mat) + ":")
# If the user wants some randomness
if gen_random==1:
# Multiplies the avg by a small randomness factor
avg *= r.uniform(0.8,1.2)
std_dev = only_num("Enter a value for standard deviation around " + str(mat) + "'s " + str(prprty) + "-ness:")
# If the user wants some randomness
if gen_random ==1:
# Multiplies the avg by a small randomness factor
std_dev *= r.uniform(0.8,1.2)
# Checks whether the average and standard deviation values are within the max and min values.
if avg > max_min[1] or avg < max_min[0] or std_dev > max_min[1] or std_dev < max_min[0]:
# Queries a yes or no loop to check whether they want to continue with the out of max_min range values.
if y_n_loop("WARNING!!! The values specified for average and standard deviation of " + str(mat) + "'s " + str(prprty) + "-ness are outside of the maximum and minimum values. Do you wish to continue? (y/n)"):
# If they do wish to continue, Breaks the within_limits loop
within_limits = False
else:
# If they don't want continue, keeps within_limits loop, forcing user to re-enter maerial property values.
within_limits = True
else:
# Breaks the within limits loop as they have entered values within the limits of the max_min values.
within_limits = False
# Resets the within limits loop to true so that they are queried about material properties for the next material in the list.
within_limits = True
#adds a material to the property with the avg and std dev values specified
prprty.add_a_material(mat,avg,std_dev)
plot = prprty.make_graph(max_min)
# Makes a new file name based on the string of the property
new_file_name = str(prprty) + "-differential-scales"
# Makes the file name safe for computer consumption by changing all punctuation to a -
the_path = FilePath("semantic-differential-scales", prprty)
the_file_png = the_path.add_extras("png")
the_file_csv = the_path.add_extras("csv","csv","file")
if os.path.exists(the_path.new_folder):
print("semantic-differential-scales found!")
else:
print("semantic-differential-scales directory not found. Making a new directory")
print("...")
os.makedirs(the_path.new_folder)
print("Done!")
if os.path.exists(the_path.new_folder):
print("Directory successfuly made")
print("Saving a semantic differential scale graph for " + str(prprty) + " as " + the_file_png )
print("...")
plt.savefig(the_path.safe_file_path(the_file_png), dpi = 300, bbox_inches ="tight")
print("Done!")
plt.clf()
no_headers = np.insert(prprty.gen_array()[0].astype(str), 0, prprty.gen_array()[1], 1 )
print("Saving a CSV file of semantic differential data for " + str(prprty) + " as " + the_file_csv )
print("...")
prprty.gen_csv(the_path.safe_file_path(the_file_csv))
# np.savetxt(the_path.safe_file_path(the_file_csv), np.insert(no_headers, 0, the_headers, 0), fmt="%s", delimiter= ",")
print("Done!")
print("Your graph and CSV file have been saved in " + the_path.new_folder) | [
"robert.field19@imperial.ac.uk"
] | robert.field19@imperial.ac.uk |
6d88d4b61b6122625fc034a6f3aba6e5dd93cae2 | ef91abff45756b09946dbb73255760ea39b6438a | /cn/chap9/dog_test.py | 780dc9da85c5adee0c4e8cc5d1ac8931a91a7a4f | [] | no_license | qichangjian/PythonLearn | 3006f8822161d485e6d1d5c1664cf42efe1bd5f7 | 7b40bae2a768d92b687376fa01033998d2bf7690 | refs/heads/master | 2020-03-23T18:59:42.832199 | 2018-07-23T01:56:18 | 2018-07-23T01:56:18 | 141,947,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #ecoding=UTF-8
'''
Created on 2018年7月17日
@author: Administrator
'''
#from cn.chap9.dog import *
#ctrl + shift + o
from cn.chap9.dog import Dog
#引用另一个类实例化
my_dog = Dog('dd',6)
my_dog.roll_over()
you_dog = Dog('qi',4)
you_dog.sit() | [
"Administrator@admin-PC"
] | Administrator@admin-PC |
3088c511911536432dfcecc2ec111059c1742357 | 7b274bfb0f13e81034a29cc0c53184fa1c88011e | /security/models.py | ef11d08aead8240ca6d470b958cae8b83ba941fe | [] | no_license | atorrese/FarmacyEcommerce | e4cf83687e82ac2b9cc3dfebc76b3f461019c029 | d35e4aef1e77f22841b2d6857e842f1bde4db2ef | refs/heads/master | 2023-08-09T19:30:45.305360 | 2021-09-11T20:25:26 | 2021-09-11T20:25:26 | 331,850,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import datetime
from django.db import models
from django.utils.timezone import datetime
#Clase Base Para eliminacion
class ModelBase(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status = models.BooleanField(default=True)
class Meta:
abstract = True | [
"atorrese@unemi.edu.ec"
] | atorrese@unemi.edu.ec |
a659bcedb38bb75845f1a14a480d97ab804d998c | e3b2da692237c02919dd98e7b1aab6b6c3f39597 | /rev_list.py | 0175241ec765e4cca50c39ebf2b070e02e6fc093 | [] | no_license | lavatharini/Project-176051T | b5e6e84de73bf1eed149b9592f2c465d3189e3b2 | c098d51812e320885e612c4a518a9a8f6e9e0eaf | refs/heads/master | 2022-12-26T03:54:27.481312 | 2020-10-10T18:51:29 | 2020-10-10T18:51:29 | 302,946,636 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | def Reverse(lst):
new_lst = lst[::-1]
return new_lst
#its the reverse list python code
| [
"lavatharini.17@business.mrt.ac.lk"
] | lavatharini.17@business.mrt.ac.lk |
e8e98afdde5ea6d6625b1b737aa624cfc45ca24c | 386a5b505d77c9798aaab78495d0f00c349cf660 | /Prognos Project/Working/Latiket Jaronde Git/DJango examples/DynamicUrls/DynamicUrls/urls.py | 35264c0f4b24f8a9bef2b8a7b45683a883428d3a | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 801 | py | """DynamicUrls URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("urlDemo.urls")),
]
| [
"namrata.ashok@impelsys.com"
] | namrata.ashok@impelsys.com |
0a6f6a24a5c718849def667cd7b9fda3075dad7b | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D05B/REBORDD05BUN.py | cff49e05af4ddd814da48c06f274ce8bd1247a17 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,871 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD05BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'GEI', MIN: 1, MAX: 6},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 9},
]},
{ID: 'DTM', MIN: 1, MAX: 6},
{ID: 'FTX', MIN: 0, MAX: 6},
{ID: 'ARD', MIN: 1, MAX: 999, LEVEL: [
{ID: 'CUX', MIN: 1, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 3},
{ID: 'RFF', MIN: 1, MAX: 9},
{ID: 'REL', MIN: 1, MAX: 999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'GEI', MIN: 0, MAX: 7},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 0, MAX: 7},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 6},
{ID: 'PCD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'GEI', MIN: 0, MAX: 2},
{ID: 'PCD', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 3},
{ID: 'COM', MIN: 0, MAX: 1},
]},
{ID: 'CUX', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 3},
{ID: 'COM', MIN: 0, MAX: 1},
]},
{ID: 'PCD', MIN: 0, MAX: 3},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
fe76267ea42ef325228c25e6645053532e8586d0 | b5ac186a6fe29e5b98c58d94213b2c72ee9796d0 | /P4/bmv2/targets/simple_switch/tests/CLI_tests/run_one_test.py.in | 9b999369cb8b591f147959a4f5b522c6b6002b86 | [
"Apache-2.0"
] | permissive | P4Archive/P4_ROHC | 41a4735cb68ee93ed3e5304e7397f1156d2905d5 | 1b7b544928c3fdce3ad48e16708d93561f08977f | refs/heads/master | 2021-04-15T08:57:12.378284 | 2016-08-19T20:46:37 | 2016-08-19T20:46:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,641 | in | #!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import subprocess
import os
import time
import random
import re
# Class written by Mihai Budiu, for Barefoot Networks, Inc.
class ConcurrentInteger(object):
# Generates exclusive integers in a range 0-max
# in a way which is safe across multiple processes.
# It uses a simple form of locking using folder names.
# This is necessary because this script may be invoked
# concurrently many times by make, and we need the many simulator instances
# to use different port numbers.
def __init__(self, folder, max):
self.folder = folder
self.max = max
def lockName(self, value):
return "lock_" + str(value)
def release(self, value):
os.rmdir(self.lockName(value))
def generate(self):
# try 10 times
for i in range(0, 10):
index = random.randint(0, self.max)
file = self.lockName(index)
try:
os.makedirs(file)
os.rmdir(file)
return index
except:
time.sleep(1)
continue
return None
def main():
if len(sys.argv) != 4:
sys.exit(1)
testdata_dir = sys.argv[1]
testname = sys.argv[2]
jsonname = sys.argv[3]
command_path = os.path.join(testdata_dir, testname + ".in")
output_path = os.path.join(testdata_dir, testname + ".out")
json_path = os.path.join(testdata_dir, jsonname)
concurrent = ConcurrentInteger(os.getcwd(), 1000)
rand = concurrent.generate()
if rand is None:
sys.exit(2)
thrift_port = str(9090 + rand)
device_id = str(rand)
# TODO(antonin): fragile if the name changes in the bmv2 code
rpc_path = "/tmp/bmv2-{}-notifications.ipc".format(device_id)
# makes sure that the switch will start right away
simple_switch_p = subprocess.Popen(
["@abs_top_builddir@/targets/simple_switch/simple_switch", "-h"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
simple_switch_p.wait()
# start simple_switch
simple_switch_p = subprocess.Popen(
["@abs_top_builddir@/targets/simple_switch/simple_switch",
json_path, "--thrift-port", thrift_port, "--device-id", device_id],
stdout=subprocess.PIPE)
# while True:
# line = simple_switch_p.stdout.readline()
# if "Thrift" in line:
# break
time.sleep(1)
def cleanup():
simple_switch_p.kill()
try:
os.remove(rpc_path)
except:
pass
cmd = ["@abs_top_srcdir@/tools/runtime_CLI.py",
"--thrift-port", thrift_port]
out = None
with open(command_path, "r") as f:
sub_env = os.environ.copy()
pythonpath = ""
if "PYHTONPATH" in sub_env:
pythonpath = sub_env["PYTHONPATH"] + ":"
sub_env["PYTHONPATH"] = pythonpath + \
"@abs_top_builddir@/thrift_src/gen-py/"
p = subprocess.Popen(cmd, stdin=f, stdout=subprocess.PIPE, env=sub_env)
out, _ = p.communicate()
rc = p.returncode
if rc:
cleanup()
sys.exit(3)
assert(out)
def parse_data(s, pattern):
m = re.findall("{}(.*?)(?={})".format(pattern, pattern), s,
re.DOTALL)
return m
# remove noise
out = out[out.find("RuntimeCmd: "):]
out_parsed = parse_data(out, "RuntimeCmd: ")
with open(output_path, "r") as f:
expected_parse = parse_data(f.read(), "\?\?\?\?\n")
success = True
if len(out_parsed) != len(expected_parse):
success = False
for o, e in zip(out_parsed, expected_parse):
if o != e:
success = False
if success:
cleanup()
sys.exit(0)
else:
print "Expected"
print "\n".join(expected_parse)
print "But got"
print "\n".join(out_parsed)
cleanup()
sys.exit(4)
if __name__ == '__main__':
main()
| [
"eng.jefersonsantiago@gmail.com"
] | eng.jefersonsantiago@gmail.com |
2327b68ca835a0ec379d05b0c398d71e4e0705a7 | 3ef1b6a2385db90bad1895d65a74965fe6b22a74 | /yolo3/yolo_det.py | fe4018976f8ef9484900261d3904b2bee6773230 | [] | no_license | jianwu585218/BOT | 82f156e3c9c6d83409febaa2b529396935c58116 | a82c29088289ad4ad52a7d3097c939e6b9b5c853 | refs/heads/master | 2020-03-27T19:18:59.704782 | 2018-09-01T06:37:15 | 2018-09-01T06:37:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | import cv2
from camera import *
'''
给图片加框和名字
'''
def Image_rectangle(img,c1,c2,name):
c1 = tuple(c1.int())
c2 = tuple(c2.int())
color = (0, 0, 255)
cv2.rectangle(img, c1, c2, color,1) # 加框
t_size = cv2.getTextSize(str(name), cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2, color, -1) # -1填充作为文字框底色
cv2.putText(img, str(name), (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
return img
if __name__ == '__main__':
model_yolo3 = Init_yolo("yolov3.weights","cfg/yolov3.cfg")
img = cv2.imread('./test.jpg')
outputs, haveperson = yolo3(model_yolo3, img, biggest_per=False)
if haveperson:
for i,output in enumerate(outputs):
print(i,output)
img = Image_rectangle(img,output[1:3],output[3:5],str(i))
else :
print("Yolo3 can not detect person")
cv2.imwrite('bot4-result.jpg',img)
cv2.imshow('result',img)
cv2.waitKey(0)
| [
"252051347@qq.com"
] | 252051347@qq.com |
4daef8f80bed480e3d19976990b2156d0cfa204b | 546059fa6366015d185420bce9dc4e30768ece70 | /my_app/urls.py | e693d6990c9a8f8b50a1834b60cb232eefb1fb0c | [] | no_license | serik867/cragslist_django | c5a02b51e61f62cf6b184e3af3e13b3325a4a3bb | 28bdb6cafe385d80da886793072f47f820498388 | refs/heads/master | 2022-12-18T13:40:40.954233 | 2020-03-29T19:10:55 | 2020-03-29T19:10:55 | 250,866,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django.urls import path
from . import views
urlpatterns =[
path('', views.home, name='home'),
path('new_search',views.new_search, name='new_search'),
] | [
"serdardurbayev@Serdars-MacBook-Pro.local"
] | serdardurbayev@Serdars-MacBook-Pro.local |
62335b2a6fca8cb7f83724b6cf30a376ccc1606e | 08121e24b74d426958151a56db8bbb10600a5968 | /manage.py | bf20ef2c2edbbd38dbdbbfe71ec0e15c7ee6b59d | [] | no_license | rlichiere/rallyman | d54702e0e1507e1bc181de4d3a6fb840f691a9b8 | 11b09d79fc341af2ab1be3db9b679d9b9e39fa20 | refs/heads/master | 2021-06-21T01:38:32.331904 | 2019-06-09T20:29:20 | 2019-06-09T20:29:20 | 189,528,511 | 1 | 0 | null | 2021-03-25T22:41:11 | 2019-05-31T04:39:16 | Python | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rallyman.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"rlichiere@visiativ.com"
] | rlichiere@visiativ.com |
c86f5b433b36f634a6acbc924f55b96e5c83ee72 | 319817a8d08ade28c443e6da3f2eafdb975dc94e | /main.py | ca8742622a17b68f9d1c12586c61ea8b241c7d13 | [] | no_license | srianbury/MFRP | b54e37faa965108587f21d932f4278ce468592ca | 047787a72342ce5fddf62a6434d50a3fc7f03561 | refs/heads/master | 2020-05-01T03:20:46.250602 | 2019-04-21T14:18:03 | 2019-04-21T14:18:03 | 177,242,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import backend
import config
app = backend.create_app(config)
# This is only used when running locally. When running live, gunicorn runs
# the application.
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True) | [
"noreply@github.com"
] | srianbury.noreply@github.com |
9b9370b731506ca645cef1f21250d63e763a078f | 6f79ee97f230433b83a809e9fe79ffb096b950f1 | /DHTClient.py | b7a9aa2ac3c232431661db9d3bc674f172dae5f2 | [] | no_license | qq431169079/GetInfo | eb85beffdc6a20a3d69234262ce75ad08c1ea0f9 | e347ebe0d38bfac58ddbd5e43b0c69a0399140c0 | refs/heads/master | 2020-04-27T01:15:35.102257 | 2014-11-29T09:02:50 | 2014-11-29T09:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | #encoding: utf-8
#!/usr/bin
#!/usr/bin/env python
import logging
from utility import *
from DHTNode import *
stdger = logging.getLogger("std_log")
fileger = logging.getLogger("file_log")
peerger = logging.getLogger("peer_log")
#using example
class Master(object):
def log(self, infohash, address=None):
stdger.debug("%s from %s:%s" % (infohash.encode("hex"), address[0], address[1]))
fileger.debug('%s from %s:%s' % (infohash.encode('hex').upper(),address[0],address[1]))
def logPeer(self,mag,ip,port):
peerger.debug("%s is located in %s:%d",mag,ip,port)
if __name__ == "__main__":
#max_node_qsize bigger, bandwith bigger, spped higher
initialLog()
stdger.debug("start get peer %d" % 9501)
dht=DHT(Master(), "0.0.0.0", 9512, max_node_qsize=1000)
dht.start()
dht.join_DHT()
while True:
stdger.debug("new loop")
dht.initialBeforGet("9f9bfd28e052442b6836b5ff0c3aae826ea0eecf")
dht.client()
| [
"moon_well@live.cn"
] | moon_well@live.cn |
d2c7b1a404f6c93fed77c4cb510b56f3b6ee5128 | b36d7c21908f9a372187c84c8f2e5e38e4b8c4c5 | /web_manage/apps.py | 46f291a6d3530c4331cbbb8e3a5cd88c4cdb6f29 | [] | no_license | markgwaps04/digos-ccts-v2 | 4d44ccbc13292e73363fe47738a507d76b873874 | 8d2c4a26b76fc50e7f45fbd700042dd3542af708 | refs/heads/master | 2023-01-14T01:34:37.478870 | 2020-11-24T15:10:26 | 2020-11-24T15:10:26 | 315,667,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class WebManageConfig(AppConfig):
name = 'web_manage'
| [
"marklibres345@gmail.com"
] | marklibres345@gmail.com |
87106543b18be244d93c9da08b0590ad94d9143d | e35dabca6d42c4fb6b8b6d36739e5980aba4065c | /Raspberry/RhaegalRaspi/demo_sprint2_rectif/sharedRessources.py | f8e19288cb269101ba64249c776ae73fa779b6e3 | [] | no_license | TeamRhaegal/Road-Analysis-Project | b7bcdbe3dd89a583b48e3289060e668aac559db8 | 945d1f7e9708bcc6aaf0d1b3d88f06582dfa9567 | refs/heads/master | 2020-08-05T15:31:41.494959 | 2020-01-09T14:19:45 | 2020-01-09T14:19:45 | 212,581,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | from mutex import mutex
from threading import Lock
#Global values
wheelSpeed = 0
turbo ="off"
joystick = "none"
mode = "assist"
sign = "none"
signWidth = 0
maxDistanceUS = 15 #distance max ultrasons obstacle
obstacleRear = 0
obstacleFront = 0
listMessagesToSend = []
listMessagesReceived = []
connectedDevice= False
#Locks
lockMessagesToSend = Lock()
lockMessagesReceived = Lock()
lockConnectedDevice = Lock()
modeLock =Lock()
joystickLock = Lock()
turboLock = Lock()
signLock = Lock()
signWidthLock = Lock()
speedLock = Lock()
| [
"anais_walle@yahoo.fr"
] | anais_walle@yahoo.fr |
d08dcdc3b0c9dc63dfaf73fa44457a1d7af97a27 | 65b6e843df4c2e8b9abed79b33be24eba1686fa2 | /absence/wsgi.py | 2d6983105aada3e12ea2bdcaac1b9b198f064d05 | [] | no_license | The-Super-Stack/abs-backend | 563fba90b36f45a0bac82aa5ace7c7d079309b09 | d9335ec0a9fe9fdfa1d416d8277c11c2ac23cb5a | refs/heads/main | 2023-08-14T13:33:05.631317 | 2021-10-11T06:18:44 | 2021-10-11T06:18:44 | 415,801,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for absence project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'absence.settings')
# WSGI callable picked up by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"60544749+bintangx1902@users.noreply.github.com"
] | 60544749+bintangx1902@users.noreply.github.com |
e005b0cdb5dbaf2b1440c37359feb6c7e0913af5 | 0a0693bed6e81febdee9502ebde4ee1156b105e6 | /venv/Scripts/pip-script.py | 916f0b6ff9d40eb7462fc7c5097aa1c89d6bf8c5 | [] | no_license | laoyouqing/sms_middleground | 3755d206bfa9ade24a94f1981cb60c0b393e3767 | dbafd3410802135f13e68de43cbc5b0246cb981f | refs/heads/master | 2022-12-11T03:52:51.737971 | 2019-08-21T05:59:55 | 2019-08-21T05:59:55 | 203,516,088 | 0 | 0 | null | 2022-12-08T05:20:56 | 2019-08-21T05:55:43 | Python | UTF-8 | Python | false | false | 392 | py | #!E:\sms_middleground\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools console-script shim: strip the "-script.py"/".exe" suffix
    # from argv[0], then dispatch to pip's registered console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"lingo.lin@foxmail.com"
] | lingo.lin@foxmail.com |
07c7200e0a0385597710e92944ef77aaf0a85a97 | 8f875bfd80bad1e2dbfa5648c8592141ad4fb711 | /Ciclo1/210703/test_sample.py | 4fd2afe4136235436306262f8b61f55d3c3ae5b7 | [] | no_license | gonsan20/misiontic | 02b4a2ed33559d35e1e12d9de391d19b59409fbb | 8bceb0d0c4c8aeb6b26aae1e2f71b8432dc097c1 | refs/heads/main | 2023-06-16T18:19:52.773514 | 2021-07-13T22:58:17 | 2021-07-13T22:58:17 | 371,520,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | def inc(x):
return x+1
def test_answer():
    """Check inc() adds exactly one."""
    # inc(4) returns 5; the previous expectation of 4 made this test
    # fail unconditionally.
    assert inc(4) == 5
| [
"gonsan20@gmail.com"
] | gonsan20@gmail.com |
aa481def6a06b873146e9768a76a282d6caa1f0a | 7abfa4cf2e1dfd889bddd49bc5190060397ce4dc | /day2/python/part2.py | 0999eb4b45df2fb47748e7999ab065c5114b705d | [] | no_license | AshleySetter/AdventOfCode2017 | 8bbd65550d1ff26af5b2016fe679d2d5dee03761 | eed9d756bc85a78832ecb9ad03ec491c2c71fbe6 | refs/heads/master | 2021-08-28T08:23:36.768487 | 2017-12-11T17:58:42 | 2017-12-11T17:58:42 | 113,334,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | def get_equal_dividers(row):
for i, num in enumerate(row):
for j, div in enumerate(row):
if i != j:
if num % div == 0:
return num/div
difference = largest - smallest
return difference
def string_to_list(string):
    """
    Convert a tab-separated string of integers into a list of ints.

    Parameters
    ----------
    string : str
        Tab-separated integer tokens, e.g. ``"1\\t2\\t3"``.

    Returns
    -------
    list of int
        One ``int`` per tab-separated token, in order.
    """
    return [int(token) for token in string.split('\t')]
def calc_checksum(filename):
    """Sum get_equal_dividers() over every tab-separated row in *filename*.

    NOTE(review): ``line[:-1]`` strips the trailing newline but would also
    drop the last digit of a final line that lacks one -- confirm the input
    file always ends with a newline.
    """
    checksum = 0
    with open(filename) as infile:
        for line in infile:
            rowlist = string_to_list(line[:-1])
            difference = get_equal_dividers(rowlist)
            checksum += difference
    return checksum
if __name__ == "__main__":
    # NOTE(review): this recomputes the identical checksum 1000 times --
    # looks like a leftover timing/benchmark loop; a single call would do.
    for i in range(1000):
        result = calc_checksum('input.txt')
        print(result)
| [
"ajs3g11@soton.ac.uk"
] | ajs3g11@soton.ac.uk |
f7e439dd8ba455eb23a6391c7473c287767c53a1 | e4f8a8d282de50c0132403d56ca1b5f47ee51e96 | /Darsh_Clock.py | bd276445ea4c396ceb508a6563c61a111a567a82 | [
"BSL-1.0"
] | permissive | DarshPro/Darsh-Clock | 2ad84db976ded9991a84e133676b39d58cbfe739 | f64987402ce8f1ec4d01d4e37f793e6c076b2409 | refs/heads/main | 2023-03-22T09:20:06.485110 | 2021-03-20T11:38:17 | 2021-03-20T11:38:17 | 349,705,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | from turtle import *
from datetime import datetime
title("Darsh Clock")
def jump(distanz, winkel=0):
    """Move *distanz* units with the pen up, turned *winkel* degrees to the
    right for the move only; the heading is restored (right/left pair) and
    the pen lowered afterwards."""
    penup()
    right(winkel)
    forward(distanz)
    left(winkel)
    pendown()
def hand(laenge, spitze):
    """Trace one clock hand: a shaft of length ``laenge*1.15`` ending in an
    equilateral triangle tip with side length *spitze* (three 120° turns)."""
    fd(laenge*1.15)
    rt(90)
    fd(spitze/2.0)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze/2.0)
def make_hand_shape(name, laenge, spitze):
    """Trace a hand outline and register it as the turtle shape *name*.

    The outline is shifted back by 15% of its length (``jump(-laenge*0.15)``)
    so the registered shape rotates about a point slightly behind its base.
    """
    reset()
    jump(-laenge*0.15)
    begin_poly()
    hand(laenge, spitze)
    end_poly()
    hand_form = get_poly()
    register_shape(name, hand_form)
def clockface(radius):
    """Draw the dial: 60 tick marks around a circle of *radius* -- a 25-unit
    line every 5th minute, a small dot otherwise (6° per step)."""
    reset()
    pensize(7)
    for i in range(60):
        jump(radius)
        if i % 5 == 0:
            fd(25)
            jump(-radius-25)
        else:
            dot(3)
            jump(-radius)
        rt(6)
def setup():
    """Create the three hand turtles, draw the dial, and position the
    hidden writer turtle used for the weekday/date captions."""
    global second_hand, minute_hand, hour_hand, writer
    mode("logo")  # logo mode: heading 0 = north, angles clockwise -- natural for clocks
    make_hand_shape("second_hand", 125, 25)
    make_hand_shape("minute_hand", 130, 25)
    make_hand_shape("hour_hand", 90, 25)
    clockface(160)
    second_hand = Turtle()
    second_hand.shape("second_hand")
    second_hand.color("gray20", "gray80")
    minute_hand = Turtle()
    minute_hand.shape("minute_hand")
    minute_hand.color("blue1", "red1")
    hour_hand = Turtle()
    hour_hand.shape("hour_hand")
    hour_hand.color("blue3", "red3")
    for hand in second_hand, minute_hand, hour_hand:
        hand.resizemode("user")
        hand.shapesize(1, 1, 3)
        hand.speed(0)
    ht()
    writer = Turtle()
    #writer.mode("logo")
    writer.ht()
    writer.pu()
    writer.bk(85)  # drop below centre so the captions sit under the hands
def wochentag(t):
    """Return the English weekday name for *t* (anything with .weekday())."""
    names = ("Monday", "Tuesday", "Wednesday",
             "Thursday", "Friday", "Saturday", "Sunday")
    return names[t.weekday()]
def datum(z):
    """Format date-like *z* as e.g. ``'Mar. 20 2021'``."""
    months = ("Jan.", "Feb.", "Mar.", "Apr.", "May", "June",
              "July", "Aug.", "Sep.", "Oct.", "Nov.", "Dec.")
    return "%s %d %d" % (months[z.month - 1], z.day, z.year)
def tick():
    """Advance the clock once: rotate the hands to the current wall-clock
    time, rewrite the weekday/date captions, and reschedule itself via
    ontimer in 100 ms. Swallows turtle.Terminator so a closed window
    (turtledemo STOP) ends the loop quietly."""
    t = datetime.today()
    # Fractional units so the hands sweep smoothly between ticks.
    sekunde = t.second + t.microsecond*0.000001
    minute = t.minute + sekunde/60.0
    stunde = t.hour + minute/60.0
    try:
        tracer(False)  # Terminator can occur here
        writer.clear()
        writer.home()
        writer.forward(65)
        writer.write(wochentag(t),
                     align="center", font=("Courier", 14, "bold"))
        writer.back(150)
        writer.write(datum(t),
                     align="center", font=("Courier", 14, "bold"))
        writer.forward(85)
        tracer(True)
        # 6°/s, 6°/min, 30°/h -- logo mode makes 0° point at 12 o'clock.
        second_hand.setheading(6*sekunde)  # or here
        minute_hand.setheading(6*minute)
        hour_hand.setheading(30*stunde)
        tracer(True)
        ontimer(tick, 100)
    except Terminator:
        pass  # turtledemo user pressed STOP
def main():
    """Build the clock with animation disabled, start the tick loop, and
    return "EVENTLOOP" so turtledemo hands control to the Tk event loop."""
    tracer(False)
    setup()
    tracer(True)
    tick()
    return "EVENTLOOP"
if __name__ == "__main__":
    # Standalone run (outside turtledemo): set logo mode, build the clock,
    # then report main()'s sentinel before entering the Tk main loop.
    mode("logo")
    msg = main()
    print(msg)
mainloop() | [
"noreply@github.com"
] | DarshPro.noreply@github.com |
9422d64a7fd3b7c00aadf3fc9f3fb39087611d8b | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/qosm/ingrpktshist1qtr.py | 52d1c6228888f3285d4b421d6b525367a8d9933a | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,745 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IngrPktsHist1qtr(Mo):
"""
A class that represents historical statistics for ingress packets in a 1 quarter sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.qosm.IngrPktsHist1qtr", "ingress packets")
counter = CounterMeta("drop", CounterCategory.COUNTER, "packets", "ingress drop packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "dropCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "dropPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "dropRate"
meta._counters.append(counter)
counter = CounterMeta("admit", CounterCategory.COUNTER, "packets", "ingress admit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "admitCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "admitPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "admitMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "admitMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "admitAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "admitSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "admitThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "admitTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "admitRate"
meta._counters.append(counter)
meta.moClassName = "qosmIngrPktsHist1qtr"
meta.rnFormat = "HDqosmIngrPkts1qtr-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical ingress packets stats in 1 quarter"
meta.writeAccessMask = 0x100000000000001
meta.readAccessMask = 0x100000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.qosm.IfClass")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.qosm.IngrPktsHist")
meta.rnPrefixes = [
('HDqosmIngrPkts1qtr-', True),
]
prop = PropMeta("str", "admitAvg", "admitAvg", 10928, PropCategory.IMPLICIT_AVG)
prop.label = "ingress admit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitAvg", prop)
prop = PropMeta("str", "admitCum", "admitCum", 10924, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress admit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("admitCum", prop)
prop = PropMeta("str", "admitMax", "admitMax", 10927, PropCategory.IMPLICIT_MAX)
prop.label = "ingress admit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitMax", prop)
prop = PropMeta("str", "admitMin", "admitMin", 10926, PropCategory.IMPLICIT_MIN)
prop.label = "ingress admit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("admitMin", prop)
prop = PropMeta("str", "admitPer", "admitPer", 10925, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress admit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("admitPer", prop)
prop = PropMeta("str", "admitRate", "admitRate", 10932, PropCategory.IMPLICIT_RATE)
prop.label = "ingress admit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("admitRate", prop)
prop = PropMeta("str", "admitSpct", "admitSpct", 10929, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress admit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("admitSpct", prop)
prop = PropMeta("str", "admitThr", "admitThr", 10930, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress admit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("admitThr", prop)
prop = PropMeta("str", "admitTr", "admitTr", 10931, PropCategory.IMPLICIT_TREND)
prop.label = "ingress admit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("admitTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "dropAvg", "dropAvg", 10955, PropCategory.IMPLICIT_AVG)
prop.label = "ingress drop packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropAvg", prop)
prop = PropMeta("str", "dropCum", "dropCum", 10951, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress drop packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("dropCum", prop)
prop = PropMeta("str", "dropMax", "dropMax", 10954, PropCategory.IMPLICIT_MAX)
prop.label = "ingress drop packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMax", prop)
prop = PropMeta("str", "dropMin", "dropMin", 10953, PropCategory.IMPLICIT_MIN)
prop.label = "ingress drop packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropMin", prop)
prop = PropMeta("str", "dropPer", "dropPer", 10952, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress drop packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("dropPer", prop)
prop = PropMeta("str", "dropRate", "dropRate", 10959, PropCategory.IMPLICIT_RATE)
prop.label = "ingress drop packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("dropRate", prop)
prop = PropMeta("str", "dropSpct", "dropSpct", 10956, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress drop packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropSpct", prop)
prop = PropMeta("str", "dropThr", "dropThr", 10957, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress drop packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropThr", prop)
prop = PropMeta("str", "dropTr", "dropTr", 10958, PropCategory.IMPLICIT_TREND)
prop.label = "ingress drop packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropTr", prop)
prop = PropMeta("str", "index", "index", 7102, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
    def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
        """Instantiate the history MO under *parentMoOrDn*; *index* is the
        naming property used to build the Rn (``HDqosmIngrPkts1qtr-%(index)s``)."""
        namingVals = [index]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
dd84a5790a2a78e6f48019faaa8ff6e1469c0763 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/werkzeug/wrappers/accept.py | 9605e637dc682aa6fb376053cb9a80387c566377 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 429 | py | import typing as t
import warnings
class AcceptMixin:
    """Deprecated shim: accept-header handling now lives on ``Request``."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'AcceptMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        # stacklevel=2 attributes the warning to the caller's line.
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)  # type: ignore
| [
"kd619@ic.ac.uk"
] | kd619@ic.ac.uk |
94f2d43841d6de8c61172de178f2cf83ea40e303 | b8a3e758eff2922ff6abc77947d879e3f6d1afa3 | /ws_moveit/build/moveit_resources/catkin_generated/pkg.develspace.context.pc.py | 1db6f9fe77fc24ca7b6f4cd26bc3b8b329be1584 | [] | no_license | rrowlands/ros-baxter-coffee | ab7a496186591e709f88ccfd3b9944428e652f3e | 32473c3012b7ec4f91194069303c85844cf1aae7 | refs/heads/master | 2016-09-05T20:58:20.428241 | 2013-12-02T23:10:44 | 2013-12-02T23:10:44 | 14,313,406 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/p/peth8881/robotics/ws_moveit/build/moveit_resources/include".split(';') if "/home/p/peth8881/robotics/ws_moveit/build/moveit_resources/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "moveit_resources"
PROJECT_SPACE_DIR = "/home/p/peth8881/robotics/ws_moveit/devel"
PROJECT_VERSION = "0.5.0"
| [
"peth8881@csel-112-02.csel.loc"
] | peth8881@csel-112-02.csel.loc |
7617908ed4d5cf7e73e7be822d4dd57e8b9b26c4 | 6e1549257568a0ca81b3fc5864e2e1fa65171b06 | /salarydk/models/inline_object84.py | d01ba31c4e2de0ed3ff37022127b34bb38a3e313 | [] | no_license | tdwizard/salarydk | 19d3453de8fbdd886a0189dbf232f98de971e18a | dcf5040101b3e576f1068ea104148651e5c66511 | refs/heads/master | 2023-08-05T05:40:09.561288 | 2021-09-24T09:41:43 | 2021-09-24T09:41:43 | 409,910,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,024 | py | # coding: utf-8
"""
Salary.dk API
This is the public API for Salary.dk. # General Our API is a JSON-based, REST-like API. Our webapp uses the exact same API, so everything you can do in our webapp, you can do through our API. However, we are slowly opening up the API, so not all endpoints are documented here yet. Only the endpoints documented here are stable. If there is some functionality you would like to access through our API, please contact us. The API is located at https://api.salary.dk. All requests must use TLS. In order to use the API on behalf of other users than yourself, you need to register as an API client. You do this by sending an e-mail to dev@salary.dk with the name and purpose of your client. API-keys for each account can be obtained once logged in to Salary, under the settings for the Company. All endpoints are documented to be able to return the 500 error code. We strive to not return this error code, so if you do encounter this error code, it might mean there is an error on our side. In this case, do not hesitate to contact us. # Versioning, upgrade and deprecation policy Our API might change over time. In order to ensure a stable API, we follow these rules when changing the API. New fields might be added at any time to any response or as non-required parameters to any input. When adding input fields, we ensure the default behaviour when not supplying the field is the same as the previous version. In these cases, the version of an endpoint is not increased, since it is backwards compatible. Since we might add new fields to responses, be sure to use a JSON parser in your implementation. This ensures that any extra fields added are ignored by your implementation. We might add entirely new endpoints at any time. If we need to change an existing endpoint without being able to make it backwards compatible, we will add a new version of the endpoint, and mark the old as deprecated but still functional. 
We will then contact any users of the deprecated endpoint and ensure an upgrade is performed. Once all consumers have moved to the new endpoint version, the old one will be removed. We will not at any point change the meaning of any existing field, nor will we remove any field or endpoint without following the above deprecated procedure. However, we might add new types to existing enums at any time. # Cross-Origin Resource Sharing This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/) - and that allows cross-domain communication from the browser. All responses have a wildcard same-origin which makes them completely public and accessible to everyone, including any code on any site, as long as the proper access token is passed. # Authentication All request require an access token. There are two ways to obtain an access token: * Logging in as a user. (this endpoint is not yet publicly available). * Using an API-key: [endpoint](#operation/APIClientLogin) Using one of these methods, you will obtain an access token. In all subsequest requests, this access token should be passed in the Authorization header. The access token is valid for around one hour, after which a new token should be obtained. You do not need to dispose of access tokens once created. They have a limited lifetime, and Salary.dk will automatically expire old ones. For some endpoints, the authorizedUserQuery security definition is used. This allows for passing the access token as a query parameter where it is not possible to pass it as a header. In particular, this is used for downloading files. <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: dev@salary.dk
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from salarydk.configuration import Configuration
class InlineObject84(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'company_id': 'str',
'force': 'bool',
'month': 'date'
}
attribute_map = {
'company_id': 'companyID',
'force': 'force',
'month': 'month'
}
    def __init__(self, company_id=None, force=None, month=None, local_vars_configuration=None):  # noqa: E501
        """InlineObject84 - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields; populated through the validating property setters.
        self._company_id = None
        self._force = None
        self._month = None
        self.discriminator = None
        # company_id and month are required (their setters reject None);
        # force is optional and only assigned when provided.
        self.company_id = company_id
        if force is not None:
            self.force = force
        self.month = month
@property
def company_id(self):
"""Gets the company_id of this InlineObject84. # noqa: E501
The company to perform a zero tax report for. # noqa: E501
:return: The company_id of this InlineObject84. # noqa: E501
:rtype: str
"""
return self._company_id
@company_id.setter
def company_id(self, company_id):
"""Sets the company_id of this InlineObject84.
The company to perform a zero tax report for. # noqa: E501
:param company_id: The company_id of this InlineObject84. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and company_id is None: # noqa: E501
raise ValueError("Invalid value for `company_id`, must not be `None`") # noqa: E501
self._company_id = company_id
@property
def force(self):
"""Gets the force of this InlineObject84. # noqa: E501
If true, force a zero tax report to run, even though internal checking says it should not run. # noqa: E501
:return: The force of this InlineObject84. # noqa: E501
:rtype: bool
"""
return self._force
@force.setter
def force(self, force):
"""Sets the force of this InlineObject84.
If true, force a zero tax report to run, even though internal checking says it should not run. # noqa: E501
:param force: The force of this InlineObject84. # noqa: E501
:type: bool
"""
self._force = force
@property
def month(self):
"""Gets the month of this InlineObject84. # noqa: E501
The date for the 1st of the month to perform a zero tax report for. # noqa: E501
:return: The month of this InlineObject84. # noqa: E501
:rtype: date
"""
return self._month
@month.setter
def month(self, month):
"""Sets the month of this InlineObject84.
The date for the 1st of the month to perform a zero tax report for. # noqa: E501
:param month: The month of this InlineObject84. # noqa: E501
:type: date
"""
if self.local_vars_configuration.client_side_validation and month is None: # noqa: E501
raise ValueError("Invalid value for `month`, must not be `None`") # noqa: E501
self._month = month
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineObject84):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineObject84):
return True
return self.to_dict() != other.to_dict()
| [
"tdrobiszewski@tdwizard.com"
] | tdrobiszewski@tdwizard.com |
4a3419b3ac8e6cea68ba90f084889c976e9a5efb | 7f35b6bd7454920bcd7a43bc96b39779ea95fbef | /prepare_train_val.py | eafbb2d82a5cdba6ff00396b4fb5c95f115c9987 | [] | no_license | gunescepic/InstrumentSegmentation | 12341640a85e08d8cb6c43a284515646a04c7e2d | 7e63d85dca543240a24f72ffa67db9341e47d84a | refs/heads/master | 2022-04-19T18:56:21.124854 | 2020-04-15T04:04:14 | 2020-04-15T04:04:14 | 252,764,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from prepare_data import data_path
def get_split(fold):
    """Return (train_file_names, val_file_names) for the given fold.

    Each of the three instrument datasets is routed to validation when its
    id appears in the fold's validation list, and to training otherwise.
    """
    fold_to_val_ids = {0: [1, 3],
                       1: [1, 2],
                       2: [3, 2]}
    val_ids = fold_to_val_ids[fold]
    cropped_train = data_path / 'cropped_train'
    train_file_names = []
    val_file_names = []
    for dataset_id in range(1, 4):
        images = list((cropped_train / ('instrument_dataset_' + str(dataset_id)) / 'images').glob('*'))
        if dataset_id in val_ids:
            val_file_names += images
        else:
            train_file_names += images
    return train_file_names, val_file_names
| [
"e2171494@gitlab.ceng.metu.edu.tr"
] | e2171494@gitlab.ceng.metu.edu.tr |
65a1aca3134f2ea863bbfe28f56950a24a811114 | 72430ca87464532ba5d3773ec1a81d1d2f308d75 | /main.py | dc9b73f90c87eccc167b90439cdfbb79a7ea8de8 | [] | no_license | DeanFeathers/Text-Detection | 663c665c3ccadb108d14e64ba2d8761da474f747 | 6ac5205143a77df3111ecd3fbd06b1fdc3aa9d06 | refs/heads/main | 2023-05-01T19:12:18.707215 | 2021-05-25T11:41:10 | 2021-05-25T11:41:10 | 370,669,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | from tkinter import *
import tkinter.messagebox
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
nltk.download('vader_lexicon')
class analysis_text():
    """Tkinter GUI that scores typed text with NLTK's VADER sentiment
    analyzer and displays the negative/neutral/positive breakdown.

    Fix: the positive-result message previously misspelled "positive" as
    "possitive".
    """
    # Main function in the program
    def center(self, toplevel):
        """Center the given toplevel window on the screen."""
        toplevel.update_idletasks()
        w = toplevel.winfo_screenwidth()
        h = toplevel.winfo_screenheight()
        # Current window size, parsed from the "WxH+X+Y" geometry string.
        size = tuple(int(_) for _ in
                     toplevel.geometry().split('+')[0].split('x'))
        x = w/2 - size[0]/2
        y = h/2 - size[1]/2
        toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))
    def callback(self):
        """Confirm with the user before destroying the main window."""
        if tkinter.messagebox.askokcancel("Quit", "Do you want to leave?"):
            self.main.destroy()
    def setResult(self, type, res):
        """Show the VADER score *res* for polarity *type* ('neg', 'neu' or
        'pos') in the matching label; any other key passed (e.g. 'compound')
        matches no branch and is ignored. NOTE: the parameter name *type*
        shadows the builtin; kept for interface compatibility."""
        # calculated comments in vader analysis
        if type == "neg":
            self.negativeLabel.configure(text="you typed a negative comment :" + str(res) + "% \n")
        elif type == "neu":
            self.neutralLabel.configure(text="you typed a neutral comment :" + str(res) + "% \n")
        elif type == "pos":
            # Bug fix: message used to read "possitive".
            self.positiveLabel.configure(text="you typed a positive comment :" + str(res) + "% \n")
    def runAnalysis(self):
        """Run VADER over the entry text and update all result labels."""
        sentences = []
        sentences.append(self.line.get())
        sid = SentimentIntensityAnalyzer()
        for sentence in sentences:
            # print(sentence)
            ss = sid.polarity_scores(sentence)
            # Conventional VADER thresholds: compound >= 0.05 is positive,
            # <= -0.05 negative, anything in between neutral.
            if ss['compound'] >= 0.05:
                self.normalLabel.configure(text="You typed a positive Statement: ")
            elif ss['compound'] <= - 0.05:
                self.normalLabel.configure(text="You typed a negative Statement: ")
            else:
                self.normalLabel.configure(text = "You typed a neutral statement: ")
            for k in sorted(ss):
                self.setResult(k, ss[k])
            print()
    def editedText(self, event):
        """Echo the entry text plus the keystroke being typed (which tkinter
        has not yet inserted into the widget at <Key> time) into the large
        blue label."""
        self.typedText.configure(text = self.line.get() + event.char)
    def runByEnter(self, event):
        """<Return> key binding: trigger the analysis."""
        self.runAnalysis()
    def __init__(self):
        """Build the main window and all widgets, then wire up bindings."""
        # Create main window
        self.main = Tk()
        self.main.title("Text Detector System")
        self.main.geometry("600x600")
        self.main.resizable(width=FALSE, height=FALSE)
        self.main.protocol("WM_DELETE_WINDOW", self.callback)
        self.main.focus()
        self.center(self.main)
        # Addition item on window
        self.label1 = Label(text = "type a text here :")
        self.label1.pack()
        # Add a hidden button to enter
        self.line = Entry(self.main, width=70)
        self.line.pack()
        self.textLabel = Label(text = "\n", font=("Helvetica", 15))
        self.textLabel.pack()
        self.typedText = Label(text = "", fg = "blue", font=("Helvetica", 20))
        self.typedText.pack()
        self.line.bind("<Key>", self.editedText)
        self.line.bind("<Return>", self.runByEnter)
        self.result = Label(text="\n", font=("Helvetica", 15))
        self.result.pack()
        self.negativeLabel = Label(text="", fg="red", font=("Helvetica", 20))
        self.negativeLabel.pack()
        self.neutralLabel = Label(text="", font=("Helvetica", 20))
        self.neutralLabel.pack()
        self.positiveLabel = Label(text="", fg="green", font=("Helvetica", 20))
        self.positiveLabel.pack()
        self.normalLabel = Label(text="", fg="red", font=("Helvetica", 20))
        self.normalLabel.pack()
# Driver code
# Build the GUI and enter Tk's event loop (blocks until the window closes).
myanalysis = analysis_text()
mainloop()
| [
"noreply@github.com"
] | DeanFeathers.noreply@github.com |
9890649bedea303f7b98f0f05c0ab63c869b5d77 | 2d925a123f948d84fcd547415201f51c9638276f | /20 - Valid Parentheses.py | fdf7691dd4847ec8f91c5b5632a8dd8230f380bd | [] | no_license | idan1ezer/LeetCode | 4d4f1326be6cac50aaa281a3f6de228267ec4541 | a79e03c7793700349c03854c9b6d040ea2798b1f | refs/heads/master | 2023-06-28T20:45:01.554566 | 2021-08-12T07:55:58 | 2021-08-12T07:55:58 | 395,239,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | def isValid(s):
queue = []
dic = {')':'(',']':'[','}':'{'}
for ch in s:
if (ch in dic.values()):
queue.append(ch)
elif (ch in dic.keys()):
if((not queue) or dic[ch] != queue.pop()):
return False
else:
return False
return (queue == [])
print(isValid(s = "()"))
print(isValid(s = "()[]{}"))
print(isValid(s = "(]"))
print(isValid(s = "([)]"))
print(isValid(s = "{[]}")) | [
"Idan.Ezer1@s.afeka.ac.il"
] | Idan.Ezer1@s.afeka.ac.il |
072492c0333ea6a7591fb949bacadfc5f8e7d64c | eeee555d546abe58edeaacb90e109c0f3879c45e | /decorator_2.py | 781452fd9815119c1538440b0f44bc23b7ef8b83 | [] | no_license | iPROGRAMMER007/Advanced_Python | 5054a1c6c06bb4e6949505d63780b1dda9ee4a08 | a202bf99cb4ba7de134735fabbcc9d7be663317b | refs/heads/master | 2020-12-04T20:43:06.285969 | 2020-01-05T10:02:31 | 2020-01-05T10:02:31 | 231,897,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | def chees_and_buns(orignal_func):
def wrap():
print("This is upper bread")
orignal_func()
print("This is lower bread")
return wrap
@chees_and_buns
def chicken():
    # The "filling": printed between the decorator's two bread lines.
    print("I am roasted chicken")
chicken()
"noreply@github.com"
] | iPROGRAMMER007.noreply@github.com |
e6a5d11e2d2e2ccb5576e48d4bdb66784f320772 | e2e3addb7858678d553339e52ed4ea4502a20f6f | /division_algo.py | 17de54ea559db493c02a5a53b23ff488d7177bb3 | [] | no_license | djschlicht/Cryptography | 503b18bc601ab520ae86eb0475595369a3e79954 | 89d29d53727ceb18e869637c83616866c3406559 | refs/heads/master | 2023-02-27T17:24:31.101170 | 2021-02-03T02:03:55 | 2021-02-03T02:03:55 | 243,169,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | #!/usr/bin/env python3
# division_algo.py - Uses the division algorithm to find the
# greatest common divisor of two integers and shows the
# intermediate steps in tabular form
from prettytable import PrettyTable
# Number 1. a = 28056, b = 3032
# Number 2. a = 48432, b = 47376
def division_algorithm(a, b):
    """Run the extended Euclidean algorithm on a and b, printing every
    intermediate step in tabular form.

    The table columns (u_1, v_1, u_2, v_2, u_3, v_3, q) follow the classic
    extended-Euclid bookkeeping: at termination u_3 is gcd(a, b) and the
    printed x, y satisfy x*max(a, b) + y*min(a, b) == gcd(a, b).

    Returns:
        (gcd, x, y) -- also returned so callers can use the result
        programmatically (the original only printed it; adding a return
        value is backward compatible).
    """
    # Set up table headers.
    table = PrettyTable(['u_1', 'v_1', 'u_2', 'v_2', 'u_3', 'v_3', 'q'])
    # The first row is always 1, 0, 0, 1, max(a,b), min(a,b), 0.
    current_row = [1, 0, 0, 1, max(a, b), min(a, b), 0]
    table.add_row(current_row)
    # Iterate the division algorithm until the remainder column v_3 hits 0.
    while current_row[5] != 0:
        # new u = old v
        u_1 = current_row[1]
        u_2 = current_row[3]
        u_3 = current_row[5]
        # q = floor quotient of the previous row's u_3 and v_3
        q = current_row[4] // current_row[5]
        # new v = old u - q * (old v)
        v_1 = current_row[0] - q * current_row[1]
        v_2 = current_row[2] - q * current_row[3]
        v_3 = current_row[4] - q * current_row[5]
        # Append the row to the table and make it the current row.
        current_row = [u_1, v_1, u_2, v_2, u_3, v_3, q]
        table.add_row(current_row)
    # The last row carries gcd (u_3 column) and the Bezout coefficients.
    gcd = current_row[4]
    x = current_row[0]
    y = current_row[2]
    # Print the table and the summary lines.
    print(table)
    print("gcd(%d, %d) = %d" % (a, b, gcd))
    print("x = %d, y = %d" % (x, y))
    return gcd, x, y
# Demo runs: the two labelled problems, then a small extra example.
print("Problem 1: a = 28056, b = 3032")
division_algorithm(28056, 3032)
print()
print("Problem 2: a = 48432, b = 47376")
division_algorithm(48432, 47376)
print()
division_algorithm(48, 56)
| [
"noreply@github.com"
] | djschlicht.noreply@github.com |
38c802e9a5aa03f8b26c62e5c786bc7ad2c47ed2 | 3196861aea8da409d4775291ec33ea61574a9604 | /oddmanout/solution.py | 2e4832e714c1ee44fff90c4acc32009327aa7ebb | [] | no_license | 109-marzouk/kattis | f7df72b59cc7bdbb0baab0cef86b5c5d1348a0d0 | da1d26541a64210a3d55452db538956e06e47ec1 | refs/heads/master | 2022-04-21T09:11:41.788688 | 2020-04-24T10:09:36 | 2020-04-24T10:09:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # https://open.kattis.com/problems/oddmanout
# NOTE: Python 2 script (xrange, raw_input, print statement).
# First input line: number of test cases; each case is a count line followed
# by a line of space-separated invitation codes.
for x in xrange(1, input()+1):
    n= input()  # count of codes on the next line (read but otherwise unused)
    s = map(int, raw_input().split(" "))
    for e in s:
        # The "odd man out" is the single code appearing exactly once.
        if s.count(e) == 1: print "Case #" + str(x) + ": " + str(e); break;
| [
"32062205+MohamedMarzouk23@users.noreply.github.com"
] | 32062205+MohamedMarzouk23@users.noreply.github.com |
4b2039b2583b2258d2f0fea69a7ad4fcde28256d | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/appcenter_sdk/models/AzureSubscriptionPatchRequest.py | 12120015c7f4061d120ebe159a0c58a00ab14fa1 | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 3,336 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class AzureSubscriptionPatchRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Single-field PATCH body: is_billing (bool, required by the setter).
    swagger_types = {
        'is_billing': 'boolean'
    }
    attribute_map = {
        'is_billing': 'is_billing'
    }
    def __init__(self, is_billing=None):  # noqa: E501
        """AzureSubscriptionPatchRequest - a model defined in Swagger"""  # noqa: E501
        self._is_billing = None
        self.discriminator = None
        # is_billing is effectively required: the setter raises on None.
        self.is_billing = is_billing
    @property
    def is_billing(self):
        """Gets the is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        If the subscription is used for billing  # noqa: E501
        :return: The is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        :rtype: boolean
        """
        return self._is_billing
    @is_billing.setter
    def is_billing(self, is_billing):
        """Sets the is_billing of this AzureSubscriptionPatchRequest.
        If the subscription is used for billing  # noqa: E501
        :param is_billing: The is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        :type: boolean
        """
        if is_billing is None:
            raise ValueError("Invalid value for `is_billing`, must not be `None`")  # noqa: E501
        self._is_billing = is_billing
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Serialize recursively: nested models (anything exposing to_dict)
        # and lists/dicts containing them are converted too.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AzureSubscriptionPatchRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
ab38a960d0a8048f0c08b21c1876de6f501d2193 | 8f9ec2aa1b423d64a5aa033acf60db842b122398 | /编程/python/高级探究/pdf/pdf_pypdf2.py | 17d57a22bfa593ddd1811ea396629230c48bd000 | [] | no_license | xuysang/learn_python | 853c650e3d546437c761bc59007fbb9633b01fc8 | 84d4ec67b99fb1d697603c04e8d33bae6693c4a8 | refs/heads/master | 2020-08-20T02:05:05.995711 | 2020-01-09T08:58:31 | 2020-01-09T08:58:31 | 215,974,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from PyPDF2 import PdfFileReader
def extract_info(pdf_path):
with open(pdf_path,'rb') as f:
pdf = PdfFileReader(f)
information = pdf.getDocumentInfo()
title = information.title
print(information)
extract_info("大成基金大成优势企业混合关于大成优势企业混合型证券投资基金基金合同生效公告.pdf") | [
"807692755@qq.com"
] | 807692755@qq.com |
5260ab00f8a2ad5cfa467bf282d37fa2f11ed232 | 13f216cab500ea007c8568d3ddea53cd681bf8fb | /voxelSimple/satTest.py | 4b7192e6fd2c4bd1fffa99adac6d95b27deb9c80 | [] | no_license | agentqwerty/python_scripts | 8895463b1f8e2e799f674b832abf529f2437c3f2 | f99f1bcbc8cbe1dbb7b689b5bb1c5c417dfd138a | refs/heads/master | 2016-09-06T16:24:29.527166 | 2013-04-16T17:56:13 | 2013-04-16T17:56:13 | 4,854,955 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,095 | py | __doc__ = """
Implementation of SAT (separating axis theorem) to test polygon-bounding box
intersection.
"""
# Standard Imports
import math, unittest
# Classes
class SimpleVec(object):
    """
    A simple vector class for use if we're not in maya.

    Minimal 3D vector: + and - are component-wise; * is the cross product
    for vector operands and scaling for scalars; iteration yields (x, y, z).
    """
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
        # Canonicalize any -0.0 components (e.g. from cross products).
        self._invert_zeroes()
    def __add__(self, v):
        """Component-wise addition."""
        return SimpleVec(self.x+v.x, self.y+v.y, self.z+v.z)
    def __eq__(self, v):
        """True when all three components compare equal (exact comparison)."""
        return self.x == v.x and self.y == v.y and self.z == v.z
    def __sub__(self, v):
        """Component-wise subtraction."""
        return SimpleVec(self.x-v.x, self.y-v.y, self.z-v.z)
    def __mul__(self, v):
        """Cross product when v is a SimpleVec, scalar scaling otherwise."""
        # isinstance (rather than the original type(v) == SimpleVec) also
        # handles subclasses of SimpleVec correctly.
        if isinstance(v, SimpleVec):
            return self.cross(v)
        return SimpleVec(self.x*v, self.y*v, self.z*v)
    def __rmul__(self, v):
        """Right-hand multiplication: v * self."""
        if isinstance(v, SimpleVec):
            return v.cross(self)
        return SimpleVec(self.x*v, self.y*v, self.z*v)
    def __iter__(self):
        """Yield x, y, z in order (enables unpacking: x, y, z = vec)."""
        for component in [self.x, self.y, self.z]:
            yield component
    def __str__(self):
        return '[%s]' % ','.join([str(i) for i in [self.x, self.y, self.z]])
    def _invert_zeroes(self):
        """
        Inverts the sign of any vector components valued at -0.0 so printed
        values read as 0.0.  Note -0.0 == 0.0 numerically in Python, so this
        is cosmetic with respect to equality comparisons.
        """
        if self.x == -0.0:
            self.x = 0.0
        if self.y == -0.0:
            self.y = 0.0
        if self.z == -0.0:
            self.z = 0.0
    def cross(self, v):
        """
        Cross product, expanded from the determinant
        | i    j    k   |
        | s.x  s.y  s.z |
        | v.x  v.y  v.z |
        (the j cofactor is negated in the returned vector).
        """
        x_comp = self.y*v.z - v.y*self.z
        y_comp = self.x*v.z - v.x*self.z
        z_comp = self.x*v.y - v.x*self.y
        return SimpleVec(x_comp, -y_comp, z_comp)
    def dot(self, v):
        """
        Dot product.
        """
        return self.x*v.x + self.y*v.y + self.z*v.z
    def normalize(self):
        """
        Normalize this vector in place.  Raises ZeroDivisionError for the
        zero vector.
        """
        magnitude = math.sqrt(self.x**2 + self.y**2 + self.z**2)
        self.x = self.x/magnitude
        self.y = self.y/magnitude
        self.z = self.z/magnitude
# Import vector class
try:
from pymel.core.datatypes import Vector as Vec3d
except ImportError:
Vec3d = SimpleVec
# Intersection Classes
class Shape(object):
    """
    A generalized representation of a shape. A shape is defined as a collection
    of vertices.

    Subclasses must supply an ``axes`` attribute: the list of candidate
    separating axes used by the SAT test in intersects().
    """
    # Define a zero vector for identifying degenerate cross products
    zero_vector = Vec3d(0.0,0.0,0.0)
    def __init__(self, vertices):
        """
        vertices: a list of Vec3d objects representing the vertices of the
                  shape
        """
        self.vertices = vertices
        # Cache the axis-aligned extents for the cheap rejection test.
        self._max_values(vertices)
    def __str__(self):
        return str('(%s)'%','.join([str(v) for v in self.vertices]))
    def __getitem__(self, key):
        return self.vertices[key]
    def __setitem__(self, key, value):
        self.vertices[key] = value
    def _max_values(self, vertices):
        """
        Obtains the min/max x,y,z values among the given verts and stores
        them on self (min_x/max_x, min_y/max_y, min_z/max_z).
        vertices: a list of vertices.
        """
        max_x = vertices[0].x
        min_x = vertices[0].x
        max_y = vertices[0].y
        min_y = vertices[0].y
        max_z = vertices[0].z
        min_z = vertices[0].z
        for v in vertices:
            x,y,z = v
            if x > max_x:
                max_x = x
            elif x < min_x:
                min_x = x
            if y > max_y:
                max_y = y
            elif y < min_y:
                min_y = y
            if z > max_z:
                max_z = z
            elif z < min_z:
                min_z = z
        self.min_x = min_x
        self.max_x = max_x
        self.min_y = min_y
        self.max_y = max_y
        self.min_z = min_z
        self.max_z = max_z
    def _quick_rejection(self, shape):
        """
        Tests the min/max values of the given shape against this one, and
        returns True if intersection is not possible.
        """
        if self.max_x < shape.min_x:
            return True
        if self.min_x > shape.max_x:
            return True
        if self.max_y < shape.min_y:
            return True
        if self.min_y > shape.max_y:
            return True
        if self.max_z < shape.min_z:
            return True
        if self.min_z > shape.max_z:
            return True
        return False
    def project(self, axis):
        """
        Project this shape onto the given axis. Returns the minimum and maximum
        values for the projection.
        axis: a Vec3d object representing the projection axis.
        """
        minVal = axis.dot(self.vertices[0])
        maxVal = minVal
        for v in self.vertices:
            p = axis.dot(v)
            if p < minVal:
                minVal = p
            elif p > maxVal:
                maxVal = p
        return (minVal, maxVal)
    def _overlap(self, min_max0, min_max1):
        """
        Returns True if overlap exists between the min-max ranges of min_max0
        and min_max1, False otherwise.

        Bug fix: the original chain of branch tests returned False when both
        ranges were identical (no branch matched), wrongly reporting two
        coincident projections as non-overlapping. Two ranges overlap iff
        each starts strictly before the other ends; the strict comparisons
        keep the original convention that ranges merely touching at an
        endpoint do not count as overlapping.
        min_max<0,1>: Tuples containing (minValue, maxValue)
        """
        min0, max0 = min_max0
        min1, max1 = min_max1
        return min0 < max1 and min1 < max0
    def intersects(self, shape):
        """
        Returns True if this shape intersects the given shape, False otherwise.
        SAT: the shapes are disjoint iff some candidate axis separates their
        projections.
        shape: A Shape to test for intersection.
        """
        # First check using _quick_rejection, so we don't have to do all the
        # projections for nothing.
        if self._quick_rejection(shape):
            return False
        # Check for overlap on each of the axes.
        for axis in self.axes+shape.axes:
            p0 = self.project(axis)
            p1 = shape.project(axis)
            if not self._overlap(p0, p1):
                return False
        return True
class AABB(Shape):
    """An axis-aligned bounding box."""
    # Every AABB shares the same face normals: the three cardinal axes.
    axes = [Vec3d(1,0,0), Vec3d(0,1,0), Vec3d(0,0,1)]
    def __init__(self, cx, cy, cz, hx, hy, hz, subdivs=0):
        """
        c<x,y,z>: The location of the center of the bounding box.
        h<x,y,z>: The half lengths of the bounding box in each cardinal
                  direction.
        subdivs:  How many subdivision levels deep this box is.
        """
        self.subdivs = subdivs
        # The eight corners: every +/- half-length combination about the center.
        corners = []
        for corner_x in (cx - hx, cx + hx):
            for corner_y in (cy - hy, cy + hy):
                for corner_z in (cz - hz, cz + hz):
                    corners.append(Vec3d(corner_x, corner_y, corner_z))
        super(AABB, self).__init__(corners)
        self.center = Vec3d(cx, cy, cz)
        self.half_vector = Vec3d(hx, hy, hz)
    def subdivide(self):
        """
        Divides this AABB into 8 sub-AABBs. Returns a list of those.
        """
        cx, cy, cz = self.center
        hx, hy, hz = self.half_vector
        child_half = [hx/2.0, hy/2.0, hz/2.0]
        children = []
        # Child centers are the 8 offset combinations of the quarter lengths.
        for child_cx in (cx - hx/2.0, cx + hx/2.0):
            for child_cy in (cy - hy/2.0, cy + hy/2.0):
                for child_cz in (cz - hz/2.0, cz + hz/2.0):
                    children.append(AABB(child_cx, child_cy, child_cz,
                                         *child_half, subdivs=self.subdivs+1))
        return children
class Triangle(Shape):
    """
    A triangle.
    """
    def __init__(self, x0, y0, z0, x1, y1, z1, x2, y2, z2):
        """
        <x,y,z>0: the x,y,z coordinates of the first triangle vertex.
        <x,y,z>1: the x,y,z coordinates of the second triangle vertex.
        <x,y,z>2: the x,y,z coordinates of the third triangle vertex.
        """
        # Triangle vertices.
        super(Triangle, self).__init__([Vec3d(x0,y0,z0),
                                        Vec3d(x1,y1,z1),
                                        Vec3d(x2,y2,z2)])
        # Candidate separating axes (used by Shape.intersects).
        self.axes = self._find_axes()
    def _find_axes(self):
        """
        Finds the axes that we'll need to test against for overlap. They
        include the triangle surface normal, and the edge/cardinal-axis
        cross products (the usual triangle-vs-AABB SAT axis set).
        """
        # Edge vectors.
        f0 = self.vertices[1] - self.vertices[0]
        f1 = self.vertices[2] - self.vertices[1]
        f2 = self.vertices[0] - self.vertices[2]
        # Surface Normal
        surf_normal = f0.cross(f1)
        surf_normal.normalize()
        # Edges x AABB normals
        normals = [surf_normal]
        for e in [f0, f1, f2]:
            for n in AABB.axes:
                n_cross_e = n.cross(e)
                # Don't want degenerate cross products: a zero vector (edge
                # parallel to the cardinal axis) cannot be normalized below.
                if n_cross_e == self.zero_vector:
                    continue
                # NOTE(review): recomputes the cross product; n_cross_e could
                # be appended directly.
                normals.append(n.cross(e))
        # Normalize the edge normals.
        for n in normals:
            n.normalize()
        return normals
# Unit Tests.
class SimpleVecTester(unittest.TestCase):
    """
    A group of unit tests for the SimpleVec class.
    """
    def setUp(self):
        # Fresh fixture vectors are created before every test method.
        self.test_vec0 = SimpleVec(1.0, 0.0, 0.0)
        self.test_vec1 = SimpleVec(0.0, 1.0, 0.0)
        self.test_vec2 = SimpleVec(0.0, 0.0, 1.0)
        self.test_vec3 = SimpleVec(1.0, 1.0, 1.0)
        self.test_vec4 = SimpleVec(2.0, 0.0, 3.0)
    def test_add(self):
        """
        test_add -- ensure v0 + v1 produces a vector with correct values.
        """
        check_vector = SimpleVec(1.0, 1.0, 0.0)
        result_vector = self.test_vec0 + self.test_vec1
        self.assertEqual(check_vector, result_vector)
    def test_sub(self):
        """
        test_sub -- ensure v0 - v1 produces a vector with correct values.
        """
        check_vector = SimpleVec(1.0, 0.0, 1.0)
        result_vector = self.test_vec3 - self.test_vec1
        self.assertEqual(check_vector, result_vector)
    def test_mult(self):
        """
        test_mult -- ensure v0*v1 yields a cross product, and v0*scalar yields
        the correct scaled vector.
        """
        check_vector_cross = SimpleVec(0.0, 0.0, 1.0)
        result_vector_cross = self.test_vec0*self.test_vec1
        self.assertEqual(check_vector_cross, result_vector_cross)
        check_vector_scalar = SimpleVec(3.0, 0.0, 0.0)
        result_vector_scalar = 3.0*self.test_vec0
        self.assertEqual(check_vector_scalar, result_vector_scalar)
    def test_dot(self):
        """
        test_dot -- ensure v0 . v1 returns the correct scalar value.
        """
        check_value = 5.0
        result_value = self.test_vec3.dot(self.test_vec4)
        self.assertEqual(check_value, result_value)
class ShapeTester(unittest.TestCase):
    """Unit tests for Shape helpers, exercised via AABB and Triangle."""
    def setUp(self):
        self.test_vec0 = SimpleVec(1.0, 0.0, 0.0)
        self.test_vec1 = SimpleVec(0.0, 1.0, 0.0)
        self.test_vec2 = SimpleVec(0.0, 0.0, 1.0)
        self.test_vec3 = SimpleVec(1.0, 1.0, 1.0)
        self.test_vec4 = SimpleVec(2.0, 0.0, 3.0)
        # Unit-half-width box at the origin; triangle0 lies inside its
        # y-range (y = 0.9), triangle1 just outside (y = 1.1).
        self.test_aabb = AABB(0.0, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.test_triangle0 = Triangle(2.0, 0.9, -2.0,
                                       -2.0, 0.9, -2.0,
                                       0.0, 0.9, 2.0)
        self.test_triangle1 = Triangle(2.0, 1.1, -2.0,
                                       -2.0, 1.1, -2.0,
                                       0.0, 1.1, 2.0)
    def test_max_values(self):
        """
        test_max_values -- ensures that the correct min and max values are
        being collected from shapes.
        """
        check_values = {'max_x':2.0, 'min_x':-2.0, 'max_y':0.9, 'min_y':0.9,
                        'max_z':2.0, 'min_z':-2.0}
        for component, value in check_values.items():
            self.assertEqual(getattr(self.test_triangle0, component),
                             check_values[component])
    def test_quick_rejection(self):
        """
        test_quick_rejection -- ensure that shapes outside of bounding values
        are rejected correctly.
        """
        self.assertFalse(self.test_aabb._quick_rejection(self.test_triangle0))
        self.assertTrue(self.test_aabb._quick_rejection(self.test_triangle1))
    def test_project(self):
        """
        test_project -- ensure that min/max values returned by projection
        are correct.
        """
        projection_axis0 = self.test_vec1
        projection_axis1 = self.test_vec2
        check_values0 = (1.1, 1.1)
        check_values1 = (-2.0, 2.0)
        self.assertEqual(self.test_triangle1.project(projection_axis0),
                         check_values0)
        self.assertEqual(self.test_triangle1.project(projection_axis1),
                         check_values1)
    def test_overlap(self):
        """
        test_overlap -- ensure that overlapping geo is correctly
        identified.
        """
        # overlap: 1 contains 0
        overlap_checks0 = [(1.1, 1.1), (-2.0, 2.0)]
        self.assertTrue(self.test_triangle1._overlap(*overlap_checks0))
        # overlap: 0 contains 1
        overlap_checks1 = [(-2.0, 2.0), (1.1, 1.1)]
        self.assertTrue(self.test_triangle1._overlap(*overlap_checks1))
        # overlap: 0 max > 1 min
        overlap_checks2 = [(0.0, 2.0), (1.1, 3.0)]
        self.assertTrue(self.test_triangle1._overlap(*overlap_checks2))
        # overlap: 1 max > 0 min
        overlap_checks3 = [(1.1, 3.0), (0.0, 2.0)]
        self.assertTrue(self.test_triangle1._overlap(*overlap_checks3))
        # no overlap.
        overlap_checks4 = [(1.1, 1.9), (2.0, 3.0)]
        self.assertFalse(self.test_triangle1._overlap(*overlap_checks4))
if __name__ == '__main__':
    # Run the unit tests above with verbose per-test output.
    unittest.main(verbosity=2)
| [
"qwertyman.01@gmail.com"
] | qwertyman.01@gmail.com |
a93cdcce54c0962c262506fda48da79fafe9b647 | 754691c0b7e57557ffa2b33c00f7ade57330d4d5 | /data_splitting_rcv1.py | bdc81afd023ccddf5fade4956d9b294b6705b454 | [] | no_license | utamhank1/data_mining_algorithms | a2a34b9e299b5fb7db89b9223be587cd98af212f | 01a9e719de980f0fb39f96ace07b7833cec0755c | refs/heads/master | 2020-12-29T21:46:29.705800 | 2020-02-07T20:01:12 | 2020-02-07T20:01:12 | 238,743,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | # This script takes the rcv1 dataset (www.jmlr.org/papers/volume5/lewis04a/lewis04a.pdf) of 800,000 categorized news
# stories and splits it into training and test data.
# Import libraries
import numpy as np
from sklearn.datasets import fetch_rcv1
def main():
    """Split the RCV1 corpus into train/test arrays (first 100k documents
    train, the rest test) with +-1 labels from target column 33, and save
    the four arrays to disk as .npy files."""
    # Fetch the rcv1 dataset from sklearn.
    rcv1 = fetch_rcv1()
    # Clean and reformat the dataset.
    target = rcv1['target'].todense()
    # Keep only target column 33, flattened to a 1-D vector.
    label = np.array(target[:, 33]).reshape(1, -1)[0]
    # NOTE(review): assigning .dtype reinterprets the underlying buffer as
    # int8 in place -- assumes the densified target uses byte-sized elements.
    label.dtype = 'int8'
    # Map the 0 class to -1, giving binary +-1 labels.
    label[label == 0] = -1
    # Create numpy array of training data.
    training_data = rcv1['data'][0:100000, :]
    # Assign labels to training data.
    training_label = label[0:100000]
    # Remaining documents form the test split.
    test_data = rcv1['data'][100000:, :]
    test_label = label[100000:]
    # Save the training and test datasets to disk.
    np.save('test_data_rcv1.npy', test_data)
    np.save('test_label_rcv1', test_label)
    np.save('training_data_rcv1', training_data)
    np.save('training_label_rcv1', training_label)
if __name__ == '__main__':
    # Entry point: run the split when executed as a script.
    main()
| [
"utamhank1@gmail.com"
] | utamhank1@gmail.com |
a2a5ab237decb693a81f7126c4194036946116cd | b4cb52100df0e253d9423ba3a7a832a2d758b6de | /pixelpics/pixelfy_app/migrations/0002_like.py | fbfca80f00cfff2c7d83d6b713eee75e54e7bb91 | [] | no_license | nchanay/Capstone | 2e94708b80c33efbf48a1d46af73cbbaa5af706b | ef4832292c788926dde3a87d44fe631cf6d068dd | refs/heads/master | 2023-04-26T14:19:41.383260 | 2019-05-31T01:14:11 | 2019-05-31T01:14:11 | 185,299,498 | 0 | 0 | null | 2023-04-21T20:32:03 | 2019-05-07T01:42:47 | JavaScript | UTF-8 | Python | false | false | 835 | py | # Generated by Django 2.2.1 on 2019-05-22 03:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Like model linking a user to a
    # pixelfy_app.Pixelfy post.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pixelfy_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Deleting the post or the user cascades to its likes.
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='pixelfy_app.Pixelfy')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"nchanay@gmail.com"
] | nchanay@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.