blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83b0799e74862af36a5da566a7b2cb752cbbda06 | a6010992087af00ca728bbae4343ad6dbb4a487c | /db/db_item/views.py | 0f798cad7068783c943988a12f61489edfbe7e9c | [] | no_license | baebae996/Database-project | 0ec163b932d15b75bb882871e936ec4b2638e96b | 7fe3c388107d537cd70ea0596d58c2caf68e3a83 | refs/heads/master | 2022-11-14T11:57:04.135149 | 2020-06-29T11:49:07 | 2020-06-29T11:49:07 | 266,790,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,628 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.db import models
from db_item import models
# Create your views here.
def index(request):
    """Render a plain-text placeholder response for the index page."""
    response = HttpResponse('this is index')
    return response
def login(request):
    """Log a buyer ('user') or seller in, based on the posted 'role' field.

    GET, or a failed POST, renders the login template; an already
    logged-in session is redirected straight to /index.
    """
    if request.session.get('is_login', None):
        return redirect('/index')
    if request.method == 'POST':
        buyer_or_seller = request.POST.get('role')
        username = request.POST.get('username')
        password = request.POST.get('password')
        message = '请填写所有字段'
        if username and password:
            username = username.strip()
            if buyer_or_seller == 'user':
                # BUG FIX: was a bare `except:` that swallowed *every* error
                # (including programming errors); only a missing row should
                # map to the "user does not exist" message.
                try:
                    user = models.User.objects.get(user_name=username)
                except models.User.DoesNotExist:
                    message = '用户名不存在'
                else:
                    # NOTE(review): plaintext password comparison — passwords
                    # should be hashed (e.g. django.contrib.auth.hashers).
                    if user.user_password == password:
                        request.session['is_login'] = True
                        request.session['user_username'] = user.user_name
                        return redirect('/index')
                    message = '密码不正确'
            elif buyer_or_seller == 'seller':
                try:
                    seller = models.Seller.objects.get(seller_name=username)
                except models.Seller.DoesNotExist:
                    message = '用户名不存在'
                else:
                    if seller.seller_password == password:
                        request.session['is_login'] = True
                        request.session['sell_sellname'] = seller.seller_name
                        return redirect('/index')
                    message = '密码不正确'
        return render(request, '../static/html/login.html', {"message": message})
    return render(request, '../static/html/login.html')
def register(request):
    """Create a new User or Seller account from the posted registration form.

    On success (or on GET / silently invalid input) the login template is
    rendered, matching the original control flow.
    """
    if request.method == 'POST':
        buyer_or_seller = request.POST.get('role')
        username = request.POST.get('username')
        password1 = request.POST.get('password')
        password2 = request.POST.get('password2')
        if username and password1 and password2:
            if password1 == password2:
                if buyer_or_seller == 'user':
                    # .exists() avoids fetching rows just to test membership.
                    if models.User.objects.filter(user_name=username).exists():
                        message = '用户已经存在,'
                        return render(request, '../static/html/register.html', {"message": message})
                    new_user = models.User.objects.create()
                    new_user.user_name = username
                    new_user.user_password = password1
                    new_user.save()
                # BUG FIX: this branch checked 'buyer', but login() and the
                # role posted by the form use 'seller' — sellers could never
                # register.
                elif buyer_or_seller == 'seller':
                    if models.Seller.objects.filter(seller_name=username).exists():
                        message = '用户已经存在,'
                        return render(request, '../static/html/register.html', {"message": message})
                    new_seller = models.Seller.objects.create()
                    new_seller.seller_name = username
                    new_seller.seller_password = password1
                    new_seller.save()
            else:
                message = '两次密码输入不同'
                return render(request, '../static/html/register.html', {"message": message})
        # NOTE(review): missing fields fall through silently, as before.
    return render(request, '../static/html/login.html')
def logout(request):
    """Flush the session (if logged in) and return to the index page."""
    if request.session.get('is_login'):
        request.session.flush()
    return redirect('/index')
"2459343217@qq.com"
] | 2459343217@qq.com |
77a7abbd67fc0f5d958444057b77e1fa3518e3fa | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/monitoring/dashboard/v1/monitoring-dashboard-v1-py/google/monitoring/dashboard_v1/types/xychart.py | d94061b1defd3543e734bee18c04bf35a39253da | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,725 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.monitoring.dashboard_v1.types import metrics
from google.protobuf import duration_pb2 as duration # type: ignore
# Proto-plus module descriptor: registers the message classes below under
# the google.monitoring.dashboard.v1 proto package.
__protobuf__ = proto.module(
    package='google.monitoring.dashboard.v1',
    manifest={
        'XyChart',
        'ChartOptions',
    },
)
# NOTE: generated (bazel-bot) proto-plus message; field numbers must stay in
# sync with the upstream .proto definition — do not hand-edit field wiring.
class XyChart(proto.Message):
    r"""A chart that displays data on a 2D (X and Y axes) plane.
    Attributes:
        data_sets (Sequence[google.monitoring.dashboard_v1.types.XyChart.DataSet]):
            Required. The data displayed in this chart.
        timeshift_duration (google.protobuf.duration_pb2.Duration):
            The duration used to display a comparison
            chart. A comparison chart simultaneously shows
            values from two similar-length time periods
            (e.g., week-over-week metrics).
            The duration must be positive, and it can only
            be applied to charts with data sets of LINE plot
            type.
        thresholds (Sequence[google.monitoring.dashboard_v1.types.Threshold]):
            Threshold lines drawn horizontally across the
            chart.
        x_axis (google.monitoring.dashboard_v1.types.XyChart.Axis):
            The properties applied to the X axis.
        y_axis (google.monitoring.dashboard_v1.types.XyChart.Axis):
            The properties applied to the Y axis.
        chart_options (google.monitoring.dashboard_v1.types.ChartOptions):
            Display options for the chart.
    """
    class DataSet(proto.Message):
        r"""Groups a time series query definition with charting options.
        Attributes:
            time_series_query (google.monitoring.dashboard_v1.types.TimeSeriesQuery):
                Required. Fields for querying time series
                data from the Stackdriver metrics API.
            plot_type (google.monitoring.dashboard_v1.types.XyChart.DataSet.PlotType):
                How this data should be plotted on the chart.
            legend_template (str):
                A template string for naming ``TimeSeries`` in the resulting
                data set. This should be a string with interpolations of the
                form ``${label_name}``, which will resolve to the label's
                value.
            min_alignment_period (google.protobuf.duration_pb2.Duration):
                Optional. The lower bound on data point frequency for this
                data set, implemented by specifying the minimum alignment
                period to use in a time series query For example, if the
                data is published once every 10 minutes, the
                ``min_alignment_period`` should be at least 10 minutes. It
                would not make sense to fetch and align data at one minute
                intervals.
        """
        class PlotType(proto.Enum):
            r"""The types of plotting strategies for data sets."""
            PLOT_TYPE_UNSPECIFIED = 0
            LINE = 1
            STACKED_AREA = 2
            STACKED_BAR = 3
            HEATMAP = 4
        # Field descriptors for DataSet.
        time_series_query = proto.Field(proto.MESSAGE, number=1,
            message=metrics.TimeSeriesQuery,
        )
        plot_type = proto.Field(proto.ENUM, number=2,
            enum='XyChart.DataSet.PlotType',
        )
        legend_template = proto.Field(proto.STRING, number=3)
        min_alignment_period = proto.Field(proto.MESSAGE, number=4,
            message=duration.Duration,
        )
    class Axis(proto.Message):
        r"""A chart axis.
        Attributes:
            label (str):
                The label of the axis.
            scale (google.monitoring.dashboard_v1.types.XyChart.Axis.Scale):
                The axis scale. By default, a linear scale is
                used.
        """
        class Scale(proto.Enum):
            r"""Types of scales used in axes."""
            SCALE_UNSPECIFIED = 0
            LINEAR = 1
            LOG10 = 2
        label = proto.Field(proto.STRING, number=1)
        scale = proto.Field(proto.ENUM, number=2,
            enum='XyChart.Axis.Scale',
        )
    # Top-level field descriptors for XyChart.
    data_sets = proto.RepeatedField(proto.MESSAGE, number=1,
        message=DataSet,
    )
    timeshift_duration = proto.Field(proto.MESSAGE, number=4,
        message=duration.Duration,
    )
    thresholds = proto.RepeatedField(proto.MESSAGE, number=5,
        message=metrics.Threshold,
    )
    x_axis = proto.Field(proto.MESSAGE, number=6,
        message=Axis,
    )
    y_axis = proto.Field(proto.MESSAGE, number=7,
        message=Axis,
    )
    chart_options = proto.Field(proto.MESSAGE, number=8,
        message='ChartOptions',
    )
# Generated proto-plus message; keep field numbers in sync with the .proto.
class ChartOptions(proto.Message):
    r"""Options to control visual rendering of a chart.
    Attributes:
        mode (google.monitoring.dashboard_v1.types.ChartOptions.Mode):
            The chart mode.
    """
    class Mode(proto.Enum):
        r"""Chart mode options."""
        MODE_UNSPECIFIED = 0
        COLOR = 1
        X_RAY = 2
        STATS = 3
    mode = proto.Field(proto.ENUM, number=1,
        enum=Mode,
    )
# Export exactly the names registered in the proto module manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
3e095418c36e15c34c0e0ca151cbf3b909a23222 | 6ca22ee79ccc834ec9734a2fb22f74fa66f40d81 | /mirror3d/visualization/__init__.py | 382b9558218d36de8d09cc47540e57fedacc7cbc | [] | no_license | samxuxiang/mirror3d | 1284b2a5e5f2b8516ebeb1ed00f51a754f9f5f45 | c06ec658052675c7d9f4fb4fc26a5bc4bfee15f3 | refs/heads/main | 2023-06-21T06:47:56.799688 | 2021-08-02T21:02:47 | 2021-08-02T21:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | from mirror3d.visualization.other_visualization_tool import *
from mirror3d.visualization.result_visualization import *
| [
"jiaqit@cs-3dlg-10.cmpt.sfu.ca"
] | jiaqit@cs-3dlg-10.cmpt.sfu.ca |
55256b13384151091b0b27383d410edd1ce32fa3 | fec1cacfa6f01ba92852d776b52e41d4d7e0421e | /Practice/21/main.py | 77b19c00c9f23331dd3be62677f5ff1e3a849cc4 | [] | no_license | BRUS1NATOR/Python-Tasks | 47b36a60f73a6cb292285926cdc45130b49e1791 | 0678f4e4b47eed4d6f4c48f6008cd617cd920d08 | refs/heads/main | 2023-04-23T18:30:42.963773 | 2021-04-28T09:59:27 | 2021-04-28T09:59:27 | 344,458,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | from abstract import ABMI
class my_BMI(ABMI):
    # Takes weight in kilograms and height in CENTIMETERS (the code divides
    # height by 100, despite the original comment claiming meters).
    # Returns the body mass index.
    def bmi(self, weight: float, height: float):
        super().bmi(weight, height)
        return weight / ((height / 100) ** 2)
    # Takes a numeric BMI value and prints the matching WHO category.
    def print_bmi(self, _bmi: float):
        super().print_bmi(_bmi)
        if _bmi < 18.5:
            print('Underweight')
        elif 18.5 <= _bmi < 25.0:
            print('Normal')
        elif 25 <= _bmi < 30.0:
            print('Overweight')
        else:
            print('Obesity')
def user_input():
    """Prompt for weight and height (space separated) and print the BMI category.

    Retries recursively until the values parse as numbers; silently does
    nothing when the count of values is not exactly two (original behavior).
    """
    try:
        data = list(map(float, input('Введите вес и рост через пробел: ').split(' ')))
        if len(data) == 2:
            my_BMI().print_bmi(my_BMI().bmi(data[0], data[1]))
    # BUG FIX: `except ValueError or TypeError:` evaluated the `or` first and
    # reduced to `except ValueError:`, so TypeError was never caught.
    # Multiple exception types must be given as a tuple.
    except (ValueError, TypeError):
        print('Введите вес и рост должны быть числами')
        user_input()
# Script entry point: run the interactive prompt only when executed directly.
if __name__ == '__main__':
    user_input()
| [
"BRUSLENKOAL@gmail.com"
] | BRUSLENKOAL@gmail.com |
341d76325035ff6d87b2a472e45099b20a10e743 | 690f9c5ded4403aa8247e510addac01c1801278e | /devel/lib/python2.7/dist-packages/principiante_tutoriales/srv/_AddTwoInts.py | 521c95106bef4afd7d4ee01e5d3b0212bd459a54 | [] | no_license | mario-serna/my_robot_workspace | 88d7cf1fa9e7fd664c5beaed023871252f2132fb | 298b4a9e41ad41231d1233d525b1e790443dbcdf | refs/heads/master | 2020-03-22T10:35:51.508897 | 2018-09-14T13:00:35 | 2018-09-14T13:00:35 | 139,912,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,415 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from principiante_tutoriales/AddTwoIntsRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class AddTwoIntsRequest(genpy.Message):
_md5sum = "36d09b846be0b371c5f190354dd3153e"
_type = "principiante_tutoriales/AddTwoIntsRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int64 a
int64 b
"""
__slots__ = ['a','b']
_slot_types = ['int64','int64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
a,b
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddTwoIntsRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.a is None:
self.a = 0
if self.b is None:
self.b = 0
else:
self.a = 0
self.b = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2q().pack(_x.a, _x.b))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.a, _x.b,) = _get_struct_2q().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2q().pack(_x.a, _x.b))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.a, _x.b,) = _get_struct_2q().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
# Cached struct accessor for genpy's standard uint32 length prefix.
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
_struct_2q = None
def _get_struct_2q():
global _struct_2q
if _struct_2q is None:
_struct_2q = struct.Struct("<2q")
return _struct_2q
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from principiante_tutoriales/AddTwoIntsResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
# Autogenerated by genpy from AddTwoInts.srv — do not hand-edit; regenerate
# from the .srv definition instead.  Wire format: one little-endian int64.
class AddTwoIntsResponse(genpy.Message):
  _md5sum = "b88405221c77b1878a3cbbfff53428d7"
  _type = "principiante_tutoriales/AddTwoIntsResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """int64 sum
"""
  __slots__ = ['sum']
  _slot_types = ['int64']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       sum
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(AddTwoIntsResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.sum is None:
        self.sum = 0
    else:
      self.sum = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # '<q' packs the single int64 'sum' field (8 bytes).
      buff.write(_get_struct_q().pack(self.sum))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 8
      (self.sum,) = _get_struct_q().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_q().pack(self.sum))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 8
      (self.sum,) = _get_struct_q().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Cached struct accessors for the response half of the generated service
# file (genpy duplicates these per message).
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
_struct_q = None
def _get_struct_q():
  global _struct_q
  if _struct_q is None:
    # '<q' = one little-endian int64 (the 'sum' field).
    _struct_q = struct.Struct("<q")
  return _struct_q
# Service stub tying the request/response classes together; used by rospy
# service clients/servers to resolve types and validate the service checksum.
class AddTwoInts(object):
  _type          = 'principiante_tutoriales/AddTwoInts'
  _md5sum = '6a2e34150c00229791cc89ff309fff21'
  _request_class  = AddTwoIntsRequest
  _response_class = AddTwoIntsResponse
| [
"mario_sh95@hotmail.com"
] | mario_sh95@hotmail.com |
b1a13dce339a941be725d6d1ad3e8a952bd105fc | 177676a4c0b7b29dae729a3cff7f5d1a09405746 | /app/models.py | 038b334f25c3b6fe33e56be0c1b5ceed83c4612c | [
"MIT"
] | permissive | eddyvdaker/Basic-Flask-Template-App | 4ffade73c1cdfb1f2ca77554d7a034f4b4e5282d | d25149c01a05d8d3b336eccc86e0262eba0e30c7 | refs/heads/master | 2020-04-05T08:48:27.039075 | 2018-11-08T17:13:30 | 2018-11-08T17:13:30 | 156,730,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # If you are using a DB extension, like SQLAlchemy, add any models here.
| [
"eddy.v.d.aker@gmail.com"
] | eddy.v.d.aker@gmail.com |
00bd146e6b0d26bdad0458716b04c5972f7c3696 | b47b7f2ddf128d9660d15f6d7a777d2bb2fe0923 | /userManager/userManager/urls.py | 740548c1bb83e47e67787594e7ccbe413f34c8bf | [] | no_license | gaizhongfeng/manageSystem | 8a4711b6c8c3ae41720535e057a707c0161d509f | a6c49d74d85a1243f0ecd0eac106d9be3a050de2 | refs/heads/master | 2020-04-12T02:40:38.855497 | 2018-12-18T10:23:00 | 2018-12-18T10:23:00 | 162,249,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | """userManager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app01 import views
from django.conf.urls import url,include
# URL routing table: maps each request path to its view callable.
urlpatterns = [
    path('admin/', admin.site.urls),
    # path('login.html',views.login),
    # url() can point at a class-based view via views.ClassName.as_view();
    # the class must inherit from views.View.
    url('login.html',views.Login.as_view()),
    url(r'^index.html',views.index),
    url(r'^logout.html',views.logout),
    # Class (course) management pages.
    url(r'^classes.html',views.handle_classes),
    url(r'^add_classes.html',views.handle_add_classes),
    url(r'^student.html',views.handle_student),
    # Teacher management pages; edit captures the teacher primary key from the URL.
    url(r'^teacher.html',views.handle_teacher),
    url(r'^add_teacher.html',views.add_teacher),
    url(r'^upload.html',views.upload),
    url(r'^edit_teacher-(\d+).html',views.edit_teacher),
    path('js_cookie.html',views.js_cookie),
]
| [
"zhong-feng.gai@hpe.com"
] | zhong-feng.gai@hpe.com |
a132f26dbd5f323274528482518a0db067ccaee7 | 24cce1ec7737f9ebb6df3e317a36c0a0329ec664 | /HZMX/amazon_api/wizard/__init__.py | ec88e0a602a1466bc35254ab81c4e14fb0155649 | [] | no_license | tate11/HangZhouMinXing | ab261cb347f317f9bc4a77a145797745e2531029 | 14b7d34af635db015bd3f2c139be1ae6562792f9 | refs/heads/master | 2021-04-12T04:23:20.165503 | 2018-03-14T05:02:05 | 2018-03-14T05:02:05 | 125,855,729 | 1 | 0 | null | 2018-03-19T12:42:07 | 2018-03-19T12:42:07 | null | UTF-8 | Python | false | false | 183 | py | # -*- coding: utf-8 -*-
from . import amazon_wizard
from . import shop_template_wizard
from . import sync_sale_order
from . import stock_adjust
from . import stock_immediate_transfer | [
"1121403085"
] | 1121403085 |
9fb779ad0f796559354d71475b2f226bf9a24669 | b40208a775a817a8153b03fe3b63e63edc8664ca | /Basics/Day-1/Inputt.py | c86610289c9de3f2ce07d1759df57f105ab84d62 | [] | no_license | optionalg/Practising-Python | 96ea952273cdff693c6b24387bb305975e2f5a57 | f94475ef8c4100841d407544e4b3ef3a2d78dc10 | refs/heads/master | 2021-06-18T21:43:39.179871 | 2017-06-16T11:01:24 | 2017-06-16T11:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | import sys
#print (sys.platform)
#print (2 ** 100)
# Read one line from standard input and echo it back.
x=input( )
print(x)
| [
"noreply@github.com"
] | optionalg.noreply@github.com |
e9f2bec9246fd4ed09f7d97c23f46e5fcf455a55 | 7dd1738268b4ebb721592b02080626799f3c6eed | /tests/test_textparser.py | d32dae4bb5571e0834d0263cf72ec0b74ea70c68 | [
"MIT"
] | permissive | Afsaan/textparser | f721dad1aa8fd36d21274ea4cf5ec5722561fe8c | cc4a85f8b7e6d6be83f5072f45af4a7baf6c35df | refs/heads/master | 2022-04-06T03:27:03.962419 | 2020-01-02T14:51:05 | 2020-01-02T14:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,626 | py | import unittest
from collections import namedtuple
import textparser
from textparser import Grammar
from textparser import Sequence
from textparser import Choice
from textparser import choice
from textparser import ChoiceDict
from textparser import ZeroOrMore
from textparser import ZeroOrMoreDict
from textparser import OneOrMore
from textparser import OneOrMoreDict
from textparser import DelimitedList
from textparser import Token
from textparser import TokenizeError
from textparser import tokenize_init
from textparser import Any
from textparser import AnyUntil
from textparser import Optional
from textparser import Tag
from textparser import Forward
from textparser import NoMatch
from textparser import Not
from textparser import And
from textparser import markup_line
from textparser import replace_blocks
def tokenize(items, add_eof_token=True):
    """Build a list of Token objects from (kind, value[, offset]) tuples.

    Two-element tuples get a default offset of 1.  A trailing __EOF__
    token (offset -1) is appended unless ``add_eof_token`` is False.
    """
    tokens = [
        Token(*item, offset=1) if len(item) == 2 else Token(*item)
        for item in items
    ]
    if add_eof_token:
        tokens.append(Token('__EOF__', None, -1))
    return tokens
class TextParserTest(unittest.TestCase):
def parse_and_assert_tree(self, grammar, datas):
for tokens, expected_tree in datas:
tree = grammar.parse(tokenize(tokens))
self.assertEqual(tree, expected_tree)
def parse_and_assert_mismatch(self, grammar, datas):
for tokens, line in datas:
tokens = tokenize(tokens)
with self.assertRaises(textparser.GrammarError) as cm:
grammar.parse(tokens)
self.assertEqual(cm.exception.offset, line)
    # Sequence: two token kinds in order parse into a flat list.
    def test_grammar_sequence(self):
        grammar = Grammar(Sequence('NUMBER', 'WORD'))
        tokens = tokenize([
            ('NUMBER', '1.45'),
            ('WORD', 'm')
        ])
        tree = grammar.parse(tokens)
        self.assertEqual(tree, ['1.45', 'm'])
    # A truncated sequence fails at end of input (offset -1).
    def test_grammar_sequence_mismatch(self):
        grammar = Grammar(Sequence('NUMBER', 'WORD'))
        tokens = tokenize([('NUMBER', '1.45')])
        with self.assertRaises(textparser.GrammarError) as cm:
            grammar.parse(tokens)
        self.assertEqual(cm.exception.offset, -1)
    # Choice: first matching alternative wins; result is the bare value.
    def test_grammar_choice(self):
        grammar = Grammar(Choice('NUMBER', 'WORD'))
        datas = [
            (
                [('WORD', 'm')],
                'm'
            ),
            (
                [('NUMBER', '5')],
                '5'
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Choice failure reports the offset of the furthest mismatching token.
    def test_grammar_choice_mismatch(self):
        grammar = Grammar(Choice(Sequence('NUMBER', 'WORD'),
                                 'WORD'))
        datas = [
            ([('NUMBER', '1', 5)], -1),
            ([('NUMBER', '1', 5), ('NUMBER', '2', 7)], 7)
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # ChoiceDict dispatches on the first token kind; nested ChoiceDict,
    # Forward and plain token kinds all work as alternatives.
    def test_grammar_choice_dict(self):
        number = Forward()
        number <<= Sequence('NUMBER')
        grammar = Grammar(ChoiceDict(number,
                                     Tag('foo', Sequence('WORD')),
                                     ChoiceDict('BAR'),
                                     'FIE'))
        datas = [
            (
                [('WORD', 'm')],
                ('foo', ['m'])
            ),
            (
                [('NUMBER', '5')],
                ['5']
            ),
            (
                [('BAR', 'foo')],
                'foo'
            ),
            (
                [('FIE', 'fum')],
                'fum'
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A token kind absent from the dispatch table raises at its offset.
    def test_grammar_choice_dict_mismatch(self):
        grammar = Grammar(ChoiceDict(Sequence('NUMBER'),
                                     Sequence('WORD')))
        tokens = tokenize([(',', ',', 3)])
        with self.assertRaises(textparser.Error) as cm:
            grammar.parse(tokens)
        self.assertEqual(cm.exception.offset, 3)
    # ChoiceDict construction rejects duplicate first-token kinds and
    # patterns whose first token kind cannot be determined.
    def test_grammar_choice_dict_init(self):
        datas = [
            (
                ('WORD', 'WORD'),
                "First token kind must be unique, but WORD isn't."
            ),
            (
                ('WORD', Sequence('WORD')),
                "First token kind must be unique, but WORD isn't."
            ),
            (
                (Sequence(Sequence(Optional('WORD'))), ),
                "Unsupported pattern type <class 'textparser.Optional'>."
            )
        ]
        for grammar, message in datas:
            with self.assertRaises(textparser.Error) as cm:
                ChoiceDict(*grammar)
            self.assertEqual(str(cm.exception), message)
    # DelimitedList: comma-separated items collapse to a single list;
    # the trailing Optional('.') parses independently.
    def test_grammar_delimited_list(self):
        grammar = Grammar(Sequence(DelimitedList('WORD'), Optional('.')))
        datas = [
            (
                [('WORD', 'foo')],
                [['foo'], []]
            ),
            (
                [('WORD', 'foo'), (',', ','), ('WORD', 'bar')],
                [['foo', 'bar'], []]
            ),
            (
                [('WORD', 'foo'), (',', ','), ('WORD', 'bar'), ('.', '.')],
                [['foo', 'bar'], ['.']]
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A delimiter with no following item fails at the dangling delimiter.
    def test_grammar_delimited_list_mismatch(self):
        grammar = Grammar(Sequence(DelimitedList('WORD'), Optional('.')))
        datas = [
            (
                [
                    ('WORD', 'foo', 1),
                    (',', ',', 2)
                ],
                2
            ),
            (
                [
                    ('WORD', 'foo', 1),
                    (',', ',', 2),
                    ('WORD', 'foo', 3),
                    (',', ',', 4),
                    ('.', '.', 5)
                ],
                4
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # ZeroOrMore accepts zero, one or many repetitions.
    def test_grammar_zero_or_more(self):
        grammar = Grammar(ZeroOrMore('WORD'))
        datas = [
            (
                [],
                []
            ),
            (
                [('WORD', 'foo')],
                ['foo']
            ),
            (
                [('WORD', 'foo'), ('WORD', 'bar')],
                ['foo', 'bar']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A partial match of the repeated element is backtracked so the
    # trailing 'WORD' can still consume it.
    def test_grammar_zero_or_more_partial_element_match(self):
        grammar = Grammar(Sequence(
            ZeroOrMore(Sequence('WORD', 'NUMBER')), 'WORD'))
        datas = [
            (
                [
                    ('WORD', 'foo'),
                    ('NUMBER', '1'),
                    ('WORD', 'bar'),
                    ('NUMBER', '2'),
                    ('WORD', 'fie')],
                [[['foo', '1'], ['bar', '2']], 'fie']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # ZeroOrMoreDict groups repetitions by their first token value.
    def test_grammar_zero_or_more_dict(self):
        grammar = Grammar(ZeroOrMoreDict(Sequence('WORD', 'NUMBER')))
        datas = [
            (
                [],
                {}
            ),
            (
                [('WORD', 'foo'), ('NUMBER', '1'),
                 ('WORD', 'bar'), ('NUMBER', '2'),
                 ('WORD', 'foo'), ('NUMBER', '3')],
                {
                    'foo': [['foo', '1'], ['foo', '3']],
                    'bar': [['bar', '2']]
                }
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # OneOrMore requires at least one repetition.
    def test_grammar_one_or_more(self):
        grammar = Grammar(OneOrMore('WORD'))
        datas = [
            (
                [('WORD', 'foo')],
                ['foo']
            ),
            (
                [('WORD', 'foo'), ('WORD', 'bar')],
                ['foo', 'bar']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Zero repetitions (or a wrong kind) is an error for OneOrMore.
    def test_grammar_one_or_more_mismatch(self):
        grammar = Grammar(OneOrMore('WORD'))
        datas = [
            (
                []
                , -1
            ),
            (
                [('NUMBER', 'foo', 2)],
                2
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # OneOrMoreDict groups like ZeroOrMoreDict but requires one element.
    def test_grammar_one_or_more_dict(self):
        grammar = Grammar(OneOrMoreDict(Sequence('WORD', 'NUMBER')))
        datas = [
            (
                [('WORD', 'foo'), ('NUMBER', '1')],
                {
                    'foo': [['foo', '1']]
                }
            ),
            (
                [('WORD', 'foo'), ('NUMBER', '1'),
                 ('WORD', 'bar'), ('NUMBER', '2'),
                 ('WORD', 'foo'), ('NUMBER', '3')],
                {
                    'foo': [['foo', '1'], ['foo', '3']],
                    'bar': [['bar', '2']]
                }
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # OneOrMoreDict mismatch offsets: empty input, broken first element,
    # and a broken second element are each reported at the right token.
    def test_grammar_one_or_more_dict_mismatch(self):
        grammar = Grammar(OneOrMoreDict(Sequence('WORD', 'NUMBER')))
        datas = [
            (
                [('WORD', 'foo', 5)],
                -1
            ),
            (
                [
                    ('WORD', 'foo', 5),
                    ('WORD', 'bar', 6)
                ],
                6
            ),
            (
                [
                    ('WORD', 'foo', 5),
                    ('NUMBER', '4', 6),
                    ('WORD', 'bar', 7),
                    ('WORD', 'fie', 8)
                ],
                8
            )
        ]
        self.parse_and_assert_tree(grammar, datas) if False else self.parse_and_assert_mismatch(grammar, datas)
    # Any matches exactly one token of any kind.
    def test_grammar_any(self):
        grammar = Grammar(Any())
        datas = [
            (
                [('A', r'a')],
                'a'
            ),
            (
                [('B', r'b')],
                'b'
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # AnyUntil consumes tokens up to (not including) the given pattern.
    def test_grammar_any_until(self):
        grammar = Grammar(Sequence(AnyUntil('STRING'), 'STRING'))
        datas = [
            (
                [('NUMBER', '1'),
                 ('WORD', 'a'),
                 ('STRING', '"b"')],
                [['1', 'a'], '"b"']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # AnyUntil also accepts a compound stop pattern.
    def test_grammar_any_until_sequence(self):
        grammar = Grammar(Sequence(AnyUntil(Sequence('WORD', 'STRING')),
                                   'WORD',
                                   'STRING'))
        datas = [
            (
                [('NUMBER', '1'),
                 ('WORD', 'a'),
                 ('WORD', 'b'),
                 ('STRING', '"b"')],
                [['1', 'a'], 'b', '"b"']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A larger combined grammar exercising nested choice/sequence/repetition.
    def test_grammar_1(self):
        grammar = Grammar(Sequence(
            'IF',
            choice(Sequence(choice('A', 'B'), 'STRING'),
                   'STRING'),
            'WORD',
            choice(
                Sequence(
                    choice(DelimitedList('STRING'), ZeroOrMore('NUMBER')), '.'),
                '.')))
        datas = [
            (
                [
                    ('IF', 'IF'),
                    ('STRING', 'foo'),
                    ('WORD', 'bar'),
                    ('.', '.')
                ],
                ['IF', 'foo', 'bar', [[], '.']]
            ),
            (
                [
                    ('IF', 'IF'),
                    ('STRING', 'foo'),
                    ('WORD', 'bar'),
                    ('NUMBER', '0'),
                    ('NUMBER', '100'),
                    ('.', '.')
                ],
                ['IF', 'foo', 'bar', [['0', '100'], '.']]
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Mismatch offsets for the same combined grammar.
    def test_grammar_1_mismatch(self):
        grammar = Grammar(Sequence(
            'IF',
            choice(Sequence(choice('A', 'B'), 'STRING'),
                   'STRING'),
            'WORD',
            choice(
                Sequence(
                    choice(DelimitedList('STRING'), ZeroOrMore('NUMBER')), '.'),
                '.')))
        datas = [
            (
                [
                    ('IF', 'IF', 1),
                    ('STRING', 'foo', 2),
                    ('WORD', 'bar', 3),
                    (',', ',', 4)
                ],
                4
            ),
            (
                [
                    ('IF', 'IF', 1),
                    ('STRING', 'foo', 2),
                    ('.', '.', 3)
                ],
                3
            ),
            (
                [
                    ('IF', 'IF', 1),
                    ('NUMBER', '1', 2)
                ],
                2
            ),
            (
                [
                    ('IF', 'IF', 1),
                    ('STRING', 'foo', 2),
                    ('WORD', 'bar', 3),
                    ('.', '.', 4),
                    ('.', '.', 5)
                ],
                5
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # Forward allows a pattern to be declared before it is defined.
    def test_grammar_forward(self):
        foo = Forward()
        foo <<= Sequence('FOO')
        grammar = Grammar(foo)
        datas = [
            (
                [('FOO', 'foo')],
                ['foo']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Forward also accepts a plain token-kind string.
    def test_grammar_forward_text(self):
        foo = Forward()
        foo <<= 'FOO'
        grammar = Grammar(foo)
        datas = [
            (
                [('FOO', 'foo')],
                'foo'
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Optional yields [] when absent, [value] when present.
    def test_grammar_optional(self):
        grammar = Grammar(Sequence(Optional('WORD'),
                                   Optional('WORD'),
                                   Optional('NUMBER')))
        datas = [
            (
                [],
                [[], [], []]
            ),
            (
                [('WORD', 'a')],
                [['a'], [], []]
            ),
            (
                [('NUMBER', 'c')],
                [[], [], ['c']]
            ),
            (
                [('WORD', 'a'), ('NUMBER', 'c')],
                [['a'], [], ['c']]
            ),
            (
                [('WORD', 'a'), ('WORD', 'b'), ('NUMBER', 'c')],
                [['a'], ['b'], ['c']]
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # Tag wraps the inner result in a (name, value) tuple; tags nest.
    def test_grammar_tag(self):
        grammar = Grammar(Tag('a',
                              Tag('b',
                                  choice(Tag('c', 'WORD'),
                                         Tag('d', Optional('NUMBER'))))))
        datas = [
            (
                [('WORD', 'bar')],
                ('a', ('b', ('c', 'bar')))
            ),
            (
                [('NUMBER', '1')],
                ('a', ('b', ('d', ['1'])))
            ),
            (
                [],
                ('a', ('b', ('d', [])))
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A tag around a mismatching pattern still reports the token offset.
    def test_grammar_tag_mismatch(self):
        grammar = Grammar(Tag('a', 'WORD'))
        datas = [
            (
                [('NUMBER', 'bar')],
                1
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # And is a positive lookahead: matches without consuming tokens.
    def test_grammar_and(self):
        grammar = Grammar(Sequence(And('NUMBER'), 'NUMBER'))
        datas = [
            (
                [('NUMBER', '1')],
                [[], '1']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A failed lookahead reports the offset of the inspected token.
    def test_grammar_and_mismatch(self):
        grammar = Grammar(Sequence(And('NUMBER'), 'NUMBER'))
        datas = [
            (
                [('WORD', 'foo', 3), ('NUMBER', '1', 4)],
                3
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # Not is a negative lookahead: succeeds when the pattern does NOT match.
    def test_grammar_not(self):
        grammar = Grammar(Sequence(Not('WORD'), 'NUMBER'))
        datas = [
            (
                [('NUMBER', '1')],
                [[], '1']
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    # A matching negative lookahead is a failure at that token.
    def test_grammar_not_mismatch(self):
        grammar = Grammar(Sequence(Not('WORD'), 'NUMBER'))
        datas = [
            (
                [('WORD', 'foo', 3), ('NUMBER', '1', 4)],
                3
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    # NoMatch never matches any token.
    def test_grammar_no_match(self):
        grammar = Grammar(NoMatch())
        datas = [
            (
                [('NUMBER', '1', 3)],
                3
            ),
            (
                [('WORD', 'foo', 3)],
                3
            )
        ]
        self.parse_and_assert_mismatch(grammar, datas)
    def test_parse_start_and_end_of_file(self):
        """With match_sof=True the implicit __SOF__/__EOF__ tokens are matchable."""
        class Parser(textparser.Parser):
            def grammar(self):
                return Sequence('__SOF__', '__EOF__')
        self.assertEqual(Parser().parse('', match_sof=True),
                         ['__SOF__', '__EOF__'])
    def test_parse_start_of_file_mismatch(self):
        """A grammar that skips __SOF__ fails at the very first character."""
        class Parser(textparser.Parser):
            def grammar(self):
                return Sequence('__EOF__')
        with self.assertRaises(textparser.ParseError) as cm:
            Parser().parse('123', match_sof=True)
        self.assertEqual(str(cm.exception),
                         'Invalid syntax at line 1, column 1: ">>!<<123"')
    def test_parse_end_of_file(self):
        """__EOF__ alone matches empty input when match_sof=False."""
        class Parser(textparser.Parser):
            def grammar(self):
                return '__EOF__'
        self.assertEqual(Parser().parse('', match_sof=False), '__EOF__')
    def test_grammar_none(self):
        """A custom pattern may legally return None as its parse result."""
        class AnyAsNone(textparser.Pattern):
            def match(self, tokens):
                tokens.get_value()
                return None
        grammar = Grammar(AnyAsNone())
        datas = [
            (
                [('NUMBER', '1')],
                None
            )
        ]
        self.parse_and_assert_tree(grammar, datas)
    def test_grammar_error(self):
        """Grammar.parse raises GrammarError carrying the mismatch offset."""
        grammar = Grammar(NoMatch())
        datas = [
            [('NUMBER', '1', 3)],
            [('WORD', 'foo', 3)]
        ]
        for tokens in datas:
            tokens = tokenize(tokens)
            with self.assertRaises(textparser.GrammarError) as cm:
                grammar.parse(tokens)
            self.assertEqual(cm.exception.offset, 3)
            self.assertEqual(str(cm.exception),
                             'Invalid syntax at offset 3.')
    def test_tokenize_error(self):
        """TokenizeError converts a flat offset into line/column and a marked-up message."""
        datas = [
            (2, 'hej', 'Invalid syntax at line 1, column 3: "he>>!<<j"'),
            (0, 'a\nb\n', 'Invalid syntax at line 1, column 1: ">>!<<a"'),
            (1, 'a\nb\n', 'Invalid syntax at line 1, column 2: "a>>!<<"'),
            (2, 'a\nb\n', 'Invalid syntax at line 2, column 1: ">>!<<b"')
        ]
        for offset, text, message in datas:
            with self.assertRaises(TokenizeError) as cm:
                raise TokenizeError(text, offset)
            self.assertEqual(cm.exception.text, text)
            self.assertEqual(cm.exception.offset, offset)
            self.assertEqual(str(cm.exception), message)
    def test_create_token_re(self):
        """tokenize_init builds a combined named-group regex plus the initial __SOF__ token."""
        datas = [
            (
                [('A', r'a')],
                '(?P<A>a)'
            ),
            (
                [('A', r'b'), ('C', r'd')],
                '(?P<A>b)|(?P<C>d)'
            )
        ]
        for spec, expected_re_token in datas:
            tokens, re_token = tokenize_init(spec)
            self.assertEqual(tokens,
                             [Token(kind='__SOF__', value='__SOF__', offset=0)])
            self.assertEqual(re_token, expected_re_token)
    def test_parser(self):
        """End-to-end Parser with custom keywords/token specs: value tree and token tree."""
        class Parser(textparser.Parser):
            def keywords(self):
                return set([
                    'IF',
                    'A',
                    'B'
                ])
            def token_specs(self):
                return [
                    ('SKIP', r'[ \r\n\t]+'),
                    ('NUMBER', r'-?\d+(\.\d+)?([eE][+-]?\d+)?'),
                    ('DOT', '.', r'\.'),
                    ('WORD', r'[A-Za-z0-9_]+'),
                    ('ESCAPED_STRING', r'"(\\"|[^"])*?"'),
                    ('MISMATCH', r'.')
                ]
            def grammar(self):
                return Sequence(
                    'IF',
                    Optional(choice('A', 'B')),
                    'ESCAPED_STRING',
                    'WORD',
                    Optional(choice(DelimitedList('ESCAPED_STRING'),
                                    ZeroOrMore('NUMBER'))),
                    '.')
        # Each entry: (input text, expected value tree, expected token tree).
        datas = [
            (
                'IF "foo" bar .',
                ['IF', [], '"foo"', 'bar', [[]], '.'],
                [
                    Token(kind='IF', value='IF', offset=0),
                    [],
                    Token(kind='ESCAPED_STRING', value='"foo"', offset=3),
                    Token(kind='WORD', value='bar', offset=9),
                    [[]],
                    Token(kind='.', value='.', offset=13)
                ]
            ),
            (
                'IF B "" b 1 2 .',
                ['IF', ['B'], '""', 'b', [['1', '2']], '.'],
                [
                    Token(kind='IF', value='IF', offset=0),
                    [
                        Token(kind='B', value='B', offset=3)
                    ],
                    Token(kind='ESCAPED_STRING', value='""', offset=5),
                    Token(kind='WORD', value='b', offset=8),
                    [
                        [
                            Token(kind='NUMBER', value='1', offset=10),
                            Token(kind='NUMBER', value='2', offset=12)
                        ]
                    ],
                    Token(kind='.', value='.', offset=14)
                ]
            )
        ]
        for text, expected_tree, expected_token_tree in datas:
            tree = Parser().parse(text)
            self.assertEqual(tree, expected_tree)
            tree = Parser().parse(text, token_tree=True)
            self.assertEqual(tree, expected_token_tree)
    def test_parser_default_keywords(self):
        """Without keywords(), identifiers stay WORD tokens instead of keyword kinds."""
        class Parser(textparser.Parser):
            def token_specs(self):
                return [
                    ('SKIP', r'[ \r\n\t]+'),
                    ('NUMBER', r'-?\d+(\.\d+)?([eE][+-]?\d+)?'),
                    ('DOT', '.', r'\.'),
                    ('WORD', r'[A-Za-z0-9_]+'),
                    ('ESCAPED_STRING', r'"(\\"|[^"])*?"'),
                    ('MISMATCH', r'.')
                ]
            def grammar(self):
                return Sequence(
                    'WORD',
                    Optional('WORD'),
                    'ESCAPED_STRING',
                    'WORD',
                    Optional(choice(DelimitedList('ESCAPED_STRING'),
                                    ZeroOrMore('NUMBER'))),
                    '.')
        # Same inputs as test_parser, but 'IF'/'B' are now plain WORD tokens.
        datas = [
            (
                'IF "foo" bar .',
                ['IF', [], '"foo"', 'bar', [[]], '.'],
                [
                    Token(kind='WORD', value='IF', offset=0),
                    [],
                    Token(kind='ESCAPED_STRING', value='"foo"', offset=3),
                    Token(kind='WORD', value='bar', offset=9),
                    [[]],
                    Token(kind='.', value='.', offset=13)
                ]
            ),
            (
                'IF B "" b 1 2 .',
                ['IF', ['B'], '""', 'b', [['1', '2']], '.'],
                [
                    Token(kind='WORD', value='IF', offset=0),
                    [
                        Token(kind='WORD', value='B', offset=3)
                    ],
                    Token(kind='ESCAPED_STRING', value='""', offset=5),
                    Token(kind='WORD', value='b', offset=8),
                    [
                        [
                            Token(kind='NUMBER', value='1', offset=10),
                            Token(kind='NUMBER', value='2', offset=12)
                        ]
                    ],
                    Token(kind='.', value='.', offset=14)
                ]
            )
        ]
        for text, expected_tree, expected_token_tree in datas:
            tree = Parser().parse(text)
            self.assertEqual(tree, expected_tree)
            tree = Parser().parse(text, token_tree=True)
            self.assertEqual(tree, expected_token_tree)
    def test_parser_bare(self):
        """A Parser subclass without grammar() raises NotImplementedError."""
        class Parser(textparser.Parser):
            pass
        with self.assertRaises(NotImplementedError) as cm:
            Parser().parse('foo')
        self.assertEqual(str(cm.exception), 'No grammar defined.')
    def test_parser_default_token_specs(self):
        """The built-in default token specs can tokenize a simple word."""
        class Parser(textparser.Parser):
            def grammar(self):
                return 'WORD'
        tree = Parser().parse('foo')
        self.assertEqual(tree, 'foo')
    def test_parser_tokenize_mismatch(self):
        """A MISMATCH during tokenization surfaces as ParseError with line/column."""
        class Parser(textparser.Parser):
            def token_specs(self):
                return [
                    ('SKIP', r'[ \r\n\t]+'),
                    ('NUMBER', r'-?\d+(\.\d+)?([eE][+-]?\d+)?'),
                    ('MISMATCH', r'.')
                ]
            def grammar(self):
                return Grammar('NUMBER')
        with self.assertRaises(textparser.ParseError) as cm:
            Parser().parse('12\n34foo\n789')
        self.assertEqual(cm.exception.offset, 5)
        self.assertEqual(cm.exception.line, 2)
        self.assertEqual(cm.exception.column, 3)
        self.assertEqual(str(cm.exception),
                         'Invalid syntax at line 2, column 3: "34>>!<<foo"')
    def test_parser_grammar_mismatch(self):
        """A grammar-level mismatch is reported at the failing token's offset."""
        class Parser(textparser.Parser):
            def tokenize(self, _text):
                return tokenize([
                    ('NUMBER', '1.45', 0),
                    ('NUMBER', '2', 5)
                ])
            def grammar(self):
                return Sequence('NUMBER', 'WORD')
        with self.assertRaises(textparser.ParseError) as cm:
            Parser().parse('1.45 2')
        self.assertEqual(cm.exception.offset, 5)
        self.assertEqual(cm.exception.line, 1)
        self.assertEqual(cm.exception.column, 6)
        self.assertEqual(str(cm.exception),
                         'Invalid syntax at line 1, column 6: "1.45 >>!<<2"')
    def test_parser_grammar_mismatch_choice_max(self):
        """Choice reports the deepest (maximum-offset) mismatch among its alternatives."""
        class Parser(textparser.Parser):
            def __init__(self, tokens):
                self._tokens = tokens
            def tokenize(self, _text):
                return tokenize(self._tokens, add_eof_token=False)
            def grammar(self):
                return Choice(Sequence('NUMBER', 'WORD'),
                              'WORD')
        Data = namedtuple('Data',
                          [
                              'text',
                              'tokens',
                              'offset',
                              'line',
                              'column',
                              'message',
                          ])
        datas = [
            Data(
                text='1.45',
                tokens=[
                    ('NUMBER', '1.45', 0)
                ],
                offset=4,
                line=1,
                column=5,
                message='Invalid syntax at line 1, column 5: "1.45>>!<<"'
            ),
            Data(
                text='1.45 2',
                tokens=[
                    ('NUMBER', '1.45', 0),
                    ('NUMBER', '2', 5)
                ],
                offset=5,
                line=1,
                column=6,
                message='Invalid syntax at line 1, column 6: "1.45 >>!<<2"'
            )
        ]
        for text, tokens, offset, line, column, message in datas:
            with self.assertRaises(textparser.ParseError) as cm:
                Parser(tokens).parse(text)
            self.assertEqual(cm.exception.offset, offset)
            self.assertEqual(cm.exception.line, line)
            self.assertEqual(cm.exception.column, column)
            self.assertEqual(str(cm.exception), message)
    def test_parse_error(self):
        """A TokenizeError raised from tokenize() is converted into a ParseError."""
        class Parser(textparser.Parser):
            def tokenize(self, text):
                raise TokenizeError(text, 5)
            def grammar(self):
                return Grammar(Sequence('NUMBER', 'WORD'))
        with self.assertRaises(textparser.ParseError) as cm:
            Parser().parse('12\n3456\n789')
        self.assertEqual(cm.exception.text, '12\n3456\n789')
        self.assertEqual(cm.exception.offset, 5)
        self.assertEqual(cm.exception.line, 2)
        self.assertEqual(cm.exception.column, 3)
        self.assertEqual(str(cm.exception),
                         'Invalid syntax at line 2, column 3: "34>>!<<56"')
    def test_markup_line(self):
        """markup_line inserts the marker at the given offset within that line only."""
        # Each entry: (offset into '0\n1234\n56', expected line, custom marker or None).
        datas = [
            (0, '>>!<<0', None),
            (1, '0>>!<<', None),
            (2, '>>!<<1234', None),
            (4, '12>>!<<34', None),
            (6, '1234>>!<<', None),
            (7, '>>!<<56', None),
            (8, '5>>!<<6', None),
            (9, '56>>!<<', None),
            (3, '1x234', 'x')
        ]
        for offset, line, marker in datas:
            if marker is None:
                text = markup_line('0\n1234\n56', offset)
            else:
                text = markup_line('0\n1234\n56',
                                   offset,
                                   marker=marker)
            self.assertEqual(text, line)
    def test_replace_blocks(self):
        """replace_blocks blanks out the content of {...} blocks, keeping newlines."""
        datas = [
            ('{}', '{}'),
            ('{{}}', '{ }'),
            ('{{\n} xxx {}}', '{ \n }'),
            ('1{a\n}2{b}3', '1{ \n}2{ }3')
        ]
        for old, expected in datas:
            new = replace_blocks(old)
            self.assertEqual(new, expected)
    def test_replace_blocks_start_end(self):
        """replace_blocks accepts custom (possibly multi-character) delimiters."""
        datas = [
            ('1[a]2[b]3', '1[ ]2[ ]3', '[', ']'),
            ('1{a}2{b}3', '1{ }2{ }3', '{', '}'),
            ('1(a)2(b)3', '1( )2( )3', '(', ')'),
            ('1((a))2((b))3', '1(( ))2(( ))3', '((', '))')
        ]
        for old, expected, start, end in datas:
            new = replace_blocks(old, start, end)
            self.assertEqual(new, expected)
# Allow running this test module directly (python test_file.py).
if __name__ == '__main__':
    unittest.main()
| [
"erik.moqvist@gmail.com"
] | erik.moqvist@gmail.com |
9bf4779a9261029f2e4a01cc040f2a33a5df8b58 | e2d0716c2cfafa7acd1f644f9c19b4e1b24adb1d | /models.py | 718f5719ce3fcc57695c0ae894a012d7aa6e7c78 | [] | no_license | nahid111/face-recognition-app | 75dd345a5cf2418c2537b3a74b654743220a9f5a | 73f756f5518f010671c41e134abf122c92d40319 | refs/heads/master | 2022-11-12T10:19:05.434381 | 2020-06-30T08:14:28 | 2020-06-30T08:14:28 | 276,038,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | from app import db
class Students(db.Model):
    """A registered student plus the image data used for face recognition."""
    sl = db.Column(db.Integer, primary_key=True)   # surrogate primary key
    id = db.Column(db.String(12))                  # student identifier (string, up to 12 chars)
    name = db.Column(db.String(20))
    imageName = db.Column(db.String(300))          # original uploaded file name
    imageData = db.Column(db.LargeBinary)          # raw image bytes
    imageEncodings = db.Column(db.PickleType)      # pickled encodings -- presumably face_recognition vectors; verify against caller
class User(db.Model):
    """Login account.

    NOTE(review): the password is persisted in plain text -- consider
    hashing it (e.g. werkzeug.security.generate_password_hash) instead.
    """
    id = db.Column(db.Integer, primary_key=True)
    user = db.Column(db.String(20), unique=True)   # unique login name
    password = db.Column(db.String(20))
    def __init__(self, user, password):
        self.user = user
        self.password = password
# To create the tables, run in a Python shell:
#   >>> from app import db
#   >>> db.create_all()
| [
"mdnahid22@gmail.com"
] | mdnahid22@gmail.com |
228ffda480e3defaae4548133092a553b9e066fd | 30b9174b248ad0aa1a3a4794ae2bb0bf2dd0a275 | /summarize_GCF_presence_absence.py | 65e22dd1174f0c171a16a06966310d056dbed5eb | [] | no_license | thej-ssi/antismash_scripts | cfd2d41a5d9ffcc89961c446909fd716f202b107 | fd4cde44a6703fffbfed88158066e91a69138b23 | refs/heads/master | 2023-01-30T10:55:20.479160 | 2020-12-16T14:00:36 | 2020-12-16T14:00:36 | 268,782,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #!/usr/bin/env python3
import os
import sys
antismash_folder = sys.argv[1]
IDs = os.listdir(antismash_folder)
BGCs = []
BGC_dict = {}
BGC_to_fam = {}
#domain_dir = os.path.join(antismash_folder,'cache/domains')
#files = os.listdir(domain_dir)
for ID in IDs:
BGC_dict[ID] = []
path = os.path.join(antismash_folder,ID)
files = os.listdir(path)
for file in files:
if file.endswith('.gbk') and file.find('region') != -1:
if file not in BGCs:
BGCs.append(file)
BGC_dict[ID].append(file)
print('ID'+'\t'+'\t'.join(BGCs))
for ID in IDs:
printlist = [ID]
for BGC in BGCs:
if BGC in BGC_dict[ID]:
printlist.append('1')
else:
printlist.append('0')
print('\t'.join(printlist)) | [
"thej@ssi.dk"
] | thej@ssi.dk |
784ab7f90cd3ba591dda82698e301b73ed5bd0c3 | d2de9eb91cb47c359da6d1cc0c3dfb2a2402d802 | /rpg/wsgi.py | a585dc34140ab80f285dcc22570d3fd00de7b474 | [] | no_license | erikAlon/Django-RPG | 8616ce0987b06ca92e55d2de729639875ee1bbb2 | 97e9d4dac9c265c0f911a9509a9bf1deb98466c3 | refs/heads/master | 2020-03-22T16:14:13.445758 | 2018-07-13T19:52:06 | 2018-07-13T19:52:06 | 140,313,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | """
WSGI config for rpg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rpg.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"mr.alonsoea@protonmail.ch"
] | mr.alonsoea@protonmail.ch |
38d0007315c5109f692cfc3d0863c6a1343c6a40 | 44f15bdca68e71290ee609076f278a9a4a614cc9 | /manage.py | 211a80d6c00e7a9a7fe4dcfff1f9187c13262e7e | [] | no_license | snajera/test3 | 1779437b400ad112cfcb172a08182ddd78de0258 | 37e48ca64669c70fc77478daf636f8f6ce5315eb | refs/heads/master | 2020-05-24T15:58:30.614913 | 2019-05-18T10:12:57 | 2019-05-18T10:12:57 | 187,344,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test3.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"snajera@gmail.com"
] | snajera@gmail.com |
03ae4cbe01915c65e39608b58c9a250c5ec96390 | 1a1a4f7dad6662022104d00c615d333703544e9c | /dns_resolver/dns_resolver.py | 42cfd2d8fe1d782be549a112b2ce55f1e7a7460b | [] | no_license | cnijim/tetration | ceb707fc429b72f8f8f72706b89684caf4a079f6 | d785f71458f5763d0e7e50188b451d7383bc25ac | refs/heads/master | 2021-05-02T10:11:05.287056 | 2018-01-09T21:02:32 | 2018-01-09T21:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,818 | py | # =================================================================================
from tetpyclient import RestClient
import tetpyclient
import json
import requests.packages.urllib3
import sys
import os
import argparse
import time
import dns.resolver, dns.reversename
import csv
# =================================================================================
# See reason below -- why verify=False param is used
requests.packages.urllib3.disable_warnings()
# ====================================================================================
# GLOBALS
# ------------------------------------------------------------------------------------
TETRATION_API_URL = "https://<tetration IP or Hostname>"
TETRATION_API_CRED_PATH = '<tetration credential file>.json'
TETRATION_HOST_NAME_USER_ANNOTATION = 'Hostname'
TETRATION_SCOPE_NAME = 'Default'
TETRATION_SEARCH_LIMIT = 20
parser = argparse.ArgumentParser(description='Tetration API Demo Script')
parser.add_argument('--url', help='Tetration URL', required=False)
parser.add_argument('--credential', help='Path to Tetration json credential file', required=False)
parser.add_argument('--annotation', help='User Annotation Field for tracking hostname', required=False)
parser.add_argument('--scope', help='Target scope for DNS resolution', required=False)
parser.add_argument('--limit', help='Results limit for inventory search', required=False)
args = parser.parse_args()
TETRATION_API_URL = args.url if args.url else TETRATION_API_URL
TETRATION_API_CRED_PATH = args.credential if args.credential else TETRATION_API_CRED_PATH
TETRATION_HOST_NAME_USER_ANNOTATION = args.annotation if args.annotation else TETRATION_HOST_NAME_USER_ANNOTATION
TETRATION_SCOPE_NAME = args.scope if args.scope else TETRATION_SCOPE_NAME
TETRATION_SEARCH_LIMIT = args.limit if args.limit else TETRATION_SEARCH_LIMIT
def CreateRestClient():
    """Build a Tetration REST client from the configured URL and credential file.

    TLS verification is disabled because appliances commonly run
    self-signed certificates (see the urllib3 warning suppression above).
    """
    return RestClient(TETRATION_API_URL,
                      credentials_file=TETRATION_API_CRED_PATH,
                      verify=False)
'''
====================================================================================
Get Hosts with empty hostnames
------------------------------------------------------------------------------------
'''
def GetUnnamedHosts(rc,offset):
    """Search Tetration inventory for hosts with an empty hostname or annotation.

    Returns one page (up to TETRATION_SEARCH_LIMIT results) of the decoded
    JSON response. On a non-200 reply the status and body are printed and
    the whole process exits -- callers rely on never seeing the error case.
    """
    req_payload = {
        "filter": {
            "type": "or",
            "filters": [
                {
                    "type": "eq",
                    "field": "hostname",
                    "value": ""
                },
                {
                    "type": "eq",
                    "field": "user_" + TETRATION_HOST_NAME_USER_ANNOTATION,
                    "value": ""
                }
            ]
        },
        "scopeName": TETRATION_SCOPE_NAME,
        "limit": TETRATION_SEARCH_LIMIT,
        # empty string requests the first page; subsequent pages pass the
        # opaque offset token returned by the previous call
        "offset": offset if offset else ""
    }
    resp = rc.post('/inventory/search',json_body=json.dumps(req_payload))
    if resp.status_code != 200:
        print(resp.status_code)
        print(resp.text)
        exit(0)
    else:
        return resp.json()
'''
====================================================================================
Resolve empty hostnames by IP Address
------------------------------------------------------------------------------------
'''
def ResolveUnnamedHosts(inventoryList):
    """Reverse-DNS each inventory entry and attach the resolved host name.

    Each host dict that resolves gains a 'user_<annotation>' key holding
    the PTR name with its trailing dot stripped. Hosts that fail lookup
    are reported and skipped. Returns the successfully resolved hosts.
    """
    resolved_hosts = []
    for host in inventoryList:
        try:
            addr = dns.reversename.from_address(host["ip"])
            host_name = str(dns.resolver.query(addr,"PTR")[0])
            # strip the trailing '.' of the fully-qualified PTR record
            host.update({"user_" + TETRATION_HOST_NAME_USER_ANNOTATION: host_name[:-1] })
            resolved_hosts.append(host)
        except Exception:
            # Bug fix: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt. Lookup failures are expected
            # and deliberately best-effort, so report and continue.
            print("Couldn't resolve IP: {ip}".format(ip=host["ip"]))
            continue
    return resolved_hosts
'''
====================================================================================
Create annotation csv and push to Tetration
------------------------------------------------------------------------------------
'''
def SendAnnotationUpdates(rc,resolved_hosts):
    """Write resolved hostnames to annotations.csv and upload it to Tetration.

    Bug fixes vs. the original: the header flag was never set, so the CSV
    header was recomputed on every iteration, and an empty host list caused
    a NameError ('headers' undefined) at DictWriter time.
    """
    if not resolved_hosts:
        print("No resolved hosts to annotate; skipping upload")
        return
    user_annotations = []
    headers = None
    for host in resolved_hosts:
        # Keep only ip/vrf_name/user_* keys; strip the 'user_' prefix from
        # annotation keys so the CSV columns match Tetration's expectations.
        row = dict([(k if not k.startswith('user_') else k.split('user_')[1], v)
                    for k, v in host.items()
                    if k.startswith(('ip', 'vrf_name', 'user_'))])
        row['IP'] = row.pop('ip')
        row['VRF'] = row.pop('vrf_name')
        user_annotations.append(row)
        if headers is None:  # derive the column order once, from the first row
            headers = [key for key in row if key != 'IP' and key != 'VRF']
            headers.insert(0, 'VRF')
            headers.insert(0, 'IP')
    # NOTE(review): 'wb' is the Python 2 csv idiom; under Python 3 this
    # raises TypeError -- use open(..., 'w', newline='') there. Kept as-is
    # to preserve the script's current runtime target.
    with open('annotations.csv', 'wb') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        writer.writerows(user_annotations)
    file_path = 'annotations.csv'
    keys = ['IP', 'VRF']  # composite key Tetration uses to match inventory rows
    req_payload = [tetpyclient.MultiPartOption(key='X-Tetration-Key', val=keys),
                   tetpyclient.MultiPartOption(key='X-Tetration-Oper', val='add')]
    resp = rc.upload(file_path, '/assets/cmdb/upload', req_payload)
    if resp.status_code != 200:
        print("Error posting annotations to Tetration cluster")
    else:
        print("Successfully posted annotations to Tetration cluster")
def main():
    """Page through unnamed hosts, resolve them via DNS, and push annotations."""
    rc = CreateRestClient()
    offset = ''
    while True:
        print("Getting offset: {offset}".format(offset=offset))
        unnamed_hosts = GetUnnamedHosts(rc,offset)
        resolved_hosts = ResolveUnnamedHosts(unnamed_hosts["results"])
        SendAnnotationUpdates(rc,resolved_hosts)
        try:
            offset = unnamed_hosts["offset"]
        except KeyError:
            # Bug fix: was a bare 'except:'. The API omits 'offset' on the
            # final page, which is the only condition that should end the loop.
            break
        time.sleep(2)  # small pause between pages to go easy on the API
if __name__ == "__main__":
    main()
| [
"robbeck@cisco.com"
] | robbeck@cisco.com |
78dfcf4dfa7571d3dae88e7211d113e1116620db | a98b83e743926fa56c591e936ce6767e4d5a7c04 | /tests/test_warnings.py | acc820b2e56bdf5972d73b14593983e80fcf092e | [
"MIT"
] | permissive | tomwojcik/starlette-context | 8b1ef890c50f490c977ba61ee9bd83ff24563faf | 8bc7db107767326199ffa1e6b156ed83d93fefa9 | refs/heads/master | 2023-08-07T00:16:58.821787 | 2023-07-25T04:15:25 | 2023-07-25T15:29:36 | 219,023,167 | 375 | 22 | MIT | 2023-09-13T16:57:54 | 2019-11-01T16:32:04 | Python | UTF-8 | Python | false | false | 836 | py | """Temp set of tests until 1.0.0 is released."""
import warnings
from unittest.mock import MagicMock
from starlette_context.middleware.context_middleware import (
ContextMiddleware,
CONTEXT_MIDDLEWARE_WARNING_MSG,
)
from starlette_context.middleware.raw_middleware import RawContextMiddleware
def test_context_middleware_raises_warning():
    """Constructing the deprecated ContextMiddleware must emit exactly one DeprecationWarning."""
    with warnings.catch_warnings(record=True) as caught_warnings:
        ContextMiddleware(app=MagicMock())
    assert len(caught_warnings) == 1
    warning = caught_warnings[0]
    assert warning.category == DeprecationWarning
    assert str(warning.message) == CONTEXT_MIDDLEWARE_WARNING_MSG
def test_raw_context_middleware_does_not_raise_warning():
    """Constructing RawContextMiddleware must stay completely warning-free."""
    with warnings.catch_warnings(record=True) as caught_warnings:
        RawContextMiddleware(app=MagicMock())
    assert not caught_warnings
| [
"tomwojcik@users.noreply.github.com"
] | tomwojcik@users.noreply.github.com |
9a680b89ad712ab3ef4aa354989845ff928553e7 | 290c87c429e682a3794d7f6ad25ade57547c7a04 | /train/net_train.py | 988a584388af9e18acfe503746fe7b5b566c82d9 | [] | no_license | XiongQiuQiu/ner | 1a5174a1e0bbd03da7c3ac217a328a6d48d7dfc8 | 3089c8d08ac3e6651407222f47c535a26ae7d27a | refs/heads/master | 2021-01-20T16:05:08.966692 | 2017-06-19T09:13:07 | 2017-06-19T09:13:07 | 90,815,933 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | #-*-coding:utf-8
import os
import sys
def load_2014():
_data_path = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname('2014/')))
dir_file = os.listdir(_data_path)
result = set()
for dir in dir_file:
dir_file_path = os.path.normpath(os.path.join(_data_path, os.path.dirname(dir+'/')))
file_name_list = os.listdir(dir_file_path)
for file in file_name_list:
file_path = os.path.join(dir_file_path, file)
print dir_file_path, file_path
with open(file_path) as f:
for line in f:
result.add(line.strip().decode('utf-8'))
return result | [
"zhangjinwei94@163.com"
] | zhangjinwei94@163.com |
d0a458e86b3a80e35e5f94466e5e4c5f3e9ff7a5 | 5840ddbe3c2d63c8d69cff9ad0efbcfd07ea1db8 | /(A) Bit++.py | fc9c54ab6f65fe9cbc7ab104684324da7712b4fa | [] | no_license | Mohamad-Aboda/Data-Science-CAT-Reloaded-Tasks | 2a7112ab5ee68506f8d5abca1f2a16465623019e | b9da681529b3811ab2b9e9ad39a8046e9e9e271a | refs/heads/master | 2023-01-02T08:33:05.172036 | 2020-10-23T15:10:12 | 2020-10-23T15:10:12 | 240,038,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # problem link https://codeforces.com/contest/282/problem/A
n = int(input())
cnt = 0
for i in range(0, n):
s = input()
if(s == "X++" or s == "++X"):
cnt+=1
elif(s == "--X" or s == "X--"):
cnt-=1
print(cnt)
| [
"noreply@github.com"
] | Mohamad-Aboda.noreply@github.com |
c6f5f3941b86fc9197ffda49361b9e893dd4af5d | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/get_positions_20210205015710.py | 71c1dcc17b16ec8ec382841ca4d884e1bb8d3c0f | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json

# config.py lives in the parent directory for security (API keys etc.).
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config

token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)

# Fetch all QUEUED (open, not yet filled) orders for the account.
orders = c.Order.Status.QUEUED
# Bug fix: the original passed `status != orders`, but `status` was never
# defined, so this line raised NameError. `status` is a keyword argument
# of Client.get_orders_by_path.
res = c.get_orders_by_path(config.tda_acct_num, status=orders)
data = res.json()
print(data)
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
3934e73ca1736a03f5d427773d394783d2b20f3c | f73e1c031d9fda6ccbf8a9ba73744180fbe6cc55 | /exercise/guess_game.py | 51b1273cffb72581621f6f234bfa1a41435731ba | [] | no_license | subin97/python_basic | dfb21dbeb42439381e458ad8af36cae25f239aed | b1e28b49adbee949e055f8b6e02a05cb4d9d830f | refs/heads/master | 2023-02-11T20:34:08.549923 | 2021-01-05T01:37:53 | 2021-01-05T01:37:53 | 326,850,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | import random
guess_number = random.randint(1,100)
user_number = 0
count = 0
while user_number != guess_number:
user_number = int(input("숫자를 입력하세요 : "))
count += 1
if user_number > guess_number:
print("숫자가 너무 큽니다.")
else:
print("숫자가 너무 작습니다.")
else:
print("정답 {0}을 {1}번 만에 맞췄습니다!".format(guess_number, count))
| [
"subin_97@likelion.org"
] | subin_97@likelion.org |
0bda85efe421e587872caefb529ecf3bc49ce2f1 | c971cc48bf884e172855565af15891bf37e6227c | /PoliceComplainSystem/Template Files/talkey.py | 35bb431deb71939e5e0b998a8a7a678fff75e01c | [] | no_license | harshidkoladara/Python_Projects | 50e47459576b5c3d4e1c5e5eeb766bd0a6673ed9 | 6035fbe6fcf86b9d982bd95912e02d8b504e5f1c | refs/heads/master | 2022-10-15T23:44:43.083583 | 2020-06-10T10:44:20 | 2020-06-10T10:44:20 | 271,249,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py |
from gtts import gTTS
import winsound
from googletrans import Translator
import os
from pygame import mixer
translator =Translator()
mytext = input("ENter TExt")
lan = input("enter language code")
translated = translator.translate(mytext, dest=lan)
print (translated.text)
#print(type(translated))
language = lan
myobj = gTTS(text=translated.text, lang=language, slow=False)
myobj.save("welcome.wav")
#os.system("welcome.wav")
winsound.PlaySound('welcome.wav', winsound.SND_ALIAS)
#mixer.init()
#mixer.music.load("welcome.mp3")
#mixer.music.play()
| [
"hackspatel3524@gmail.com"
] | hackspatel3524@gmail.com |
a6993fe495e89a4d3567b0a065621e10400aef68 | f55f0e215dcfb9027ffd3f9aaf575086d0164812 | /src/order.py | 79f8275d86b1e54e8b27c85a37886031d8a10a7e | [] | no_license | lungyoungyu/predictious-bot | 5d9a862d7ab2652e8073d24c5fee48881950e2a3 | 55035ed2716c48c54d57f70c66defc1a273be618 | refs/heads/master | 2021-05-05T17:08:49.194122 | 2018-01-30T21:22:26 | 2018-01-30T21:22:26 | 117,412,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import json
import httphandler
class Order:
def __init__(self, conID, isAsk, quantity, price):
self.conID = conID
self.isAsk = isAsk
self.quantity = quantity
self.price = price
def __repr__(self):
return order2str(self)
# Orders[] -> Str
def order2str(listOfOrders):
listOfjson = []
for order in listOfOrders:
json_obj = {'ContractId': order.conID,
'isAsk': order.isAsk,
'Quantity': order.quantity,
'Price': order.price}
listOfjson.append(json_obj)
json_str = json.dumps(listOfjson, indent=1, separators=(',',':'))
return json_str
# implement multi cancel orders (how? should order ID be in order?
# Strings[] -> Str
def cancel2str(listOfCancels):
    """Serialize a list of order-id strings into a pretty-printed JSON cancel request."""
    payload = [{'Id': order_id} for order_id in listOfCancels]
    return json.dumps(payload, indent=1, separators=(',', ':'))
| [
"lungyouy@usc.edu"
] | lungyouy@usc.edu |
cd9f312dbe143fe36d55b3e3069b4dc947cb93a2 | 708f79947a389c10a9f6cde318666466d1de5efd | /mysite/settings.py | 1079ba24da8f4da85bd0403db69a4182bb85962c | [] | no_license | 34t3rnull/my-first-blog | 1f129c9e8d1b0f46f578964ecc5e0f00e5ac34ab | c3379e5ccb22fcac669bc5ebed81ca80ac9f5495 | refs/heads/master | 2020-12-02T16:17:16.492915 | 2017-07-07T12:23:04 | 2017-07-07T12:23:04 | 96,486,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l8fw*0@@po8h0+katzldd5#*9y&*j$dip9+h3po&9r1u)+a(ld'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"34t3rnull@naver.com"
] | 34t3rnull@naver.com |
be4b0793a9f7e0ea9c4f216097e68a84acf12089 | f02eaa3bf1342bf2e0d5abefc78550b0ae6d07c8 | /Ch05/p.142.py | 52cd307f32bcb1f4619cc04b8279cf27c254fa2f | [] | no_license | Leedokyeong95/PythonWorks | 227f6d57c7469b7bf99e33b23808beb813737f2d | f69817325c26651aef3093589ef16014ebd2a5f0 | refs/heads/master | 2023-03-23T19:46:06.381414 | 2021-03-11T12:28:59 | 2021-03-11T12:28:59 | 339,357,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
날짜 : 2021/02/25
이름 : 이도경
내용 : 파이썬 프로그램 1~n 정수 누적합 예제 p.142
"""
# (1) 재귀함수 정의 : 1~n 누적합(1+2+3+4+5=15)
def Adder(n):
if n == 1 :
return 1
else:
result = n + Adder(n-1)
print(n, end = ' ')
return result
# (2) 함수 호출1
print('n=1 :', Adder(1))
# (3) 함수 호출2
print('\nn=5 :', Adder(5))
| [
"nannayamzz95@naver.com"
] | nannayamzz95@naver.com |
e901f446ad0119c627a7c6a987bb7c84fef14aa3 | 5710bb7b727e68dfb7428e6bc1d3f7fed86ec029 | /queries_docker.py | 9dab6a9b655d6c65c0766ca291321140da8f56e8 | [
"MIT"
] | permissive | Alexandre-gommez/System_for_big_data | a40cdaa5a79c3920d1ffa4f40bd18b50e6ceaba8 | 47d6492abfad9cf98e8c1948a3c3d3fdbcd458cb | refs/heads/main | 2023-08-29T13:48:32.770949 | 2021-11-05T22:43:43 | 2021-11-05T22:43:43 | 416,639,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,500 | py | from functions import *
if __name__ == '__main__':
thread_customer=ThreadWithReturn(target=Load, args=('data/customer.tbl', [int, str, str, int, str, float, str, str],))
thread_customer.start()
""" thread_lineitem=ThreadWithReturn(target=Load, args=('data/lineitem.tbl', [int]*5 + [float]*3 + [str]*8,))
thread_lineitem.start() """
thread_nation=ThreadWithReturn(target=Load, args=('data/nation.tbl', [int, str, int, str],))
thread_nation.start()
""" thread_orders=ThreadWithReturn(target=Load, args=('data/orders.tbl', [int, int, str, float] + [str]*3 + [int, str],))
thread_orders.start() """
thread_part=ThreadWithReturn(target=Load, args=('data/part.tbl', [int] + [str]*4 + [int, str, float, str],))
thread_part.start()
thread_partsupp=ThreadWithReturn(target=Load, args=('data/partsupp.tbl', [int]*3 + [float, str],))
thread_partsupp.start()
thread_region=ThreadWithReturn(target=Load, args=('data/region.tbl', [int, str, str],))
thread_region.start()
thread_supplier=ThreadWithReturn(target=Load, args=('data/supplier.tbl', [int, str, str, int, str, float, str],))
thread_supplier.start()
REGION = thread_region.join()
PARTSUPP = thread_partsupp.join()
PART = thread_part.join()
#ORDERS = thread_orders.join()
NATION = thread_nation.join()
#LINEITEM = thread_lineitem.join()
CUSTOMER = thread_customer.join()
SUPPLIER = thread_supplier.join()
print("START")
# HASH JOIN
start = time.time()
JOINED = hash_join_thread(PART, 'P_PARTKEY', CUSTOMER, 'C_CUSTKEY', nb_thread=4)
end = time.time()
print('HASH JOIN THREADED :', format(end - start, '.4f'))
start = time.time()
JOINED = hash_join(CUSTOMER, 'C_CUSTKEY', PART, 'P_PARTKEY')
end = time.time()
print('HASH JOIN :', format(end - start, '.4f'))
write(JOINED, 'JOINED.csv')
# PROJECTION
start = time.time()
PROJECTION = projection(PART, ['P_PARTKEY', 'P_NAME'])
end = time.time()
print('PROJECTION :', format(end - start, '.4f'))
write(PROJECTION, 'PROJECTION.csv')
# SELECTIONS
start = time.time()
SELECTION = selection(PART, ['P_PARTKEY', 'P_SIZE'], ['<=', '<='], [10, 40])
end = time.time()
print('SELECTION :', format(end - start, '.4f'))
start = time.time()
SELECTION = selection_thread(PART, ['P_PARTKEY', 'P_SIZE'], ['<=', '<='], [10, 40], nb_thread=4)
end = time.time()
print('SELECTION THREADED :', format(end - start, '.4f'))
write(SELECTION, 'SELECTION.csv')
start = time.time()
SELECTION_ATTRIBUTES = selection_attributes(JOINED, ['P_PARTKEY'], ['='], ['C_CUSTKEY'])
end = time.time()
print('SELECTION ATTRIBUTES :', format(end - start, '.4f'))
start = time.time()
SELECTION_ATTRIBUTES = selection_attributes_thread(JOINED, ['P_PARTKEY'], ['='], ['C_CUSTKEY'], nb_thread=4)
end = time.time()
print('SELECTION ATTRIBUTES THREADED:', format(end - start, '.4f'))
write(SELECTION_ATTRIBUTES, 'SELECTION_ATTRIBUTES.csv')
# GROUP-BY
start = time.time()
GROUP_BY = group_by(NATION, ['N_REGIONKEY'],[(sum,'N_NATIONKEY')],["N_REGIONKEY"])
end = time.time()
print('GROUP BY :', format(end - start, '.4f'))
start = time.time()
GROUP_BY = group_by_thread(NATION, ['N_REGIONKEY'], [(sum,'N_NATIONKEY')], ["N_REGIONKEY"], nb_thread=4)
end = time.time()
print('GROUP BY THREADED :', format(end - start, '.4f'))
write(GROUP_BY, 'GROUP_BY.csv') | [
"alexandre.gommez@edu.ece.fr"
] | alexandre.gommez@edu.ece.fr |
1ec4b902e65e4fa3e962f755ca889931ce7e8608 | 11d6bc3bfccb18adb1c5e530fb51aae08a10a0d7 | /Django-1.7.7/Django-1.7.7/Django-1.7.7/tests/i18n/test_extraction.py | ca1e0dbb77b0c7f8a29f9cd36f086c4b39f8bbb7 | [
"BSD-3-Clause"
] | permissive | mysolutus/my-first-site | a6819135c67b1ef7447e21c266072a8172370bc9 | 81b25bd8f89e35a7a98a54560d3526bf3d6c32ed | refs/heads/master | 2022-11-04T12:29:13.447825 | 2015-05-20T06:13:22 | 2015-05-20T06:13:22 | 35,928,435 | 0 | 1 | null | 2022-10-18T11:01:02 | 2015-05-20T05:44:33 | Python | UTF-8 | Python | false | false | 28,710 | py | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import sys
from unittest import expectedFailure, SkipTest, skipUnless
import warnings
from django.conf import settings
from django.core import management
from django.core.management.utils import find_command
from django.test import SimpleTestCase
from django.test import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SimpleTestCase):
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
stdout=stdout, **options)
output = stdout.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def assertMsgId(self, msgid, s, use_quotes=True):
q = '"'
if use_quotes:
msgid = '"%s"' % msgid
q = "'"
needle = 'msgid %s' % msgid
msgid = re.escape(msgid)
return self.assertTrue(re.search('^msgid %s' % msgid, s, re.MULTILINE), 'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
parts = ['#: ']
parts.append(os.path.join(cwd_prefix, *comment_parts))
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
if assert_presence:
return self.assertTrue(needle in po_contents, '"%s" not found in final .po file.' % needle)
else:
return self.assertFalse(needle in po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
"""
self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
verifies that the django.po file has a gettext-style location comment of the form
`#: dirA/dirB/foo.py:42`
(or `#: .\dirA\dirB\foo.py:42` on Windows)
None can be passed for the line_number argument to skip checking of the :42 suffix part.
"""
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
"""Check the opposite of assertLocationComment()"""
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertTrue('#. Translators: This comment should be extracted' in po_contents)
self.assertTrue('This comment should not be extracted' not in po_contents)
# Comments in templates
self.assertTrue('#. Translators: Django template comment for translators' in po_contents)
self.assertTrue("#. Translators: Django comment block for translators\n#. string's meaning unveiled" in po_contents)
self.assertTrue('#. Translators: One-line translator comment #1' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #1\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #2' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #2\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #3' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #3\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #4' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #4\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.' in po_contents)
def test_templatize_trans_tag(self):
# ticket #11240
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Literal with a percent symbol at the end %%', po_contents)
self.assertMsgId('Literal with a percent %% symbol in the middle', po_contents)
self.assertMsgId('Completed 50%% of all the tasks', po_contents)
self.assertMsgId('Completed 99%% of all the tasks', po_contents)
self.assertMsgId("Shouldn't double escape this sequence: %% (two percent signs)", po_contents)
self.assertMsgId("Shouldn't double escape this sequence %% either", po_contents)
self.assertMsgId("Looks like a str fmt spec %%s but shouldn't be interpreted as such", po_contents)
self.assertMsgId("Looks like a str fmt spec %% o but shouldn't be interpreted as such", po_contents)
def test_templatize_blocktrans_tag(self):
# ticket #11966
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
self.assertMsgId('I think that 100%% is more that 50%% of %(obj)s.', po_contents)
self.assertMsgId("Blocktrans extraction shouldn't double escape this: %%, a=%(a)s", po_contents)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("I'm on line 97", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 97, 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
from django.core.management.commands.makemessages import Command
self.assertTrue(Command.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
with self.assertRaises(SyntaxError) as context_manager:
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
six.assertRegex(
self, str(context_manager.exception),
r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=stdout)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(stdout.getvalue()))
# This issue is fixed in 1.8+ (#23312).
if six.PY3 and sys.platform.startswith('win'):
test_unicode_decode_error = expectedFailure(test_unicode_decode_error)
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
stdout = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=stdout)
self.assertIn("code_sample.py:4", force_text(stdout.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Special trans context #1"' in po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertTrue('msgctxt "Special trans context #2"' in po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertTrue('msgctxt "Special trans context #3"' in po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context #1"' in po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertTrue('msgctxt "Special blocktrans context #2"' in po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertTrue("Translatable literal #8b-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #3"' in po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertTrue("Translatable literal #8c-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #4"' in po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Context wrapped in single quotes"' in po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Special blocktrans context wrapped in single quotes"' in po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertFalse('ignored comment #1' in po_contents)
self.assertFalse('Translators: ignored i18n comment #1' in po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertFalse('ignored i18n comment #2' in po_contents)
self.assertFalse('ignored comment #2' in po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertFalse('ignored comment #3' in po_contents)
self.assertFalse('ignored i18n comment #3' in po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertFalse('ignored comment #4' in po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertFalse('ignored comment #5' in po_contents)
self.assertFalse('ignored i18n comment #4' in po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertTrue('#. Translators: valid i18n comment #5' in po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertTrue('#. Translators: valid i18n comment #6' in po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertTrue('#. Translators: valid i18n comment #7' in po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
"""
Regression test for #23583.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
@override_settings(STATIC_ROOT=None, MEDIA_ROOT='')
def test_default_root_settings(self):
"""
Regression test for #23717.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_directory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
os.path.join('ignore_dir', '*'),
])
self.assertTrue("ignoring directory ignore_dir" in out)
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
def test_ignore_subdirectory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'templates/*/ignore.html',
'templates/subdir/*',
])
self.assertTrue("ignoring directory subdir" in out)
self.assertNotMsgId('This subdir should be ignored too.', po_contents)
def test_ignore_file_patterns(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'xxx_*',
])
self.assertTrue("ignoring file xxx_ignored.html" in out)
self.assertNotMsgId('This should be ignored too.', po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
out, _ = self._run_makemessages()
self.assertIn("ignoring directory static", out)
self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
super(SymlinkExtractorTests, self).setUp()
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
# On Python >= 3.2) os.symlink() exists always but then can
# fail at runtime when user hasn't the needed permissions on
# WIndows versions that support symbolink links (>= 6/Vista).
# See Python issue 9333 (http://bugs.python.org/issue9333).
# Skip the test in that case
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertTrue('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertTrue('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
# #16903 -- Standard comment with source file relative path should be present
self.assertLocationCommentPresent(self.PO_FILE, 55, 'templates', 'test.html')
# #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE = 'locale/django.pot'
def setUp(self):
super(KeepPotFileExtractorTests, self).setUp()
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
class CustomLayoutExtractionTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.join(this_directory, 'project_dir')
def test_no_locale_raises(self):
os.chdir(self.test_dir)
with six.assertRaisesRegex(self, management.CommandError,
"Unable to find a locale path to store translations for file"):
management.call_command('makemessages', locale=LOCALE, verbosity=0)
@override_settings(
LOCALE_PATHS=(os.path.join(
this_directory, 'project_dir', 'project_locale'),)
)
def test_project_locale_paths(self):
"""
Test that:
* translations for an app containing a locale folder are stored in that folder
* translations outside of that app are in LOCALE_PATHS[0]
"""
os.chdir(self.test_dir)
self.addCleanup(shutil.rmtree,
os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
self.addCleanup(shutil.rmtree,
os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
project_de_locale = os.path.join(
self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
app_de_locale = os.path.join(
self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
self.assertTrue(os.path.exists(project_de_locale))
self.assertTrue(os.path.exists(app_de_locale))
with open(project_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has no locale directory', po_contents)
self.assertMsgId('This is a project-level string', po_contents)
with open(app_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has a locale directory', po_contents)
| [
"mysolutus@gmail.com"
] | mysolutus@gmail.com |
9e60486235c68aede8d66a0cd43543bc36f9c18d | 31898e25ba3623fa537c21a093aec179bb37d3f2 | /basic.py | eea0b5aa341023f57af250deb7fc7392fe764caa | [] | no_license | audreyemmely/Python-programming | 92fe9d8a960052f3ae071f1e3d2177fffbe4c253 | 09026315bd7a9a1ab6c0584a8dfe4e00ae53f514 | refs/heads/master | 2023-08-30T22:48:55.658605 | 2020-07-28T02:55:57 | 2020-07-28T02:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,984 | py | # -*- coding: utf-8 -*-
print("Hello World!")
print("Olá mundo!")
#-------------------------------------------------------------------------
#math
print(2 + 2)
print(2 * 3)
print(2 ** 3)
print(10 % 3)
#-------------------------------------------------------------------------
#message
message = "Olá mundo"
print (message)
#-------------------------------------------------------------------------
#types
var1 = 1 #integer
var2 = 1.1 #float
var3 = "Eu sou demais, heheh" #string
print(var1)
print(var2)
print(var3)
#-------------------------------------------------------------------------
#comparing
x = 2
y = 3
print(x == y)
print(x < y)
sum = x + y
print (sum >= y)
print (sum < y)
#-------------------------------------------------------------------------
#conditional
print(x == y and x == sum)
x = 3
z = 3
print(x == y and x == z)
print(x == y or x == z)
#-------------------------------------------------------------------------
#if
b = 1
c = 1000
if b > c:
print("b é maior que c")
if c > b:
print("c é maior que b")
#-------------------------------------------------------------------------
#else
d = 1
e = 2
if d > e:
print("d maior que e")
else:
print("d não é maior que e")
#-------------------------------------------------------------------------
#elif
f = 1
g = 2
if f == g:
print("números iguais")
elif g > f:
print("g maior que f")
else:
print("números diferentes")
#-------------------------------------------------------------------------
#while
var_1 = 1
while var_1 < 10:
print(var_1)
var_1 += 2 #var_1 = var_1 + 2
#-------------------------------------------------------------------------
#for
list1 = [1, 2, 3, 4, 5]
list2 = ["olá", "mundo"]
list3 = [0, "olá", 9.99]
for i in list1:
print(i)
for i in list2:
print(i)
for i in list3:
print(i)
#-------------------------------------------------------------------------
#for/range
for i in range(10):
print(i)
for i in range(10, 20):
print(i)
for i in range(10, 20, 2):
print(i)
#-------------------------------------------------------------------------
#input
number = input("Digite um número: ")
print("O número digitado é: ")
print(number)
name = input("Digite seu nome: ")
print("Bem vindo/a, " + name)
#-------------------------------------------------------------------------
#string
string1 = "Audrey"
string2 = "Emmely"
concatenate = string1 + " " + string2
print(concatenate)
size = len(concatenate)
print(size)
print(string1[2])
print(string1[0:6])
my_string = "O rato roeu a roupa do rei de Roma"
my_list = my_string.split(" ")
print(my_list)
search = my_string.find("rei")
print(search)
print(my_string[search:])
my_string = my_string.replace("o rei", "a rainha")
print(my_string)
#-------------------------------------------------------------------------
#functions
def sumValue(val1, val2):
print(val1+val2)
sumValue(2, 3)
def mult(val3, val4):
return val3*val4
m = mult(3, 4)
print(m) | [
"aerv@ic.ufal.br"
] | aerv@ic.ufal.br |
a4cfa1bd9084c7fafb765a66bcd1d7f1d3eb5665 | e5ab431a2437fdc2bf67f02f6cbf016edaea8b23 | /reporting/urls.py | d3459d500b7b7eaefb9d5716a3782435d698d634 | [] | no_license | genologics/statify-ui | 1cdc6be53b978e083b39a25556826a4a11ad2140 | d1741729b9e8869113ad04db4d50c4a1533a15b2 | refs/heads/master | 2020-07-01T04:10:27.676218 | 2011-05-27T00:38:12 | 2011-05-27T00:38:14 | 1,672,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from django.conf.urls.defaults import patterns, url
# Route the application root ('') to the reporting index view.
# NOTE(review): `patterns()` and string-based view references are legacy
# Django APIs (deprecated in 1.8, removed in 1.10) — confirm the Django
# version this project targets.
urlpatterns = patterns('',
    url(r'^$', 'reporting.views.index', name='reporting_index'),
)
"ajordens@Phaeton.local"
] | ajordens@Phaeton.local |
279a44e09162d76a550d461c8cbaa7c7122d9644 | 1934da4a36761193645be79467fbf614421343c0 | /writeNew.py | 13164a0908b3d4f03e51a56badec855ecc8a35f0 | [] | no_license | saransh-mehta/BasicPythonPrograms | 0efca7c1ff56c7d321b8905d6eaf486ef2d856a9 | 18715b834ff77be29a0af73aa622c73223774589 | refs/heads/master | 2021-01-17T14:51:18.054465 | 2017-03-06T17:31:43 | 2017-03-06T17:31:43 | 84,100,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #writing a txt file with each sentence in new line
file=open("F:/PYTHON PROGRAMS/writeTest.txt","w+")
def nextLine(content):
    """Split *content* on '.' and write each piece on its own line.

    Writes to (and then closes) the module-level `file` handle.
    Note: the '.' separators themselves are dropped from the output,
    and a trailing newline is always written.
    """
    pieces = content.split(".")
    text = "".join(piece + "\n" for piece in pieces)
    print(text)
    file.write(text)
    print("writing done")
    file.close()
value=input("add all content : ")
nextLine(value)
| [
"noreply@github.com"
] | saransh-mehta.noreply@github.com |
ad9989eac00b34fb5d4c74cacffaf49e07c379a3 | 042bd40e554ac7fcd618c334ae98b4f43248a250 | /examples/python/gpu/tensors/ocean_cast_01.py | ce97706ce32728f5fa42af5c74d567a917e63ef7 | [
"Apache-2.0"
] | permissive | kant/ocean-tensor-package | 8a62df968335de2057ff095f0910e5ad5fcff8e1 | fb3fcff8bba7f4ef6cd8b8d02f0e1be1258da02d | refs/heads/master | 2020-03-29T04:01:22.064480 | 2018-09-19T19:17:19 | 2018-09-19T19:17:19 | 149,511,923 | 0 | 0 | Apache-2.0 | 2018-09-19T21:03:14 | 2018-09-19T21:03:14 | null | UTF-8 | Python | false | false | 388 | py | import ocean
A = ocean.asTensor([1,2,3])
B = A.storage
C = ocean.int8(10)
print(ocean.gpu[0](A))
print(ocean.ensure(A,ocean.float,ocean.gpu[0]))
ocean.ensure(A,ocean.half,ocean.gpu[0],True)
print(A)
print(ocean.gpu[0](B))
print(ocean.ensure(B,ocean.int8,ocean.gpu[0]))
ocean.ensure(B,ocean.gpu[0],True)
print(B)
print(ocean.gpu[0](C))
print(ocean.ensure(C,ocean.int16,ocean.cpu))
| [
"evandenberg@us.ibm.com"
] | evandenberg@us.ibm.com |
a7bc78dafa0d03b4347019c2c6a65be338480395 | ced336fa7bc732bcf78b79096e4633a3bcd876b3 | /coffee_leaderboard/database/__init__.py | a9cbbb612186bc8dae71d725e0319a4fd0481f3c | [
"MIT"
] | permissive | lxndrdagreat/coffee-leaderboard | 33b50712a52d04a461da4b034ebd73979e875818 | 282b748c62004320913580d058d637b66d6d0ea6 | refs/heads/master | 2021-07-17T02:05:34.246723 | 2021-02-25T03:26:48 | 2021-02-25T03:26:48 | 55,627,380 | 1 | 1 | MIT | 2021-02-25T03:26:06 | 2016-04-06T17:39:03 | Python | UTF-8 | Python | false | false | 904 | py | from tortoise import Tortoise
from coffee_leaderboard.database.models import CoffeeEntry, UserProfile
from coffee_leaderboard import settings
async def init_db(generate_schemas: bool = False):
    """Initialise Tortoise ORM against ``settings.DATABASE_URL``.

    generate_schemas: when True, also create any missing tables
    (``safe=True`` leaves existing tables untouched).
    """
    await Tortoise.init(
        db_url=settings.DATABASE_URL,
        modules={
            # Model discovery path for Tortoise.
            'models': ['coffee_leaderboard.database.models']
        }
    )
    if generate_schemas:
        await Tortoise.generate_schemas(safe=True)
async def seed_test_data():
    """Populate the database with one user and a few coffee entries.

    Intended for local testing only.
    """
    await init_db()
    # for testing only
    user = UserProfile(username='the_dude')
    await user.save()
    # These look like epoch-millisecond timestamps — confirm against the
    # CoffeeEntry.date field definition.
    dates = [
        1549978467092,
        1549978467092,
        1549892929567
    ]
    for d in dates:
        entry = CoffeeEntry(user=user, text=':coffee:',
                            channel_id='42', channel_name='my-channel',
                            date=d)
        await entry.save()
| [
"lxndrdagreat@gmail.com"
] | lxndrdagreat@gmail.com |
cf7d8327b5903facc99af2b18a3a46f2122b91f9 | 71c2d17abc08fe6320895d44735833ebdd632aba | /globalvar.py | 70f2922063d7e09bf218d81a14a4100692224186 | [
"Apache-2.0"
] | permissive | LiamxZhang/sailboat_zoneControl_big-catamaran | 2389811c104969e3d631b188458dade610262c12 | d5ed14cc7a9e95cd82263799159f9e01566caac2 | refs/heads/master | 2022-01-14T00:00:11.905351 | 2018-11-27T11:58:51 | 2018-11-27T11:58:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 15 22:29:06 2018
@author: Lianxin Zhang
This is the global variable for the information of flag, heading, PWM...
"""
def _init():
    """Initialize the module-level dictionary that holds shared state."""
    global _global_dict
    _global_dict = {}

def set_value(key, value):
    """Store *value* under *key* in the shared dictionary."""
    _global_dict[key] = value

def get_value(key, defValue=None):
    """Return the value stored under *key*, or *defValue* if absent."""
    return _global_dict.get(key, defValue)

_init()
"noreply@github.com"
] | LiamxZhang.noreply@github.com |
5568540908351a61910755b5f92dcbe0d1297e58 | f078935d113ca56008a999ed1e111965c818a4cb | /jupyter/short_term_plasticity.py | a7169148a71e673370ba874e712190134e3b228c | [] | no_license | HanjiaJiang/AllenAPI | 7286700b6fe6d5d50263140489962e238b850a5d | 5252c29615ece75b1fc6eea994a0479b796f6bbc | refs/heads/master | 2020-11-23T22:56:26.556381 | 2019-12-13T14:16:49 | 2019-12-13T14:16:49 | 227,854,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,738 | py | import numpy as np
import scipy.stats
from aisynphys.database import SynphysDatabase
from aisynphys.cell_class import CellClass, classify_cells, classify_pairs
from aisynphys.connectivity import measure_connectivity, pair_was_probed
# Download and cache the sqlite file for the requested database
# (for available versions, see SynphysDatabase.list_versions)
db = SynphysDatabase.load_version('synphys_r1.0_2019-08-29_small.sqlite')
# Load all synapses associated with mouse V1 projects
pairs = db.pair_query(
project_name=["mouse V1 coarse matrix", "mouse V1 pre-production"],
synapse=True,
).all()
print("loaded %d synapses" % len(pairs))
cell_classes = {
'pyr': CellClass(cell_class='ex', name='pyr'),
'pvalb': CellClass(cre_type='pvalb', name='pvalb'),
'sst': CellClass(cre_type='sst', name='sst'),
'vip': CellClass(cre_type='vip', name='vip'),
}
# get a list of all cells in the selected pairs
cells = set([pair.pre_cell for pair in pairs] + [pair.post_cell for pair in pairs])
# Classify each cell. Note that, depending on the class definitions above, a cell could
# belong to multiple classes.
cell_class = {}
for cell in cells:
# which of the classes defined above is this cell a member of?
cell_in_classes = [cls_name for cls_name, cls in cell_classes.items() if cell in cls]
cell_class[cell] = ','.join(cell_in_classes)
import pandas
# construct a pandas dataframe containing the pre/postsynaptic cell class names
# and a measure of short-term plasticity
pre_class = [cell_class[pair.pre_cell] for pair in pairs]
post_class = [cell_class[pair.post_cell] for pair in pairs]
stp = [None if pair.dynamics is None else pair.dynamics.stp_induction_50hz for pair in pairs]
df = pandas.DataFrame(
zip(pairs, pre_class, post_class, stp),
columns=['pair', 'pre_class', 'post_class', 'stp'])
# select out only cells that are a member of exactly 1 class
mask = df.pre_class.isin(cell_classes) & df.post_class.isin(cell_classes)
df = df[mask]
# select only pairs with a measured stp
df = df.dropna()
df.head()
stp = df.pivot_table('stp', 'pre_class', 'post_class', aggfunc=np.mean)
# sort rows/cols into the expected order
order = list(cell_classes)
stp = stp[order].loc[order]
stp
import matplotlib.pyplot as plt
import seaborn as sns
fig,ax = plt.subplots(figsize=(8, 6))
hm = sns.heatmap(stp, cmap='coolwarm', vmin=-0.4, vmax=0.4, square=True, ax=ax,
cbar_kws={"ticks":[-0.3, 0, 0.3], 'label': '<-- depressing facilitating -->'})
fig.suptitle("50 Hz Train-induced STP", fontsize=16)
hm.set_xlabel("postsynaptic", fontsize=14)
hm.set_ylabel("presynaptic", fontsize=14);
hm.figure.axes[-1].yaxis.label.set_size(14)
hm.tick_params(labelsize=12)
| [
"bagheera1984@gmail.com"
] | bagheera1984@gmail.com |
a2b91eceee4b8605757728c8196874fbfb1c1d05 | 71469cb9d9dd41438373be83c1e43b67bca25649 | /tests/test__util.py | 5f8dd6d9d82c8331fe015eef296620530e1e28c4 | [
"MIT"
] | permissive | achillesrasquinha/honcho | 7494042775f205b5c0690676856a49185f4ef5d1 | aab83cb10b1d4832c82a4dd3661a6b6df1e1e021 | refs/heads/master | 2020-03-26T10:00:02.351236 | 2018-08-17T18:37:34 | 2018-08-17T18:37:34 | 144,776,468 | 0 | 0 | MIT | 2018-08-14T22:03:08 | 2018-08-14T22:03:08 | null | UTF-8 | Python | false | false | 397 | py | from honcho._util import (
_get_if_empty,
_listify
)
def test__get_if_empty():
    # A non-empty first argument is returned as-is; None falls back to
    # the second argument (presumably any falsy value does — confirm in
    # honcho._util).
    assert _get_if_empty("foo", "bar") == "foo"
    assert _get_if_empty(None, "bar") == "bar"
def test__listify():
    # Scalars are wrapped in a single-element list; values that are
    # already lists are returned unchanged (including nested/empty ones).
    assert _listify("foo") == ["foo"]
    assert _listify(12345) == [12345]
    assert _listify(["foo"]) == ["foo"]
    assert _listify([[]]) == [[]]
    assert _listify([]) == []
"achillesrasquinha@gmail.com"
] | achillesrasquinha@gmail.com |
7d9ed61967fe6732bde0e501d749f81ef25b670d | 6121ca934b33a1e8d5a340a91641a83e7322aac7 | /tests/handlers/test_alb.py | e75d2d9145bf1be2583c0be58849efae8d34869c | [
"MIT"
] | permissive | jordaneremieff/mangum | 44c010a1c7c4e38a84decb2efe97c26f1af31087 | 5ef6c4e5f58fc3e0bdee3248b8b2409725f055d1 | refs/heads/main | 2023-08-16T01:40:47.350887 | 2022-11-27T11:20:23 | 2022-11-27T11:20:23 | 165,652,505 | 1,223 | 82 | MIT | 2023-09-04T07:53:24 | 2019-01-14T11:49:29 | Python | UTF-8 | Python | false | false | 12,958 | py | """
References:
1. https://docs.aws.amazon.com/lambda/latest/dg/services-alb.html
2. https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html # noqa: E501
"""
from typing import Dict, List, Optional
import pytest
from mangum import Mangum
from mangum.handlers.alb import ALB
def get_mock_aws_alb_event(
    method,
    path,
    query_parameters: Optional[Dict[str, List[str]]],
    headers: Optional[Dict[str, List[str]]],
    body,
    body_base64_encoded,
    multi_value_headers: bool,
):
    """Build a mock AWS ALB (ELB) event for a lambda target.

    `query_parameters` and `headers` are supplied in the multi-value
    (`multiValueQueryStringParameters` / `multiValueHeaders`) format.
    When `multi_value_headers` is False they are collapsed to the
    single-value `queryStringParameters` / `headers` format by keeping
    only the last value of each key (per AWS docs for ALB/lambda).

    If `headers` is None, a realistic default header set is used; if
    `query_parameters` is None, no query parameters are used.
    """
    if headers is None:
        headers = {
            "accept": [
                "text/html,application/xhtml+xml,application/xml;"
                "q=0.9,image/webp,image/apng,*/*;q=0.8"
            ],
            "accept-encoding": ["gzip"],
            "accept-language": ["en-US,en;q=0.9"],
            "connection": ["keep-alive"],
            "host": ["lambda-alb-123578498.us-east-2.elb.amazonaws.com"],
            "upgrade-insecure-requests": ["1"],
            "user-agent": [
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
            ],
            "x-amzn-trace-id": ["Root=1-5c536348-3d683b8b04734faae651f476"],
            "x-forwarded-for": ["72.12.164.125"],
            "x-forwarded-port": ["80"],
            "x-forwarded-proto": ["http"],
            "x-imforwards": ["20"],
        }
    if query_parameters is None:
        query_parameters = {}

    def _collapse(multi):
        # Keep only the last value of each key; an empty value list
        # collapses to an empty list (mirroring the original behavior).
        return {
            key: (values[-1] if len(values) > 0 else [])
            for key, values in multi.items()
        }

    event = {
        "requestContext": {
            "elb": {
                "targetGroupArn": (
                    "arn:aws:elasticloadbalancing:us-east-2:123456789012:"
                    "targetgroup/lambda-279XGJDqGZ5rsrHC2Fjr/49e9d65c45c6791a"
                )
            }
        },
        "httpMethod": method,
        "path": path,
        "body": body,
        "isBase64Encoded": body_base64_encoded,
    }

    # Only one of queryStringParameters/multiValueQueryStringParameters
    # (and headers/multiValueHeaders) is ever set, as ALB does.
    if multi_value_headers:
        event["multiValueQueryStringParameters"] = query_parameters
        event["multiValueHeaders"] = headers
    else:
        event["queryStringParameters"] = _collapse(query_parameters)
        event["headers"] = _collapse(headers)
    return event
@pytest.mark.parametrize(
"method,path,query_parameters,headers,req_body,body_base64_encoded,"
"query_string,scope_body,multi_value_headers",
[
("GET", "/hello/world", None, None, None, False, b"", None, False),
(
"GET",
"/lambda",
{
"q1": ["1234ABCD"],
"q2": ["b+c"], # not encoded
"q3": ["b%20c"], # encoded
"q4": ["/some/path/"], # not encoded
"q5": ["%2Fsome%2Fpath%2F"], # encoded
},
None,
"",
False,
b"q1=1234ABCD&q2=b+c&q3=b+c&q4=%2Fsome%2Fpath%2F&q5=%2Fsome%2Fpath%2F",
"",
False,
),
(
"POST",
"/",
{"name": ["me"]},
None,
"field1=value1&field2=value2",
False,
b"name=me",
b"field1=value1&field2=value2",
False,
),
# Duplicate query params with multi-value headers disabled:
(
"POST",
"/",
{"name": ["me", "you"]},
None,
None,
False,
b"name=you",
None,
False,
),
# Duplicate query params with multi-value headers enable:
(
"GET",
"/my/resource",
{"name": ["me", "you"]},
None,
None,
False,
b"name=me&name=you",
None,
True,
),
(
"GET",
"",
{"name": ["me", "you"], "pet": ["dog"]},
None,
None,
False,
b"name=me&name=you&pet=dog",
None,
True,
),
# A 1x1 red px gif
(
"POST",
"/img",
None,
None,
b"R0lGODdhAQABAIABAP8AAAAAACwAAAAAAQABAAACAkQBADs=",
True,
b"",
b"GIF87a\x01\x00\x01\x00\x80\x01\x00\xff\x00\x00\x00\x00\x00,"
b"\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;",
False,
),
(
"POST",
"/form-submit",
None,
None,
b"say=Hi&to=Mom",
False,
b"",
b"say=Hi&to=Mom",
False,
),
],
)
def test_aws_alb_scope_real(
method,
path,
query_parameters,
headers,
req_body,
body_base64_encoded,
query_string,
scope_body,
multi_value_headers,
):
event = get_mock_aws_alb_event(
method,
path,
query_parameters,
headers,
req_body,
body_base64_encoded,
multi_value_headers,
)
example_context = {}
handler = ALB(event, example_context, {"api_gateway_base_path": "/"})
scope_path = path
if scope_path == "":
scope_path = "/"
assert type(handler.body) == bytes
assert handler.scope == {
"asgi": {"version": "3.0", "spec_version": "2.0"},
"aws.context": {},
"aws.event": event,
"client": ("72.12.164.125", 0),
"headers": [
[
b"accept",
b"text/html,application/xhtml+xml,application/xml;q=0.9,image/"
b"webp,image/apng,*/*;q=0.8",
],
[b"accept-encoding", b"gzip"],
[b"accept-language", b"en-US,en;q=0.9"],
[b"connection", b"keep-alive"],
[b"host", b"lambda-alb-123578498.us-east-2.elb.amazonaws.com"],
[b"upgrade-insecure-requests", b"1"],
[
b"user-agent",
b"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
b" (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
],
[b"x-amzn-trace-id", b"Root=1-5c536348-3d683b8b04734faae651f476"],
[b"x-forwarded-for", b"72.12.164.125"],
[b"x-forwarded-port", b"80"],
[b"x-forwarded-proto", b"http"],
[b"x-imforwards", b"20"],
],
"http_version": "1.1",
"method": method,
"path": scope_path,
"query_string": query_string,
"raw_path": None,
"root_path": "",
"scheme": "http",
"server": ("lambda-alb-123578498.us-east-2.elb.amazonaws.com", 80),
"type": "http",
}
if handler.body:
assert handler.body == scope_body
else:
assert handler.body == b""
@pytest.mark.parametrize("multi_value_headers_enabled", (True, False))
def test_aws_alb_set_cookies(multi_value_headers_enabled) -> None:
async def app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
[b"content-type", b"text/plain; charset=utf-8"],
[b"set-cookie", b"cookie1=cookie1; Secure"],
[b"set-cookie", b"cookie2=cookie2; Secure"],
],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
handler = Mangum(app, lifespan="off")
event = get_mock_aws_alb_event(
"GET", "/test", {}, None, None, False, multi_value_headers_enabled
)
response = handler(event, {})
expected_response = {
"statusCode": 200,
"isBase64Encoded": False,
"body": "Hello, world!",
}
if multi_value_headers_enabled:
expected_response["multiValueHeaders"] = {
"set-cookie": ["cookie1=cookie1; Secure", "cookie2=cookie2; Secure"],
"content-type": ["text/plain; charset=utf-8"],
}
else:
expected_response["headers"] = {
"content-type": "text/plain; charset=utf-8",
# Should see case mutated keys to avoid duplicate keys:
"set-cookie": "cookie1=cookie1; Secure",
"Set-cookie": "cookie2=cookie2; Secure",
}
assert response == expected_response
@pytest.mark.parametrize(
"method,content_type,raw_res_body,res_body,res_base64_encoded",
[
("GET", b"text/plain; charset=utf-8", b"Hello world", "Hello world", False),
# A 1x1 red px gif
(
"POST",
b"image/gif",
b"GIF87a\x01\x00\x01\x00\x80\x01\x00\xff\x00\x00\x00\x00\x00,"
b"\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;",
"R0lGODdhAQABAIABAP8AAAAAACwAAAAAAQABAAACAkQBADs=",
True,
),
],
)
def test_aws_alb_response(
method, content_type, raw_res_body, res_body, res_base64_encoded
):
async def app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", content_type]],
}
)
await send({"type": "http.response.body", "body": raw_res_body})
event = get_mock_aws_alb_event(method, "/test", {}, None, None, False, False)
handler = Mangum(app, lifespan="off")
response = handler(event, {})
assert response == {
"statusCode": 200,
"isBase64Encoded": res_base64_encoded,
"headers": {"content-type": content_type.decode()},
"body": res_body,
}
def test_aws_alb_response_extra_mime_types():
content_type = b"application/x-yaml"
utf_res_body = "name: 'John Doe'"
raw_res_body = utf_res_body.encode()
b64_res_body = "bmFtZTogJ0pvaG4gRG9lJw=="
async def app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", content_type]],
}
)
await send({"type": "http.response.body", "body": raw_res_body})
event = get_mock_aws_alb_event("GET", "/test", {}, None, None, False, False)
# Test default behavior
handler = Mangum(app, lifespan="off")
response = handler(event, {})
assert content_type.decode() not in handler.config["text_mime_types"]
assert response == {
"statusCode": 200,
"isBase64Encoded": True,
"headers": {"content-type": content_type.decode()},
"body": b64_res_body,
}
# Test with modified text mime types
handler = Mangum(app, lifespan="off")
handler.config["text_mime_types"].append(content_type.decode())
response = handler(event, {})
assert response == {
"statusCode": 200,
"isBase64Encoded": False,
"headers": {"content-type": content_type.decode()},
"body": utf_res_body,
}
@pytest.mark.parametrize("multi_value_headers_enabled", (True, False))
def test_aws_alb_exclude_headers(multi_value_headers_enabled) -> None:
async def app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
[b"content-type", b"text/plain; charset=utf-8"],
[b"x-custom-header", b"test"],
],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
handler = Mangum(app, lifespan="off", exclude_headers=["x-custom-header"])
event = get_mock_aws_alb_event(
"GET", "/test", {}, None, None, False, multi_value_headers_enabled
)
response = handler(event, {})
expected_response = {
"statusCode": 200,
"isBase64Encoded": False,
"body": "Hello, world!",
}
if multi_value_headers_enabled:
expected_response["multiValueHeaders"] = {
"content-type": ["text/plain; charset=utf-8"],
}
else:
expected_response["headers"] = {
"content-type": "text/plain; charset=utf-8",
}
assert response == expected_response
| [
"noreply@github.com"
] | jordaneremieff.noreply@github.com |
79fbd925431f99844a3a2eed5f37893e22a2d1f2 | 5ddf0291d7c64c7e5480bf31d61713d5e458a732 | /backend/app/api/stream/__init__.py | e9e874b86bc7907a4a7b56186d350ae93e021d60 | [] | no_license | duongduc2908/do_an_end | c2ee5800ac49f2735cc698dfa5fa19c73ad44a44 | 065170d8a07d90334d91824a8c73aaef4ab47e47 | refs/heads/main | 2023-05-31T12:08:56.986625 | 2021-06-22T12:04:52 | 2021-06-22T12:04:52 | 375,472,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | from app.api.stream import api_camera
| [
"duongduccpcntt@gmail.com"
] | duongduccpcntt@gmail.com |
48319e496a0ac795e70f1075fc5d5894618cb49b | 17dcf860d207bc2760594920665e6e35d166f91b | /3D/04-3D-structure-recover/homography.py | ff1f007ac3b2808818407c7a9f47bf4c4a02f661 | [] | no_license | bingjiezhu/CV_Learning | d4bbd713526e2cd99bf95e7a46051f8075da43d9 | 762aae8886309fb37bc6681478110c6c9e77fd2e | refs/heads/master | 2021-05-18T09:51:24.316748 | 2019-12-26T12:27:01 | 2019-12-26T12:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,774 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 13:13:57 2019
@author: User
"""
import numpy as np
from numpy import *
from scipy import ndimage
class RansacModel(object):
    """Homography model for testing fit with ransac.py from
    http://www.scipy.org/Cookbook/RANSAC.
    """

    def __init__(self, debug=False):
        self.debug = debug

    def fit(self, data):
        """Estimate a homography from four point correspondences.

        *data* holds one correspondence per row: [x, y, w, x', y', w'].
        """
        correspondences = data.T
        source = correspondences[:3, :4]
        target = correspondences[3:, :4]
        return H_from_points(source, target)

    def get_error(self, data, H):
        """Apply H to every correspondence and return the per-point
        transfer error."""
        correspondences = data.T
        source = correspondences[:3]
        target = correspondences[3:]
        projected = np.dot(H, source)
        # Normalize homogeneous coordinates (last row becomes 1).
        projected = projected / projected[2]
        return np.sqrt(np.sum((target - projected) ** 2, axis=0))
def H_from_ransac(fp,tp,model,maxiter=1000,match_theshold=10):
    """ Robust estimation of homography H from point
        correspondences using RANSAC (ransac.py from
        http://www.scipy.org/Cookbook/RANSAC).

        input: fp, tp (3*n arrays) points in hom. coordinates.
        model: a RansacModel instance providing fit()/get_error().
        maxiter: maximum number of RANSAC iterations.
        match_theshold: inlier error threshold (parameter name keeps
        the original typo for backward compatibility with callers).
        returns: (H, indices of inlier correspondences). """
    import ransac
    # group corresponding points into one 6*n array
    data = np.vstack((fp,tp))
    # compute H and return; 4 = minimum points per sample,
    # 10 = minimum inliers to accept a model
    H,ransac_data = ransac.ransac(data.T,model,4,maxiter,match_theshold,10,return_all=True)
    return H,ransac_data['inliers']
def H_from_points(fp, tp):
    """Find the homography H mapping fp to tp using the linear DLT
    method. Points (3*n arrays in homogeneous coordinates) are
    conditioned automatically for numerical stability."""
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')

    # Condition the source points: zero mean, roughly unit scale.
    m_from = mean(fp[:2], axis=1)
    s_from = max(std(fp[:2], axis=1)) + 1e-9
    T_from = diag([1 / s_from, 1 / s_from, 1])
    T_from[0][2] = -m_from[0] / s_from
    T_from[1][2] = -m_from[1] / s_from
    fp = dot(T_from, fp)

    # Condition the target points the same way.
    m_to = mean(tp[:2], axis=1)
    s_to = max(std(tp[:2], axis=1)) + 1e-9
    T_to = diag([1 / s_to, 1 / s_to, 1])
    T_to[0][2] = -m_to[0] / s_to
    T_to[1][2] = -m_to[1] / s_to
    tp = dot(T_to, tp)

    # Build the DLT design matrix: two rows per correspondence.
    n = fp.shape[1]
    A = zeros((2 * n, 9))
    for i in range(n):
        x, y = fp[0][i], fp[1][i]
        u, v = tp[0][i], tp[1][i]
        A[2 * i] = [-x, -y, -1, 0, 0, 0, u * x, u * y, u]
        A[2 * i + 1] = [0, 0, 0, -x, -y, -1, v * x, v * y, v]

    # The solution is the right singular vector of the smallest
    # singular value.
    U, S, V = linalg.svd(A)
    H = V[8].reshape((3, 3))

    # Undo the conditioning, then normalize so H[2,2] == 1.
    H = dot(linalg.inv(T_to), dot(H, T_from))
    return H / H[2, 2]
def Haffine_from_points(fp,tp):
    """ Find H, affine transformation, such that
        tp is affine transf of fp.

        fp, tp: 3*n arrays of points in homogeneous coordinates.
        returns: 3*3 affine homography with last row [0, 0, 1]. """
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')
    # condition points
    # --from points--
    m = mean(fp[:2], axis=1)
    maxstd = max(std(fp[:2], axis=1)) + 1e-9
    C1 = diag([1/maxstd, 1/maxstd, 1])
    C1[0][2] = -m[0]/maxstd
    C1[1][2] = -m[1]/maxstd
    fp_cond = dot(C1,fp)
    # --to points--
    m = mean(tp[:2], axis=1)
    C2 = C1.copy() #must use same scaling for both point sets
    C2[0][2] = -m[0]/maxstd
    C2[1][2] = -m[1]/maxstd
    tp_cond = dot(C2,tp)
    # conditioned points have mean zero, so translation is zero
    A = concatenate((fp_cond[:2],tp_cond[:2]), axis=0)
    U,S,V = linalg.svd(A.T)
    # create B and C matrices as Hartley-Zisserman (2:nd ed) p 130.
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]
    # assemble the 2x3 affine part; the translation column is zero in
    # conditioned space
    tmp2 = concatenate((dot(C,linalg.pinv(B)),zeros((2,1))), axis=1)
    H = vstack((tmp2,[0,0,1]))
    # decondition
    H = dot(linalg.inv(C2),dot(H,C1))
    return H / H[2,2]
def normalize(points):
    """ Normalize a collection of points in
        homogeneous coordinates so that last row = 1.

        NOTE: modifies *points* in place (each row is divided by the
        last row) and returns the same array. """
    for row in points:
        row /= points[-1]
    return points
def make_homog(points):
    """Convert a dim*n array of points to homogeneous coordinates by
    appending a row of ones."""
    homogeneous_row = ones((1, points.shape[1]))
    return np.vstack((points, homogeneous_row))
"caoqi95@gmail.com"
] | caoqi95@gmail.com |
0fbd1610fa955f8f77c0c74c21c1e6e1511405f3 | 8b99d2ff35ab5b3050a3999675f18974376fb49b | /generate_faces.py | db0458f3589f4a99e7ec7df58de3a9fdddfa323d | [] | no_license | PrakarshBhardwaj/auto-animeface-generation-using-DCGAN-pytorch | 0162214b77e828b533ecde09b3f0d39df69341de | edb531a7b6d9ace22a93bbeefc57c41ca904b0da | refs/heads/master | 2022-11-20T14:44:29.468792 | 2020-07-21T16:02:37 | 2020-07-21T16:02:37 | 281,320,004 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # Usage - python generate_faces.py ((number of faces to generate)) ((filename to save image with))
from models import Generator , sampler_noise , show_imgs
from data_processing import denormalize
import torch
import sys , time
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
NOISE_DIM = 96
def savegrid(ims, save_name, fill=True, showax=False):
    """Save the images in *ims* as a square grid to '<save_name>.png'.

    ims: sequence/array of HxWxC images.
    save_name: output file name without the '.png' extension.
    fill: if True, remove all spacing/margins between grid cells.
    showax: if False, hide the axes of every cell.
    """
    # Bug fix: the grid size was computed from the module-level global
    # `imgs` instead of the `ims` parameter.
    n_images = len(ims)
    rows = int(np.ceil(np.sqrt(n_images)))
    cols = int(np.ceil(np.sqrt(n_images)))
    gridspec_kw = {'wspace': 0, 'hspace': 0} if fill else {}
    # squeeze=False keeps axarr a 2-D array, so .ravel() also works for
    # a single image (squeeze=True would return a bare Axes).
    fig, axarr = plt.subplots(rows, cols, gridspec_kw=gridspec_kw, squeeze=False)
    if fill:
        bleed = 0
        fig.subplots_adjust(left=bleed, bottom=bleed, right=(1 - bleed), top=(1 - bleed))
    for ax, im in zip(axarr.ravel(), ims):
        ax.imshow(im)
        if not showax:
            ax.set_axis_off()
    kwargs = {'pad_inches': .01} if fill else {}
    fig.savefig('{}.png'.format(save_name), **kwargs)
def imgs_to_array(imgs, name_):
    """Convert a batch of CHW image tensors to a numpy array of
    denormalized HWC images.

    imgs: iterable of CHW torch tensors.
    name_: unused; kept for backward compatibility with existing callers.
    """
    # Comprehension replaces the manual enumerate/append loop (the index
    # was never used).
    return np.array([
        denormalize(np.transpose(img.numpy(), (1, 2, 0)))
        for img in imgs
    ])
# --- Script entry: generate faces with the trained DCGAN generator ---
start = time.time()
print("Generating Images...")

# Rebuild the generator architecture and load the trained weights.
Gt = Generator(NOISE_DIM , 64 , 3 , extra_layers=1)
Gt.load_state_dict(torch.load("finalG.pt"))
Gt.eval()  # inference mode: disable dropout/batch-norm updates

# Usage: python generate_faces.py <num_faces> <output_name>
N_generate = int(sys.argv[1])
name = sys.argv[2]

# Sample latent noise, run the generator, and detach from the autograd graph.
noise = sampler_noise(N_generate , NOISE_DIM)
generated_imgs = Gt(noise).detach()
show_imgs(generated_imgs)

# Convert tensors to HWC numpy images and save them all as one grid PNG.
imgs = imgs_to_array(generated_imgs , name)
savegrid(imgs , name)
print("Finished all tasks in {:.4} seconds".format(time.time() - start))
"noreply@github.com"
] | PrakarshBhardwaj.noreply@github.com |
0809266a0f6942b0ad0c87706e44b248ee86f7ac | 62fcebeca8803a97f3cc599e85874f859f7931c4 | /AULAS_60/ENDERECO/endereco_view/web_view.py | 8dece8f1191868cf91ab18557fc5a0e5f7d3abd3 | [] | no_license | thalytacf/PythonClass | 877ba08decdf2c807af755f0c6615383f94e7b8d | e6b9c97bae7d8d78400b964d4b34c2dad8cc9103 | refs/heads/master | 2023-05-14T12:23:23.250575 | 2020-03-04T19:55:43 | 2020-03-04T19:55:43 | 220,218,841 | 0 | 0 | null | 2023-05-01T21:31:22 | 2019-11-07T11:16:01 | Tcl | UTF-8 | Python | false | false | 372 | py | from flask import Flask, render_template
import sys
sys.path.append('/Users/900163/Desktop/PythonHBSIS/Hard/ENDERECO')
from endereco_controller.endereco_controller import EnderecoController
# Flask application serving the address (endereco) listing page.
app = Flask(__name__)
ec = EnderecoController()

@app.route('/')
def inicio():
    """Render the index page with every stored address."""
    endereco = ec.listar_todos()
    return render_template('index.html', lista = endereco)

# NOTE(review): app.run() at module level starts the dev server on
# import as well as on direct execution — consider an
# `if __name__ == '__main__':` guard.
app.run()
"thalytacfischer@gmail.com"
] | thalytacfischer@gmail.com |
a85345fb6a4e53890f5d25cd6f55100028f7fcaa | a5d310e69116655aeea5e03d78fb030733b723b9 | /project/settings.py | a10b7187d2ab1fdd4b8f622bd358e7b1ad146693 | [] | no_license | dmitry-mag/taskman | a5bf3dfa9048ee4a01b96e2f5321fc4cb763fed2 | 6a668f781de3de86e07c5a087d6cd91fd9483eab | refs/heads/master | 2022-07-04T02:53:35.699284 | 2022-06-19T16:17:57 | 2022-06-19T16:17:57 | 229,421,243 | 0 | 0 | null | 2022-06-19T15:49:45 | 2019-12-21T12:00:49 | JavaScript | UTF-8 | Python | false | false | 3,428 | py | """
Django settings for taskman project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$#8iqygfqh&^x1ptkt+yk0ywb-$!e4#rj(xr*99k!38grdnu0c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party apps
'crispy_forms',
'django_filters',
# project apps
'tasks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
# {
# # 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# # 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# # 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
CRISPY_TEMPLATE_PACK = 'bootstrap4' | [
"mdvaven@mail.ru"
] | mdvaven@mail.ru |
ce9cacb177ce5e5ab233a69cca7469454d7a56e2 | 61dcd9b485bc5e6d07c4adf14f138eabaa9a23b5 | /evennumberedexercise/Exercise10_2.py | 484e16240b641b4684b13211c0c81be57f8d9814 | [] | no_license | bong1915016/Introduction-to-Programming-Using-Python | d442d2252d13b731f6cd9c6356032e8b90aba9a1 | f23e19963183aba83d96d9d8a9af5690771b62c2 | refs/heads/master | 2020-09-25T03:09:34.384693 | 2019-11-28T17:33:28 | 2019-11-28T17:33:28 | 225,904,132 | 1 | 0 | null | 2019-12-04T15:56:55 | 2019-12-04T15:56:54 | null | UTF-8 | Python | false | false | 303 | py | def main():
# Read numbers as a string from the console
s = input("Enter numbers separated by spaces from one line: ")
items = s.split() # Extracts items from the string
numbers = [ eval(x) for x in items ] # Convert items to numbers
numbers.reverse()
print(numbers)
main() | [
"38396747+timmy61109@users.noreply.github.com"
] | 38396747+timmy61109@users.noreply.github.com |
0cd4326f779ca4ad3b5f2584b21f88ef4924540e | 2a7f1a6400f9bb07183367fa2126f1e746ab245e | /articles/migrations/0002_article_thumb.py | 23b10ed53fe4f945921528aaaf745cb79f308912 | [] | no_license | gustavsberg/djstar | 6bc2c65ab47524a466f68ee3fb519e551d9f7ea1 | 1c705b90892e5a2dc717a975c64ffb1a7a63c834 | refs/heads/master | 2020-06-13T13:26:09.902136 | 2019-08-02T19:48:28 | 2019-08-02T19:48:28 | 194,670,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.2 on 2019-07-04 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``thumb`` image field to the Article model."""
    # Must be applied after the initial articles schema.
    dependencies = [
        ('articles', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='thumb',
            # blank=True makes the field optional in forms; articles without
            # an uploaded image fall back to 'default.png'.
            field=models.ImageField(blank=True, default='default.png', upload_to=''),
        ),
    ]
| [
"hainsgud@gmail.com"
] | hainsgud@gmail.com |
73f6735429e026508d57adbd1bebf195ec5c37a7 | d78de9e18b8a3dfe8e363bb8f84e2dd0590b811b | /2021-01-03 reddit_to_firestore/Scripts/wsdump.py | 27e08ddcd7858808464fc9ec3916cfd482cc6490 | [] | no_license | marvintensuan/My-Data-Migrations | c9efeab3a79cac5b7f8888827c40ea7ff58ed66f | d0d60b2ce90f99eb536dc601d8888a987f9af09a | refs/heads/master | 2023-03-05T12:35:02.380446 | 2021-02-13T18:05:49 | 2021-02-13T18:05:49 | 326,229,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,439 | py | #!d:\projects\tagapagtuos\scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lowercase name of stdin's encoding, defaulting to utf-8."""
    detected = getattr(sys.stdin, "encoding", "")
    return detected.lower() if detected else "utf-8"
# Frame opcodes that carry payload data (as opposed to control frames).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Cached stdin encoding, resolved once at import time.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action turning ``-v``, ``-vvv`` or ``-v 2`` into an int level."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # e.g. "-vvv" arrives here as "vv": 2 occurrences of "v" + 1 = 3
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the CLI parser for the dump tool and parse ``sys.argv``.

    Returns the argparse namespace; the only required argument is the
    positional websocket URL.
    """
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # VAction lets -v be repeated (-vvv) or given an explicit integer.
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Mixin providing a Python 2/3 compatible prompt-and-read helper."""

    def raw_input(self, prompt):
        # Use the right builtin for the running interpreter.
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # Normalise whatever came back to utf-8 bytes: re-encode byte input
        # read under a non-utf-8 terminal encoding, or encode py3 text.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Console front-end that pretty-prints incoming frames in colour."""

    def write(self, data):
        # ANSI: erase the current line and move to line start so the incoming
        # message does not interleave with the half-typed prompt.
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        # ANSI: blue foreground for the message, then reset to default.
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain console front-end used with --raw: no prompt, no colour."""

    def write(self, data):
        self.stdout_write(data) if False else sys.stdout.write(data)  # NOTE(review): kept as plain write below
        sys.stdout.write("\n")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL from the CLI and run a dump REPL.

    A daemon thread prints every frame received from the server while the
    main thread reads lines from the console and sends them.
    """
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable all certificate verification (debugging only).
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")
    def recv():
        # Receive one frame; answer control frames (close/ping) in-line and
        # map any websocket error to a synthetic CLOSE so the loop exits.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data
    def recv_ws():
        # Receiver loop: decode/uncompress each payload and print it.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            # \037\213 is the two-byte gzip magic number.
            if isinstance(data, bytes) and len(data)>2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                # Fall back to raw-deflate (negative wbits = no zlib header).
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass
            if isinstance(data, bytes):
                data = repr(data)
            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break
    # Daemon thread so Ctrl+C in the sender loop terminates the process.
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Sender loop: read from console until interrupt or EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level boundary: report the error message only (no traceback).
        print(e)
| [
"mdtensuan_dev@protonmail.com"
] | mdtensuan_dev@protonmail.com |
d84bdf27a8f16d518790617f76e7fb344e09c6d0 | dd1cc60f2d452a029bd5487980efbb9f27128db3 | /pages/labels/get-a-label.py | dbe6b42172e9931891197b8d44e6a6d8696d4892 | [] | no_license | tutorcruncher/tc-api-docs | 22d3453a6e7ebf31d4dfcef32e38006d79096360 | 062e93c307f258a324d0c31ca19a2ae33c8c20ad | refs/heads/master | 2023-08-18T01:05:57.228821 | 2023-08-11T15:24:11 | 2023-08-11T15:24:11 | 248,555,059 | 0 | 0 | null | 2023-09-01T14:37:40 | 2020-03-19T16:43:12 | JavaScript | UTF-8 | Python | false | false | 183 | py | import pprint, requests
# Fetch one TutorCruncher label by id. Replace <API KEY> with a real token
# and <id> with the numeric label id before running.
headers = {'Authorization': 'token <API KEY>'}
r = requests.get('https://secure.tutorcruncher.com/api/labels/<id>/', headers=headers)
pprint.pprint(r.json())
| [
"seb.prentice@hotmail.com"
] | seb.prentice@hotmail.com |
6f52e25526d91b14a03debb468ee2df71da8d084 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_Loctet_exo3_2.py | bf23d8a702249cdbeb9c90068dad392df81c129c | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 943 | py | def flip(s):
s=s[::-1]
final=""
for i in s:
if i=="-":
final+="+"
else:
final+="-"
return final
def lastOccur(s):
    """Index of the last '-' in *s*, or -1 when there is none."""
    # str.rfind already returns the highest matching index, -1 if absent.
    return s.rfind("-")
# Driver: read T cases from B-large.in and write the flip count per case.
# NOTE(review): `input` shadows the builtin, and neither file handle is
# ever closed; readline() also keeps the trailing newline on each case.
input = open("B-large.in","r")
output = open("output2.txt","w")
T = input.readline()
T = int(T)
for i in range(T):
	s=input.readline()
	ind = lastOccur(s)
	nb=0
	# Repeatedly flip the prefix ending at the last '-' until none remain.
	while ind != -1:
		nb+=1
		st = s[0:(ind+1)]
		tmp=""
		f=ind
		# Peel off the trailing run of characters differing from st[0].
		while st[0]!=st[f] and f>0:
			tmp+=st[f]
			f-=1
		if tmp!="":
			stk = st[0:f+1]
			stF = flip(stk)
			s=stF+tmp
			ind = lastOccur(s)
		else:
			stF=flip(st)
			ind=lastOccur(stF)
			s = stF[0:ind+1]
	output.write("Case #{0}: {1}\n".format(i+1,nb))
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
87c7b69e90ed610c46ecf362b658e2f357aacf33 | 6ba2d64e6ed0c5470926d16b7b593756975a27c8 | /boids.py | 43eb809195ff46f9877dbf46c561be7504c1f06e | [] | no_license | stephen-david-evans/boids | 0752320bcf21f8101464f1e41b71982f574ad38e | 1c21e297109e3b91b8167d87e8eec6c6dda4b592 | refs/heads/master | 2022-12-18T21:06:25.192406 | 2020-10-09T13:16:04 | 2020-10-09T13:16:04 | 293,083,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,446 | py | """
"""
import numpy as np
from matplotlib import animation
from matplotlib import pyplot as plt
import execs as ecs
# Arena bounds.
XMIN, XMAX = 0.0, 250.0
YMIN, YMAX = 0.0, 250.0
# Distance from a wall at which boids start being pushed back, and the
# velocity nudge applied per frame while inside that layer.
BOUNDARY_LAYER = 5.0
BOUNDARY_FACTOR = 1.1
MAX_SPEED = 2.0
# Per-rule steering gains.
COHESION_STRENGTH = 0.005
SEPARATION_STRENGTH = 0.05
ALIGNMENT_STRENGTH = 0.05
# A neighbour counts as "seen" when within this distance AND within this
# angle of the boid's heading.
VISUAL_RANGE = 75.0
VISUAL_ANGLE = np.pi / 4.0
SEPARATION_RANGE = 5.0
@ecs.component
class Velocity:
    """2-D velocity component; calling the instance returns it as an array."""

    def __init__(self, x=None, y=None):
        self.x = x
        self.y = y

    def bearing(self):
        """Heading angle of the velocity vector in radians (atan2(y, x))."""
        return np.arctan2(self.y, self.x)

    def __call__(self):
        return np.array((self.x, self.y))
@ecs.component
class Position:
    """2-D position component; calling the instance returns it as an array."""

    def __init__(self, x=None, y=None):
        self.x = x
        self.y = y

    def __call__(self):
        return np.array((self.x, self.y))
@ecs.component
class Close:
    """Per-boid cache of the positions/velocities of visible neighbours.

    Truthiness means "has at least one visible neighbour".
    """

    def __init__(self, position_data=None, velocity_data=None):
        self.position_data = position_data
        self.velocity_data = velocity_data

    def __bool__(self):
        # Treat "not yet populated" (None, the __init__ default) the same as
        # "no neighbours": the original len(None) raised TypeError whenever
        # a system tested the component before find_close() first ran.
        return self.position_data is not None and len(self.position_data) > 0
def get_position_data():
    """Stack every boid's position into an (n_boids, 2) array."""
    return np.array([e.position() for e in ecs.World.join(Position, Velocity)])
def get_velocity_data():
    """Stack every boid's velocity into an (n_boids, 2) array."""
    return np.array([e.velocity() for e in ecs.World.join(Position, Velocity)])
def get_close(boid, all_positions, all_velocities):
    """Return (positions, velocities) of neighbours visible to *boid*.

    Visibility = within VISUAL_RANGE and within VISUAL_ANGLE of the boid's
    current heading. Returns two empty lists when nothing qualifies.
    """
    position = boid.position()
    # Drop rows equal to this boid's position in BOTH coordinates (i.e. the
    # boid itself — and, as a side effect, any exactly coincident boid).
    other_mask = (all_positions == position).sum(axis=1) < 2
    other_positions = all_positions[other_mask, :]
    other_velocities = all_velocities[other_mask, :]
    distance = np.linalg.norm(other_positions - position, axis=1)
    close = distance < VISUAL_RANGE
    if not close.any():
        return [], []
    other_positions = other_positions[close, :]
    other_velocities = other_velocities[close, :]
    angle = np.arctan2(other_positions[:, 1] - boid.position.y,
                       other_positions[:, 0] - boid.position.x)
    # NOTE(review): this angle difference is not wrapped to [-pi, pi], so
    # neighbours straddling the +/-pi seam may be misclassified — confirm.
    seen = np.abs(angle - boid.velocity.bearing()) < VISUAL_ANGLE
    if not seen.any():
        return [], []
    return other_positions[seen, :], other_velocities[seen, :]
def create_random_flock(n):
    """Create *n* boids with uniformly random positions and velocities.

    Relies on the module-level ``world`` created in the __main__ block.
    """
    for _ in range(n):
        boid = world.create()
        boid.attach(Position(x=np.random.uniform(XMIN, XMAX),
                             y=np.random.uniform(YMIN, YMAX)))
        boid.attach(Velocity(x=np.random.uniform(-MAX_SPEED, MAX_SPEED),
                             y=np.random.uniform(-MAX_SPEED, MAX_SPEED)))
        # Neighbour caches start empty; find_close() populates them.
        boid.attach(Close(position_data=None, velocity_data=None))
@ecs.system
def find_close():
    """Refresh every boid's Close component with its visible neighbours."""
    # Snapshot all boids once per frame instead of per boid.
    all_positions, all_velocities = get_position_data(), get_velocity_data()
    for boid in ecs.World.join(Position, Velocity):
        close_postions, close_velocities = get_close(boid, all_positions, all_velocities)
        boid.close.position_data = close_postions
        boid.close.velocity_data = close_velocities
@ecs.system
def limit_speed():
    """Clamp each boid's speed to MAX_SPEED, preserving direction."""
    for e in ecs.World.gather(Velocity):
        speed = np.linalg.norm((e.velocity.x, e.velocity.y))
        if speed > MAX_SPEED:
            e.velocity.x = MAX_SPEED * (e.velocity.x / speed)
            e.velocity.y = MAX_SPEED * (e.velocity.y / speed)
@ecs.system
def check_boundary():
    """Steer boids back toward the arena when inside the boundary layer.

    (Not periodic wrapping: velocities are nudged away from each wall.)
    """
    for e in ecs.World.join(Position, Velocity):
        if e.position.x < XMIN + BOUNDARY_LAYER:
            e.velocity.x += BOUNDARY_FACTOR
        if e.position.x > XMAX - BOUNDARY_LAYER:
            e.velocity.x -= BOUNDARY_FACTOR
        if e.position.y < YMIN + BOUNDARY_LAYER:
            e.velocity.y += BOUNDARY_FACTOR
        if e.position.y > YMAX - BOUNDARY_LAYER:
            e.velocity.y -= BOUNDARY_FACTOR
@ecs.system
def chohesion():
    """Cohesion rule: each boid flies towards its neighbours' centre of mass.

    NOTE(review): the name is a misspelling of "cohesion"; kept because the
    @ecs.system decorator registers it under this name.
    """
    for boid in ecs.World.join(Position, Velocity, Close):
        if not boid.close:
            continue
        centre = np.mean(boid.close.position_data, axis=0)
        boid.velocity.x += COHESION_STRENGTH * (centre[0] - boid.position.x)
        boid.velocity.y += COHESION_STRENGTH * (centre[1] - boid.position.y)
@ecs.system
def separation():
    """Separation rule: steer away from neighbours closer than SEPARATION_RANGE."""
    for boid in ecs.World.join(Position, Velocity, Close):
        if not boid.close:
            continue
        position = boid.position()
        distance = np.linalg.norm(boid.close.position_data - position, axis=1)
        # Sum of vectors pointing away from each too-close neighbour.
        move = np.sum(position - boid.close.position_data[distance < SEPARATION_RANGE], axis=0)
        boid.velocity.x += SEPARATION_STRENGTH * move[0]
        boid.velocity.y += SEPARATION_STRENGTH * move[1]
@ecs.system
def alignment():
    """Alignment rule: nudge each boid's velocity toward the local average."""
    for boid in ecs.World.join(Position, Velocity, Close):
        if not boid.close:
            continue
        centre = np.mean(boid.close.velocity_data, axis=0)
        boid.velocity.x += ALIGNMENT_STRENGTH * (centre[0] - boid.velocity.x)
        boid.velocity.y += ALIGNMENT_STRENGTH * (centre[1] - boid.velocity.y)
@ecs.system
def update_positions():
    """Advance every boid by its current velocity (one Euler step)."""
    for entity in ecs.World.join(Position, Velocity):
        entity.position.x += entity.velocity.x
        entity.position.y += entity.velocity.y
if __name__ == "__main__":
    # Build the world, spawn a flock, and animate: black dots are boid
    # bodies, red dots mark where each boid will be next frame (heading).
    world = ecs.World()
    create_random_flock(100)
    fig = plt.figure(tight_layout=True)
    ax = fig.add_subplot(111)
    ax.set(xlim=(XMIN, XMAX), ylim=(YMIN, YMAX))
    positions, directions = get_position_data(), get_velocity_data()
    next_positions = positions + directions
    body, = ax.plot(positions[:, 0], positions[:, 1], c="k", markersize=6, marker="o", ls="none")
    head, = ax.plot(next_positions[:, 0], next_positions[:, 1], c="r", markersize=3, marker="o", ls="none")
    def animate(frame, body, head):
        # One simulation tick per animation frame.
        world.run_systems()
        positions, directions = get_position_data(), get_velocity_data()
        next_positions = positions + directions
        body.set_data(positions[:, 0], positions[:, 1])
        head.set_data(next_positions[:, 0], next_positions[:, 1])
    anim = animation.FuncAnimation(fig, animate, interval=0, fargs=(body, head))
    plt.show()
| [
"evans.stephen.david@gmail.com"
] | evans.stephen.david@gmail.com |
e999a16e6adfdf17446ba8992e64289a3804c4f2 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/125_get_most_recommended_books/save2_nopass.py | 797bdd2662c2b1d825c4e4a7e7ca198722722ac8 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,283 | py | from collections import Counter
from bs4 import BeautifulSoup
import requests
AMAZON = "amazon.com"
# static copy of Tim Ferriss' "Tribe of Mentors" books blog post
TIM_BLOG = ('https://bites-data.s3.us-east-2.amazonaws.com/'
            'tribe-mentors-books.html')
# minimum number of recommendations for a book to make the result list
MIN_COUNT = 3
def load_page():
    """Download the blog html and return its decoded content"""
    with requests.Session() as session:
        return session.get(TIM_BLOG).content.decode('utf-8')
def get_top_books(content=None):
    """Make a BeautifulSoup object loading in content,
    find all links that contain AMAZON, extract the book title
    (stripping spacing characters), and count them.
    Return a list of (title, count) tuples where
    count is at least MIN_COUNT
    """
    if content is None:
        content = load_page()
    soup = BeautifulSoup(content, 'html.parser')
    right_table = soup.find('div', {'class': 'entry-content'})
    # NOTE(review): the selector hard-codes 'amazon' rather than using the
    # AMAZON constant above — confirm they are meant to stay in sync.
    books = [row.text
             for row in right_table.select('a[href*=amazon]')]
    # NOTE(review): titles are counted before .strip(), so the same title
    # with different surrounding whitespace is tallied separately.
    c = Counter(books)
    books_final = []
    count = []
    for letter in c:
        if c[letter] >= MIN_COUNT:
            books_final.append(letter.strip())
            count.append(c[letter])
    # Sort by count, most recommended first.
    return sorted(list(zip(books_final, count)),
                  key=lambda tup: tup[1], reverse=True)
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
352dc2bb521d93056573dd75f1115f610e23980e | c73a411084362ec9b3f2fdca978841cc20489070 | /experiments/dqn_test_expt.py | d8684d53f867901fef66927e87ac5d7c65e8afde | [
"Apache-2.0"
] | permissive | victor-psiori/mdp-playground | be01855e83e9c702d19c524868359dfb6fb2fd8c | 84f361b48c1197d9c5bde153d664df24eb8b9d14 | refs/heads/master | 2023-07-09T17:55:52.541552 | 2021-08-02T13:26:46 | 2021-08-02T13:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,878 | py | from ray import tune
from collections import OrderedDict
timesteps_total = 10_000
num_seeds = 10
var_env_configs = OrderedDict(
{
"state_space_size": [8], # , 10, 12, 14] # [2**i for i in range(1,6)]
"action_space_size": [8], # 2, 4, 8, 16] # [2**i for i in range(1,6)]
"delay": [0] + [2 ** i for i in range(4)],
"sequence_length": [1, 2, 3, 4], # i for i in range(1,4)]
"reward_density": [0.25], # np.linspace(0.0, 1.0, num=5)
"make_denser": [False],
"terminal_state_density": [0.25], # np.linspace(0.1, 1.0, num=5)
"transition_noise": [0], # , 0.01, 0.02, 0.10, 0.25]
"reward_noise": [0], # , 1, 5, 10, 25] # Std dev. of normal dist.
"dummy_seed": [i for i in range(num_seeds)],
}
)
var_configs = OrderedDict({"env": var_env_configs})
env_config = {
"env": "RLToy-v0",
"horizon": 100,
"env_config": {
"seed": 0, # seed
"state_space_type": "discrete",
"action_space_type": "discrete",
"generate_random_mdp": True,
"repeats_in_sequences": False,
"reward_scale": 1.0,
"completely_connected": True,
},
}
algorithm = "DQN"
agent_config = {
"adam_epsilon": 1e-4,
"beta_annealing_fraction": 1.0,
"buffer_size": 1000000,
"double_q": False,
"dueling": False,
"exploration_final_eps": 0.01,
"exploration_fraction": 0.1,
"final_prioritized_replay_beta": 1.0,
"hiddens": None,
"learning_starts": 1000,
"lr": 1e-4, # "lr": grid_search([1e-2, 1e-4, 1e-6]),
"n_step": 1,
"noisy": False,
"num_atoms": 1,
"prioritized_replay": False,
"prioritized_replay_alpha": 0.5,
"sample_batch_size": 4,
"schedule_max_timesteps": 20000,
"target_network_update_freq": 800,
"timesteps_per_iteration": 1000,
"min_iter_time_s": 0,
"train_batch_size": 32,
}
model_config = {
"model": {
"fcnet_hiddens": [256, 256],
"custom_preprocessor": "ohe",
"custom_options": {}, # extra options to pass to your preprocessor
"fcnet_activation": "tanh",
"use_lstm": False,
"max_seq_len": 20,
"lstm_cell_size": 256,
"lstm_use_prev_action_reward": False,
},
}
eval_config = {
"evaluation_interval": 1, # I think this means every x training_iterations
"evaluation_config": {
"explore": False,
"exploration_fraction": 0,
"exploration_final_eps": 0,
"evaluation_num_episodes": 10,
"horizon": 100,
"env_config": {
"dummy_eval": True, # hack Used to check if we are in evaluation mode or training mode inside Ray callback on_episode_end() to be able to write eval stats
"transition_noise": 0
if "state_space_type" in env_config["env_config"]
and env_config["env_config"]["state_space_type"] == "discrete"
else tune.function(lambda a: a.normal(0, 0)),
"reward_noise": tune.function(lambda a: a.normal(0, 0)),
"action_loss_weight": 0.0,
},
},
}
# varying_configs = get_grid_of_configs(var_configs)
# # print("VARYING_CONFIGS:", varying_configs)
#
# final_configs = combined_processing(env_config, agent_config, model_config, eval_config, varying_configs=varying_configs, framework='ray', algorithm=algorithm)
# value_tuples = []
# for config_type, config_dict in var_configs.items():
# for key in config_dict:
# assert type(var_configs[config_type][key]) == list, "var_config should be a dict of dicts with lists as the leaf values to allow each configuration option to take multiple possible values"
# value_tuples.append(var_configs[config_type][key])
#
# import itertools
# cartesian_product_configs = list(itertools.product(*value_tuples))
# print("Total number of configs. to run:", len(cartesian_product_configs))
| [
"raghuspacerajan@gmail.com"
] | raghuspacerajan@gmail.com |
e666f5d439aad85d1179dc962ed7e1227dbacdd0 | 0a8d4cea1b9e94c76853198d1c2d557f2e1753c1 | /codes/mahalanobis.py | 526d179fcf7a13acdef6a180f69596552eaab2b3 | [] | no_license | dr-alok-tiwari/BRATS | 5dfb43cce3b26e0814ffd21a108178359cf75e1e | a3b5050efb05b019e3cc8c65de0391cbfcd08d3a | refs/heads/master | 2023-08-08T14:00:09.795807 | 2016-02-10T09:41:22 | 2016-02-10T09:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,560 | py | import scipy.ndimage.morphology as morphologicalOperations
from scipy.ndimage.filters import gaussian_filter
from mha import *
from mha2 import *
import numpy as np
import os
import nibabel as nib
from scipy import ndimage
# Python 2 post-processing script for BRATS 2015 tumour segmentations.
# Parallel lists below are indexed by case: one entry per patient folder.
path = '/home/bmi/varghese/BRATS_FINAL_2015/'
maskedImages = []
folders = []
rawPrediction = []
Flair = []
T1 = []
T1c = []
T2 = []
posteriors = []
print '==> Loading data..'
# Walk the dataset and load, per case folder, the combined mask, the
# ensemble prediction, each MR modality and the posterior mask path.
# NOTE(review): bare except falls back from the `new` to the `new2` mha
# reader for either file format; it also hides any other read error.
for subdir, dirs, files in os.walk(path):
	if 'x116' in subdir:
		for file1 in files:
			if 'combined' in file1 and '.mha' in file1:
				try:
					image = new(subdir + '/' + file1)
				except:
					image = new2(subdir + '/' + file1)
				maskedImages.append(image)
				folders.append(subdir+'/')
			if 'Ensemble' in file1:
				image = nib.load(subdir + '/' + file1)
				rawPrediction.append(image)
			if 'Flair' in file1:
				try:
					image = new(subdir + '/' + file1)
				except:
					image = new2(subdir + '/' + file1)
				Flair.append(image)
			if 'T1.' in file1:
				try:
					image = new(subdir + '/' + file1)
				except:
					image = new2(subdir + '/' + file1)
				T1.append(image)
			if 'T1c' in file1:
				try:
					image = new(subdir + '/' + file1)
				except:
					image = new2(subdir + '/' + file1)
				T1c.append(image)
			if 'T2' in file1:
				try:
					image = new(subdir + '/' + file1)
				except:
					image = new2(subdir + '/' + file1)
				T2.append(image)
			if file1 == 'posteriors_mask.nii':
				posteriors.append(subdir + '/' + file1)
# Per case: binarise the prediction outside the posterior mask, label its
# connected components, score each blob against whole-brain statistics,
# and drop small / brain-like blobs from the final prediction.
for i in xrange(len(maskedImages)):
	print 'Iteration: ', i+1
	print 'Folder: ', folders[i]
	# if i == 1:
	#  break
	rawData = rawPrediction[i].get_data()
	posterior = nib.load(posteriors[i])
	posterior = posterior.get_data()
	data = np.copy(rawData)
	data[np.where(posterior!=0)] = 0
	data[np.where(data > 0)] = 1
	# Save the binarised prediction mask (PM) in LPS-flipped affine.
	affine = [[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]
	img = nib.Nifti1Image(data, affine)
	img.set_data_dtype(np.int32)
	nib.save(img,folders[i] +'PM.nii')
	###################################################
	# Whole-brain (nonzero-voxel) mean/std per modality.
	flair_data = Flair[i].data
	T1_data = T1[i].data
	T2_data = T2[i].data
	T1c_data = T1c[i].data
	brainMean_flair = np.mean(flair_data[flair_data > 0])
	brainMean_T1 = np.mean(T1_data[T1_data > 0])
	brainMean_T2 = np.mean(T2_data[T2_data > 0])
	brainMean_T1c = np.mean(T1c_data[T1c_data > 0])
	brainStd_flair = np.std(flair_data[flair_data > 0])
	brainStd_T1 = np.std(T1_data[T1_data > 0])
	brainStd_T2 = np.std(T2_data[T2_data > 0])
	brainStd_T1c = np.std(T1c_data[T1c_data > 0])
	###################################################
	gen = nib.load(folders[i] + 'PM.nii')
	gen = gen.get_data()
	mask = gen > 0
	c,n = ndimage.label(mask)
	connectedComponents = np.copy(c)
	sizes = ndimage.sum(mask, c, range(n+1))
	flair_mean = []
	flair_std = []
	T1_mean = []
	T1_std = []
	T2_mean = []
	T2_std = []
	T1c_mean = []
	T1c_std = []
	distance = []
	std = []
	mahalanobis_distance = []
	eps = 1e-07
	print '==> Finding distance..'
	# Per connected component (label 0 is background but is scored too):
	for j in xrange(n+1):
		flair_mean.append(np.mean(flair_data[np.where(c==j)]))
		T1_mean.append(np.mean(T1_data[np.where(c==j)]))
		T2_mean.append(np.mean(T2_data[np.where(c==j)]))
		T1c_mean.append(np.mean(T1c_data[np.where(c==j)]))
		flair_std.append(np.std(flair_data[np.where(c==j)]))
		T1_std.append(np.std(T1_data[np.where(c==j)]))
		T2_std.append(np.std(T2_data[np.where(c==j)]))
		T1c_std.append(np.std(T1c_data[np.where(c==j)]))
		# Guard against zero std (single-intensity blobs).
		if flair_std[j] == 0:
			flair_std[j] = 1
		if T1_std[j] == 0:
			T1_std[j] = 1
		if T2_std[j] == 0:
			T2_std[j] = 1
		if T1c_std[j] == 0:
			T1c_std[j] = 1
		# 4x4 covariance of the blob's modality intensities.
		cov = [[0 for x in range(4)] for x in range(4)]
		blob_mean = [flair_mean[j], T1_mean[j], T2_mean[j], T1c_mean[j]]
		blob = [flair_data[np.where(c==j)], T1_data[np.where(c==j)], T2_data[np.where(c==j)], T1c_data[np.where(c==j)]]
		for u in xrange(4):
			for v in xrange(4):
				cov[u][v] = np.mean((blob[u] - blob_mean[u])*(blob[v] - blob_mean[v]))
		try:
			inv_cov = np.linalg.inv(cov)
		except:
			# Singular covariance: fall back to 0, zeroing the distance.
			inv_cov = 0
		blob_mean = np.asarray(blob_mean)
		brain_mean = np.asarray([brainMean_flair, brainMean_T1, brainMean_T2, brainMean_T1c])
		mahalanobis_distance.append(np.dot(np.dot(blob_mean - brain_mean, inv_cov), blob_mean - brain_mean))
		# flair_hist = np.histogram(flair_data[np.where(c==j)])
		# T1_hist = np.histogram(T1_data[np.where(c==j)])
		# T1c_hist = np.histogram(T1c_data[np.where(c==j)])
		# T2_hist = np.histogram(T2_data[np.where(c==j)])
		# p1 = flair_hist[0] / float(np.sum(flair_hist[0]))
		# p2 = T1_hist[0] / float(np.sum(T1_hist[0]))
		# p3 = T2_hist[0] / float(np.sum(T2_hist[0]))
		# p4 = T1c_hist[0] / float(np.sum(T1c_hist[0]))
		# entropy.append( np.sum(-p1*np.log2(p1 + eps)) + np.sum(-p2*np.log2(p2+eps)) + np.sum(-p3*np.log2(p3+eps)) + np.sum(-p4*np.log2(p4+eps)) )
		# Plain squared Euclidean distance of blob means to brain means.
		distance.append( (flair_mean[j] - brainMean_flair)*(flair_mean[j] - brainMean_flair)
						+(T1_mean[j] - brainMean_T1)*(T1_mean[j] - brainMean_T1)
						+(T2_mean[j] - brainMean_T2)*(T2_mean[j] - brainMean_T2)
						+(T1c_mean[j] - brainMean_T1c)*(T1c_mean[j] - brainMean_T1c))
	# Drop blobs smaller than 10% of the largest, or closer than average
	# to the whole-brain intensity profile.
	mask_size = sizes < (max(sizes) * 0.1)
	distance_mask = distance < np.mean(distance)
	remove_voxels = mask_size[c]
	remove_distance_voxels = distance_mask[c]
	c[remove_distance_voxels] = 0
	c[remove_voxels] = 0
	c[np.where(c!=0)]=1
	# data = data.astype(int)
	# for j in xrange(data.shape[2]):
	#  data[:,:,j] = morphologicalOperations.binary_fill_holes(data[:,:,j], structure = np.ones((2,2))).astype(int)
	# gaussian_kernel = 0.7
	# data = gaussian_filter(data,gaussian_kernel)
	# data[np.where(data!=0)] = 1
	# data = data.astype(float)
	# data[np.where(c==0)] = 0
	# rawData = rawPrediction[i].get_data()
	data[np.where(c==0)] = 0
	# If filtering removed everything, redo with a size-only criterion
	# (keep blobs >= 60% of the largest) so the case is not left empty.
	if len(np.unique(data)) == 1:
		print '==> Blank Image'
		print '==> Unique labels: ', len(np.unique(rawData))
		rawData = rawPrediction[i].get_data()
		data = np.copy(rawData)
		data[np.where(posterior!=0)] = 0
		data[np.where(data > 0)] = 1
		mask = data > 0
		c,n = ndimage.label(mask)
		connectedComponents = np.copy(c)
		sizes = ndimage.sum(mask, c, range(n+1))
		mask_size = sizes < (max(sizes) * 0.6)
		remove_voxels = mask_size[c]
		c[remove_voxels] = 0
		c[np.where(c!=0)]=1
		data[np.where(c==0)] = 0
	# Zero the multi-class prediction wherever the kept mask is empty.
	rawData[np.where(data == 0)] = 0
	# affine = [[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]
	# img = nib.Nifti1Image(rawData, affine)
	# img.set_data_dtype(np.int32)
	# nib.save(img,folders[i] +'Filled.nii')
	# img = nib.Nifti1Image(connectedComponents, affine)
	# img.set_data_dtype(np.int32)
	# nib.save(img,folders[i] +'connectedComponents.nii')
"subramaniam1603@gmail.com"
] | subramaniam1603@gmail.com |
2b015e8f4ac0bc5f7e04f4e1166f7c266513a007 | d86a711660862d3da99de349e853c088b65142f3 | /evenodd.py | 6eda91ceff0cf51b9cdaeaa18ddad54c753ac054 | [] | no_license | Haasini7/Haasini | c881876818d3a8002063d2660e6e9ecc867ac18b | eb75ed7ce4c844db663a0f3a0c32f427de61a545 | refs/heads/master | 2021-07-19T22:05:18.016368 | 2020-08-03T13:42:35 | 2020-08-03T13:42:35 | 200,616,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | num=int(input(" "))
if(num%2==0):
print("Even")
else:
print("Odd")
| [
"noreply@github.com"
] | Haasini7.noreply@github.com |
26f393aa985fd35b0a3d045f6e99ce386528e53c | aa142cf0dcd3e3564f0bd870d3efaa587d3bc6b5 | /tests/test_database_writer.py | 74aa0208e136cf1c3ad13449dd10bb4cbf96a9d1 | [
"Apache-2.0"
] | permissive | nikolabogetic/website-metrics | c22277a3e72a7169346257517acabad834fa703e | c6d96acf1e99e0d3cc6747c07ee47b230d6b8ede | refs/heads/main | 2023-01-14T01:58:15.546631 | 2020-11-24T01:21:38 | 2020-11-24T01:21:38 | 314,949,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from utils.postgres import init_postgres, create_metrics_table, insert_data
from config import conf
def test_database_writer():
    """Round-trip check: insert one metrics row, read back the newest row.

    Writes a known record via ``insert_data`` and asserts that the most
    recently timestamped row in ``website_metrics`` matches it field by
    field (url, response_time, status_code, regex_found).
    """
    conn = init_postgres(conf)
    data = {
        'url': 'https://www.wikipedia.org/',
        'response_time': 0.10382,
        'status_code': 200,
        'regex_found': True
    }
    try:
        create_metrics_table(conn)
        insert_data(conn, data)
        with conn:
            with conn.cursor() as curs:
                curs.execute(
                    """
                    SELECT * FROM website_metrics ORDER BY timestamp DESC LIMIT 1;
                    """
                )
                row = curs.fetchone()
        # Column 0 is the timestamp; compare the remaining fields.
        assert row[1] == data.get('url')
        assert row[2] == data.get('response_time')
        assert row[3] == data.get('status_code')
        assert row[4] == data.get('regex_found')
    finally:
        # Close the connection even when an assertion fails, so repeated
        # test runs do not leak Postgres connections.
        conn.close()
"nikolabogetic@gmail.com"
] | nikolabogetic@gmail.com |
a388b757cc41ce2d8e3f7d875781184476430064 | cf34c709463c9cf59779b9b16a6aa093756f5e41 | /forms.py | b54a58ef8eea1af58f3974003ea0346741d06cf6 | [] | no_license | gasparrobi/school-system | 05fa27a6e7785da9c9c22a5611a76b3734b03539 | a1305ed5b9b533df47b8eb862f8aac59a05f68a8 | refs/heads/master | 2021-01-22T06:27:37.613505 | 2017-02-12T22:27:52 | 2017-02-12T22:27:52 | 81,761,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | from flask_wtf import Form
from wtforms import StringField, PasswordField, SelectField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
Length, EqualTo)
try:
from models import User, Applicant, City
except Exception:
from .models import User, Applicant, City
# from models import User
def username_exists(form, field):
    """WTForms validator: reject usernames already present in User."""
    if User.select().where(User.login == field.data).exists():
        raise ValidationError("User already exists")
def email_exists(form, field):
    """WTForms validator: reject emails already present in Applicant."""
    if Applicant.select().where(Applicant.email == field.data).exists():
        raise ValidationError("Email already exists")
def city_exist(form, field):
    """WTForms validator: fail when the (case-insensitively matched) city
    is not present in the City table."""
    # `not ...exists()` instead of `...exists() == False`: comparing to a
    # boolean literal is un-idiomatic and fragile.
    if not City.select().where(City.city_name == str(field.data).lower()).exists():
        raise ValidationError("There is no such city")
class AddInterviewSlot(Form):
    """Form for creating an interview slot: free-text start and end dates."""
    start = StringField(
        "Starting Date"
    )
    end = StringField(
        "End Date"
    )
class RegisterForm(Form):
    """Sign-up form: credentials plus basic personal details.

    Field validators enforce unique username/email, a known city, and
    matching passwords. The copy-pasted "Username should be..." message on
    first_name/last_name/city has been corrected per field.
    """
    login = StringField(
        "Username",
        validators=[
            DataRequired(),
            Regexp(r'^[a-zA-Z0-9_]+$',
                   message=("Username should be one word, letters, "
                            "numbers, and underscores only.")
                   ),
            username_exists
        ])
    first_name = StringField(
        "First Name",
        validators=[
            DataRequired(),
            Regexp(r'^[a-zA-Z0-9_]+$',
                   message=("First name should be one word, letters, "
                            "numbers, and underscores only.")
                   )
        ])
    last_name = StringField(
        "Last name",
        validators=[
            DataRequired(),
            Regexp(r'^[a-zA-Z0-9_]+$',
                   message=("Last name should be one word, letters, "
                            "numbers, and underscores only.")
                   )
        ])
    email = StringField(
        "Email",
        validators=[
            DataRequired(),
            Email(),
            email_exists
        ])
    # NOTE(review): this pattern rejects multi-word city names ("New York");
    # confirm that is intended before relaxing it.
    city = StringField(
        "City",
        validators=[
            DataRequired(),
            Regexp(r'^[a-zA-Z0-9_]+$',
                   message=("City should be one word, letters, "
                            "numbers, and underscores only.")
                   ),
            city_exist
        ])
    password = PasswordField(
        "Password",
        validators=[
            DataRequired(),
            Length(min=2),
            EqualTo("password2", message="Passwords must match")])
    password2 = PasswordField(
        "Confirm Password",
        validators=[DataRequired()])
class LoginForm(Form):
    """Simple login form: username and password, both required."""
    username = StringField("Username", validators=[DataRequired()])
    password = PasswordField("Password", validators=[DataRequired()])
class FilterApplicantForm(Form):
    """Applicant search form: a query string plus the field to filter by."""
    name = StringField("name", validators=[DataRequired()])
    # Choice values are the Applicant attribute names used for filtering.
    options = SelectField("options", choices=[
        ("first_name","Filter by First Name"),
        ("last_name","Filter by Last Name"),
        ("school","Filter by School"),
        ("status","Filter by Status"),
    ])
| [
"gasparrobi@gmail.com"
] | gasparrobi@gmail.com |
6633aaf841c66798b7f26e53ce98722ba6c11f37 | 086c199b617f304f5edcbb3481a82119b9cec99d | /build/turtlebot3_simulations/turtlebot3_gazebo/catkin_generated/pkg.installspace.context.pc.py | 71b5c21b7cabd6268333c28e49d1b4f0f942a34c | [] | no_license | canveo/catkin_ws | 59634bee66aa9f5ed593acd85e6bd4e2e2eaab01 | 3c931df576529ad7e1e48dc0e69ba131914d18dc | refs/heads/master | 2023-02-28T13:23:43.653964 | 2021-02-03T23:00:45 | 2021-02-03T23:00:45 | 325,388,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;nav_msgs;tf;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_gazebo"
PROJECT_SPACE_DIR = "/home/canveo/catkin_ws/install"
PROJECT_VERSION = "1.3.0"
| [
"canveo@gmail.com"
] | canveo@gmail.com |
e48aff8cf7d5fc9445fa3a8eec5f5ca279050237 | 83bac6ab0e171a540a463476d830c764b7578dec | /DRL_prune_local/DRL_prune_cifarnet/utils/operator.py | f16861f92317130685a076dd46b6d0f3e4103219 | [] | no_license | yangzhaogit/FYP_FederatedLearningStudy | e5a053459fd4b5a9129c0c8a582ebaa55d1af33a | a3c9f9f9979dfd1afbf2a4065bd94c67840cd0aa | refs/heads/master | 2023-04-14T14:57:45.085071 | 2021-04-18T06:23:37 | 2021-04-18T06:23:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | import torch
def convert_01_to_n1p1(x):
    '''
    Map a {0, 1} tensor onto {-1, 1} via out = 2*x - 1.
    `x` must be a float32 or int64 (long) tensor; the dtype is preserved.
    '''
    assert x.dtype in (torch.float32, torch.long)
    if x.dtype == torch.float32:
        return x * 2. - 1.
    # long branch: stays in integer arithmetic
    return x * 2 - 1
def convert_n1p1_to_01(x):
    '''
    Map a {-1, 1} tensor onto {0, 1} via out = (x + 1) / 2.
    `x` must be a float32 or int64 (long) tensor; the dtype is preserved.
    '''
    assert x.dtype == torch.float32 or x.dtype == torch.long
    if x.dtype == torch.float32:
        x = (x + 1.) / 2.
    elif x.dtype == torch.long:
        # Use floor division to keep the long dtype: plain `/` on integer
        # tensors is true division in modern PyTorch and would silently
        # promote the result to float32. For inputs in {-1, 1} the floor
        # division is exact ({0, 2} // 2 == {0, 1}).
        x = (x + 1) // 2
    return x
def convert_n10p1_to_01(x, zero_to_p1):
    '''
    Map a {-1, 0, 1} float tensor onto {0, 1}.
    Typically applied to the output of torch.sign(). The boolean
    `zero_to_p1` decides where zeros land: True sends them to 1,
    False sends them to 0.
    '''
    assert x.dtype == torch.float32
    assert zero_to_p1 in (True, False)
    # Nudge the zeros off the origin so torch.sign() collapses them to the
    # requested side, then reuse the {-1, 1} -> {0, 1} mapping.
    shift = 0.1 if zero_to_p1 else -0.1
    signed = torch.sign(x + shift)
    return convert_n1p1_to_01(signed)
def convert_0lp1_to_01(x):
    '''
    Map a zeros-or-positive-number float tensor onto {0, 1}:
    0 stays 0, any value >= 1 becomes 1.
    '''
    assert x.dtype == torch.float32
    # Shifting by -0.1 makes torch.sign() send 0 -> -1 and >=1 -> +1,
    # after which the {-1, 1} -> {0, 1} mapping finishes the job.
    return convert_n1p1_to_01(torch.sign(x - 0.1))
| [
"aloy_czw@hotmail.com"
] | aloy_czw@hotmail.com |
fafa22e9f1df1fccc783dd15e51ccf5964a48f0e | 406bf29e4cfc59bc2cc6005813e4a63065c642bb | /jhyouk_universal_filter/13_SV/Delly_annotation_scripts/04.find_BP.py | d0fd9fd9b0bc1c2085ea05b43aee84e05620e0cd | [] | no_license | leehs96/Emergency_backup | 630c5efbe17f3175851ee80ef6dde72f5f613fe7 | 9bc30b7a5eda03ac207b16f83d93559a5637ac19 | refs/heads/main | 2023-02-10T11:43:40.172101 | 2021-01-05T06:10:33 | 2021-01-05T06:10:33 | 326,903,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,499 | py | #Arg1: delly output # must be sorted. (del 3-5, dup 5-3, inv pos1<pos2, tra chr1<chr2; 1...22,X,Y,MT)
#Arg2: Tumor bam
#Arg3: Normal bam
import sys, pysam
import collections
from scipy.stats import ttest_ind
print('### Find BP')
print(sys.argv[1])
dl_file=open(sys.argv[1]) # delly output
dl_line=dl_file.readline().strip()
tbam_file=pysam.AlignmentFile(sys.argv[2],'rb') # Cancer bam
nbam_file=pysam.AlignmentFile(sys.argv[3],'rb') #Normal bam
out_file=open(sys.argv[1]+'.BPinfo','w')
fors=700; bacs=100 #cut-off check! bacs must be smaller than fors
def make_cigartuple(cigarstring):
cg_num=len(cigarstring)
lt=''
cigar_tuple_list=[]
for n in range(0,cg_num):
try: lt = lt+str(int(cigarstring[n]))
except:
if cigarstring[n]=='M': cigar_tuple_list.append((0,int(lt)))
elif cigarstring[n]=='I': cigar_tuple_list.append((1,int(lt)))
elif cigarstring[n]=='D': cigar_tuple_list.append((2,int(lt)))
elif cigarstring[n]=='N': cigar_tuple_list.append((3,int(lt)))
elif cigarstring[n]=='S': cigar_tuple_list.append((4,int(lt)))
elif cigarstring[n]=='H': cigar_tuple_list.append((5,int(lt)))
elif cigarstring[n]=='P': cigar_tuple_list.append((6,int(lt)))
elif cigarstring[n]=='=': cigar_tuple_list.append((7,int(lt)))
elif cigarstring[n]=='X': cigar_tuple_list.append((8,int(lt)))
elif cigarstring[n]=='B': cigar_tuple_list.append((9,int(lt)))
lt=''
return cigar_tuple_list
def find_M_range(cigar):
m_start=0;m_end=0 # m_start: just before the start, m_end= the exact end
cigar_list=make_cigartuple(cigar)
m_count=0
for (t, n) in cigar_list:
if t == 0:
m_count +=1
if m_count ==1:
for (t,n) in cigar_list:
if t!=0 and t!=1:
m_start+=n
elif t==0:
m_end=m_start+n
break
elif m_count > 1:
find_m=0;m_length=0
for (t,n) in cigar_list:
if find_m==0 and t!=0 and t!=1:
m_start+=n
elif find_m >0 and t!=0 and t!=1:
m_length+=n
elif t==0:
find_m+=1
if find_m < m_count:
m_length+=n
elif find_m == m_count:
m_end=m_start+m_length
break
return([m_start, m_end])
def change_chr_to_int(chr1):
if chr1[0:2]=='GL':
chr_n=25
elif chr1[0:3]=='NC_':
chr_n=26
elif chr1=='hs37d5':
chr_n = 27
elif chr1[0:2]=='JH':
chr_n=28
else:
chr_n=int(((chr1.replace('X','23')).replace('Y','24')).replace('MT','25'))
return(chr_n)
def find_SA_reads(chr1,start1, end1, chr2, target_start2, target_end2, bam_file):
saINFO=[]
reverse_list=['1','3','5','7','9','b','d','f']
start1=max(start1,1)
end1=max(end1,1)
for read in bam_file.fetch(chr1,start1-1,end1):
if read.cigartuples == None or read.is_secondary == True or read.is_supplementary == True or read.is_duplicate == True:
continue
if read.has_tag('SA'):
chr_n=change_chr_to_int(read.reference_name)
cigar_info=read.cigarstring
read_size=read.infer_read_length()
SA_list=str(read.get_tag('SA')).split(';')[:-1]
for SA_indi in SA_list:
info_ori=''
SA_chr=SA_indi.split(',')[0]
SA_chr_n=change_chr_to_int(SA_chr)
SA_pos=int(SA_indi.split(',')[1])
SA_strand=SA_indi.split(',')[2]
SA_cigar=SA_indi.split(',')[3]
SA_MQ=SA_indi.split(',')[4]
if SA_chr==chr2 and int(SA_pos)>=target_start2 and int(SA_pos)<=target_end2: #check
pri_M_range=find_M_range(cigar_info)
SA_M_range=find_M_range(SA_cigar)
pri_len=pri_M_range[1]-pri_M_range[0]
SA_len=SA_M_range[1]-SA_M_range[0]
if (hex(int(read.flag))[-2] in reverse_list and SA_strand=='-') or (hex(int(read.flag))[-2] not in reverse_list and SA_strand=='+'): #same_direction
if pri_M_range[0] <= SA_M_range[0] and pri_M_range[1] >= SA_M_range[1]: continue
elif SA_M_range[0] <= pri_M_range[0] and SA_M_range[1] >= pri_M_range[1]: continue
if pri_M_range[1] > SA_M_range[1]:
MHLEN=SA_M_range[1]-pri_M_range[0]
bp1=read.reference_start+1
bp2=SA_pos+SA_len-1
terminal1="5";terminal2="3"
if read.reference_name!=SA_chr:
rearr="TRA"
if chr_n < SA_chr_n: info_ori='rs'
elif chr_n > SA_chr_n: info_ori='sr'
else:
if bp1<bp2: rearr="DUP"; info_ori='rs'
elif bp1>bp2: rearr="DEL"; info_ori='sr'
elif SA_M_range[1] > pri_M_range[1]:
MHLEN=pri_M_range[1]-SA_M_range[0]
bp1=read.reference_start+pri_len
bp2=SA_pos
terminal1="3"; terminal2="5"
if read.reference_name!=SA_chr:
rearr="TRA"
if chr_n < SA_chr_n: info_ori='rs'
elif chr_n > SA_chr_n: info_ori='sr'
else:
if bp1<bp2: rearr="DEL"; info_ori='rs'
elif bp1>bp2: rearr="DUP"; info_ori='sr'
else:
'blank'
else: # opposite direction
rvs_pri_M_range=[read_size-pri_M_range[1], read_size-pri_M_range[0]]
if rvs_pri_M_range[0] <= SA_M_range[0] and rvs_pri_M_range[1] >= SA_M_range[1]: continue
elif SA_M_range[0] <= rvs_pri_M_range[0] and SA_M_range[1] >= rvs_pri_M_range[1]: continue
if rvs_pri_M_range[1] > SA_M_range[1]:
MHLEN=SA_M_range[1]-rvs_pri_M_range[0]
bp1=read.reference_start+pri_len
bp2=SA_pos+SA_len-1
terminal1="3";terminal2="3"
if read.reference_name!=SA_chr:
rearr="TRA"
if chr_n < SA_chr_n: info_ori='rs'
elif chr_n > SA_chr_n: info_ori='sr'
else:
rearr="INV"
if bp1 < bp2: info_ori='rs'
elif bp1 > bp2: info_ori='sr'
elif SA_M_range[1] > rvs_pri_M_range[1]:
MHLEN=rvs_pri_M_range[1]-SA_M_range[0]
bp1=read.reference_start+1
bp2=SA_pos
terminal1="5";terminal2="5"
if read.reference_name!=SA_chr:
rearr="TRA"
if chr_n < SA_chr_n: info_ori='rs'
elif chr_n > SA_chr_n: info_ori='sr'
else:
rearr="INV"
if bp1 < bp2: info_ori="rs"
elif bp1 > bp2: info_ori="sr"
else:
'blank'
else:
'blank'
if info_ori=='rs':
rs_info=read.reference_name+':'+str(bp1)+'_'+SA_chr+':'+str(bp2)+'_'+str(MHLEN)+'_'+rearr+'_'+terminal1+'to'+terminal2
saINFO.append(rs_info)
elif info_ori=='sr':
sr_info=SA_chr+':'+str(bp2)+'_'+read.reference_name+':'+str(bp1)+'_'+str(MHLEN)+'_'+rearr+'_'+terminal2+'to'+terminal1
saINFO.append(sr_info)
else:
'blank'
return(saINFO)
while dl_line:
if dl_line[0:2]=='##':
out_file.write(dl_line+'\n')
elif dl_line[0:4]=='#CHR':
out_file.write(dl_line+'\ttBPinfo\tnBPinfo\n')
elif dl_line[0]=='#':
out_file.write(dl_line+'\n')
else:
dl_indi=dl_line.split('\t')
chr1=dl_indi[0];pos1=int(dl_indi[1]);chr2=(dl_indi[7].split('CHR2=')[1]).split(';')[0];pos2=int((dl_indi[7].split('END=')[1]).split(';')[0])
ct1=(dl_indi[7].split('CT=')[1]).split(';')[0][0]
ct2=(dl_indi[7].split('CT=')[1]).split(';')[0][-1]
sv_type=dl_indi[2][0:3]
dist=pos2-pos1
if sv_type!='TRA' and dist < 0:
print('Sorting error')
print(dl_line)
sys.exit(1)
if sv_type == 'INV' and ct1=='3' and ct2=='3':
start1=pos1-bacs
end1=pos1+fors
start2=pos2-bacs
end2=pos2+fors
if dist >= bacs and dist < fors:
end1=pos1+dist-bacs
elif dist < bacs:
end1=pos1+dist/2
start2=pos2-dist/2
elif sv_type == 'INV' and ct1 == '5' and ct2=='5':
start1=pos1-fors
end1=pos1+bacs
start2=pos2-fors
end2=pos2+bacs
if dist >= bacs and dist < fors:
start2=pos2-dist+bacs
elif dist < bacs:
end1=pos1+dist/2
start2=pos2-dist/2
elif sv_type == 'DEL':
start1=pos1-bacs
end1=pos1+fors
start2=pos2-fors
end2=pos2+bacs
if dist < fors:
end1=pos1+dist/2
start2=pos2-dist/2
elif sv_type == 'DUP':
start1=pos1-fors
end1=pos1+bacs
start2=pos2-bacs
end2=pos2+fors
if dist < bacs:
end1=pos1+dist/2
start2=pos2-dist/2
elif sv_type == 'INS':
t_info='NA'; n_info='NA'
out_file.write(dl_line+'\t'+t_info+'\t'+n_info+'\n')
dl_line=dl_file.readline().strip()
continue
elif sv_type == 'TRA':
if ct1=='5':
start1=pos1-fors
end1=pos1+bacs
elif ct1=='3':
start1=pos1-bacs
end1=pos1+fors
if ct2=='5':
start2=pos2-fors
end2=pos2+bacs
elif ct2=='3':
start2=pos2-bacs
end2=pos2+fors
if sv_type == 'DEL' or sv_type=='DUP':
target_start1=start1
target_end1=end1
target_start2=start2
target_end2=end2
elif sv_type=='INV' or sv_type=='TRA':
if ct1=='5':
target_start1=pos1-fors
target_end1=pos1+bacs
elif ct1=='3':
target_start1=pos1-bacs
target_end1=pos1+fors
if ct2=='5':
target_start2=pos2-fors
target_end2=pos2+bacs
elif ct2=='3':
target_start2=pos2-bacs
target_end2=pos2+fors
t_list1=find_SA_reads(chr1, start1, end1, chr2, target_start2, target_end2,tbam_file)
t_list2=find_SA_reads(chr2, start2, end2, chr1, target_start1, target_end1,tbam_file)
t_list=t_list1+t_list2
if len(t_list) > 0:
t_dic=collections.Counter(t_list)
t_new_list=[]
for info in t_dic.keys():
t_new_list.append(info+'('+str(t_dic[info])+')')
t_info=','.join(t_new_list)
else:
t_info='NA'
n_list1=find_SA_reads(chr1, start1, end1, chr2, target_start2, target_end2, nbam_file)
n_list2=find_SA_reads(chr2, start2, end2, chr1, target_start1, target_end1, nbam_file)
n_list=n_list1+n_list2
if len(n_list) > 0:
n_dic=collections.Counter(n_list)
n_new_list=[]
for info in n_dic.keys():
n_new_list.append(info+'('+str(n_dic[info])+')')
n_info=','.join(n_new_list)
else:
n_info='NA'
out_file.write(dl_line+'\t'+t_info+'\t'+n_info+'\n')
dl_line=dl_file.readline().strip()
| [
"hs960201@gmail.com"
] | hs960201@gmail.com |
17e99ae06203b967169774487ad0ca61533ead81 | ab3e6cee73c76e1bda1ac8d4e9cb82286de757fe | /190527_Day1/reverse_content.py | 30ed2dade96ed6a24b05d28198f6f26c4f91c408 | [] | no_license | arara90/TIL_django | cc961535feba95e55d531c90a5d274cb5ec5f02e | 5aa5fcb839dceb0abc9c5b09fdcb5a478dca34f4 | refs/heads/master | 2020-05-27T20:15:46.663200 | 2020-03-21T06:13:40 | 2020-03-21T06:13:40 | 188,775,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #1. read file
# 1. read all lines of the file
with open('mulcam.txt', 'r') as f:
    lines = f.readlines()
    print(lines)

# 2. reverse the line order
lines.reverse()

# Normalize line endings: if the file's original last line had no trailing
# newline, it would otherwise fuse with the following line after reversal.
lines = [line if line.endswith('\n') else line + '\n' for line in lines]

# 3. write the reversed lines back to the same file
with open('mulcam.txt', 'w') as f:
    f.writelines(lines)
| [
"arara90@hotmail.com"
] | arara90@hotmail.com |
fda745354b7753ddd2c109832984b26e2e572881 | f9d3cf76abd6140abcc6c37b63f5ee742bd0f514 | /config.py | ae1411cb5892b933534edc779b5103369f81d1dc | [] | no_license | mkenyakevin/flask-MVC | 4b3bbe0d77a6cfb85185625fd8099bc15001d16a | 84a98cc8ba50b3758d87a467438e39b6c299f8ff | refs/heads/master | 2022-12-14T22:58:44.328773 | 2019-02-11T10:52:40 | 2019-02-11T10:52:40 | 167,151,365 | 2 | 0 | null | 2019-02-05T09:39:20 | 2019-01-23T09:02:38 | Python | UTF-8 | Python | false | false | 728 | py | import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
class DevelopmentConfig(Config):
DEBUG = True
ORATOR_DATABASES = {
'default': 'mysql',
'mysql': {
'driver': 'mysql',
'host': 'localhost',
'database': 'flask_skeleton_2',
'user': 'root',
'password': 'munangwe22',
'prefix': ''
}
}
SECRET_KEY = os.environ.get('SECRET_KEY') or 'bmlnZ2FzaW5wYXJpcw=='
class TestingConfig(Config):
    """Configuration for running the automated test suite."""
    # Flask's config loader (`app.config.from_object`) only picks up
    # UPPERCASE attributes, so the previous `Testing = True` was silently
    # ignored and testing mode was never actually enabled.
    TESTING = True
    # Kept for backward compatibility with any code that read the old name.
    Testing = True
class ProductionConfig(Config):
    """Production configuration (currently inherits everything from Config)."""
    pass
# Registry mapping an environment name (e.g. the FLASK_CONFIG value) to its
# configuration class; 'default' is used when no environment is specified.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| [
"kevinmkenya2@gmail.com"
] | kevinmkenya2@gmail.com |
c156856679b73744598416c2c230ac998017c6fb | 762742b3c5cb5706e93e12dbdc3f8c46fc65f0db | /Tests/scripts/utils/get_modified_files_for_testing.py | 6333c7c2593b8c7ec585000499dfa924647c6328 | [
"MIT"
] | permissive | EmersonElectricCo/content | 018f95f7fe7de13819e093a3661587a18407e348 | 82c82bbee7d428f0b14991a88c67672e2c02f5af | refs/heads/master | 2021-06-17T04:54:22.938033 | 2021-05-06T16:39:59 | 2021-05-06T16:39:59 | 161,693,191 | 2 | 0 | MIT | 2018-12-18T15:16:49 | 2018-12-13T20:47:26 | Python | UTF-8 | Python | false | false | 8,790 | py | """
This class replaces the old get_modified_files_for_testing function in collect_tests_and_content_packs
"""
import glob
import os
from typing import Dict, Set, Optional
import demisto_sdk.commands.common.constants as constants
from demisto_sdk.commands.common.constants import FileType
from Tests.scripts.utils.collect_helpers import (
COMMON_YML_LIST,
is_pytest_file, checked_type, SECRETS_WHITE_LIST, LANDING_PAGE_SECTIONS_JSON_PATH,
)
from demisto_sdk.commands.common import tools
class ModifiedFiles:
    """Plain value object holding the result of 'get_modified_files_for_testing'."""

    def __init__(self,
                 modified_files: list,
                 modified_tests: list,
                 changed_common_files: list,
                 is_conf_json: bool,
                 sample_tests: list,
                 modified_metadata: set,
                 is_reputations_json: bool,
                 is_indicator_json: bool,
                 is_landing_page_sections_json: bool):
        """
        Store every collected category of changed files as an attribute.

        Args:
            modified_files: changed YMLs to test (integrations, scripts, playbooks)
            modified_tests: changed test playbooks
            changed_common_files: globally shared YMLs (e.g. CommonServerPython)
            is_conf_json: whether Tests/conf.json changed
            sample_tests: remaining files to test (infrastructure files)
            modified_metadata: pack names whose metadata files changed
            is_reputations_json: whether any reputation file changed
            is_indicator_json: whether any indicator file changed
            is_landing_page_sections_json: whether Tests/Marketplace/landingPage_sections.json changed
        """
        (self.modified_files,
         self.modified_tests,
         self.changed_common_files,
         self.is_conf_json,
         self.sample_tests,
         self.modified_metadata,
         self.is_reputations_json,
         self.is_indicator_json,
         self.is_landing_page_sections_json) = (modified_files,
                                                modified_tests,
                                                changed_common_files,
                                                is_conf_json,
                                                sample_tests,
                                                modified_metadata,
                                                is_reputations_json,
                                                is_indicator_json,
                                                is_landing_page_sections_json)
def resolve_type(file_path: str) -> Optional[FileType]:
    """Classify file paths that tools.find_type left unresolved.

    Recognizes conf.json, landingPage_sections.json, pack metadata files
    and the secrets whitelist; anything else yields None.

    Args:
        file_path: file path to classify

    Returns:
        FileType for the recognized special files, otherwise None.
    """
    if checked_type(file_path, [constants.CONF_PATH]):
        return FileType.CONF_JSON
    if checked_type(file_path, [LANDING_PAGE_SECTIONS_JSON_PATH]):
        return FileType.LANDING_PAGE_SECTIONS_JSON
    metadata_markers = (
        constants.PACKS_PACK_META_FILE_NAME,
        constants.PACKS_WHITELIST_FILE_NAME,
    )
    if any(marker in file_path for marker in metadata_markers):
        return FileType.METADATA
    if checked_type(file_path, [SECRETS_WHITE_LIST]):
        return FileType.WHITE_LIST
    return None
def remove_python_files(types_to_files: Dict[FileType, Set[str]]):
    """Replace PY files with their corresponding YML files where possible.

    For each non-pytest python file, looks for a sibling .yml in the same
    directory; if one exists it is filed under its detected type and the
    python file is dropped. Pytest files are always dropped. The mapping is
    mutated in place (and also returned).

    Args:
        types_to_files: Mapping of FileType: file_paths

    Returns:
        Filtered types_to_files
    """
    py_to_be_removed = set()
    for file_path in types_to_files.get(FileType.PYTHON_FILE, set()):
        if not is_pytest_file(file_path):
            yml_path = get_corresponding_yml_file(file_path)
            # There's a yml path
            if yml_path is not None:
                # NOTE: the fallback classifies by the *python* path, not the yml path.
                yml_type = tools.find_type(yml_path) or resolve_type(file_path)
                if yml_type is not None:
                    if yml_type in types_to_files:
                        types_to_files[yml_type].add(yml_path)
                    else:
                        types_to_files[yml_type] = {yml_path}
                    # yml classified successfully: the py file is redundant
                    py_to_be_removed.add(file_path)
                else:
                    # yml exists but could not be classified: still drop the py file
                    py_to_be_removed.add(file_path)
    # remove python files
    if py_to_be_removed:
        types_to_files[FileType.PYTHON_FILE] = types_to_files[FileType.PYTHON_FILE] - py_to_be_removed
    return types_to_files
def create_type_to_file(files_string: str) -> Dict[FileType, Set[str]]:
    """Classifies the files in the diff list (files_string) using tools.find_type

    Each line is expected to carry a git status column followed by the path
    (i.e. `git diff --name-status` output such as "M\\tpath" or
    "R100\\told\\tnew"). Only modified ("m"), added ("a") and renamed ("r*")
    files are kept; deletions and dot-prefixed paths (".circle", ".github",
    ".hooks") are ignored.

    Returns:
        A dict of {FileType: Set of files}
    """
    types_to_files: Dict[FileType, Set[str]] = dict()
    for line in files_string.split("\n"):
        if line:
            file_status, file_path = line.split(maxsplit=1)
            file_status = file_status.lower()
            # Get to right file_path on renamed: "Rxxx\told\tnew" -> keep "new"
            if file_status.startswith("r"):
                _, file_path = file_path.split(maxsplit=1)
                # NOTE(review): this second lower() is redundant — the status
                # was already lowercased above.
                file_status = file_status.lower()
            # ignoring deleted files.
            # also, ignore files in ".circle", ".github" and ".hooks" directories and .
            if file_path:
                if (
                    file_status in ("m", "a") or file_status.startswith("r")
                ) and not file_path.startswith("."):
                    file_type = tools.find_type(file_path) or resolve_type(file_path)
                    if file_type in types_to_files:
                        types_to_files[file_type].add(file_path)
                    elif file_type is not None:
                        types_to_files[file_type] = {file_path}
    # Fold python files into their sibling YMLs where possible.
    types_to_files = remove_python_files(types_to_files)
    return types_to_files
def remove_common_files(
        types_to_files: Dict[FileType, Set[str]], changed_common_files: Set[str]) -> Dict[FileType, Set[str]]:
    """Drop globally shared YMLs (e.g. CommonServerPython) from the SCRIPT bucket.

    Mutates `types_to_files` in place and also returns it.

    Args:
        types_to_files: Mapping of FileType: file_paths
        changed_common_files: common YML paths that were detected as changed

    Returns:
        types_to_files with the common files removed from FileType.SCRIPT
    """
    if changed_common_files:
        # .get() guards against a missing SCRIPT bucket instead of raising
        # KeyError when the caller passes a pre-filtered mapping.
        types_to_files[FileType.SCRIPT] = types_to_files.get(FileType.SCRIPT, set()) - changed_common_files
    return types_to_files
def get_modified_files_for_testing(git_diff: str) -> ModifiedFiles:
    """
    Gets git diff string and filters those files into tests:

    Args:
        git_diff: a git diff output
            NOTE(review): the parsing in create_type_to_file expects a status
            column per line (i.e. `--name-status` output), not `--name-only`
            as previously stated — confirm which flag callers use.
    Returns:
        ModifiedFiles instance
    """
    types_to_files: Dict[FileType, Set[str]] = create_type_to_file(git_diff)  # Mapping of the files FileType: file path
    # Checks if any common file exists in types_to_file, then strips those
    # common YMLs out of the SCRIPT bucket so they are handled separately.
    changed_common_files = get_common_files(types_to_files.get(FileType.SCRIPT, set()))
    types_to_files = remove_common_files(types_to_files, changed_common_files)
    # Sample tests are the remaining python files (no sibling yml was found)
    sample_tests = types_to_files.get(FileType.PYTHON_FILE, set())
    # Modified files = YMLs of integrations, scripts and playbooks
    modified_files: Set[str] = types_to_files.get(FileType.INTEGRATION, set()).union(
        types_to_files.get(FileType.SCRIPT, set()),
        types_to_files.get(FileType.BETA_INTEGRATION, set()),
        types_to_files.get(FileType.PLAYBOOK, set()))  # Modified YMLs for testing (Integrations, Scripts, Playbooks).
    # Metadata packs: collect the pack name of every changed metadata file
    modified_metadata: Set[str] = set()
    for file_path in types_to_files.get(FileType.METADATA, set()):
        modified_metadata.add(tools.get_pack_name(file_path))
    modified_tests: Set[str] = types_to_files.get(FileType.TEST_PLAYBOOK, set())  # Modified tests are test playbooks
    # Booleans: flag the presence of each special file kind in the diff
    is_conf_json = FileType.CONF_JSON in types_to_files
    is_landing_page_sections_json = FileType.LANDING_PAGE_SECTIONS_JSON in types_to_files
    is_reputations_json = FileType.REPUTATION in types_to_files
    is_indicator_json = FileType.INDICATOR_FIELD in types_to_files
    # Bundle everything into the result value object
    modified_files_instance = ModifiedFiles(
        list(modified_files),
        list(modified_tests),
        list(changed_common_files),
        is_conf_json,
        list(sample_tests),
        modified_metadata,
        is_reputations_json,
        is_indicator_json,
        is_landing_page_sections_json)
    return modified_files_instance
def get_corresponding_yml_file(file_path: str) -> Optional[str]:
    """Return the first .yml file in the same directory as *file_path*.

    Args:
        file_path: the path whose sibling yml is wanted

    Returns:
        Path of a sibling yml file if one exists, else None (sample test).
    """
    directory = os.path.dirname(file_path)
    matches = glob.glob(directory + "/*.yml")
    if matches:
        # Py files, Integration, script, playbook ymls
        return matches[0]
    return None
def get_common_files(paths_set: Set[str]) -> Set[str]:
    """Return the subset of *paths_set* that are globally shared YML files.

    Args:
        paths_set: candidate file paths

    Returns:
        The paths that also appear in COMMON_YML_LIST.
    """
    return paths_set & set(COMMON_YML_LIST)
| [
"noreply@github.com"
] | EmersonElectricCo.noreply@github.com |
0f1fe0d7b7427e43223e7a9c0c4c64f6116a45f0 | caf8cbcafd448a301997770165b323438d119f5e | /.history/spider/car_spider_20201124011404.py | ac405951f74aa797df77660aae074658fec4ce27 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | from selenium import webdriver
import pandas as pd
import time
"""[Initial Setting]
初期設定
"""
options = webdriver.ChromeOptions()
options.add_argument('--headeless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
df = pd.DataFrame(columns=['name', 'image', 'price', 'category', 'car'])
url = 'https://motorz-garage.com/parts/'
"""[CSS Selector Setting]
CSSセレクターの設定
"""
PAGER_NEXT = "li.select-page.arrow a[rel='next']"
POSTS = ".product-item-list__item"
PRODUCT_NAME = ".product-item-list__item-name"
IMAGE = ".product-item-list__item-image img"
PRICE = ".product-item-list__item-price"
CATEGORY = ".product-item-list__item-category"
CAR = ".product-item-list__item-car-name"
"""[Activate Section]
実行部分
"""
browser.get(url)
while True: #Continue until getting the last page.
if len(browser.find_elements_by_css_selector(PAGER_NEXT)) > 0:
print('Starting to get posts...')
posts = browser.find_elements_by_css_selector(POSTS)
print(len(posts))
for post in posts:
try:
name = post.find_element_by_css_selector(PRODUCT_NAME).text
print(name)
thumbnailURL = post.find_element_by_css_selector(IMAGE).get_attribute('src')
print(thumbnailURL)
price = post.find_element_by_css_selector(PRICE).text
print(price)
category = post.find_element_by_css_selector(CATEGORY).text
print(category)
car = post.find_element_by_css_selector(CAR).text
print(car)
se = pd.Series([name, thumbnailURL, price, category, car], ['name', 'image', 'price', 'category', 'car'])
df.append(se, ignore_index=True)
except Exception as e:
print(e)
break
btn = browser.find_element_by_css_selector(PAGER_NEXT).get_attribute('href')
print('next url:{}'.format(btn))
time.sleep(3)
browser.get(btn)
print('Moving to next page.')
else:
print('No pager exist anymore...')
break
print('Finished Crawling. Writing out to CSV file...')
df.to_csv('car_parts.csv')
print('Done')
| [
"kustomape@gmail.com"
] | kustomape@gmail.com |
91d8836964de0a1322b936fadd0cfcb2a1a054ba | fe4f9a9a97fb0a90caedaca546d565005c17fef2 | /gen/CSiBE-v2.1.1/size_result.py | 381f996891dd8f3a66b0e21a0431bb29b8dff109 | [
"BSD-3-Clause"
] | permissive | plctlab/csibe | 1e8e7fb8a02ffc47658c28cf2fdf7dca1da246ac | a3538ad78da05bd2e8939a1bbca501505fec6281 | refs/heads/master | 2023-05-02T12:14:01.835162 | 2021-05-27T03:30:51 | 2021-05-27T03:30:51 | 371,004,263 | 1 | 0 | BSD-3-Clause | 2021-05-27T03:30:52 | 2021-05-26T11:08:18 | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/env python
import os
import subprocess
if __name__ == "__main__":
csibe_binary_dir = os.environ["CSiBE_BIN_DIR"]
project_binary_dir = os.environ["PROJECT_BINARY_DIR"]
dump_obj_size_script = os.path.join(csibe_binary_dir, "dump_obj_size")
for file in os.listdir(project_binary_dir):
subprocess.call([dump_obj_size_script, file, project_binary_dir])
| [
"gaborb@inf.u-szeged.hu"
] | gaborb@inf.u-szeged.hu |
c072aa624478df0c59ad5e130486df23997f18c1 | 7cf04717763eeffe7685fe689a009eea6caca2ba | /Whole-Body/Generation_Cartesian_trajs/Cartesian_Task.py | 46abf727b989232f31efd2b4606ca7f1b32f8953 | [] | no_license | matteosodano/Gap-crossing_task_with_CENTAURO_robot | c33c1ba4af0d5c244274d34541079ff7d601454e | c7804f2679dcc9e27fa1abea241779258b3bbbe2 | refs/heads/master | 2023-01-11T20:13:42.034126 | 2020-11-17T10:46:00 | 2020-11-17T10:46:00 | 297,380,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,676 | py | #!/usr/bin/env python
import numpy as np
import math as m
from matplotlib import path
from numpy.linalg import inv
import time
import rospy
import roslib
import tf
import geometry_msgs.msg
from cartesian_interface.pyci_all import *
#########################################################
############ AUXILIARY FUNCTIONS AND CLASSES ############
#########################################################
# colors and styles for text to be used as print(color.BOLD + 'Hello World !' + color.END)
class color:
    """ANSI escape codes for coloured/styled terminal output.

    Usage: print(color.BOLD + 'Hello World !' + color.END) — always
    terminate with END to reset the terminal style.
    """
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
# fct giving coords of target_frame wrt reference_frame
def getPose(reference_frame, target_frame):
    """Return (translation, rotation) of target_frame expressed in reference_frame.

    Blocks until the transform becomes available on /tf, retrying on
    transient lookup/connectivity/extrapolation errors until ROS shutdown.
    Per the tf API, translation is [x, y, z] and rotation is a quaternion
    [x, y, z, w].
    """
    listener = tf.TransformListener()
    # First wait warms up the freshly created listener's tf buffer.
    listener.waitForTransform(reference_frame, target_frame, rospy.Time(), rospy.Duration(4.0))
    while not rospy.is_shutdown():
        try:
            # rospy.Time(0) means "latest available transform".
            now = rospy.Time(0)
            listener.waitForTransform(reference_frame, target_frame, now, rospy.Duration(4.0))
            (t,r) = listener.lookupTransform(reference_frame, target_frame, now)
            return (t, r)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # Transient tf error: keep retrying until shutdown.
            continue
# computes coefficients of a cubic with given initial and final positions and 0 velocity at beginning and end
def cubic(vi, vf, dvi, dvf, duration):
    """Coefficients (a, b, c, d) of the cubic p(t) = a*t^3 + b*t^2 + c*t + d
    satisfying p(0) = vi, p(T) = vf, p'(0) = dvi, p'(T) = dvf with
    T = duration (standard cubic Hermite boundary conditions)."""
    T = duration
    d = vi
    c = dvi
    b = (3 * (vf - vi) - (2 * dvi + dvf) * T) / T ** 2
    a = (2 * (vi - vf) + (dvi + dvf) * T) / T ** 3
    return a, b, c, d
#########################################################
############## BRING TO HOME CONFIGURATION ##############
#########################################################
def homing(car, w1, w2, w3, w4, pelvis, com):
    """Drive the four wheels to the nominal square stance (+-0.35 m around
    the car frame), then re-centre the car frame via movecar().

    Args:
        car: car-frame Cartesian task
        w1..w4: wheel Cartesian tasks (pose references in the car frame)
        pelvis, com: tasks handed to movecar() as the ones to disable
    """
    # CoM task off while the wheels reposition; re-enabled before movecar().
    com.disable()
    Tw1, _, _ = w1.getPoseReference()
    Tw2, _, _ = w2.getPoseReference()
    Tw3, _, _ = w3.getPoseReference()
    Tw4, _, _ = w4.getPoseReference()
    # NOTE(review): Tcar is read but never used below — confirm intent.
    Tcar, _, _ = car.getPoseReference()
    # Target wheel x/y: corners of the 0.7 m x 0.7 m support square.
    Tw1.translation_ref()[0] = 0.35
    Tw1.translation_ref()[1] = 0.35
    Tw2.translation_ref()[0] = 0.35
    Tw2.translation_ref()[1] = -0.35
    Tw3.translation_ref()[0] = -0.35
    Tw3.translation_ref()[1] = 0.35
    Tw4.translation_ref()[0] = -0.35
    Tw4.translation_ref()[1] = -0.35
    # Command all four wheel motions (1 s each) and wait for completion.
    w1.setPoseTarget(Tw1, 1.0)
    w2.setPoseTarget(Tw2, 1.0)
    w3.setPoseTarget(Tw3, 1.0)
    w4.setPoseTarget(Tw4, 1.0)
    w1.waitReachCompleted(1.5)
    w2.waitReachCompleted(1.5)
    w3.waitReachCompleted(1.5)
    w4.waitReachCompleted(1.5)
    com.enable()
    # NOTE(review): movecar() is defined as movecar(car, wheels, duration,
    # to_be_disabled, cli); this call omits the trailing `cli` argument and
    # would raise TypeError at runtime — confirm the intended signature.
    movecar(car, [w1, w2, w3, w4], 1.0, [pelvis, com])
#########################################################
######### FUNCTIONS CORRESPONDING TO PRIMITIVES #########
#########################################################
# fct for rolling along the x axis and y axis
def roll(car, wheels, distance, axis, duration, to_be_disabled, cli):
    """Roll the platform by `distance` along the car frame's x or y axis.

    Streams cubic (zero end-velocity) car-frame position references at
    ~100 Hz while holding each wheel's car-frame pose fixed.

    Args:
        car: car-frame Cartesian task (reference expressed in world)
        wheels: the 4 wheel tasks (references expressed in car_frame)
        distance: signed displacement [m]
        axis: 'x' or 'y' (car-frame axis)
        duration: motion duration [s]
        to_be_disabled: tasks disabled for the whole motion
        cli: cartesian-interface client used for tf pose lookups
    """
    for item in to_be_disabled:
        item.disable()
    Tcar = cli.getPoseFromTf('ci/car_frame', 'ci/world')
    Tw1 = cli.getPoseFromTf('ci/'+wheels[0].getDistalLink(), 'ci/car_frame')
    Tw2 = cli.getPoseFromTf('ci/'+wheels[1].getDistalLink(), 'ci/car_frame')
    Tw3 = cli.getPoseFromTf('ci/'+wheels[2].getDistalLink(), 'ci/car_frame')
    Tw4 = cli.getPoseFromTf('ci/'+wheels[3].getDistalLink(), 'ci/car_frame')
    x_init = Tcar.translation[0]
    y_init = Tcar.translation[1]
    # Extract cos/sin of the car yaw from the rotation matrix
    # (assumes the car frame stays level, i.e. pure yaw — TODO confirm).
    R = Tcar.linear
    c = R[0, 0]
    s = R[1, 0]
    t = 0.0
    dt = 0.01
    # NOTE(review): for any axis other than 'x'/'y', x_goal/y_goal stay
    # unset and the cubic() call below raises NameError — confirm callers.
    if axis == 'x':
        x_goal = x_init + c*distance
        y_goal = y_init + s*distance
    elif axis == 'y':
        # NOTE(review): (s, -c) is the car frame's -y direction in world,
        # so positive distance moves toward -y — confirm sign convention.
        x_goal = x_init + s*distance
        y_goal = y_init - c*distance
    # Cubic time-laws with zero boundary velocities for x and y.
    a_x, b_x, c_x, d_x = cubic(x_init, x_goal, 0., 0., duration)
    a_y, b_y, c_y, d_y = cubic(y_init, y_goal, 0., 0., duration)
    # Open-loop streaming at ~100 Hz (timing via time.sleep, not a ROS rate).
    while t < duration:
        Tcar.translation_ref()[0] = a_x * t**3 + b_x * t**2 + c_x * t + d_x
        Tcar.translation_ref()[1] = a_y * t**3 + b_y * t**2 + c_y * t + d_y
        car.setPoseReference(Tcar) # this publishes the reference
        # Re-publish the wheels' (constant) car-frame poses every cycle.
        wheels[0].setPoseReference(Tw1)
        wheels[1].setPoseReference(Tw2)
        wheels[2].setPoseReference(Tw3)
        wheels[3].setPoseReference(Tw4)
        t += dt
        time.sleep(dt)
    for item in to_be_disabled:
        item.enable()
# fct for rolling two wheels only
def rollTwoWheelsandMoveCom(wheels, distance_wheels, com, distance_com, movecom, other_wheels, duration, to_be_disabled, cli):
    """Roll two wheels forward along x while pinning the other two in world,
    optionally shifting the CoM along x at the same time.

    Args:
        wheels: the 2 wheel tasks to move (car-frame references)
        distance_wheels: x displacement of the moving wheels [m]
        com: CoM task (world-frame reference)
        distance_com: x displacement of the CoM [m]
        movecom: if True, the CoM task is enabled and moved alongside
        other_wheels: the 2 wheel tasks to hold still (temporarily re-based
            to the world frame so they do not follow the car frame)
        duration: motion duration [s]
        to_be_disabled: tasks disabled for the whole motion
        cli: cartesian-interface client used for tf pose lookups
    """
    for item in to_be_disabled:
        item.disable()
    if movecom:
        com.enable()
    Tw0 = cli.getPoseFromTf('ci/'+wheels[0].getDistalLink(), 'ci/car_frame')
    Tw1 = cli.getPoseFromTf('ci/'+wheels[1].getDistalLink(), 'ci/car_frame')
    Tcom = cli.getPoseFromTf('ci/'+com.getDistalLink(), 'ci/world')
    # Anchor the stationary wheels in the world frame for the whole motion.
    other_wheels[0].setBaseLink(u'world')
    other_wheels[1].setBaseLink(u'world')
    Tw2 = cli.getPoseFromTf('ci/'+other_wheels[0].getDistalLink(), 'ci/world')
    Tw3 = cli.getPoseFromTf('ci/'+other_wheels[1].getDistalLink(), 'ci/world')
    Tw0_trans = Tw0.translation
    Tw1_trans = Tw1.translation
    Tcom_trans = Tcom.translation
    # NOTE(review): Tw2_trans/Tw3_trans are never used below.
    Tw2_trans = Tw2.translation
    Tw3_trans = Tw3.translation
    # Cubic x time-laws (zero boundary velocities) for both wheels and CoM.
    a_x_w0, b_x_w0, c_x_w0, d_x_w0 = cubic(Tw0_trans[0], Tw0_trans[0] + distance_wheels, 0., 0., duration)
    a_x_w1, b_x_w1, c_x_w1, d_x_w1 = cubic(Tw1_trans[0], Tw1_trans[0] + distance_wheels, 0., 0., duration)
    a_x_com, b_x_com, c_x_com, d_x_com = cubic(Tcom_trans[0], Tcom_trans[0] + distance_com, 0., 0., duration)
    t = 0.0
    dt = 0.01
    # Open-loop streaming at ~100 Hz.
    while t < duration:
        Tw0.translation_ref()[0] = a_x_w0 * t**3 + b_x_w0 * t**2 + c_x_w0 * t + d_x_w0
        Tw1.translation_ref()[0] = a_x_w1 * t**3 + b_x_w1 * t**2 + c_x_w1 * t + d_x_w1
        # Self-assignments: keep the pinned wheels' world pose constant.
        Tw2.translation_ref()[0] = Tw2.translation[0]
        Tw3.translation_ref()[0] = Tw3.translation[0]
        Tw2.translation_ref()[1] = Tw2.translation[1]
        Tw3.translation_ref()[1] = Tw3.translation[1]
        Tw2.translation_ref()[2] = Tw2.translation[2]
        Tw3.translation_ref()[2] = Tw3.translation[2]
        if movecom:
            Tcom.translation_ref()[0] = a_x_com * t**3 + b_x_com * t**2 + c_x_com * t + d_x_com
        wheels[0].setPoseReference(Tw0)
        wheels[1].setPoseReference(Tw1)
        other_wheels[0].setPoseReference(Tw2)
        other_wheels[1].setPoseReference(Tw3)
        if movecom:
            com.setPoseReference(Tcom)
        t += dt
        time.sleep(dt)
    # Restore the stationary wheels' base link to the car frame.
    other_wheels[0].setBaseLink(u'car_frame')
    other_wheels[1].setBaseLink(u'car_frame')
    if movecom:
        com.disable()
    for item in to_be_disabled:
        item.enable()
# fct for spinning (i.e., rotating around the center of the sp -> rotation about the z-axis)
def spin(car, wheels, angle, duration, to_be_disabled, cli):
    """Rotate the car frame about the world z-axis by a relative `angle`.

    Streams a cubic yaw time-law (zero boundary velocities) at ~100 Hz while
    the wheels keep their pose in the (rotating) car frame, so the platform
    spins about the support-polygon centre.

    Args:
        car: car-frame Cartesian task (reference expressed in world)
        wheels: the 4 wheel tasks (references expressed in car_frame)
        angle: relative yaw increment [rad]
        duration: motion duration [s]
        to_be_disabled: tasks disabled for the whole motion
        cli: cartesian-interface client used for tf pose lookups
    """
    for item in to_be_disabled:
        item.disable()
    Tcar = cli.getPoseFromTf('ci/car_frame', 'ci/world')
    Tw1 = cli.getPoseFromTf('ci/'+wheels[0].getDistalLink(), 'ci/car_frame')
    Tw2 = cli.getPoseFromTf('ci/'+wheels[1].getDistalLink(), 'ci/car_frame')
    Tw3 = cli.getPoseFromTf('ci/'+wheels[2].getDistalLink(), 'ci/car_frame')
    Tw4 = cli.getPoseFromTf('ci/'+wheels[3].getDistalLink(), 'ci/car_frame')
    # Current yaw from the rotation matrix (assumes the car frame stays
    # level, i.e. pure yaw — TODO confirm).
    R = Tcar.linear
    c_i = R[0, 0]
    s_i = R[1, 0]
    t = 0.0
    dt = 0.01
    angle_i = m.atan2(s_i, c_i)
    angle_f = angle_i + angle
    # Cubic yaw time-law with zero boundary angular velocities.
    a, b, c, d = cubic(angle_i, angle_f, 0., 0., duration)
    # Open-loop streaming at ~100 Hz; `angle` is reused as the running yaw.
    while t < duration:
        angle = a * t**3 + b * t**2 + c * t + d
        c_t = m.cos(angle)
        s_t = m.sin(angle)
        # Rebuild the full rotation matrix (pure z-rotation) and copy it
        # element-wise into the reference.
        R = np.array([[c_t, -s_t, 0.], [s_t, c_t, 0.], [0., 0., 1.]])
        for row in range(0, 3):
            for col in range(0, 3):
                Tcar.linear_ref()[row][col] = R[row][col]
        car.setPoseReference(Tcar)
        # Re-publish the wheels' (constant) car-frame poses every cycle.
        wheels[0].setPoseReference(Tw1)
        wheels[1].setPoseReference(Tw2)
        wheels[2].setPoseReference(Tw3)
        wheels[3].setPoseReference(Tw4)
        t += dt
        time.sleep(dt)
    for item in to_be_disabled:
        item.enable()
# fct for moving the car frame keeping the stance --- to be launched after the 4 steps (or 2) in order to bring the car frame at the center of the SP
def movecar(car, wheels, duration, to_be_disabled, cli):
for item in to_be_disabled:
item.disable()
Tcar = cli.getPoseFromTf('ci/car_frame', 'ci/world')
Tw1 = cli.getPoseFromTf('ci/wheel_1', 'ci/car_frame')
Tw2 = cli.getPoseFromTf('ci/wheel_2', 'ci/car_frame')
Tw3 = cli.getPoseFromTf('ci/wheel_3', 'ci/car_frame')
Tw4 = cli.getPoseFromTf('ci/wheel_4', 'ci/car_frame')
# initial position of car and wheels
xcar_i = Tcar.translation[0]
ycar_i = Tcar.translation[1]
xw1_i = Tw1.translation[0]
yw1_i = Tw1.translation[1]
xw2_i = Tw2.translation[0]
yw2_i = Tw2.translation[1]
xw3_i = Tw3.translation[0]
yw3_i = Tw3.translation[1]
xw4_i = Tw4.translation[0]
yw4_i = Tw4.translation[1]
# final position of car (center of the SP)
Tw1_world = np.matmul(Tcar.matrix(), Tw1.matrix())
Tw2_world = np.matmul(Tcar.matrix(), Tw2.matrix())
Tw3_world = np.matmul(Tcar.matrix(), Tw3.matrix())
Tw4_world = np.matmul(Tcar.matrix(), Tw4.matrix())
xcar_f = (Tw1_world[0, 3] + Tw2_world[0, 3] + Tw3_world[0, 3] + Tw4_world[0, 3])/4
ycar_f = (Tw1_world[1, 3] + Tw2_world[1, 3] + Tw3_world[1, 3] + Tw4_world[1, 3])/4
# final position of wheels wrt car
xw1_f = 0.35
yw1_f = 0.35
xw2_f = 0.35
yw2_f = -0.35
xw3_f = -0.35
yw3_f = 0.35
xw4_f = -0.35
yw4_f = -0.35
a_carx, b_carx, c_carx, d_carx = cubic(xcar_i, xcar_f, 0., 0., duration)
a_cary, b_cary, c_cary, d_cary = cubic(ycar_i, ycar_f, 0., 0., duration)
a_w1x, b_w1x, c_w1x, d_w1x = cubic(xw1_i, xw1_f, 0., 0., duration)
a_w1y, b_w1y, c_w1y, d_w1y = cubic(yw1_i, yw1_f, 0., 0., duration)
a_w2x, b_w2x, c_w2x, d_w2x = cubic(xw2_i, xw2_f, 0., 0., duration)
a_w2y, b_w2y, c_w2y, d_w2y = cubic(yw2_i, yw2_f, 0., 0., duration)
a_w3x, b_w3x, c_w3x, d_w3x = cubic(xw3_i, xw3_f, 0., 0., duration)
a_w3y, b_w3y, c_w3y, d_w3y = cubic(yw3_i, yw3_f, 0., 0., duration)
a_w4x, b_w4x, c_w4x, d_w4x = cubic(xw4_i, xw4_f, 0., 0., duration)
a_w4y, b_w4y, c_w4y, d_w4y = cubic(yw4_i, yw4_f, 0., 0., duration)
t = 0.0
dt = 0.01
while t < duration:
xcar_t = a_carx * t**3 + b_carx * t**2 + c_carx * t + d_carx
ycar_t = a_cary * t**3 + b_cary * t**2 + c_cary * t + d_cary
xw1_t = a_w1x * t**3 + b_w1x * t**2 + c_w1x * t + d_w1x
yw1_t = a_w1y * t**3 + b_w1y * t**2 + c_w1y * t + d_w1y
xw2_t = a_w2x * t**3 + b_w2x * t**2 + c_w2x * t + d_w2x
yw2_t = a_w2y * t**3 + b_w2y * t**2 + c_w2y * t + d_w2y
xw3_t = a_w3x * t**3 + b_w3x * t**2 + c_w3x * t + d_w3x
yw3_t = a_w3y * t**3 + b_w3y * t**2 + c_w3y * t + d_w3y
xw4_t = a_w4x * t**3 + b_w4x * t**2 + c_w4x * t + d_w4x
yw4_t = a_w4y * t**3 + b_w4y * t**2 + c_w4y * t + d_w4y
Tcar.translation_ref()[0] = xcar_t
Tcar.translation_ref()[1] = ycar_t
Tw1.translation_ref()[0] = xw1_t
Tw1.translation_ref()[1] = yw1_t
Tw2.translation_ref()[0] = xw2_t
Tw2.translation_ref()[1] = yw2_t
Tw3.translation_ref()[0] = xw3_t
Tw3.translation_ref()[1] = yw3_t
Tw4.translation_ref()[0] = xw4_t
Tw4.translation_ref()[1] = yw4_t
car.setPoseReference(Tcar)
wheels[0].setPoseReference(Tw1)
wheels[1].setPoseReference(Tw2)
wheels[2].setPoseReference(Tw3)
wheels[3].setPoseReference(Tw4)
t += dt
time.sleep(2*dt)
for item in to_be_disabled:
item.enable()
# fct for stepping. the step is a semi-circumference. every 0.1s, it is checked if the com lies in the support triangle
def step(moving_foot, still_feet, step_length, duration, to_be_disabled, car, com, cli, filename_pos, filename_vel):
for item in to_be_disabled:
item.disable()
com.enable()
com_pos = np.loadtxt('/home/matteo/catkin_ws/src/centauro_cartesio-devel-cpack/python/' + filename_pos)
com_vel = np.loadtxt('/home/matteo/catkin_ws/src/centauro_cartesio-devel-cpack/python/' + filename_vel)
interval_duration = 0.1
t = 0.00
dt = 0.01
period = 4.0
radius = step_length/2
T1 = 1.0
T2 = 3.0
moving_foot.setBaseLink(u'world')
for foot in still_feet:
foot.setBaseLink(u'world')
Tmoving_foot = (cli.getPoseFromTf('ci/'+moving_foot.getDistalLink(), 'ci/world'))
Tmoving_init = Tmoving_foot.translation
Tw1 = cli.getPoseFromTf('ci/'+still_feet[0].getDistalLink(), 'ci/world')
Tw2 = cli.getPoseFromTf('ci/'+still_feet[1].getDistalLink(), 'ci/world')
Tw3 = cli.getPoseFromTf('ci/'+still_feet[2].getDistalLink(), 'ci/world')
Tcar = cli.getPoseFromTf('ci/car_frame', 'ci/world')
Tcom = cli.getPoseFromTf('ci/com', 'ci/world')
counter = 0
i = 0
total = int(duration*100)
'''
# first "control" loop that brings the com from the current position to the one imposed by casadi
a_x_com, b_x_com, c_x_com, d_x_com = cubic(Tcom.translation[0], data[0][0], 3.0)
a_y_com, b_y_com, c_y_com, d_y_com = cubic(Tcom.translation[1], data[0][1], 3.0)
a_z_com, b_z_com, c_z_com, d_z_com = cubic(Tcom.translation[2], data[0][2], 3.0)
while t < 3.0:
Tcom.translation_ref()[0] = a_x_com * t**3 + b_x_com * t**2 + c_x_com * t + d_x_com
Tcom.translation_ref()[1] = a_y_com * t**3 + b_y_com * t**2 + c_y_com * t + d_y_com
Tcom.translation_ref()[2] = a_z_com * t**3 + b_z_com * t**2 + c_z_com * t + d_z_com
com.setPoseReference(Tcom)
t += dt
time.sleep(dt)
t = 0.0
'''
while counter < total:
if counter%10 == 0:
com_init = com_pos[i]
com_goal = com_pos[i+1]
dcom_init = com_vel[i]
dcom_goal = com_vel[i+1]
a_comx, b_comx, c_comx, d_comx = cubic(com_init[0], com_goal[0], dcom_init[0], dcom_goal[0], interval_duration)
a_comy, b_comy, c_comy, d_comy = cubic(com_init[1], com_goal[1], dcom_init[1], dcom_goal[1], interval_duration)
a_comz, b_comz, c_comz, d_comz = cubic(com_init[2], com_goal[2], dcom_init[2], dcom_goal[2], interval_duration)
i += 1
print 'com_init: ' + str(com_init)
print 'com_goal: ' + str(com_goal)
print 'com: ' + str(Tcom.translation)
Tcom.translation_ref()[0] = a_comx * (t-(i-1)*interval_duration)**3 + b_comx * (t-(i-1)*interval_duration)**2 + c_comx * (t-(i-1)*interval_duration) + d_comx
Tcom.translation_ref()[1] = a_comy * (t-(i-1)*interval_duration)**3 + b_comy * (t-(i-1)*interval_duration)**2 + c_comy * (t-(i-1)*interval_duration) + d_comy
Tcom.translation_ref()[2] = a_comz * (t-(i-1)*interval_duration)**3 + b_comz * (t-(i-1)*interval_duration)**2 + c_comz * (t-(i-1)*interval_duration) + d_comz + 0.3
Tw1.translation_ref()[0] = Tw1.translation[0]
Tw1.translation_ref()[1] = Tw1.translation[1]
Tw1.translation_ref()[2] = Tw1.translation[2]
Tw2.translation_ref()[0] = Tw2.translation[0]
Tw2.translation_ref()[1] = Tw2.translation[1]
Tw2.translation_ref()[2] = Tw2.translation[2]
Tw3.translation_ref()[0] = Tw3.translation[0]
Tw3.translation_ref()[1] = Tw3.translation[1]
Tw3.translation_ref()[2] = Tw3.translation[2]
if t >= T1 and t <= T2:
delta_x = radius - radius * m.cos((t-T1)*m.pi/(T2-T1))
delta_z = radius * m.sin((t-T1)*m.pi/(T2-T1))
Tmoving_foot.translation_ref()[0] = delta_x + Tmoving_init[0]
Tmoving_foot.translation_ref()[1] = Tmoving_init[1]
Tmoving_foot.translation_ref()[2] = delta_z + Tmoving_init[2]
else:
Tmoving_foot.translation_ref()[0] = Tmoving_foot.translation[0]
Tmoving_foot.translation_ref()[1] = Tmoving_foot.translation[1]
Tmoving_foot.translation_ref()[2] = Tmoving_foot.translation[2]
com.setPoseReference(Tcom)
still_feet[0].setPoseReference(Tw1)
still_feet[1].setPoseReference(Tw2)
still_feet[2].setPoseReference(Tw3)
moving_foot.setPoseReference(Tmoving_foot)
counter += 1
t += dt
time.sleep(5*dt)
com.disable()
for item in to_be_disabled:
item.enable()
moving_foot.setBaseLink(u'car_frame')
for foot in still_feet:
foot.setBaseLink(u'car_frame')
#########################################################
########################## MAIN #########################
#########################################################
def main():
start_time = time.time()
k = 0
# load file with primitives and number of times they are applied
plan = np.loadtxt('/home/matteo/catkin_ws/src/centauro_cartesio-devel-cpack/python/plan.txt')
primitives = plan[:, 0]
times = plan[:, 1]
cli = pyci.CartesianInterfaceRos() # initialization of cartesIO
# tasks
com = cli.getTask('Com')
car = cli.getTask('car_frame')
pelvis = cli.getTask('pelvis')
w1 = cli.getTask('wheel_1')
w2 = cli.getTask('wheel_2')
w3 = cli.getTask('wheel_3')
w4 = cli.getTask('wheel_4')
rw1 = cli.getTask('rolling_wheel_1')
rw2 = cli.getTask('rolling_wheel_2')
rw3 = cli.getTask('rolling_wheel_3')
rw4 = cli.getTask('rolling_wheel_4')
#homing(car, w1, w2, w3, w4, pelvis, com)
n_primitives = len(primitives)
# executes planner indications
for i in range(0, n_primitives):
primitive = primitives[i]
application = times[i]
if primitive == 0:
print(color.BOLD + 'Primitive 0: clockwise spin of 10 deg for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
spin(car, [w1, w2, w3, w4], -m.pi/18 * application, 5.0 * application, [com], cli)
elif primitive == 1:
print(color.BOLD + 'Primitive 1: counter-clockwise spin of 10 deg for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
spin(car, [w1, w2, w3, w4], m.pi/18 * application, 5.0 * application, [com], cli)
elif primitive == 2:
print(color.BOLD + 'Primitive 2: forward roll of 0.05 m for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
roll(car, [w1, w2, w3, w4], 0.05 * application, 'x', 1.0 * application, [com], cli)
elif primitive == 3:
print(color.BOLD + 'Primitive 3: backward roll of 0.05 m for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
roll(car, [w1, w2, w3, w4], -0.05 * application, 'x', 1.0 * application, [com], cli)
elif primitive == 4:
print(color.BOLD + 'Primitive 4: right roll of 0.05 m for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
roll(car, [w1, w2, w3, w4], 0.05 * application, 'y', 1.0 * application, [com], cli)
elif primitive == 5:
print(color.BOLD + 'Primitive 5: left roll of 0.05 m for ' + str(int(application)) + ' times. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
roll(car, [w1, w2, w3, w4], -0.05 * application, 'y', 1.0 * application, [com], cli)
else:
if k == 0:
print(color.BOLD + 'Preparation to step: forward roll 0.20 m with back wheels.' + color.END)
rollTwoWheelsandMoveCom([w3, w4], 0.2, com, 0., False, [w1, w2], 4.0, [], cli)
#time.sleep(3.0)
cli.update()
#return 0
k += 1
if primitive == 6:
print(color.BOLD + 'Primitive 6: step of 0.20 m with BR foot. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
step(moving_foot = w4, still_feet = [w1, w2, w3], step_length = 0.2, duration = 4.0, to_be_disabled = [pelvis], car=car, com=com, cli=cli, filename_pos='com_traj_with_com_vel02/COMtraj_BR.txt', filename_vel = 'com_traj_with_com_vel02/DCOMtraj_BR.txt')
elif primitive == 7:
print(color.BOLD + 'Primitive 7: step of 0.20 m with BL foot. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
step(moving_foot = w3, still_feet = [w1, w2, w4], step_length = 0.2, duration = 4.0, to_be_disabled = [pelvis], car=car, com=com, cli=cli, filename_pos='com_traj_with_com_vel02/COMtraj_BL.txt', filename_vel = 'com_traj_with_com_vel02/DCOMtraj_BL.txt')
elif primitive == 8:
print(color.BOLD + 'Primitive 8: step of 0.20 m with FR foot. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
cli.update()
step(moving_foot = w2, still_feet = [w1, w3, w4], step_length = 0.2, duration = 4.0, to_be_disabled = [pelvis], car=car, com=com, cli=cli, filename_pos='com_traj_with_com_vel02/COMtraj_FR.txt', filename_vel = 'com_traj_with_com_vel02/DCOMtraj_FR.txt')
elif primitive == 9:
print(color.BOLD + 'Primitive 9: step of 0.20 m with FL foot. (' + str(i+1) + '/' + str(n_primitives) + ')' + color.END)
step(moving_foot = w1, still_feet = [w2, w3, w4], step_length = 0.2, duration = 4.0, to_be_disabled = [pelvis], car=car, com=com, cli=cli, filename_pos='com_traj_with_com_vel02/COMtraj_FL.txt', filename_vel = 'com_traj_with_com_vel02/DCOMtraj_FL.txt')
#time.sleep(3.0)
cli.update()
if k == 4:
print(color.BOLD + 'Conclusion of step: forward roll 0.20 m with front wheels.' + color.END)
rollTwoWheelsandMoveCom([w1, w2], 0.2, com, 0.1, True, [w3, w4], 3.0, [pelvis], cli)
#time.sleep(3.0)
cli.update()
print(color.BOLD + 'Realigning car_frame with the center of the support polygon...' + color.END)
movecar(car, [w1, w2, w3, w4], 5.0, [pelvis], cli)
#time.sleep(3.0)
cli.update()
k = 0
print(color.BOLD + color.GREEN + 'Execution completed in ' + str(time.time() - start_time) + ' s. \n' + color.END)
# main
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | matteosodano.noreply@github.com |
cd76ffa51316d5baf1677131e7ac7394c189d79b | de998d0170e81d61982cef9c3181c41fc926bac3 | /Hardware/PCB/KiCadScript/KiAuto/HexLib/setup.py | f899b784ab4257440397fc6a71594a022b1347b1 | [
"BSD-3-Clause"
] | permissive | ymei/TMSPlane | f100294e039bb73e77964cf9c38930160d5f86d9 | 3e30749440b1a8a0fd81a848b368cfbce10dfb86 | refs/heads/master | 2021-01-23T07:44:23.156705 | 2018-03-25T03:16:43 | 2018-03-25T03:16:43 | 86,438,489 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | ## \file
# setup for hexlib.{h,c}
from distutils.core import setup, Extension
module1 = Extension('hexlib',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
include_dirs = ['/usr/local/include'],
libraries = ['m'],
library_dirs = ['/usr/local/lib'],
sources = ['hexlib.c'])
setup(name = 'PackageName',
version = '0.1',
description = 'Hexagonal grid coordinate transformation',
author = 'Yuan Mei',
author_email = 'yuan.mei@gmail.com',
url = '',
long_description = '',
ext_modules = [module1]
)
| [
"yuan.mei@gmail.com"
] | yuan.mei@gmail.com |
5a8530e89b2be1b718c1cbce58a02fe81e4b9ffb | f63e40e9ffab225a64655f1b7d45c61a2181d3d9 | /8장 연결 리스트/206_reverse_linked_list.py | 8162b4e2fdcceb74a50d4d6fe3ec3e85eaa76437 | [] | no_license | Geunbaek/python_algorithm_interview | 5d0e9644a87a3e22541aa44a1d5476ae24fc65a7 | b3275ae2e82b940173e3df4e49ce8800efa4b96c | refs/heads/main | 2023-06-14T23:14:07.262253 | 2021-07-10T09:23:53 | 2021-07-10T09:23:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
node = head
ans = None
while node:
ans, ans.next, node = node, ans, node.next
return ans
head = ListNode(1, (ListNode(2, ListNode(3, ListNode(4, ListNode(5))))))
solution = Solution()
print(solution.reverseList(head))
| [
"rmsqor103@naver.com"
] | rmsqor103@naver.com |
bd50bb55882100e57bcf7dbccbfb04333b9e07a6 | 6992c11d874236087dd9eca1d789c386c4fcb08f | /django_passbook_core/urls.py | cd4969b160dd696752bd2454a9c2c12f19354416 | [
"MIT"
] | permissive | timtan/django-passbook | 557f53467a193678b2abb1c23838922a48f916e4 | 2bda4b52315b130ddb286c5a4b3a0b8757b640ff | refs/heads/master | 2020-12-25T04:18:34.548234 | 2013-02-18T08:55:52 | 2013-02-18T08:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_passbook_core.views.home', name='home'),
url(r'^passbook/', include('passbook.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| [
"tim.yellow@gmail.com"
] | tim.yellow@gmail.com |
59eeab9dee208bc9a44b449d748ddf58bac96535 | bde2f2178020fcd453c86f373e6296952dd08e7a | /Python Lab/Lab-18-09-20/PatternStar4.py | d90ea55c699346b34f8edd13c789f7298d130fe4 | [] | no_license | ahirwarvivek19/Semester-3 | 981abdc90494e71c5d59ea248d275aba2be7ee77 | 5a37ef859189fa06ebf1298dde38a575d781b4bb | refs/heads/main | 2023-02-26T12:29:40.057396 | 2021-02-05T14:20:59 | 2021-02-05T14:20:59 | 336,293,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # *
# * *
# * * *
# * * * *
# * * * * *
# * * * *
# * * *
# * *
# *
rows = 5
for i in range(1,2*rows):
if(i<=5):
for j in range(i):
print("*",end=" ")
else:
for j in range(2*rows-i):
print("*", end=" ")
print() | [
"ahirwarvivek19@gmail.com"
] | ahirwarvivek19@gmail.com |
9bf5f9186fe2f542ae87d34473bbfe712d6079a5 | df816f41be8f02107f08b7651e9397b1c905c154 | /1_Classic_RL/Exercises/6 - TD Learning/CliffWalking/plot_utils.py | 8d5b55b91b46530e339d7d36f6b5878fe45467bf | [] | no_license | PabloRR100/Reinforcement-Learning | 7f11caeb2eb2bc68b2ae6b2b3bc7fb4b651eae68 | 8d926cdae59f89a215391ca825d9f07f778dbd96 | refs/heads/master | 2020-03-27T18:39:42.969119 | 2018-12-17T19:01:36 | 2018-12-17T19:01:36 | 146,935,269 | 1 | 2 | null | 2018-09-07T23:37:01 | 2018-08-31T19:36:43 | Jupyter Notebook | UTF-8 | Python | false | false | 620 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
import warnings
warnings.filterwarnings('ignore', 'DeprecationWarning')
def plot_values(V):
# reshape the state-value function
V = np.reshape(V, (4,12))
# plot the state-value function
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
im = ax.imshow(V, cmap='cool')
for (j,i),label in np.ndenumerate(V):
ax.text(i, j, np.round(label,3), ha='center', va='center', fontsize=14)
plt.tick_params(bottom='off', left='off', labelbottom='off', labelleft='off')
plt.title('State-Value Function')
plt.show() | [
"pabloruizruiz10@gmail.com"
] | pabloruizruiz10@gmail.com |
c50cf4e64ee767f3927371caa3b179ba922ba1ac | 5afb65fd0608dff66bfc8523853b2e9c61a2b304 | /neurodocker/reprozip/__init__.py | 21ccbc4a92910960d983c401aba06d878b21dfdf | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | m9h/neurodocker | 120fd30960adfbed4583cfff1a6c5b2d52e3f170 | 9991487b530a25d74d39d46e5b3fb21fe48af94f | refs/heads/master | 2020-07-28T13:22:29.786701 | 2019-09-18T13:54:51 | 2019-09-18T13:54:51 | 209,423,783 | 1 | 0 | Apache-2.0 | 2019-09-18T23:50:51 | 2019-09-18T23:50:50 | null | UTF-8 | Python | false | false | 113 | py | from neurodocker.reprozip.trace import ReproZipMinimizer
from neurodocker.reprozip.merge import merge_pack_files
| [
"jakubk@mit.edu"
] | jakubk@mit.edu |
cdb95e14ae883554f395e48805b2fadc4a53a368 | 0961143f750fab83358a8e94d142e69a2423533a | /Django/dynamic-templates/task1/app/settings.py | a7c0c5e02d775015995d40a212fcbbf9509b0138 | [] | no_license | Severes/Netology_python_prof_2019 | 5f8fbe8dfbc841a44dbfa638ee2e232bec46c516 | 383026a1e448318357cf3141dc048fdfb108809e | refs/heads/master | 2022-12-01T01:02:38.643443 | 2019-08-14T18:37:43 | 2019-08-14T18:37:43 | 172,185,103 | 1 | 0 | null | 2022-11-22T03:15:27 | 2019-02-23T07:26:24 | Python | UTF-8 | Python | false | false | 2,935 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'filters_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
INFLATION_FILES = os.path.join(BASE_DIR, 'files')
STATIC_URL = '/static/'
SECRET_KEY = 'put your secret here'
DEBUG = True
try:
from .settings_local import *
except ImportError:
pass
| [
"v.simigin@gmail.com"
] | v.simigin@gmail.com |
b3defae432d7f604c405eab11cc5e214c4734f56 | 5d7755cf993379fd803d3604c0e5300f6586fd24 | /Problem 73/problem73.py | 6907649978f9ed0345ddcbec60fa77fa56f9d13c | [
"MIT"
] | permissive | logicred/Euler-Project | 11aeeec69cee19ae63d596d1e7d4b0e115ee6218 | 3488c409135729a444cb43ff06c5fdaf8feb8623 | refs/heads/master | 2020-07-24T12:43:32.037951 | 2019-12-18T01:17:49 | 2019-12-18T01:17:49 | 207,932,083 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Answer = 7295372
# cost = 4.79s
import time
from math import gcd
start = time.time()
limit = 12000
summary = 0
for i in range(4, limit + 1):
low = i // 3 + 1
upp = i // 2
for j in range(low, upp + 1):
if gcd(i, j) == 1:
summary += 1
print(summary)
end = time.time()
print(end - start) | [
"noreply@github.com"
] | logicred.noreply@github.com |
22ceb8d82d4709f2d6148a64604615654a418429 | 73715f40469dda3daede027f3787c345add00c90 | /treehugger/cli/parser.py | aa19ae1911f4a5d7adbc9409d011dfe469fb42d9 | [
"ISC"
] | permissive | adamchainz/treehugger | dd159830c658d6e806194f6e8a955682f8508373 | 14bb400a736d87529592336a39909f0c721c1076 | refs/heads/master | 2023-08-24T01:43:00.562556 | 2018-06-29T10:50:03 | 2018-06-29T10:50:03 | 150,948,661 | 0 | 0 | ISC | 2018-09-30T08:38:46 | 2018-09-30T08:38:50 | null | UTF-8 | Python | false | false | 935 | py | # -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from .. import __version__
class VarOrDefault(object):
"""
Helper to avoid fetching from environment variable until the moment it's
needed.
"""
def __init__(self, env_var, default):
self.env_var = env_var
self.default = default
def __str__(self):
return os.environ.get(self.env_var, self.default)
parser = argparse.ArgumentParser(
prog='treehugger',
description='''
Takes care of the (runtime) environment. Version {version}.
'''.format(version=__version__),
)
parser.add_argument(
'-k',
'--key',
type=str,
default=VarOrDefault('TREEHUGGER_KEY', 'alias/treehugger'),
dest='key_id',
help='The key ID, alias, or ARN to use on KMS for encryption.',
)
subparsers = parser.add_subparsers(dest='command_name')
| [
"noreply@github.com"
] | adamchainz.noreply@github.com |
b54c3620ba6aebc9d8d6f11ee77170379b92dacf | f847396fe63857a84b7b35a18eefac1dcd0c78f2 | /Web Pet Projects/KDBall-master/jsonexample/sql.py | 2816fddd75f3968860249ca1057702ce19ad857d | [] | no_license | shreyasgune/Work-In-Progress- | 3f5a91e3c6cf60456d91ae95236fb5574d5fcc43 | 59fe45bc2bc815bb9dadd4b7fc7a15c3e56a11e9 | refs/heads/master | 2023-01-21T01:38:37.765012 | 2021-04-03T20:01:19 | 2021-04-03T20:01:19 | 59,343,758 | 0 | 1 | null | 2023-01-19T08:39:32 | 2016-05-21T05:36:02 | Python | UTF-8 | Python | false | false | 164 | py | import sqlite3
with sqlite3.connect("chart_data.db") as connection:
c = connection.cursor()
c.execute("CREATE TABLE chart_table(name TEXT, test_value INTEGER)")
| [
"shreyas.enug@gmail.com"
] | shreyas.enug@gmail.com |
752460317cb09ab50180a9fc7bdc280cd98ec470 | ea4d2285be2dc2f776bb63e19816c4a90606de21 | /benchmark/speed_packed.py | 5bd8750cf594f814afdb1f9feb86195743311bcb | [
"MIT"
] | permissive | mdda/Reverse-GoL | 57fcd3ebfe2caa4b74694006613635ea56e123f9 | 2bddf274ce69b7f0e5f3df25cc1e195ce64e4195 | refs/heads/master | 2020-06-07T23:06:37.847988 | 2014-10-14T16:03:59 | 2014-10-14T16:03:59 | 23,731,319 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py |
class BitField: # Always surrounded by zeroes
bitarr = []
def __init_COUNT_BITS():
#print "__init_COUNT_BITS()"
arr=[]
for x in range(0, 1<<9):
cnt = bin(x).count('1')
arr.append(cnt)
return arr
COUNT_BITS = __init_COUNT_BITS()
del __init_COUNT_BITS
def __init__(self, n_rows=22, n_cols=22, numeric_array=None):
self.n_rows=n_rows # Includes the padded region
self.n_cols=n_cols # Includes the padded region
self.bitarr = [ 0 for i in range(0, self.n_rows) ]
if numeric_array is None:
return
r=1 # Start 1 row down
for arr in numeric_array: # Each line is an array
acc=0
p = 1<<(self.n_cols-1-1) # Starts 1 column over
for x in arr: # Each element of the array
if x:
acc |= p
p >>= 1
self.bitarr[r] |= acc
r=r+1
def pretty_print(self):
spacer = '-' * self.n_cols
for row in self.bitarr:
b = bin(row).lstrip('-0b')
print (spacer+b)[-self.n_cols:].replace('0','-')
def iterate(self):
## These are constants - the game bits pass over them
tb_filter = 7
mid_filter = 5
current_filter = 2
arr_new=[0]
for r in range(1, self.n_rows-1):
r_top = self.bitarr[r-1]
r_mid = self.bitarr[r]
r_bot = self.bitarr[r+1]
acc = 0
p = 2 # Start in the middle row, one column in
for c in range(1, self.n_cols-1):
cnt = self.COUNT_BITS[
((r_top & tb_filter) << 6 )|
((r_mid & mid_filter) << 3 )|
(r_bot & tb_filter)
]
#if True: acc |= p # Check bit-twiddling bounds
# Return next state according to the game rules:
# exactly 3 neighbors: on,
# exactly 2 neighbors: maintain current state,
# otherwise: off.
# return alive == 3 || alive == 2 && f.Alive(x, y)
if (cnt==3) or (cnt==2 and (r_mid & current_filter)) :
acc |= p
# Move the 'setting-bit' over
p <<= 1
# Shift the arrays over into base filterable position
r_top >>= 1
r_mid >>= 1
r_bot >>= 1
#print "Appending %d" % (acc)
arr_new.append(acc)
arr_new.append(0)
self.bitarr = arr_new
#def copy(self):
glider = [[0,0,1],
[1,0,1],
[0,1,1]]
z = BitField(numeric_array=glider)
print 'Initial state:'
z.pretty_print()
for i in range(65):
z.iterate()
print 'Final state:'
z.pretty_print()
def test_timing():
import timeit
def time_iter():
z = BitField(numeric_array=glider)
for i in range(65):
z.iterate()
t=timeit.Timer(time_iter)
print t.repeat(1, 1000)
test_timing()
| [
"Martin.Andrews@PLATFORMedia.com"
] | Martin.Andrews@PLATFORMedia.com |
fca3976d5ba4c46a65a4351a22a1c030067c7068 | 7956003de36f3a600aef5dc915dd77a13a1a975b | /posts_scraper/api_access/pushshift_wrapper.py | 70a2ea5cf92d3e9076221bbc52f37905f6237c59 | [] | no_license | raczynskid/AlignExample | f4be0c70ab7c885e8283d89f1a1ce70bc7676f0e | d6b94e06f800f99658a8bbeb590179564656612d | refs/heads/main | 2023-03-14T23:32:41.486427 | 2021-03-12T23:21:32 | 2021-03-12T23:21:32 | 345,454,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | import requests
import pandas as pd
import json
import time
import datetime
class PushshiftScraper:
def __init__(self, **kwargs):
self.data = self.convert_dates(self.paginate_by_utc(**kwargs)).sort_values(by="created_utc", ascending=False)
def paginate_by_utc(self, queries: list, days: int, interval: int) -> pd.DataFrame:
"""
due to change in pushshift api max return limit need to paginate by comment add date
:param queries: list of strings to be included in the search eg ["invisalign", "teeth"]
:param days: period in days to search for
:param interval: data granularity in hours (paginate by x hours)
example: paginate_by_utc(["invisalign"], 10, 3) - find invisalign posts for last 10 days
with 3h granularity to avoid going over
100 comments api limit - increase up to 24 to
improve performance, decrease to improve
data quality
:return: dataframe containing concatenated api response
"""
data_chunks = []
# cycle through queries
for query in queries:
after = interval
# for each query take 1 day period of comments and move on
for _ in range((24//interval) * days):
before = after - interval
url = f"https://api.pushshift.io/reddit/comment/search/?q={query}&after={after}h&before={before}h&sort=asc&size=100&fields=author,author_flair_text,body,comment_type,created_utc,id,link_id,permalink,score,subreddit,subreddit_id"
try:
df = pd.json_normalize(json.loads(requests.get(url).text), record_path='data')
if df is not None:
time.sleep(1)
if len(df) > 0:
print(len(df))
data_chunks.append(df)
except:
pass
after += interval
result = pd.concat(data_chunks).reset_index(drop=True)
return result
def convert_dates(self, df: pd.DataFrame) -> pd.DataFrame:
df["created_utc"] = df["created_utc"].apply(lambda x: datetime.datetime.fromtimestamp(x))
return df
| [
"raczynski90@gmail.com"
] | raczynski90@gmail.com |
8c57a94a058a1fdb2193f56c4dd14749bf638001 | 777cb3a473ae30aca9a8fad8ebc2622530044ca9 | /hw_54.py | c91a8ebd49976ab093407c3b00c7ae17e8cbad34 | [] | no_license | catman91/working-with-files | c9669f434e0a7724692f27cd83282b6d8e6f47f3 | 2f63b7859a60021657ecb4ad999ab7c496761713 | refs/heads/main | 2023-03-23T09:32:42.595954 | 2021-03-18T14:34:15 | 2021-03-18T14:34:15 | 349,105,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | # Создать (не программно) текстовый файл со следующим содержимым:
# One — 1
# Two — 2
# Three — 3
# Four — 4
# Необходимо написать программу, открывающую файл на чтение
# и считывающую построчно данные. При этом английские
# числительные должны заменяться на русские.
# Новый блок строк должен записываться в новый текстовый файл.
rus = {'One' : 'Один', 'Two' : 'Два', 'Three' : 'Три', 'Four' : 'Четыре'}
new_file = []
with open('file_4.txt', 'r') as file_obj:
#content = file_obj.read().split('\n')
for i in file_obj:
i = i.split(' ', 1)
new_file.append(rus[i[0]] + ' ' + i[1])
print(new_file)
with open('file_4_new.txt', 'w') as file_obj_2:
file_obj_2.writelines(new_file)
| [
"goodcatman@icloud.com"
] | goodcatman@icloud.com |
c30481163a0125f2a4fcfe811a06aab1936fa465 | 2540810b9ecc1858eafab7aef2f38cacaee4f7b4 | /quiz/migrations/0010_auto_20170810_0912.py | dc14cfa32692e1c4d9e47b942101befdf8fd7193 | [] | no_license | nemion/applekermittango | 28a9606cc6a07908483bc99731f8d3efc01ce121 | f37240d6ff63c6ac2c2105a6167b3f80c625f69c | refs/heads/master | 2021-06-20T19:24:39.484723 | 2017-08-12T12:56:59 | 2017-08-12T12:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-10 02:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0009_section_number'),
]
operations = [
migrations.RemoveField(
model_name='result',
name='manual',
),
migrations.AddField(
model_name='result',
name='final_score',
field=models.BooleanField(default=True),
),
]
| [
"dion_sonnya@yahoo.com"
] | dion_sonnya@yahoo.com |
bc3d996e3eb8b5530fc2c7b8df885a535c382693 | 2fca3dc3f6dcedb5466ddebfe1bd3c4542bdcb9b | /prediccion/modelo/resta_end_start.py | 6e35908fc9501abf10cbe12a7d45a232675a30d1 | [] | no_license | Gnahue/sanFranciscoBiclas | 753060bff207368f260ef0223aed804210ed1cb4 | 21cdbde130fbacac0e71faf0349868180fe6bda8 | refs/heads/master | 2020-05-24T20:21:49.910503 | 2017-06-15T02:29:26 | 2017-06-15T02:29:26 | 84,876,495 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | import pandas as pd
import numpy as np
import matplotlib as plt
df = pd.read_csv("Data/trip_test.csv")
df.start_date = pd.to_datetime(df.start_date, format='%m/%d/%Y %H:%M')
df.end_date = pd.to_datetime(df.end_date, format='%m/%d/%Y %H:%M')
df['duration'] = (df.end_date - df.start_date).dt.total_seconds()
df['duration'] = df['duration'].apply(np.round).astype(int)
# Suma o resta una hora si hubo cambio horario
df.loc[(df.start_date < '2013-11-03 02:00:00') & (df.end_date > '2013-11-03 02:00:00'), ['duration']] -= 3600
df.loc[(df.start_date < '2014-11-02 02:00:00') & (df.end_date > '2014-11-02 02:00:00'), ['duration']] -= 3600
df.loc[(df.start_date < '2014-03-09 03:00:00') & (df.end_date > '2014-03-09 03:00:00'), ['duration']] += 3600
df.loc[(df.start_date < '2015-03-08 03:00:00') & (df.end_date > '2015-03-08 03:00:00'), ['duration']] += 3600
print ("-----------")
print (df.loc[:,['id','duration']])
df.loc[:,['id','duration']].to_csv('resta.csv', index=False) | [
"maxicc4@gmail.com"
] | maxicc4@gmail.com |
0d5f909c4c8564fa9efe48af8add8ad2d1426f3d | 5be0c4f657291711f3a5c73705066249b144c4cc | /neural_net.py | 82637ab66a056a51d4778eadaf62068e09a61b58 | [] | no_license | droftware/smai-assignment1 | cb6046178d0af7a7a258332137a0c3c6b89b5c6f | ef478fd4dca4d5e82583743a91bbe90a4f268f7c | refs/heads/master | 2021-05-06T05:52:20.433779 | 2017-12-23T06:36:09 | 2017-12-23T06:36:09 | 115,176,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,176 | py | #Akshat Tandon 201503001
import numpy as np
import random
import math
def read_file(name):
    """Parse an optdigits-orig file and return samples for digits 0, 1 and 5.

    The file format is blocks of 32 lines of 32 binary characters followed by
    a single-character line holding the digit label.  Each kept 32x32 bitmap
    is downsampled by taking every 4th row and every 4th column, yielding a
    flat list of 64 ints (0/1).

    Returns:
        (digits, grids): list of int labels and the parallel list of
        64-element 0/1 feature vectors.
    """
    numbersGrid = []
    numbersDigit = []
    currentNumber = []
    # FIX: use a context manager so the file handle is always closed (the
    # original leaked it), and plain 'r' mode — the 'U' flag was removed in
    # Python 3.11 and universal newlines are the default anyway.
    with open(name, 'r') as f:
        for line in f:
            line = line.strip()
            length = len(line)
            # 32-char lines are bitmap rows; 1-char lines are the label that
            # terminates the current bitmap block.
            if length == 32 or length == 1:
                if length == 32:
                    currentNumber.append(line)
                else:
                    numbersGrid.append(currentNumber)
                    numbersDigit.append(line)
                    currentNumber = []
    sampledGrid = []
    cnum = ''
    sampleNumbers = []
    for i in range(len(numbersDigit)):
        # Keep only the three classes this classifier distinguishes.
        if numbersDigit[i] == '0' or numbersDigit[i] == '1' or numbersDigit[i] == '5':
            sampleNumbers.append(int(numbersDigit[i]))
            j = 0
            while j < 32:
                # Every 4th row, every 4th column -> 8x8 = 64 features.
                cnum += numbersGrid[i][j][::4]
                j += 4
            cnumList = [int(x) for x in list(cnum)]
            sampledGrid.append(cnumList)
            cnum = ''
    return (sampleNumbers, sampledGrid)
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x).

    FIX: the original computed e**x directly, which raises OverflowError for
    x greater than ~710.  This form only ever exponentiates a non-positive
    value, so it is numerically stable for any float x.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    ex = math.exp(x)
    return ex / (1.0 + ex)
def dif_sigmoid(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
def target_value(digit):
    """Return the 2-bit target vector encoding a digit class.

    Encoding: 0 -> [0, 0], 1 -> [0, 1], 5 -> [1, 0].

    Raises:
        ValueError: for any other digit.  (FIX: the original printed an
        error and implicitly returned None, which caused a cryptic TypeError
        later when the caller indexed the result.)
    """
    if digit == 0:
        return np.array([0, 0])
    elif digit == 1:
        return np.array([0, 1])
    elif digit == 5:
        return np.array([1, 0])
    else:
        raise ValueError('ERROR: Num not found: ' + str(digit))
def binary_to_digit(z0, z1):
    """Decode the 2-bit network output back into a digit (inverse of
    target_value): (0,0) -> 0, (0,1) -> 1, (1,0) -> 5.

    Raises:
        ValueError: for the unused code (1,1).  (FIX: the original printed
        an error and implicitly returned None.)
    """
    if z0 == 0 and z1 == 0:
        return 0
    elif z0 == 0 and z1 == 1:
        return 1
    elif z0 == 1 and z1 == 0:
        return 5
    else:
        raise ValueError('ERROR: Not able to convert from binary: ' + str((z0, z1)))
def feed_forward(x, wh, wo, nh, no):
    """Forward pass through a single-hidden-layer network.

    x  : input feature vector (length d)
    wh : hidden weights, shape (nh, d+1) — column 0 is the bias weight
    wo : output weights, shape (no, nh+1) — column 0 is the bias weight
    Returns (z, y, neth, neto): output activations (np.array), hidden
    activations (list), and the raw net inputs of each layer (lists).
    """
    # Prepend the bias input (always 1) to the feature vector.
    aug_x = np.append(1, x)
    neth = [aug_x.dot(wh[j, :]) for j in range(nh)]
    y = [sigmoid(net) for net in neth]
    # Hidden activations augmented with the bias neuron feed the output layer.
    aug_y = np.array([1] + y)
    neto = [aug_y.dot(wo[k, :]) for k in range(no)]
    z = np.array([sigmoid(net) for net in neto])
    return z, y, neth, neto
def stochastic_backprop(digits, samples, d, nh, no, theta, eta):
wh = np.random.rand(nh, d+1)
wo = np.random.rand(no, nh+1)
num_samples = len(samples)
# print 'Num samples: ', num_samples
count = 0
delo = []
delk = []
while True:
idx = random.randint(0, num_samples - 1)
digit = digits[idx]
t = target_value(digit)
x = np.array(samples[idx])
z, y, neth, neto = feed_forward(x, wh, wo, nh, no)
aug_x = np.append(1, x)
yk = [1]
yk = yk + y
# y.insert(0, 1)
aug_y = np.array(yk)
# x = aug_x
for k in range(no):
dif = t[k] - z[k]
fdash = dif_sigmoid(neto[k])
dell = dif * fdash
delo.append(dell)
for j in range(nh+1):
delta = eta * dell * aug_y[j]
wo[k, j] = wo[k, j] + delta
for j in range(nh):
fdash = dif_sigmoid(neth[j])
sigma = 0
for k in range(no):
sigma += (wo[k, j] * delo[k])
dell = fdash * sigma
for i in range(d+1):
delta = eta * dell * aug_x[i]
wh[j, i] = wh[j, i] + delta
z, y, neth, neto = feed_forward(x, wh, wo, nh, no)
diff = t - z
mod = np.linalg.norm(diff)
j = 0.5 * mod * mod
if j < theta:
print j
break
count += 1
print 'Count: ', count
return wo,wh
def approx(x):
	# Threshold a network output to a hard 0/1 bit (cutoff at 0.5).
	# NOTE(review): with the tanh-style hidden activation (iactivation 2 in
	# feed_forward) outputs can fall in (-1, 1), so the warning below fires
	# for legitimately negative activations; the value is still thresholded.
	if x< 0 or x >1:
		print 'ERROR: Value approximated greater than 1: ', x
	if x < 0.5:
		return 0
	else:
		return 1
def classify(sample, wh, wo, nh, no):
    """Run one sample through the trained network and decode the predicted
    digit (0, 1 or 5) from the two thresholded output bits."""
    feature_vec = np.array(sample)
    z, _, _, _ = feed_forward(feature_vec, wh, wo, nh, no)
    bit0 = approx(z[0])
    bit1 = approx(z[1])
    return binary_to_digit(bit0, bit1)
def cross_validate(digits, samples, wh, wo, nh, no):
	# Classify every validation sample with the trained weights and print
	# the percentage that match their true label.
	length = len(digits)
	correct = 0
	for i in range(length):
		number = classify(samples[i], wh, wo, nh, no)
		if number == digits[i]:
			correct += 1
	fr = correct * 1.0/length
	print 'Correctly classified:', fr * 100
def main():
	# Network/training hyper-parameters:
	d = 64        # input features (8x8 downsampled bitmap)
	nh = 20       # hidden neurons
	no = 2        # output bits (see target_value encoding)
	theta = 0.002 # stopping threshold on per-sample squared error
	eta = 0.5     # learning rate
	# Train on the optdigits training split...
	digits, samples = read_file('optdigits-orig.tra')
	wo, wh = stochastic_backprop(digits, samples, d, nh, no, theta, eta)
	# ...then report accuracy on the held-out cross-validation split.
	cvDigits, cvSamples = read_file('optdigits-orig.cv')
	cross_validate(cvDigits, cvSamples, wh, wo, nh, no)
# Entry point: train the digit classifier and cross-validate it.
if __name__ == "__main__":
	main()
| [
"akshat.tandon@research.iiit.ac.in"
] | akshat.tandon@research.iiit.ac.in |
730fd1dc97e735fadbcfed6bc54206bce73c99c2 | 7c69f22bb585fe697dd50dc8dc0756ff59fe00e1 | /lib/python3.6/struct.py | 05d9e4a2b61a77caf771c37420202eeeb3bd1ad7 | [] | no_license | elllot/flask-polling-site | 4567d8fcaee056b2628b2a94b5eca87ced999435 | a4ce7ba8528140477404a1283d7ec9e67a1f350f | refs/heads/master | 2022-10-15T00:31:23.920211 | 2017-10-29T20:01:21 | 2017-10-29T20:01:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /Users/Elli/anaconda3/lib/python3.6/struct.py | [
"syunelli@gmail.com"
] | syunelli@gmail.com |
c026269217beb1cab60358ece205f104ec4666ea | a178eadb5f56662ddf2a95c013eac63ba63c1612 | /SMAI_Codes/intrusion_detection/SVM and NN/intrusiondetection_SVM_v4_allFeat.py | 4bd3d8cfb1761ef1de7f4389f44877016beeae48 | [] | no_license | MurtuzaBohra/General_Implementation | ae55108f0c4952db699f0292302e8657396432de | 4243d0e62ed6cb2b05dd368991cd64a07cfd688f | refs/heads/master | 2022-04-15T17:01:40.411331 | 2020-04-04T18:53:40 | 2020-04-04T18:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,234 | py | #Balanced class distribution for each fold
import os
import sys
import math
import time
import random
import numpy as np
from sklearn import svm
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
from sklearn.metrics import precision_recall_fscore_support
# Train an RBF-kernel SVM intrusion detector with k-fold cross-validation:
# every foldN.txt in the input directory except the chosen test fold is used
# for training.  Each line holds 41 whitespace-separated features followed by
# the class label.  (Python 2 script — note the statement-form prints.)
inputDirPath = sys.argv[1]  # directory containing foldN.txt files
testFold = sys.argv[2]      # index of the fold held out for testing

print ('Preparing Data')
strX_train = []
strY_train = []
strX_test = []
strY_test = []

testFoldFile = 'fold' + testFold + '.txt'
fileList = os.listdir(inputDirPath)
for currFile in fileList:
	if(currFile == testFoldFile):
		f = open(os.path.join(inputDirPath, currFile))
		for line in f:
			lineParts = line.split()
			strX_test.append(lineParts[0:41])
			strY_test.append(lineParts[41])
		f.close()
	else:
		f = open(os.path.join(inputDirPath, currFile))
		for line in f:
			lineParts = line.split()
			strX_train.append(lineParts[0:41])
			strY_train.append(lineParts[41])
		f.close()

# Standardize features using statistics fit on the training folds only.
X_train = np.array(strX_train)
X_train = X_train.astype('float')
scaler = preprocessing.StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
print X_train.shape

# Encode string class labels as integers.
Y_train = np.array(strY_train)
encoder = LabelEncoder()
encoder.fit(Y_train)
Y_train = encoder.transform(Y_train)
print Y_train.shape

# Apply the same scaler/encoder (no refitting) to the test fold.
X_test = np.array(strX_test)
X_test = X_test.astype('float')
X_test = scaler.transform(X_test)
print X_test.shape
Y_test = np.array(strY_test)
Y_test = encoder.transform(Y_test)
print Y_test.shape

print ('Training model')
#Initializing SVM (one-vs-one multi-class decision function)
clf = svm.SVC(kernel = 'rbf', decision_function_shape = 'ovo')
#Training model (timed)
s = time.time()
clf.fit(X_train, Y_train)
e = time.time()
print 'Training time: ' + str(e - s)
print str(len(clf.support_vectors_))

#Testing: report per-class then macro-averaged precision/recall/F1.
print ('Testing')
Y_pred = clf.predict(X_test)
print str(encoder.classes_)
precision, recall, f1, support = precision_recall_fscore_support(Y_test, Y_pred)
print('Precision: ' + str(precision) + '\tRecall: ' + str(recall) + '\tF1: ' + str(f1))
precision, recall, f1, support = precision_recall_fscore_support(Y_test, Y_pred, average='macro')
print('Precision: ' + str(precision) + '\tRecall: ' + str(recall) + '\tF1: ' + str(f1))
"murtuza88msj@gmail.com"
] | murtuza88msj@gmail.com |
7bdf36160eabaf513f0ae810c19981759f4011d2 | 46e4ca0927313670438ee0a01e13347cf44f0fa9 | /framework/neural_network.py | f3ef1cb96c226123d0832a2deba3524c2b904d10 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | CCSI-Toolset/drmbuilder | 7f0d7d571f13fe3cb1066b9d9292aa2641e59a1d | 6664b93c450bf570b85abdda41b975de7f6ec636 | refs/heads/master | 2021-03-19T15:43:06.634396 | 2018-03-29T00:33:09 | 2018-03-29T00:33:09 | 122,409,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,928 | py | # neural_network.py
from math import exp
class NeuralNetwork(object):
    """A trained fully-connected feed-forward network (Python 2 code).

    Holds topology, activation settings, flattened connection weights and
    the input/output normalization statistics, and can serialize itself to
    a dict, load from a flat tuple, export to MATLAB syntax, or run a
    forward pass via predict().
    """

    def __init__(self):
        self.ninput = 1            # number of input variables
        self.noutput = 1           # number of output variables
        self.nlayer_hid = 1        # number of hidden layers
        self.nneuron_layer_hid = []  # number of neurons in hidden layers excluding the bias neuron
        self.steepness_hid = 1.0   # activation steepness, hidden layers
        self.steepness_out = 1.0   # activation steepness, output layer
        self.iactivation_hid = 2   # hidden activation id (2 = tanh-like; see predict)
        self.iactivation_out = 0   # output activation id (0 = linear)
        self.weight = []           # flattened connection weights, layer by layer
        self.mean_in = []          # per-input mean used for z-score scaling
        self.sigma_in = []         # per-input std dev used for z-score scaling
        self.mean_out = []         # per-output mean used to de-scale predictions
        self.sigma_out = []        # per-output std dev used to de-scale predictions

    def to_dictionary(self):
        """Serialize all fields to a plain dict (inverse of from_dictionary)."""
        dict_data = dict()
        dict_data['ninput'] = self.ninput
        dict_data['noutput'] = self.noutput
        dict_data['nlayer_hid'] = self.nlayer_hid
        dict_data['nneuron_layer_hid'] = self.nneuron_layer_hid
        dict_data['steepness_hid'] = self.steepness_hid
        dict_data['steepness_out'] = self.steepness_out
        dict_data['iactivation_hid'] = self.iactivation_hid
        dict_data['iactivation_out'] = self.iactivation_out
        dict_data['weight'] = self.weight
        dict_data['mean_in'] = self.mean_in
        dict_data['sigma_in'] = self.sigma_in
        dict_data['mean_out'] = self.mean_out
        dict_data['sigma_out'] = self.sigma_out
        return dict_data

    def from_dictionary(self, dict_data):
        """Populate all fields from a dict produced by to_dictionary()."""
        self.ninput = dict_data['ninput']
        self.noutput = dict_data['noutput']
        self.nlayer_hid = dict_data['nlayer_hid']
        self.nneuron_layer_hid = dict_data['nneuron_layer_hid']
        self.steepness_hid = dict_data['steepness_hid']
        self.steepness_out = dict_data['steepness_out']
        self.iactivation_hid = dict_data['iactivation_hid']
        self.iactivation_out = dict_data['iactivation_out']
        self.weight = dict_data['weight']
        self.mean_in = dict_data['mean_in']
        self.sigma_in = dict_data['sigma_in']
        self.mean_out = dict_data['mean_out']
        self.sigma_out = dict_data['sigma_out']

    def set_from_tuple(self, dat):
        """Populate all fields from a flat tuple.

        Layout: ninput, noutput, nlayer_hid, one size per hidden layer,
        activation ids, steepnesses, connection count, the weights, then the
        input/output means followed by the input/output sigmas.
        """
        self.ninput = dat[0]
        self.noutput = dat[1]
        self.nlayer_hid = dat[2]
        self.nneuron_layer_hid = [None]*self.nlayer_hid
        icount = 3
        for i in xrange(self.nlayer_hid):
            self.nneuron_layer_hid[i] = dat[icount]
            icount += 1
        self.iactivation_hid = dat[icount]
        icount += 1
        self.iactivation_out = dat[icount]
        icount += 1
        self.steepness_hid = dat[icount]
        icount += 1
        self.steepness_out = dat[icount]
        icount += 1
        nconnection = dat[icount]
        icount += 1
        self.weight = [None]*nconnection
        for i in xrange(nconnection):
            self.weight[i] = dat[icount]
            icount += 1
        self.mean_in = [None]*self.ninput
        self.sigma_in = [None]*self.ninput
        self.mean_out = [None]*self.noutput
        self.sigma_out = [None]*self.noutput
        for i in xrange(self.ninput):
            self.mean_in[i] = dat[icount]
            icount += 1
        for i in xrange(self.noutput):
            self.mean_out[i] = dat[icount]
            icount += 1
        for i in xrange(self.ninput):
            self.sigma_in[i] = dat[icount]
            icount += 1
        for i in xrange(self.noutput):
            self.sigma_out[i] = dat[icount]
            icount += 1

    def write_to_matlab_file(self, fout, iann=0):
        """Write this network as MATLAB assignments NN(iann+1).* to fout.

        iann is the index of output for a DABNet model; use the default for
        a NARMA model.  MATLAB arrays are 1-based, hence the i+1 offsets.
        """
        # iann is the index of output for DABNet model, use default if NARMA model
        iann += 1
        line = "NN({0}).nx = {1};\n".format(iann, self.ninput)
        fout.write(line)
        line = "NN({0}).ny = {1};\n".format(iann, self.noutput)
        fout.write(line)
        line = "NN({0}).nhid = {1};\n".format(iann, self.nlayer_hid)
        fout.write(line)
        for i in xrange(self.nlayer_hid): # nlayer_hid is always 1 for NARMA or DABNet and nneuron_hid is not array here
            line = "NN({0}).nneuron_hid = {1};\n".format(iann, self.nneuron_layer_hid[i])
            fout.write(line)
        line = "NN({0}).iactivation_hidden = {1};\n".format(iann, self.iactivation_hid)
        fout.write(line)
        line = "NN({0}).iactivation_output = {1};\n".format(iann, self.iactivation_out)
        fout.write(line)
        line = "NN({0}).steepness_hidden = {1};\n".format(iann, self.steepness_hid)
        fout.write(line)
        line = "NN({0}).steepness_output = {1};\n".format(iann, self.steepness_out)
        fout.write(line)
        nconnection = len(self.weight)
        for i in xrange(nconnection):
            line = "NN({0}).weight({1}) = {2};\n".format(iann, i+1, self.weight[i])
            fout.write(line)
        for i in xrange(self.ninput):
            line = "NN({0}).mean_in({1}) = {2};\n".format(iann, i+1, self.mean_in[i])
            fout.write(line)
        for i in xrange(self.noutput):
            line = "NN({0}).mean_out({1}) = {2};\n".format(iann, i+1, self.mean_out[i])
            fout.write(line)
        for i in xrange(self.ninput):
            line = "NN({0}).sigma_in({1}) = {2};\n".format(iann, i+1, self.sigma_in[i])
            fout.write(line)
        for i in xrange(self.noutput):
            line = "NN({0}).sigma_out({1}) = {2};\n".format(iann, i+1, self.sigma_out[i])
            fout.write(line)

    def predict(self, xinput, iscale_input, iscale_output):
        """Forward pass through the network for one input vector.

        xinput: list of input
        iscale_input: 0=no scaling of input, 1=scaling input (z-score)
        iscale_output: 0=no scaling of output, 1=scaling output (de-normalize)
        Returns a tuple of the noutput predicted values.
        """
        # calculate total number of neurons (inputs + outputs + hidden neurons
        # + one bias neuron per layer boundary)
        nneuron = self.ninput + self.noutput + self.nlayer_hid + 2
        for i in xrange(self.nlayer_hid):
            nneuron += self.nneuron_layer_hid[i]
        y_neuron = [None]*nneuron
        if iscale_input == 1: # scale input
            for i in xrange(self.ninput):
                y_neuron[i] = (xinput[i] - self.mean_in[i])/self.sigma_in[i]
        else: # does not scale input
            for i in xrange(self.ninput):
                y_neuron[i] = xinput[i]
        # bias neuron of the input layer
        y_neuron[self.ninput] = 1.0
        ianterior1st = 0
        nanterior_with_bias = self.ninput + 1
        iconn = 0
        ineuron = 0
        # Walk layer by layer; the last iteration is the output layer.
        for i in xrange(self.nlayer_hid+1):
            if i < self.nlayer_hid:
                ncurrent_without_bias = self.nneuron_layer_hid[i]
            else:
                ncurrent_without_bias = self.noutput
            icurrent1st = ianterior1st + nanterior_with_bias
            if i < self.nlayer_hid:
                iactivation = self.iactivation_hid
                steepness = self.steepness_hid
            else:
                iactivation = self.iactivation_out
                steepness = self.steepness_out
            for j in xrange(ncurrent_without_bias):
                ineuron = icurrent1st + j
                sum_x = 0
                # Weighted sum over the previous layer (including its bias).
                for k in xrange(nanterior_with_bias):
                    sum_x += y_neuron[ianterior1st + k]*self.weight[iconn]
                    iconn += 1
                sum_x *= steepness
                if iactivation == 2:
                    # tanh-style activation 2/(1+e^-2x) - 1, clamped to avoid
                    # overflow for strongly negative inputs.
                    if sum_x < -100:
                        sum_x = -1.0
                    else:
                        sum_x = 2/(1+exp(-2*sum_x)) - 1
                    y_neuron[ineuron] = sum_x
                else:
                    # linear activation
                    y_neuron[ineuron] = sum_x
            # bias neuron of the current layer
            ineuron += 1
            y_neuron[ineuron] = 1.0
            ianterior1st = icurrent1st
            nanterior_with_bias = ncurrent_without_bias + 1
        # scale output variable
        youtput = [None]*self.noutput
        ineuron = icurrent1st
        if iscale_output == 1: # scale output
            for i in xrange(self.noutput):
                youtput[i] = y_neuron[ineuron+i]*self.sigma_out[i] + self.mean_out[i]
        else: # not scale output
            for i in xrange(self.noutput):
                youtput[i] = y_neuron[ineuron+i]
        return tuple(youtput)
"jinliang.ma@netl.doe.gov"
] | jinliang.ma@netl.doe.gov |
6cc279aca08425752f46c34dd433ac0d8c904369 | 94c8f0b09ced7ae86fba0d09faf4310e508c18e5 | /scaler/dp2/dp4/largest_rectangle.py | ab95e9ec10944ed6e47c17d22cc8956dfee29a56 | [] | no_license | navkant/ds_algo_practice | 6e7dd427df6ac403ac23fa68b079b162b839447a | a2b762d08b151f6dbbc12d76dd930f6cd7b9017d | refs/heads/master | 2023-06-24T02:56:25.886991 | 2021-06-13T03:42:24 | 2021-06-13T03:42:24 | 376,431,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,061 | py | import sys
class LargestRectangle:
    """Largest rectangle under a histogram, using nearest-strictly-smaller
    element stacks to derive the widest span each bar can fill."""

    def nearest_minimum_left(self, heights):
        """For each bar, the index of the closest strictly smaller bar to its
        left, or -1 when none exists."""
        result = []
        stack = []
        for idx, h in enumerate(heights):
            # Pop every bar at least as tall as the current one.
            while stack and heights[stack[-1]] >= h:
                stack.pop()
            result.append(stack[-1] if stack else -1)
            stack.append(idx)
        return result

    def nearest_minimum_right(self, heights):
        """For each bar, the index of the closest strictly smaller bar to its
        right, or -1 when none exists."""
        n = len(heights)
        result = [-1] * n
        stack = []
        for idx in range(n - 1, -1, -1):
            while stack and heights[stack[-1]] >= heights[idx]:
                stack.pop()
            if stack:
                result[idx] = stack[-1]
            stack.append(idx)
        return result

    def largest_rectangle(self, heights):
        """Maximum rectangular area that fits under the histogram."""
        n = len(heights)
        left = self.nearest_minimum_left(heights)
        right = self.nearest_minimum_right(heights)
        best = 0
        for idx in range(n):
            # Width spans from just past the nearest smaller bar on the left
            # to just before the nearest smaller bar on the right.
            if left[idx] == -1 and right[idx] == -1:
                span = n
            elif right[idx] == -1:
                span = n - left[idx] - 1
            elif left[idx] == -1:
                span = right[idx]
            else:
                span = right[idx] - left[idx] - 1
            best = max(best, heights[idx] * span)
        return best
class Solution:
    # @param A : list of list of integers (binary matrix; MUTATED in place)
    # @return an integer: area of the largest all-ones rectangle
    def maximalRectangle(self, A):
        # Turn each row into a histogram of consecutive-ones column heights,
        # then reduce to the largest-rectangle-in-histogram problem per row.
        n = len(A[0])
        m = len(A)
        for i in range(1, m):
            for j in range(n):
                if A[i][j] == 0:
                    continue
                # Extend the column of ones from the row above.
                A[i][j] = A[i][j] + A[i-1][j]
        maxx_area = sys.maxsize * -1
        obj = LargestRectangle()
        for row in A:
            current_area = obj.largest_rectangle(row)
            maxx_area = max(maxx_area, current_area)
        return maxx_area
# Demo driver: run maximalRectangle on a sample matrix.
# NOTE(review): the result `ans` is computed but never printed.
if __name__ == '__main__':
    a = [[1, 1, 1, 1, 0, 1, 1],
         [0, 1, 1, 1, 1, 1, 0],
         [1, 0, 0, 1, 1, 1, 1],
         [1, 0, 0, 1, 1, 1, 1],
         [1, 0, 0, 1, 1, 1, 1]]
    obj = Solution()
    ans = obj.maximalRectangle(a)
| [
"navkanttyagi@MacBook-Air.local"
] | navkanttyagi@MacBook-Air.local |
d5859942e60b62359607caaba2b7f25a68d24463 | 547cb84e840764aeab7a049698ffcaba58da8900 | /app.py | 0c41166be9693891479d444306d9cca3a0b0644e | [] | no_license | satzumi/store-rest-api | 3ea515531ca6de3dc719da577e533ce3ffdff6b1 | 56f2fc8df9cc5aecd9f2eff63e036859ce1424e8 | refs/heads/master | 2023-07-22T15:56:16.438977 | 2021-09-01T08:08:46 | 2021-09-01T08:08:46 | 401,949,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from flask import Flask
from flask_restful import Resource, Api
from flask_jwt import JWT
from security import authenticate,identity
from resources.user import UserRegister
from resources.item import Item,Items
from resources.store import Store,StoreList
# Flask-RESTful store API: wires up SQLite storage, JWT auth and the
# item/store resources.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
# SECURITY NOTE(review): hard-coded secret key — should come from an
# environment variable in any real deployment.
app.secret_key = 'jose'

jwt = JWT(app,authenticate, identity) # create endpoint /auth

api = Api(app)
api.add_resource(Item,'/item/<string:name>')
api.add_resource(Items,'/items')
api.add_resource(UserRegister, '/register')
api.add_resource(Store,'/store/<string:name>')
api.add_resource(StoreList,'/stores')

# Run the dev server only when executed directly (not when imported).
if __name__ == '__main__':
    app.run(port=5000,debug=True)
| [
"satindarkumar@gmail.com"
] | satindarkumar@gmail.com |
fd5d20da9b0ffd715c0a27df62cb9aa1293849d8 | 1eb7fa8b1745d4e51cefb4eceb44621862516aa6 | /Company Interview/FB/regularExpressionMatching.py | 7c2ad7787f3d25cd13d93fb3a68ba0ddc93ad340 | [] | no_license | geniousisme/CodingInterview | bd93961d728f1fe266ad5edf91adc5d024e5ca48 | a64bca9c07a7be8d4060c4b96e89d8d429a7f1a3 | refs/heads/master | 2021-01-10T11:15:31.305787 | 2017-03-06T00:03:13 | 2017-03-06T00:03:13 | 43,990,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | class Solution(object):
def isMatch(self, string, pattern):
if not pattern:
return not string
if len(pattern) == 1 or pattern[1] != "*":
if len(string) > 0 and (pattern[0] == string[0] or pattern[0] == '.'):
return self.isMatch(string[1:], pattern[1:])
else:
while len(string) > 0 and (pattern[0] == string[0] or pattern[0] == '.'):
if self.isMatch(string, pattern[2:]):
return True
string = string[1:]
return self.isMatch(string, pattern[2:])
class Solution(object):
    def isMatch(self, string, pattern):
        """Regex match with '.' and '*' via bottom-up dynamic programming.

        dp[i][j] is True iff the first i chars of `string` are matched by
        the first j chars of `pattern`.
        """
        n_s, n_p = len(string), len(pattern)
        dp = [[False] * (n_p + 1) for _ in range(n_s + 1)]
        dp[0][0] = True
        # Empty string vs patterns like "a*", "a*b*", ...: a trailing '*'
        # can erase its preceding element.
        for j in range(2, n_p + 1):
            if pattern[j - 1] == "*":
                dp[0][j] = dp[0][j - 2]
        for i in range(1, n_s + 1):
            for j in range(1, n_p + 1):
                p_char, s_char = pattern[j - 1], string[i - 1]
                if p_char == ".":
                    dp[i][j] = dp[i - 1][j - 1]
                elif p_char == "*":
                    # one occurrence | zero occurrences | extend a repeat
                    dp[i][j] = (dp[i][j - 1]
                                or dp[i][j - 2]
                                or (dp[i - 1][j]
                                    and (s_char == pattern[j - 2] or pattern[j - 2] == '.')))
                else:
                    dp[i][j] = dp[i - 1][j - 1] and s_char == p_char
        return dp[-1][-1]
| [
"chia-hao.hsu@aiesec.net"
] | chia-hao.hsu@aiesec.net |
351b0f50ae1d0e62a629ccea709b9177ceed6a1b | f121f2fbd1938643a5875ac86ac990f23648b59d | /auto_drive/src/my_driver.py | 8b2757d6241805d3c9e99edf8587e35734cd8868 | [] | no_license | ryunchang/2020_KMU_Xytron_contest | ac5b74d00dca40284ba76f0287905cfe10650a24 | 62c912c83380ceb1793052c37fede08e964d0fcd | refs/heads/master | 2023-01-06T18:57:57.097335 | 2020-11-06T08:52:48 | 2020-11-06T08:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,676 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import Int32MultiArray, Header, ColorRGBA
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
import cv2
import numpy as np
import matplotlib.pyplot as plt
import copy
WEIGHT = 800
LIMIT_ANGLE = 50
n_win = 60 # 좌,우 차선별 탐지 윈도우의 개수, 적어지면 샘플링이 적어지는 샘이라서 급커브 같은데서 영역을 정확히 못잡아냄
margin = 25 # 윈도우 margin
min_pix = 10 # 유효하다고 판단할 때 윈도우 박스 안 최소 픽셀
rate_of_validWindow = 0.65 # 유효한 차선이라고 판단하는 기준
class BirdEyeView() :
    """Perspective ("bird's-eye") transform between the dash-camera view and
    a top-down road view, with fixed warp points for a 640x480 frame."""
    def __init__(self, img) :
        self.__img = img
        self.img_h = self.__img.shape[0]
        self.img_w = self.__img.shape[1]
        self.__src = np.float32([[-50, self.img_h], [195, 280], [460, 280], [self.img_w+150 , self.img_h]]) ## warp source points in the original image
        self.__dst = np.float32([[100,480] , [100,0] , [540, 0],[540,480]]) ## points in the result image that src maps onto
    def setROI(self,frame) :
        # Draw the source trapezoid on the frame for visual debugging.
        self.__roi = np.array([self.__src]).astype(np.int32)
        return cv2.polylines(frame, np.int32(self.__roi),True,(255,0,0),10) ## draw a blue outline with thickness 10
    def warpPerspect(self,frame) :
        M = cv2.getPerspectiveTransform(self.__src,self.__dst) ## get the perspective-transform matrix
        return cv2.warpPerspective(frame, M, (self.img_w, self.img_h), flags=cv2.INTER_LINEAR) ## warp into the bird's-eye view
    @property
    def src(self):
        return self.__src
    @property
    def dst(self):
        return self.__dst
class LaneDetector() :
    """Sliding-window lane detection on a bird's-eye-view edge image, plus
    polynomial fitting, overlay drawing and curvature estimation."""
    def __init__(self,bev) :
        self.__bev = bev
    def slidingWindows(self, binary_img, draw = True) :
        ## Detect the left and right lane regions with the sliding-windows method.
        histogram = np.sum(binary_img[binary_img.shape[0]*3//5:,:], axis=0) # sum each column over the bottom 2/5 of the image
        width_mid_pt = np.int(histogram.shape[0]/2) ## midpoint of the image width
        left_x_base = np.argmax(histogram[:width_mid_pt]) ## left-lane search base: histogram peak in the left half
        right_x_base = np.argmax(histogram[width_mid_pt:]) + width_mid_pt ## right-lane search base: histogram peak in the right half
        window_height = np.int(binary_img.shape[0]/n_win) ## window height
        non_zero = binary_img.nonzero() ## coordinates of non-zero pixels, as (row-indices, col-indices) arrays
        non_zero_y = np.array(non_zero[0]) ## y coordinates of non-zero pixels
        non_zero_x = np.array(non_zero[1]) ## x coordinates of non-zero pixels
        left_x_current = left_x_base
        right_x_current = right_x_base
        valid_left_line = True
        valid_right_line = True
        left_count = 0
        right_count = 0
        left_lane_indices = []
        right_lane_indices = []
        half_left_lane_indices = []
        half_right_lane_indices = []

        for window in range(n_win):
            ## Window y coordinates, measured from the bottom of the bird's-eye view:
            ## win_y_low is (window+1)*height above the image bottom and
            ## win_y_high is the matching upper bound at window*height.
            win_y_low = binary_img.shape[0] - (window+1)*window_height
            win_y_high = binary_img.shape[0] - window*window_height
            ## Left-lane window x bounds
            win_x_left_low = left_x_current - margin
            win_x_left_high = left_x_current + margin
            ## Right-lane window x bounds
            win_x_right_low = right_x_current - margin
            win_x_right_high = right_x_current + margin

            """
            다음 아래 두 식은 다음과 같은 연산을 진행함.
            non_zero_y 의 모든 좌표 중 현재 window의 y 최소값, 최대값 보다 큰값에 대한 판별을 진행한 TF 테이블을 만들고
            x에 대해서도 같은 방식을 진행하여 TF 테이블을 만든다. 이 값들이 모두 T인 지점들은 1이 나오므로
            해당 점들을 non_zero 로 뽑아내고 x축 값만을 취함
            """
            # (The block string above explains, in Korean, that the next two
            # expressions build boolean masks of non-zero pixels falling
            # inside the current window and extract their indices.)
            good_left_indices = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) & (non_zero_x >= win_x_left_low) & (non_zero_x < win_x_left_high)).nonzero()[0]
            good_right_indices = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) & (non_zero_x >= win_x_right_low) & (non_zero_x < win_x_right_high)).nonzero()[0]
            cv2.rectangle(binary_img, (win_x_left_low, win_y_low), ( win_x_left_high, win_y_high), (255,0,0),1)
            cv2.rectangle(binary_img, (win_x_right_low, win_y_low), ( win_x_right_high, win_y_high), (255,0,0),1)
            ## Append the indices selected above.
            left_lane_indices.append(good_left_indices)
            right_lane_indices.append(good_right_indices)
            if window < n_win//2 :
                half_left_lane_indices.append(good_left_indices)
                half_right_lane_indices.append(good_right_indices)
            ## Update the next window position: recentre on the mean pixel x
            ## when enough pixels were found, otherwise extrapolate from the
            ## previous window's motion (scaled by 1.2).
            if len(good_left_indices) > min_pix :
                pre_left_x_current = copy.deepcopy(left_x_current)
                left_x_current = np.int(np.mean(non_zero_x[good_left_indices]))
                left_count += 1
            else :
                try:
                    diff =int((left_x_current - pre_left_x_current )*1.2)
                except:
                    diff = 0
                pre_left_x_current = copy.deepcopy(left_x_current)
                if np.abs(left_x_current + diff) < binary_img.shape[1]:
                    left_x_current += diff

            if len(good_right_indices) > min_pix :
                pre_right_x_current = copy.deepcopy(right_x_current)
                right_x_current = np.int(np.mean(non_zero_x[good_right_indices]))
                right_count += 1
            else :
                try:
                    diff = int((right_x_current - pre_right_x_current )*1.2)
                except:
                    diff = 0
                pre_right_x_current = copy.deepcopy(right_x_current)
                if np.abs(right_x_current + diff) < binary_img.shape[1]:
                    right_x_current += diff

        ## Concatenate into flat arrays: these are the detected lane-pixel index sets.
        left_lane_indices = np.concatenate(left_lane_indices)
        right_lane_indices = np.concatenate(right_lane_indices)
        half_left_lane_indices = np.concatenate(half_left_lane_indices)
        half_right_lane_indices = np.concatenate(half_right_lane_indices)

        # Extract the pixel positions of the left and right lines.
        left_x = non_zero_x[left_lane_indices]
        left_y = non_zero_y[left_lane_indices]
        right_x = non_zero_x[right_lane_indices]
        right_y = non_zero_y[right_lane_indices]

        ## Fit the collected coordinates with a second-order polynomial.
        left_fit = np.polyfit(left_y, left_x, 2)
        right_fit = np.polyfit(right_y, right_x, 2)

        # When one lane is invalid (too few populated windows), mirror the
        # valid lane's fit, shifted by roughly one lane width (display only).
        if left_count < n_win*rate_of_validWindow :
            valid_left_line = False
            left_fit[:] = right_fit[:]
            left_fit[0] *= 1.1
            left_fit[2] -= 490
        if right_count < n_win*rate_of_validWindow :
            valid_right_line = False
            right_fit[:] = left_fit[:]
            right_fit[0] *= 1.1
            right_fit[2] += 490

        info = {}
        info['left_fit'] = left_fit
        info['right_fit'] = right_fit
        info['non_zero_x'] = non_zero_x
        info['non_zero_y'] = non_zero_y
        info['left_lane_indices'] = left_lane_indices
        info['right_lane_indices'] = right_lane_indices
        info['half_left_lane_indices'] = half_left_lane_indices
        info['half_right_lane_indices'] = half_right_lane_indices
        info['valid_left_line'] = valid_left_line
        info['valid_right_line'] = valid_right_line
        return info

    def drawFitLane(self, frame, binary_warped_frame, info) :
        # Overlay the fitted lane polygon on the original frame and estimate
        # the curvature radius of each lane line in meters.
        height,width = binary_warped_frame.shape
        left_fit = info['left_fit']
        right_fit = info['right_fit']
        nonzerox = info['non_zero_x']
        nonzeroy = info['non_zero_y']
        left_lane_inds = info['left_lane_indices']
        right_lane_inds = info['right_lane_indices']
        half_left_lane_indices = info['half_left_lane_indices']
        half_right_lane_indices = info['half_right_lane_indices']

        M = cv2.getPerspectiveTransform(self.__bev.dst,self.__bev.src) ## matrix for the inverse perspective transform
        ## BirdEyeView performed the src -> dst warp, so restoring the
        ## original viewpoint requires the dst -> src transform.
        plot_y = np.linspace(0,binary_warped_frame.shape[0]-1, binary_warped_frame.shape[0])
        left_fit_x = left_fit[0] * plot_y**2 + left_fit[1] * plot_y +left_fit[2]
        right_fit_x = right_fit[0] * plot_y**2 + right_fit[1] * plot_y + right_fit[2]

        warp_zero = np.zeros_like(binary_warped_frame).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
        ## np.dstack pairs the element-wise values of both matrices into column vectors.
        lefts = np.array([np.transpose(np.vstack([left_fit_x, plot_y]))])
        rights = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, plot_y])))])
        ## np.flipud reverses the row order top-to-bottom.
        points = np.hstack((lefts,rights))

        cv2.fillPoly(color_warp, np.int_([points]),(0,0,255))
        cv2.polylines(color_warp, np.int32([lefts]), isClosed=False, color = (255,255,0),thickness = 10)
        cv2.polylines(color_warp, np.int32([rights]), isClosed=False, color = (255,0,255),thickness = 10)
        cv2.imshow("color_warp", color_warp )
        new = cv2.warpPerspective(color_warp, M, (width, height))
        output = cv2.addWeighted(frame,1,new,0.5,0)

        """
        Calculate radius of curvature in meters
        """
        y_eval = 480 # image height in y

        # Meters-per-pixel conversion factors.
        ym_per_pix = 1.8/280
        xm_per_pix = 0.845/610

        # Extract the left and right lane pixel coordinates.
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]

        # Fit second-order polynomials in world (meter) coordinates.
        left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)

        # Curvature radius at the image bottom from the standard formula
        # R = (1 + (2Ay + B)^2)^(3/2) / (2A).
        left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix+ left_fit_cr[1])**2)**1.5) / (2*left_fit_cr[0])
        right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / (2*right_fit_cr[0])

        return output, left_curverad, right_curverad
def image_processing(img, bev):
    """Pipeline: grayscale -> Gaussian blur -> Canny edges -> bird's-eye warp."""
    gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray_frame, (5, 5), 3)
    edges = cv2.Canny(blurred, 80, 70)
    return bev.warpPerspect(edges)
def show_text_in_rviz(marker_publisher, text):
    # Publish a short-lived floating text marker to RViz for debugging.
    marker = Marker(
                type=Marker.TEXT_VIEW_FACING,
                id=0,
                lifetime=rospy.Duration(1.5),
                pose=Pose(Point(0.5, 0.5, 1.45), Quaternion(0, 0, 0, 1)),
                scale=Vector3(1.00, 1.00, 1.00),
                header=Header(frame_id='base_link'),
                color=ColorRGBA(0.0, 1.0, 0.0, 0.9),
                text=text)
    marker_publisher.publish(marker)
def pub_motor(Angle, Speed):
    # Publish [steering angle, speed] on the global xycar motor topic
    # (`pub` is set up in start()).
    drive_info = [Angle, Speed]
    drive_info = Int32MultiArray(data = drive_info)
    pub.publish(drive_info)
def start():
    """Main driving loop: read video frames, detect lanes, publish steering.

    Publishes Int32MultiArray [angle, speed] on 'xycar_motor_msg' and shows
    the current curvature as an RViz text marker.  Runs until the video ends
    or a key is pressed in an OpenCV window.
    """
    global pub
    rospy.init_node('my_driver')
    pub = rospy.Publisher('xycar_motor_msg', Int32MultiArray, queue_size=1)
    marker_publisher = rospy.Publisher('visualization_marker', Marker, queue_size=5)
    rate = rospy.Rate(30)
    Speed = 20
    # video path (hard-coded simulator track)
    capture = cv2.VideoCapture("/home/yoon/catkin_ws/src/xycar_simul/src/track-s.mkv")
    _, img = capture.read()  # read one frame to size the bird's-eye transform
    bev = BirdEyeView(img)
    ldt = LaneDetector(bev)
    # FIX: final_curvature was only assigned when a lane was valid (the
    # fallback "else" branch was commented out), so the first frame without
    # a detected lane raised NameError.  Start at 0 (straight); frames
    # without lanes keep the last computed steering, as before.
    final_curvature = 0
    while True:
        ret, img = capture.read()
        if ret == False:
            break
        warped_frame = image_processing(img, bev)
        warped_frame2 = bev.warpPerspect(img)
        info = ldt.slidingWindows(warped_frame)
        final_frame, left_curverad, right_curverad = ldt.drawFitLane(img, warped_frame, info)
        # steering command is proportional to curvature = 1/radius
        left_curvature, right_curvature = 1/left_curverad, 1/right_curverad
        if info['valid_left_line'] & info['valid_right_line']:
            final_curvature = WEIGHT*(left_curvature + right_curvature)/2
        elif info['valid_left_line']:
            final_curvature = WEIGHT*left_curvature
        elif info['valid_right_line']:
            final_curvature = WEIGHT*right_curvature
        # clamp to the physical steering limit
        if final_curvature > LIMIT_ANGLE:
            final_curvature = LIMIT_ANGLE
        elif final_curvature < -LIMIT_ANGLE:
            final_curvature = -LIMIT_ANGLE
        cv2.imshow("warped_frame", warped_frame)
        cv2.imshow("warped_frame2", warped_frame2)
        # overlay the curvature value on the output frame
        cv2.putText(final_frame, "Radius of curvature : " + str(final_curvature), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('image', final_frame)
        if cv2.waitKey(33) > 0: break
        show_text_in_rviz(marker_publisher, "1/curvature : " + str(int(final_curvature)))
        pub_motor((final_curvature), Speed)
        rate.sleep()
if __name__ == '__main__':
start()
| [
"ckdtjq202001@gmail.com"
] | ckdtjq202001@gmail.com |
195a360a6648a5e09af02d18b5658a57ef3334fd | 158bee983b2cf6586b64ed499a05eb816f898fda | /aws/pdfbook/browser/__init__.py | 4a7b943a175af8909c0f6e2ca7a0c68515e7b4ea | [] | no_license | collective/aws.pdfbook | 35a8bd05eee4a7e94c6ac8ec415c213a289f664d | e8c72ab4a46cff832994dd3623025be8093dfdda | refs/heads/master | 2023-06-26T13:38:25.735929 | 2018-03-02T12:05:57 | 2018-03-02T12:05:57 | 12,920,175 | 0 | 0 | null | 2018-03-02T12:06:00 | 2013-09-18T10:25:53 | Python | UTF-8 | Python | false | false | 123 | py | # -*- coding: utf-8 -*-
# $Id: __init__.py 116190 2010-04-26 20:36:44Z glenfant $
"""Browser view resources for PDFBook"""
| [
"thomas.desvenain@gmail.com"
] | thomas.desvenain@gmail.com |
5fbaade6fc98a0d822545a53775f1db0b5b15690 | 4f824bb3a110920e3e26a4ee0fd8597d321b1c78 | /hw01_easy.py | 2fe622fab96bfd170fa19d8dd477f44f7f71fcc5 | [] | no_license | AlexeyGab/HomeWork | 3aa900371d3bc5957d75ecfffda594578fc81cd3 | 688000044bafcc65452cfb65c18f04cfd48afdeb | refs/heads/master | 2020-04-01T03:51:34.398826 | 2018-10-29T04:16:21 | 2018-10-29T04:16:21 | 152,839,448 | 0 | 0 | null | 2018-11-08T18:05:35 | 2018-10-13T05:43:16 | Python | UTF-8 | Python | false | false | 3,202 | py | # coding: UTF-8
__author__ = 'Габеркорн Алексей Игоревич'
import random
# Task 1: given an arbitrary integer (unknown in advance), print the
# digits of the number one at a time (output order does not matter).
# Hints:
# * try solving it with arithmetic and a while loop;
# * optionally also solve it with a for loop.
# Read a number from the user.
value = int(input('Введите любое целое число:'))
# Drop the minus sign, since it is not a digit.
value = abs(value)
# Print the digits in order using a for loop over the string form.
print('Цифры по порядку')
for s in str(value):
    print(s)
# Get a non-negative integer from the random generator.
value = random.randrange(0,1000)
print('Случайное число:', value)
print('Цифры в обратном порядке')
# Print the digits in reverse order using a while loop and arithmetic.
while(value != 0):
    print(value % 10)
    value = value // 10
# Task 2: ask the user for two initial values, swap the variables,
# print the new values.
# Hint:
# * try a solution with an extra variable
# or with arithmetic operations.
# Do not solve it like this:
# print("a = ", b, "b = ", a) - that would be a wrong solution!
# Solution via a temporary variable (works for values of any type).
a = input('Введите значение переменной a:')
b = input('Введите значение переменной b:')
temp = a
a = b
b = temp
print('Переменная a =', a)
print('Переменная b =', b)
# Solution via arithmetic (integers only).
a = int(input('Введите целое число a:'))
b = int(input('Введите целое число b:'))
a = a + b
b = a - b
a = a - b
print('Переменная a =', a)
print('Переменная b =', b)
# Task 3: ask the user's age.  If 18 or older print "Access granted",
# otherwise print the 18+ notice.
age = int(input('Сколько вам лет?'))
if (age >= 18):
    print("Доступ разрешен")
else:
    print("Извините, пользование данным ресурсом только с 18 лет")
"lexa_gb@mail.ru"
] | lexa_gb@mail.ru |
a118044a91ae8f1aecd55e11f46f9f104f27cfaa | d2dc242930a69774fee51a7e3f7c112557e7540f | /modelutils/tensorflowlite/tflite/ExpandDimsOptions.py | 79e85e522b42716e4668bb24cb072e076ecc5219 | [
"MIT"
] | permissive | shonohs/modelutils | afcd66d7fa758a005fbd8de42ceda54626e08e79 | 24df495ce5372c3f8a1f064f163b51150517e2de | refs/heads/master | 2021-07-10T07:25:24.664418 | 2020-12-11T05:25:18 | 2020-12-11T05:45:52 | 222,640,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ExpandDimsOptions(object):
    """Flatbuffers table wrapper for the TFLite ExpandDims operator options.

    Generated by flatc; the table has no fields, so this class only does
    buffer/offset bookkeeping.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsExpandDimsOptions(cls, buf, offset):
        # Resolve the root table offset inside the buffer and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ExpandDimsOptions()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TFLite flatbuffer file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # ExpandDimsOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
# Begin building an ExpandDimsOptions table (it has 0 fields).
def ExpandDimsOptionsStart(builder): builder.StartObject(0)
# Finish the table under construction and return its offset in the builder.
def ExpandDimsOptionsEnd(builder): return builder.EndObject()
class ExpandDimsOptionsT(object):
    """Mutable ("object API") counterpart of ExpandDimsOptions; no fields."""
    # ExpandDimsOptionsT
    def __init__(self):
        pass
    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Decode the flatbuffer table, then unpack it into a plain object.
        expandDimsOptions = ExpandDimsOptions()
        expandDimsOptions.Init(buf, pos)
        return cls.InitFromObj(expandDimsOptions)
    @classmethod
    def InitFromObj(cls, expandDimsOptions):
        x = ExpandDimsOptionsT()
        x._UnPack(expandDimsOptions)
        return x
    # ExpandDimsOptionsT
    def _UnPack(self, expandDimsOptions):
        # Nothing to copy: the table has no fields.
        if expandDimsOptions is None:
            return
    # ExpandDimsOptionsT
    def Pack(self, builder):
        ExpandDimsOptionsStart(builder)
        expandDimsOptions = ExpandDimsOptionsEnd(builder)
        return expandDimsOptions
| [
""
] | |
5d9181ef89e47bf36090c05d0625e289410502cd | 9ec7e99e51f8567e20ff29960af87b9fc1c0d26f | /galaxy_evolution/0.3/healpix_util_src/healpix_util/__init__.py | 8434ea146c995e55d46842dc68247e6d7507b10d | [] | no_license | linea-it/docker-images | 08346375382b6b0094536904471ed8f28227ec65 | 1ef02480c2c0876f3f34f3182be3f3aae462fff9 | refs/heads/master | 2021-12-24T00:02:34.054131 | 2021-08-11T11:39:19 | 2021-08-11T11:39:19 | 156,705,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | """
todo
test quad_check with real weight map
"""
import healpy
from healpy import *
from .healpix import \
HealPix, \
RING, NESTED, NEST, \
get_scheme_name, get_scheme_num
from .maps import Map, DensityMap
from .fileio import \
readMap, readMaps, \
readDensityMap, readDensityMaps, \
writeMap, writeMaps
from .coords import \
eq2ang, ang2eq, eq2vec, \
randsphere, randcap, get_posangle_eq, get_quadrant_eq
from .version import __version__
| [
"guilherme.soares@linea.gov.br"
] | guilherme.soares@linea.gov.br |
ecd54e3ea115657c32a7230c77025894b398d2b0 | 7fb39a6a09a5dec95cf9c5d1a6fb4c9b7936c482 | /net-details.py | bc1454cb4076c45af6e064df27ae15b610a9db3e | [] | no_license | nitingadekar/py-monitor | 0fdb1b09d83d4478d573de02e0f37005aaec346a | 3573ea3b1d601c0d49d03777fd9380125092c851 | refs/heads/master | 2020-09-16T12:43:23.516619 | 2019-12-12T14:18:00 | 2019-12-12T14:18:00 | 223,773,468 | 1 | 0 | null | 2019-12-12T13:50:03 | 2019-11-24T16:20:55 | Python | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/python
import psutil
# FIX: duplex_map was referenced below but never defined, so the script
# crashed with NameError on the first NIC that had stats.
# psutil.net_if_stats() reports duplex as one of the NIC_DUPLEX_* constants.
duplex_map = {
    psutil.NIC_DUPLEX_FULL: "full",
    psutil.NIC_DUPLEX_HALF: "half",
    psutil.NIC_DUPLEX_UNKNOWN: "?",
}
stats = psutil.net_if_stats()
io_counters = psutil.net_io_counters(pernic=True)  # currently unused
# Print per-interface link stats (speed, duplex, MTU, up/down).
for nic, addrs in psutil.net_if_addrs().items():
    print("%s:" % (nic))
    if nic in stats:
        st = stats[nic]
        print("speed=%sMB, duplex=%s, mtu=%s, up=%s" % (
            st.speed, duplex_map[st.duplex], st.mtu,
            "yes" if st.isup else "no"))
"ngadekar@alpha-sense.com"
] | ngadekar@alpha-sense.com |
09996c7287fec355d3cdcca2b6f66b24199158dc | 3733f1d69ef7b3d9cc49fd05392ae27ffd1bfa0f | /eeazycrm/users/routes.py | 607590657f687650745517129a73400c2e85a60c | [] | no_license | jagjot2008/EeazyCRM | f088c94ef1fc4bbd4b52de38fcd9fe7c6cb4adb4 | b2fec9b1125200631ce0f433cebcf834542dafe9 | refs/heads/master | 2023-05-12T17:25:20.238605 | 2022-01-13T13:55:11 | 2022-01-13T13:55:11 | 217,661,838 | 101 | 46 | null | 2023-05-01T21:17:22 | 2019-10-26T05:42:01 | Python | UTF-8 | Python | false | false | 2,486 | py | from flask import Blueprint, session
from flask_login import current_user, login_user, logout_user
from flask import render_template, flash, url_for, redirect, request
from eeazycrm import db, bcrypt
from .forms import Register, Login
from .models import User
users = Blueprint('users', __name__)
@users.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user by email/password and start their session.

    GET renders the login form; POST validates it, checks the account is
    active and the bcrypt hash matches, then honours an optional ?next=
    redirect target.
    """
    # already authenticated users go straight to the dashboard
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = Login()
    if request.method == 'POST':
        if form.validate_on_submit():
            user = User.query.filter_by(email=form.email.data).first()
            if user:
                if not user.is_user_active:
                    flash("""User has not been granted access to the system!
                    Please contact the system administrator""",
                          'danger')
                elif not bcrypt.check_password_hash(user.password, form.password.data):
                    flash('Invalid Password!', 'danger')
                else:
                    login_user(user, remember=form.remember.data)
                    # honour the post-login redirect requested by Flask-Login
                    next_page = request.args.get('next')
                    return redirect(next_page) if next_page else redirect(url_for('main.home'))
            else:
                flash('User does not exist! Please contact the system administrator', 'danger')
    # GET, or any failed POST: show the form (flash messages explain why)
    return render_template("login.html", title="EeazyCRM - Login", form=form)
@users.route("/register", methods=['GET', 'POST'])
def register():
    """Register a new user with a bcrypt-hashed password.

    NOTE(review): every self-registered user is created with
    is_admin=True and is_user_active=True -- presumably intended for
    bootstrapping the first account; confirm before exposing publicly.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.home'))
    form = Register()
    if request.method == 'POST':
        if form.validate_on_submit():
            hashed_pwd = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
            user = User(first_name=form.first_name.data, last_name=form.last_name.data,
                        email=form.email.data, is_admin=True, is_first_login=False,
                        is_user_active=True, password=hashed_pwd)
            db.session.add(user)
            db.session.commit()
            flash('User has been created! You can now login', 'success')
            return redirect(url_for('users.login'))
        else:
            flash(f'Failed to register user!', 'danger')
    return render_template("register.html", title="EeazyCRM - Register New User", form=form)
@users.route("/logout")
def logout():
    """Log the current user out, clear all session data, go to login."""
    logout_user()
    session.clear()
    return redirect(url_for('users.login'))
| [
"jagjotsingh2008@gmail.com"
] | jagjotsingh2008@gmail.com |
a478f92f69a0e9143bc61d2cfcac83f07be2ff9b | a774771f25ce961c7a8dd5a8562fef60127615ce | /week4/userzap/app.py | 2303e499f42acf52165c2e3e464ba3355dd9a6ed | [] | no_license | KristenLingwood-zz/rithm7_homework | bb9b655ab0622c269b7f67d8e4f5296fcf7a9c15 | dc13e8759aabd76f5632837e12d42c98d2635753 | refs/heads/master | 2023-02-12T14:22:49.861416 | 2018-06-29T21:41:28 | 2018-06-29T21:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,042 | py | from flask import Flask, render_template, redirect, request, url_for, session
from flask_modus import Modus
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- acceptable for a homework app,
# but load it from the environment in anything deployed.
app.config['SECRET_KEY'] = "abc123"
modus = Modus(app)  # enables method override (PATCH/DELETE via _method)
toolbar = DebugToolbarExtension(app)
DB = "postgresql://localhost/userzap"
app.config['SQLALCHEMY_DATABASE_URI'] = DB
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True  # logs every SQL statement
db = SQLAlchemy(app)
bcrypt = Bcrypt()
class User(db.Model):
    """Application user with a bcrypt-hashed password and owned messages."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Text, nullable=False, unique=True)
    password = db.Column(db.Text, nullable=False)  # bcrypt hash, never plaintext
    first_name = db.Column(db.Text, nullable=False)
    last_name = db.Column(db.Text, nullable=False)
    img_url = db.Column(db.Text)
    # deleting a user deletes their messages (cascade="all,delete")
    messages = db.relationship(
        'Message', backref='user', lazy="dynamic", cascade="all,delete")
    @classmethod
    def register(cls, username, password):
        """register a user and hash their password

        Returns an *unsaved* User with only username/password set; the
        caller fills in the remaining required fields and commits.
        """
        # take password and generate a bcrypt hash, saved to var hashed
        hashed = bcrypt.generate_password_hash(password)
        hashed_utf8 = hashed.decode("utf8")
        return cls(username=username, password=hashed_utf8)
    @classmethod
    def authenticate(cls, username, password):
        """Return the User if username exists and password matches, else False."""
        user = User.query.filter_by(username=username).first()
        # if user exists
        if user:
            # if password is correct
            if bcrypt.check_password_hash(user.password, password):
                return user
        return False
class Message(db.Model):
    """A short text post owned by a single user."""
    __tablename__ = "messages"
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# Association table for the many-to-many relationship between messages and tags.
message_tags = db.Table(
    'message_tags',
    db.Column('message_id', db.Integer, db.ForeignKey('messages.id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')))
class Tag(db.Model):
    """A label that can be attached to many messages (and vice versa)."""
    __tablename__ = "tags"
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.Text, nullable=False, unique=True)
    # many-to-many via the message_tags table; `tags` backref on Message
    messages = db.relationship(
        'Message',
        lazy="dynamic",
        secondary=message_tags,
        cascade="all,delete",
        backref=db.backref('tags', lazy="dynamic"))
db.create_all()
@app.route('/')
def root():
    """Redirect the site root to the user listing."""
    return redirect(url_for('users_index'))
@app.route('/users')
def users_index():
    """List every user (GET /users)."""
    users = User.query.all()
    return render_template("users/index.html", users=users)
@app.route('/users/new')
def users_new():
    """Render the new-user form (GET /users/new)."""
    return render_template('users/new.html')
@app.route('/users', methods=["POST"])
def users_create():
    """Handle the new-user form: hash the password, validate the names,
    persist the user and return to the listing (POST /users).

    Raises ValueError when first/last name is blank (kept from the
    original behaviour; a flash + redirect would be friendlier).
    """
    username = request.form.get("username")
    password = request.form.get("password")
    new_user = User.register(username=username, password=password)
    fname = request.form['first_name']
    lname = request.form['last_name']
    img = request.form['img_url']
    if fname == "":
        raise ValueError('First name must not be blank')
    if lname == "":
        raise ValueError('Last name must not be blank')
    new_user.first_name = fname
    # FIX: the original assignments ended with stray trailing commas
    # ("new_user.last_name = lname,"), storing 1-tuples instead of
    # strings in the Text columns.
    new_user.last_name = lname
    new_user.img_url = img
    db.session.add(new_user)
    db.session.commit()
    return redirect(url_for('users_index'))
@app.route('/login', methods=['GET', 'POST'])
def users_login():
    """Authenticate a user and store their id in the session.

    GET, or a failed POST, re-renders the login form.
    """
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        logged_in_user = User.authenticate(username, password)
        if logged_in_user:
            session['user_id'] = logged_in_user.id
            # NOTE(review): the user= kwarg just becomes a query-string
            # argument; users_welcome reads the id from the session instead.
            return redirect(url_for('users_welcome', user=logged_in_user))
    return render_template('/users/login.html')
@app.route('/users/welcome')
def users_welcome():
    """Show the welcome page for the logged-in user.

    FIX: the original indexed session['user_id'] directly, raising
    KeyError for anonymous visitors, and implicitly returned None when
    the value was falsy; anonymous visitors are now sent to the login
    page instead.
    """
    user_id = session.get('user_id')
    if not user_id:
        return redirect(url_for('users_login'))
    user = User.query.filter_by(id=user_id).first()
    return render_template('users/welcome.html', user=user)
@app.route('/users/logout')
def users_logout():
    """Log the current user out and return to the user listing.

    FIX: the original read session['user_id'] unconditionally, raising
    KeyError when nobody was logged in; session.pop() removes the key
    only if present.
    """
    session.pop('user_id', None)
    return redirect(url_for('users_index'))
@app.route('/users/<int:user_id>')
def users_show(user_id):
    """Show one user's page; 404 when the id is unknown."""
    found_user = User.query.get_or_404(user_id)
    return render_template('users/show.html', user=found_user)
@app.route('/users/<int:user_id>/edit')
def users_edit(user_id):
    """Render the edit form pre-filled with the user's data."""
    found_user = User.query.get_or_404(user_id)
    return render_template('users/edit.html', user=found_user)
@app.route('/users/<int:user_id>', methods=['PATCH'])
def users_update(user_id):
    """Apply the edit form to a user and return to their show page.

    FIX: the original assignments to first_name/last_name ended with
    stray trailing commas, storing 1-tuples instead of strings.
    """
    found_user = User.query.get_or_404(user_id)
    found_user.first_name = request.form['first_name']
    found_user.last_name = request.form['last_name']
    found_user.img_url = request.form['img_url']
    db.session.add(found_user)
    db.session.commit()
    return redirect(url_for('users_show', user_id=found_user.id))
@app.route('/users/<int:user_id>', methods=['DELETE'])
def users_destroy(user_id):
    """Delete a user (their messages cascade) and return to the listing."""
    found_user = User.query.get_or_404(user_id)
    db.session.delete(found_user)
    db.session.commit()
    return redirect(url_for("users_index"))
@app.route('/users/<int:user_id>/messages')
def messages_index(user_id):
    """List all messages belonging to one user."""
    found_user = User.query.get_or_404(user_id)
    return render_template('messages/index.html', user=found_user)
@app.route('/users/<int:user_id>/messages/new')
def messages_new(user_id):
    """Render the new-message form with every tag available to attach."""
    found_user = User.query.get_or_404(user_id)
    tags = Tag.query.all()
    return render_template('messages/new.html', user=found_user, tags=tags)
@app.route('/users/<int:user_id>/messages', methods=['POST'])
def messages_create(user_id):
    """Create a message for the user and attach the selected tags."""
    content = request.form['message_content']
    new_message = Message(content=content, user_id=user_id)
    # checkbox values arrive as strings; look the Tag rows up by id
    tag_ids = [int(num) for num in request.form.getlist('tags')]
    new_message.tags = Tag.query.filter(Tag.id.in_(tag_ids))
    db.session.add(new_message)
    db.session.commit()
    return redirect(url_for('messages_index', user_id=user_id))
@app.route('/messages/<int:message_id>')
def messages_show(message_id):
    """Show a single message; 404 when the id is unknown."""
    found_message = Message.query.get_or_404(message_id)
    return render_template('/messages/show.html', message=found_message)
@app.route('/messages/<int:message_id>', methods=['DELETE'])
def messages_destroy(message_id):
    """Delete a message and return to its owner's message list."""
    found_message = Message.query.get_or_404(message_id)
    # remember the owner before the row is deleted
    user = found_message.user
    db.session.delete(found_message)
    db.session.commit()
    return redirect(url_for('messages_index', user_id=user.id))
@app.route('/messages/<int:message_id>/edit')
def messages_editform(message_id):
    """Render the message edit form with every tag available."""
    found_message = Message.query.get_or_404(message_id)
    tags = Tag.query.all()
    return render_template(
        'messages/edit.html', message=found_message, tags=tags)
@app.route('/messages/<int:message_id>', methods=['PATCH'])
def messages_update(message_id):
    """Handle the edit form: update the content and replace the tag set."""
    found_message = Message.query.get_or_404(message_id)
    found_message.content = request.form['message_content']
    tag_ids = [int(num) for num in request.form.getlist('tags')]
    found_message.tags = Tag.query.filter(Tag.id.in_(tag_ids))
    user = found_message.user
    db.session.add(found_message)
    db.session.commit()
    return redirect(url_for('messages_index', user_id=user.id))
@app.route('/tags')
def tags_index():
    """List every tag (GET /tags)."""
    tags = Tag.query.all()
    return render_template('tags/index.html', tags=tags)
@app.route('/tags/new')
def tags_new():
    """Render the new-tag form with every message available to attach."""
    messages = Message.query.all()
    return render_template('tags/new.html', messages=messages)
@app.route('/tags', methods=['POST'])
def tags_create():
    """Create a tag and attach it to the selected messages."""
    content = request.form.get('tag_content')
    new_tag = Tag(content=content)
    # checkbox values arrive as strings; look the Message rows up by id
    message_ids = [int(num) for num in request.form.getlist("messages")]
    new_tag.messages = Message.query.filter(Message.id.in_(message_ids))
    db.session.add(new_tag)
    db.session.commit()
    return redirect(url_for('tags_index'))
@app.route('/tags/<int:tag_id>')
def tags_show(tag_id):
    """Show a single tag; 404 when the id is unknown."""
    found_tag = Tag.query.get_or_404(tag_id)
    return render_template('tags/show.html', tag=found_tag)
@app.route('/tags/<int:tag_id>/edit')
def tags_editform(tag_id):
    """Render the tag edit form with every message available."""
    found_tag = Tag.query.get_or_404(tag_id)
    messages = Message.query.all()
    return render_template('tags/edit.html', tag=found_tag, messages=messages)
@app.route('/tags/<int:tag_id>', methods=['PATCH'])
def tags_update(tag_id):
    """Update a tag's text and replace its message set."""
    found_tag = Tag.query.get_or_404(tag_id)
    found_tag.content = request.form['tag_content']
    message_ids = [int(num) for num in request.form.getlist("messages")]
    found_tag.messages = Message.query.filter(Message.id.in_(message_ids))
    db.session.add(found_tag)
    db.session.commit()
    return redirect(url_for('tags_index'))
@app.route('/tags/<int:tag_id>', methods=['DELETE'])
def tags_destroy(tag_id):
    """Delete a tag and return to the tag listing.

    FIX: the original rule was '/tags<int:tag_id>' (missing slash), so a
    DELETE sent to /tags/<id> -- the URL every other tag view uses --
    never matched this endpoint.
    """
    found_tag = Tag.query.get_or_404(tag_id)
    db.session.delete(found_tag)
    db.session.commit()
    return redirect(url_for('tags_index'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the proper status code."""
    return render_template('404.html'), 404
| [
"kristen.lingwood@gmail.com"
] | kristen.lingwood@gmail.com |
17c57c975d41576f914c4142bb91098b5d422220 | f37341f3d96b9f6bfa9fb40feaed7086fc962bb3 | /homework3/readdata.py | 2f57aff75c024866293e9e87996a9d38fe3ea16d | [] | no_license | mathjjxpku/Basic_DeepLearning | e3001f486cdd3b60c521f0b0d144d0a9b0ee2e06 | ed3e84875ee26a74995f280186b1903a56f260c9 | refs/heads/master | 2020-03-12T22:20:32.734924 | 2018-11-27T15:46:41 | 2018-11-27T15:46:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 11:40:37 2018
@author: v-jinji
"""
def delblankline(infile1, infile2, trainfile, validfile, testfile):
    """Split the SST sentence file into train/valid/test files.

    infile1: datasetSentences.txt -- tab-separated "index<TAB>sentence",
             header on the first line.
    infile2: datasetSplit.txt -- comma-separated "index,split", header on
             the first line.  Split label 1 -> trainfile, 3 -> validfile,
             2 -> testfile (the original author noted 2/3 were swapped
             relative to their intent).

    FIX: file handles are now managed with `with`, so they are closed
    even when a malformed line raises mid-loop.
    """
    with open(infile1, 'r') as info1, open(infile2, 'r') as info2, \
            open(trainfile, 'w') as train, open(validfile, 'w') as valid, \
            open(testfile, 'w') as test:
        lines1 = info1.readlines()
        lines2 = info2.readlines()
        # start at 1 to skip the header row in both files
        for i in range(1, len(lines1)):
            # restore brackets the treebank escaped as -LRB-/-RRB-
            sentence_line = lines1[i].replace("-LRB-", "(").replace("-RRB-", ")")
            split_label = lines2[i].strip().split(",")[1]
            sentence = sentence_line.strip().split('\t')[1]
            if split_label == '1':
                train.write(sentence)
                train.write("\n")
            elif split_label == '3':
                valid.write(sentence)
                valid.write("\n")
            elif split_label == '2':
                test.write(sentence)
                test.write("\n")
        print("end")
def tag(infile1, infile2, outputfile3):
    """Join phrases with their sentiment scores into one output file.

    infile1: dictionary.txt -- "phrase|phrase_id" per line (no header).
    infile2: sentiment_labels.txt -- "phrase_id|score" per line, header on
             the first line.
    Writes alternating "phrase\\n score\\n" lines for every id present in
    both files.

    FIX: file handles are now managed with `with`, so they are closed
    even when a malformed line raises mid-loop.
    """
    with open(infile1, 'r') as info1, open(infile2, 'r') as info2, \
            open(outputfile3, 'w') as info3:
        lines1 = info1.readlines()
        lines2 = info2.readlines()
        phrase_by_id = {}  # phrase id -> phrase text
        for line in lines1:
            parts = line.strip().split("|")
            phrase_by_id[parts[1]] = parts[0]
        # start at 1 to skip the header row of the labels file
        for j in range(1, len(lines2)):
            parts = lines2[j].strip().split("|")
            if parts[0] in phrase_by_id:
                info3.write(phrase_by_id[parts[0]])
                info3.write("\n")
                info3.write(parts[1])
                info3.write("\n")
        print("end2d1")
def tag1(infile0, infile1, infile2, infile3, infile4, infile5, infile6):
    """Route (sentence, score) pairs into the three split files.

    infile0 alternates sentence/score lines.  A pair is copied into
    infile4/5/6 when its sentence line appears verbatim in infile1/2/3
    (train/valid/test sentence lists) respectively; the checks are
    independent, so a sentence present in several lists is written to
    each matching output.

    FIX: file handles are managed with `with`; membership lists are
    converted to sets for O(1) lookup (same results, much faster).
    """
    with open(infile0, 'r') as info0:
        pairs = info0.readlines()
    with open(infile1, 'r') as f:
        train_lines = set(f.readlines())
    with open(infile2, 'r') as f:
        valid_lines = set(f.readlines())
    with open(infile3, 'r') as f:
        test_lines = set(f.readlines())
    with open(infile4, 'w') as train_out, open(infile5, 'w') as valid_out, \
            open(infile6, 'w') as test_out:
        for i in range(0, len(pairs), 2):
            sentence = pairs[i]
            if sentence in train_lines:
                train_out.write(sentence)
                train_out.write(pairs[i + 1])
            if sentence in valid_lines:
                valid_out.write(sentence)
                valid_out.write(pairs[i + 1])
            if sentence in test_lines:
                test_out.write(sentence)
                test_out.write(pairs[i + 1])
        print("end3d1")
# NOTE(review): these driver calls use absolute, machine-specific paths and
# will fail anywhere else; consider argparse or paths relative to the repo.
delblankline('/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/stanfordSentimentTreebank/datasetSentences.txt',"/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/stanfordSentimentTreebank/datasetSplit.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/train.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/valid.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/test.txt")
tag("/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/stanfordSentimentTreebank/dictionary.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/stanfordSentimentTreebank/sentiment_labels.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/allsentimet.txt")
tag1("/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/allsentimet.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/train.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/valid.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/test.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/train1.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/valid1.txt","/Users/jingxingjiang/大数据班课程资料/深度学习课程/homework/homework3/data/test1.txt")
| [
"noreply@github.com"
] | mathjjxpku.noreply@github.com |
89a7ebe53f92d5394c8547f1a21b3325ac462d54 | f18df31d4ba8569b420219f5d52da311a32581d6 | /cloudkittydashboard/enabled/_13_admin_pyscripts_panel.py | cd4c2d21ace08745ca15d4db97c789af1506bebe | [
"Apache-2.0"
] | permissive | openstack/cloudkitty-dashboard | 418b54a59a93201c79e422ee4571c9f24b6234e5 | 4ed8863c1b15d489a2a78e767b737402647bc4da | refs/heads/master | 2023-08-23T06:09:10.473334 | 2023-07-12T15:40:17 | 2023-07-12T15:40:17 | 23,157,716 | 25 | 14 | Apache-2.0 | 2022-01-18T10:16:11 | 2014-08-20T17:35:14 | Python | UTF-8 | Python | false | false | 147 | py | PANEL_GROUP = 'rating'
PANEL_DASHBOARD = 'admin'
PANEL = 'pyscripts'
ADD_PANEL = 'cloudkittydashboard.dashboards.admin.pyscripts.panel.PyScripts'
| [
"pa.bardina@objectif-libre.com"
] | pa.bardina@objectif-libre.com |
e7fc667f6d4d2f3ac106b91116dcc787456f4b77 | 352c30c6dfcec94624fd808e213af4f896186027 | /CRFBiLSTM.py | df357ad732b81798de2dc53db455bc54d4dbca29 | [] | no_license | GeethR/Sensitive-Tweet-Classification-with-NER | 37ba731352893188f2b058e5a6535dea07098b3a | 921722d52d438de4aa4498af235abcc5b94b9e85 | refs/heads/master | 2023-09-03T13:11:11.602468 | 2021-11-13T19:48:11 | 2021-11-13T19:48:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 00:36:43 2020
@author: GEETHA RAJU
"""
#CRF
#standard features learnt by CRF
def word2features(sent, i):
    """Build the CRF feature dict for token *i* of *sent*.

    *sent* is a sequence whose items expose the token at index 0 and the
    POS tag at index 1.  The dict always carries the token's own surface
    features; it additionally carries previous-token features (or 'BOS')
    and next-token features (or 'EOS') depending on the position.
    """
    token, tag = sent[i][0], sent[i][1]
    feats = {
        'bias': 1.0,
        'word.lower()': token.lower(),
        'word[-3:]': token[-3:],
        'word[-2:]': token[-2:],
        'word.isupper()': token.isupper(),
        'word.istitle()': token.istitle(),
        'word.isdigit()': token.isdigit(),
        'postag': tag,
        'postag[:2]': tag[:2],
    }
    if i == 0:
        # first token of the sentence
        feats['BOS'] = True
    else:
        prev_token, prev_tag = sent[i - 1][0], sent[i - 1][1]
        feats['-1:word.lower()'] = prev_token.lower()
        feats['-1:word.istitle()'] = prev_token.istitle()
        feats['-1:word.isupper()'] = prev_token.isupper()
        feats['-1:postag'] = prev_tag
        feats['-1:postag[:2]'] = prev_tag[:2]
    if i == len(sent) - 1:
        # last token of the sentence
        feats['EOS'] = True
    else:
        next_token, next_tag = sent[i + 1][0], sent[i + 1][1]
        feats['+1:word.lower()'] = next_token.lower()
        feats['+1:word.istitle()'] = next_token.istitle()
        feats['+1:word.isupper()'] = next_token.isupper()
        feats['+1:postag'] = next_tag
        feats['+1:postag[:2]'] = next_tag[:2]
    return feats
def sent2features(sent):
    """Return the CRF feature dict for every token position of *sent*."""
    feats = []
    for position in range(len(sent)):
        feats.append(word2features(sent, position))
    return feats
def sent2labels(sent):
    """Extract the gold label from each (token, postag, label) triple."""
    labels = []
    for _, _, label in sent:
        labels.append(label)
    return labels
def sent2tokens(sent):
    """Extract the surface token from each (token, postag, label) triple."""
    tokens = []
    for triple in sent:
        tokens.append(triple[0])
    return tokens
#BI-LSTM
#same for all datasets
# NOTE(review): this snippet assumes Keras names (Input, Embedding, Dropout,
# Bidirectional, LSTM, TimeDistributed, Dense), numpy as np, and prepared
# X_train / y_train / n_words / n_tags already in scope -- none of them are
# imported or defined in this file.  Also, `out` is never wrapped into a
# Model(input, out); calling .compile()/.fit() on `model` (a layer output)
# looks wrong -- a `model = Model(input, out)` line is presumably missing.
input = Input(shape=(140,))  # sequences padded/truncated to 140 tokens
model = Embedding(input_dim=n_words, output_dim=140, input_length=140)(input)
model = Dropout(0.7)(model)
model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(model) # softmax output layer
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X_train, np.array(y_train), batch_size=32, epochs=5, validation_split=0.5, verbose=1)
model.summary()
| [
"rajugeeths@gmail.com"
] | rajugeeths@gmail.com |
0c9e9bf9bf8fdd23c61c4d29f0821fa87c71adeb | d53f21b23b54e0b4aee2bdc59bc560f1237584f6 | /test9.py | 951eecd7939a0e6b66d050c840d37ee08986f136 | [] | no_license | cccczl/testpy | 68a42596aeb59812beba06ec54565f12a30ab762 | 156ca2d285a0d309ac9f3d0d0994ec2ebd0a511d | refs/heads/master | 2022-11-24T11:36:12.372773 | 2018-02-19T15:35:18 | 2018-02-19T15:35:18 | 117,459,687 | 0 | 0 | null | 2022-11-17T18:32:50 | 2018-01-14T19:24:26 | Python | UTF-8 | Python | false | false | 264 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Demonstrates `break` inside for and while loops (Python 2 print syntax).
# Stop at the letter "h"; earlier letters are printed one per line.
for i in "python":
    if i == "h":
        break
    print "打印当前字母", i
# Count down from 10 and bail out of the loop when var reaches 5.
var = 10
while var > 0:
    print var
    var = var - 1
    if var == 5:
        print "循环跳出"
        break
print "程序结束"
| [
"cccczl@hotmail.com"
] | cccczl@hotmail.com |
34a2a41100356cb517bf959f470a97daa1a2d332 | 059dfadcb4040e569c84f894eb691bd661d144b1 | /clearml/version.py | 13c053bccd20065d6a0d51bba56931ad3e7c276c | [
"Apache-2.0"
] | permissive | jponto/clearml | 855e5c64c889dc44466ec129592bfa0acb837899 | d37aa23fbf577513be999bcf966b4d771e1056d8 | refs/heads/master | 2023-03-18T02:49:50.518973 | 2021-03-11T07:42:35 | 2021-03-11T07:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | __version__ = '0.17.5rc6'
| [
""
] | |
6b6d42eb3d030728790f8497fe392eada6aed1ca | 7f4fae8e0a9e29fc3abee784a2d7d0beb8252bd5 | /wulifang/nuke/_rotopaint_dopesheet.py | 23d06b434f378fa9e35ffc0575925e1cbec8a39d | [] | no_license | WuLiFang/Nuke | a303646e927c9745f2eaf8dad4e5e1ccc09a30e7 | 49df48ded0985771147b1a40707b5454291eab19 | refs/heads/master | 2023-07-21T13:36:27.423572 | 2023-07-17T10:34:04 | 2023-07-17T10:34:04 | 100,696,180 | 16 | 5 | null | 2020-03-08T11:50:16 | 2017-08-18T09:28:26 | Python | UTF-8 | Python | false | false | 5,497 | py | # -*- coding=UTF-8 -*-
# pyright: strict, reportTypeCommentUsage=none
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import nuke
import nuke.rotopaint
import nuke.curvelib
from wulifang._util import cast_str, cast_text
from wulifang.nuke._util import (
iter_deep_rotopaint_element,
Panel as _Panel,
CurrentViewer,
raise_panel,
RotopaintLifeTimeType,
knob_of,
RotoKnob,
)
TYPE_CHECKING = False
if TYPE_CHECKING:
from wulifang._compat.str import Str
def _rotopaint_keyframes(n):
    # type: (nuke.Node) -> ...
    """Collect the sorted frames where stroke/shape lifetimes start or end.

    Always includes the node's first and last frame.  Strokes whose
    lifetime type is ALL are visible everywhere and contribute nothing.
    """
    key_frames = set([n.firstFrame(), n.lastFrame()])
    for i in iter_deep_rotopaint_element(knob_of(n, "curves", RotoKnob).rootLayer):
        if isinstance(
            i,
            (
                nuke.rotopaint.Shape,
                nuke.rotopaint.Stroke,
            ),
        ):
            attrs = i.getAttributes()
            lifetime_type = attrs.getValue(0, attrs.kLifeTimeTypeAttribute)
            if lifetime_type == RotopaintLifeTimeType.ALL:
                continue
            # kLifeTimeM/N appear to be the lifetime's start/end frame --
            # confirm against the Nuke rotopaint API docs.
            key_frames.add(int(attrs.getValue(0, attrs.kLifeTimeMAttribute)))
            key_frames.add(int(attrs.getValue(0, attrs.kLifeTimeNAttribute)))
    return sorted(key_frames)
def apply_timewarp(rotopaint, timewarp, all_stroke=False):
    # type: (nuke.Node, nuke.Node, bool) -> None
    """Apply timewarp to rotopaint node

    Remaps every stroke/shape lifetime (kLifeTimeM/N attributes) through
    the TimeWarp's lookup curve.

    Args:
        rotopaint (nuke.Node): RotoPaint node
        timewarp (nuke.Node): TimeWarp node
        all_stroke (bool, optional): whether apply to invisible stroke.
            Defaults to False.
    """
    root_layer = knob_of(rotopaint, "curves", RotoKnob).rootLayer
    lookup = timewarp[cast_str("lookup")]
    # Parse "x<frame> <value>" keyframe pairs out of the curve script and
    # build {curve value (original frame): curve x (remapped frame)}.
    time_map = {
        int(match[1]): int(match[0])
        for match in re.findall(
            r"x(\d+) (\d+)",
            cast_text(lookup.toScript()),
        )
    }
    def apply_lookup(attrs, key):
        # type: (nuke.curvelib.AnimAttributes, Str) -> None
        # Remap one lifetime attribute; abort with a user-visible message
        # when the frame has no keyframe on the lookup curve.
        input_time = int(attrs.getValue(0, key))
        if input_time not in time_map:
            nuke.message(
                cast_str(
                    "在 {}.input 中找不到值为 {} 的关键帧".format(timewarp.name(), input_time)
                )
            )
            raise ValueError("timewarp lookup failed")
        output_time = time_map[int(input_time)]
        attrs.set(key, output_time)
    for i in iter_deep_rotopaint_element(root_layer):
        if isinstance(
            i,
            (
                nuke.rotopaint.Shape,
                nuke.rotopaint.Stroke,
            ),
        ):
            attrs = i.getAttributes()
            lifetime_type = attrs.getValue(0, attrs.kLifeTimeTypeAttribute)
            if lifetime_type == RotopaintLifeTimeType.ALL:
                # visible on all frames -- nothing to remap
                continue
            if not all_stroke and not attrs.getValue(
                nuke.frame(), attrs.kVisibleAttribute
            ):
                # skip strokes hidden at the current frame unless forced
                continue
            apply_lookup(attrs, attrs.kLifeTimeNAttribute)
            apply_lookup(attrs, attrs.kLifeTimeMAttribute)
class Panel(_Panel):
    """Panel for rotopaint dopesheet command.

    Wires a helper TimeWarp node after the selected RotoPaint node so the
    artist can retime keyframes in the dope sheet, then offers buttons to
    apply the retime (to visible or all strokes) or cancel.
    """

    def __init__(
        self,
        rotopaint,  # type: nuke.Node
    ):
        # type: (...) -> None
        super(Panel, self).__init__(
            cast_str("RotoPaint摄影表"),
            cast_str("com.wlf-studio.rotopaint-dopesheet"),
        )
        if cast_text(rotopaint.Class()) != "RotoPaint":
            nuke.message(cast_str("请选中RotoPaint节点"))
            raise ValueError("require roto paint node")
        self.rotopaint = rotopaint
        # Create a TimeWarp downstream of the RotoPaint whose lookup curve
        # gets one identity key ("x<f> <f>") per rotopaint keyframe.
        n = nuke.createNode(cast_str("TimeWarp"))
        n.setInput(0, rotopaint)
        k = knob_of(n, "lookup", nuke.Array_Knob)
        k.fromScript(
            cast_str(
                "{curve L l %s}"
                % (
                    " ".join(
                        "x{} {}".format(i, i) for i in _rotopaint_keyframes(rotopaint)
                    ),
                )
            )
        )
        # Force integer output frames when the curve is edited.
        k.setExpression(cast_str("floor(curve)"))
        n.showControlPanel()
        CurrentViewer.show(n)
        self.timewarp = n
        rotopaint.hideControlPanel()
        # Instruction text plus the three action buttons.
        k = nuke.Text_Knob(
            cast_str(""),
            cast_str("说明"),
            cast_str(
                "请在摄影表中编辑 %s.lookup 然后选择以下操作" % (cast_text(self.timewarp.name()),)
            ),
        )
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("apply"), cast_str("应用至可见笔画"))
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("apply_all"), cast_str("应用至所有笔画"))
        self.addKnob(k)
        k = nuke.Script_Knob(cast_str("cancel"), cast_str("Cancel"))
        self.addKnob(k)

    def show(self):
        """Show this panel and bring the dope sheet to the front."""
        super(Panel, self).show()
        raise_panel("DopeSheet.1")

    def knobChanged(self, knob):
        # type: (nuke.Knob) -> None
        """Handle button clicks: apply (visible/all strokes) or cancel."""
        is_finished = False
        if knob is self["apply"]:
            apply_timewarp(self.rotopaint, self.timewarp)
            is_finished = True
        elif knob is self["apply_all"]:
            apply_timewarp(self.rotopaint, self.timewarp, True)
            is_finished = True
        elif knob is self["cancel"]:
            is_finished = True
        if is_finished:
            # Clean up the helper node and restore the original panels.
            nuke.delete(self.timewarp)
            self.rotopaint.showControlPanel()
            self.destroy()
            raise_panel("DAG.1")
| [
"NateScarlet@Gmail.com"
] | NateScarlet@Gmail.com |
8bd13c6d0e989bf79bff3eeef7cadbe5cf0001e2 | 883a1d6cb2bcd15ab46567f6fed363ea08401340 | /project0001api/comment/migrations/0001_initial.py | cee8e8935b8f0c69a0d015ac3017fa59fc81500b | [
"MIT"
] | permissive | Hir0v0/SNS-API | a08ebaf99dc8fd43a8e1eda99f27dbf56c5145eb | 7616494ccb59bd94e0999bb40ee576557d2cc4f2 | refs/heads/main | 2023-02-24T17:53:30.253471 | 2021-01-08T11:41:28 | 2021-01-08T11:41:28 | 327,885,932 | 0 | 0 | MIT | 2021-01-08T11:41:30 | 2021-01-08T11:37:55 | Python | UTF-8 | Python | false | false | 1,839 | py | # Generated by Django 3.0.11 on 2020-11-30 05:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    """Initial migration for the comment app.

    Creates the ``PostComment`` model: a threaded (django-mptt) comment
    tree attached to a post and authored by a user.
    """

    initial = True

    dependencies = [
        # Requires the post app state after its own Comment model was removed.
        ('post', '0004_delete_comment'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='PostComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(max_length=250)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('is_published', models.BooleanField(default=False)),
                ('in_moderation', models.BooleanField(default=True)),
                # lft/rght/tree_id/level are maintained by django-mptt for
                # the nested-set representation of the comment tree.
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='comment.PostComment')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='postcomments', to='post.Post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"charopevez@gmail.com"
] | charopevez@gmail.com |
7a0195cc9a5b62c0a78232c822679a2eb1225c01 | 383504aa898460512521c673989c59ac4f48a28a | /test/characterization/generate_data.py | 2ad0e267732c2f12aa96ab3455fb9319e3c5cb5d | [
"Apache-2.0"
] | permissive | jyu00/qiskit-ignis | a265a4ac18a1f57c4fdd64b8941ced5fb31d37d3 | 0af27dd1fdd5c4e4d2ef80d21d0375d11a1625fd | refs/heads/master | 2022-12-09T16:30:54.540629 | 2020-09-15T17:38:43 | 2020-09-15T17:38:43 | 295,837,466 | 0 | 0 | Apache-2.0 | 2020-09-15T20:07:21 | 2020-09-15T20:07:20 | null | UTF-8 | Python | false | false | 10,422 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Generate data for characterization fitters tests
"""
import os
import sys
from typing import List, Tuple
import json
import numpy as np
import qiskit
from qiskit.providers.aer.noise.errors.standard_errors import \
(thermal_relaxation_error,
coherent_unitary_error)
from qiskit.providers.aer.noise import NoiseModel
from qiskit.ignis.characterization.coherence import (t1_circuits,
t2_circuits,
t2star_circuits)
from qiskit.ignis.characterization.hamiltonian import zz_circuits
# Fix seed for simulations so the generated reference data is reproducible.
SEED = 9000
def t1_circuit_execution() -> Tuple[qiskit.result.Result,
                                    np.array,
                                    List[int],
                                    float]:
    """
    Create T1 circuits and simulate them.

    Returns:
        * Backend result.
        * xdata.
        * Qubits for the T1 measurement.
        * T1 that was used in the circuits creation.
    """
    # 15 numbers ranging from 1 to 200, linearly spaced
    num_of_gates = (np.linspace(1, 200, 15)).astype(int)
    gate_time = 0.11
    qubits = [0]
    circs, xdata = t1_circuits(num_of_gates, gate_time, qubits)

    t1_value = 10
    # Thermal relaxation on every identity gate; T2 is set to 2*T1 (the
    # physical upper bound) so T1 decay dominates.
    error = thermal_relaxation_error(t1_value, 2*t1_value, gate_time)
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(error, 'id')
    # TODO: Include SPAM errors

    backend = qiskit.Aer.get_backend('qasm_simulator')
    shots = 100
    backend_result = qiskit.execute(
        circs, backend,
        shots=shots,
        seed_simulator=SEED,  # fixed seed keeps the generated data stable
        backend_options={'max_parallel_experiments': 0},
        noise_model=noise_model,
        optimization_level=0).result()

    return backend_result, xdata, qubits, t1_value
def generate_data_t1(filename):
    """Simulate T1 circuits and store the results in a JSON file.

    The file holds a dictionary with the keys:
    - 'backend_result': backend result serialized to a dictionary.
    - 'xdata': stored as a plain list (not a numpy array).
    - 'qubits': qubits used for the T1 measurement.
    - 't1': the T1 value used when building the circuits.

    Args:
        filename - name of the json file.
    """
    result, delays, measured_qubits, t1_value = t1_circuit_execution()
    payload = {
        'backend_result': result.to_dict(),
        'xdata': delays.tolist(),
        'qubits': measured_qubits,
        't1': t1_value,
    }
    with open(filename, 'w') as handle:
        json.dump(payload, handle)
def t2_circuit_execution() -> Tuple[qiskit.result.Result,
                                    np.array,
                                    List[int],
                                    float]:
    """
    Create T2 circuits (CPMG echo sequence) and simulate them.

    Returns:
        * Backend result.
        * xdata.
        * Qubits for the T2 measurement.
        * T2 that was used in the circuits creation.
    """
    num_of_gates = (np.linspace(1, 30, 10)).astype(int)
    gate_time = 0.11
    qubits = [0]
    n_echos = 5
    alt_phase_echo = True
    circs, xdata = t2_circuits(num_of_gates, gate_time, qubits,
                               n_echos, alt_phase_echo)

    t2_value = 20
    # Pure dephasing channel: infinite T1, finite T2,
    # excited-state population 0.5.
    error = thermal_relaxation_error(np.inf, t2_value, gate_time, 0.5)
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(error, 'id')
    # TODO: Include SPAM errors

    backend = qiskit.Aer.get_backend('qasm_simulator')
    shots = 100
    backend_result = qiskit.execute(
        circs, backend,
        shots=shots,
        seed_simulator=SEED,  # fixed seed keeps the generated data stable
        backend_options={'max_parallel_experiments': 0},
        noise_model=noise_model,
        optimization_level=0).result()

    return backend_result, xdata, qubits, t2_value
def generate_data_t2(filename):
    """Simulate T2 circuits and store the results in a JSON file.

    The file holds a dictionary with the keys:
    - 'backend_result': backend result serialized to a dictionary.
    - 'xdata': stored as a plain list (not a numpy array).
    - 'qubits': qubits used for the T2 measurement.
    - 't2': the T2 value used when building the circuits.

    Args:
        filename - name of the json file.
    """
    result, delays, measured_qubits, t2_value = t2_circuit_execution()
    payload = {
        'backend_result': result.to_dict(),
        'xdata': delays.tolist(),
        'qubits': measured_qubits,
        't2': t2_value,
    }
    with open(filename, 'w') as handle:
        json.dump(payload, handle)
def t2star_circuit_execution() -> Tuple[qiskit.result.Result,
                                        np.array,
                                        List[int],
                                        float,
                                        float]:
    """
    Create T2* circuits and simulate them.

    Returns:
        * Backend result.
        * xdata.
        * Qubits for the T2* measurement.
        * T2* that was used in the circuits creation.
        * Frequency.
    """
    # Setting parameters: dense sampling up to 150 gates, sparser beyond.
    num_of_gates = np.append(
        (np.linspace(10, 150, 10)).astype(int),
        (np.linspace(160, 450, 5)).astype(int))
    gate_time = 0.1
    qubits = [0]

    t2_value = 10
    # Pure dephasing channel: infinite T1, excited-state population 0.5.
    error = thermal_relaxation_error(np.inf, t2_value, gate_time, 0.5)
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(error, 'id')

    backend = qiskit.Aer.get_backend('qasm_simulator')
    shots = 200

    # Estimate T2* via an oscilliator function
    circs_osc, xdata, omega = t2star_circuits(num_of_gates, gate_time,
                                              qubits, 5)

    backend_result = qiskit.execute(
        circs_osc, backend,
        shots=shots,
        seed_simulator=SEED,  # fixed seed keeps the generated data stable
        backend_options={'max_parallel_experiments': 0},
        noise_model=noise_model,
        optimization_level=0).result()

    return backend_result, xdata, qubits, t2_value, omega
def generate_data_t2star(filename):
    """Simulate T2* circuits and store the results in a JSON file.

    The file holds a dictionary with the keys:
    - 'backend_result': backend result serialized to a dictionary.
    - 'xdata': stored as a plain list (not a numpy array).
    - 'qubits': qubits used for the T2* measurement.
    - 't2': the T2* value used when building the circuits.
    - 'omega': oscillation frequency used in the circuits.

    Args:
        filename - name of the json file.
    """
    result, delays, measured_qubits, t2_value, omega = t2star_circuit_execution()
    payload = {
        'backend_result': result.to_dict(),
        'xdata': delays.tolist(),
        'qubits': measured_qubits,
        't2': t2_value,
        'omega': omega,
    }
    with open(filename, 'w') as handle:
        json.dump(payload, handle)
def zz_circuit_execution() -> Tuple[qiskit.result.Result,
                                    np.array,
                                    List[int],
                                    List[int],
                                    float,
                                    float]:
    """
    Create ZZ circuits and simulate them.

    Returns:
        * Backend result.
        * xdata.
        * Qubits for the ZZ measurement.
        * Spectators.
        * ZZ parameter that used in the circuit creation
        * Frequency.
    """
    num_of_gates = np.arange(0, 60, 10)
    gate_time = 0.1
    qubits = [0]
    spectators = [1]

    # Generate experiments
    circs, xdata, omega = zz_circuits(num_of_gates,
                                      gate_time, qubits,
                                      spectators, nosc=2)

    # Set the simulator with ZZ
    zz_value = 0.1
    # Coherent ZZ error: a phase accumulates only on the |11> state
    # (element [3, 3] of the two-qubit unitary).
    zz_unitary = np.eye(4, dtype=complex)
    zz_unitary[3, 3] = np.exp(1j*2*np.pi*zz_value*gate_time)
    error = coherent_unitary_error(zz_unitary)
    noise_model = NoiseModel()
    # Apply the two-qubit error on qubits (0, 1) whenever 'id' runs on qubit 0.
    noise_model.add_nonlocal_quantum_error(error, 'id', [0], [0, 1])

    # Run the simulator
    backend = qiskit.Aer.get_backend('qasm_simulator')
    shots = 100
    backend_result = qiskit.execute(circs, backend,
                                    shots=shots,
                                    seed_simulator=SEED,
                                    noise_model=noise_model,
                                    optimization_level=0).result()

    return backend_result, xdata, qubits, spectators, zz_value, omega
def generate_data_zz(filename):
    """Simulate ZZ circuits and store the results in a JSON file.

    The file holds a dictionary with the keys:
    - 'backend_result': backend result serialized to a dictionary.
    - 'xdata': stored as a plain list (not a numpy array).
    - 'qubits': qubits used for the ZZ measurement.
    - 'spectators': spectator qubits.
    - 'zz': the ZZ value used when building the circuits.
    - 'omega': oscillation frequency used in the circuits.

    Args:
        filename - name of the json file.
    """
    (result, delays, measured_qubits,
     spectator_qubits, zz_value, omega) = zz_circuit_execution()
    payload = {
        'backend_result': result.to_dict(),
        'xdata': delays.tolist(),
        'qubits': measured_qubits,
        'spectators': spectator_qubits,
        'zz': zz_value,
        'omega': omega,
    }
    with open(filename, 'w') as handle:
        json.dump(payload, handle)
if __name__ == '__main__':
    DIRNAME = os.path.dirname(os.path.abspath(__file__))
    # Map each command-line fitter name to its data-generation function.
    # Every generator writes to '<fit_type>_data.json' next to this script,
    # replacing the repetitive if/elif chain with a single dispatch table.
    generators = {
        'zz': generate_data_zz,
        't2star': generate_data_t2star,
        't2': generate_data_t2,
        't1': generate_data_t1,
    }
    for fit_type in sys.argv[1:]:
        generate = generators.get(fit_type)
        if generate is None:
            print('Skipping unknown argument ' + fit_type)
        else:
            generate(os.path.join(DIRNAME, fit_type + '_data.json'))
| [
"noreply@github.com"
] | jyu00.noreply@github.com |
606b7ab155cf0cbadd90c5a5d4faf54a1582a37c | 6e76dcc7db77e75aefddefbc78fb587c9732073a | /Dash_Panel/scripts/holoviews_app.py | fe0d8924a5d2c82d3e31c5c11490055306038e04 | [] | no_license | AOngaro3/Dashboarding_with_python | 1de465dd14bdfc42966e4addd3fcc80cbdc24c72 | 365e87a85f8e2d19e8c2e89392aed1333d528c2d | refs/heads/main | 2023-03-14T16:12:39.394577 | 2021-03-26T11:03:32 | 2021-03-26T11:03:32 | 344,487,491 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | from dash import Dashboard, pn
# Raw CSS theme for the dashboard: page background, red app bar with a
# drop shadow, white rounded content containers, and grey settings pane.
STYLE = '''
.bk.app-body {
background: #f2f2f2;
color: #000000;
font-family: roboto, sans-serif, Verdana;
}
.bk.app-bar {
background: #d52e3f;
border-color: white;
box-shadow: 5px 5px 20px #9E9E9E;
color: #ffffff;
z-index: 50;
}
.bk.app-container {
background: #ffffff;
border-radius: 5px;
box-shadow: 2px 2px 2px lightgrey;
color: #000000;
}
.bk.app-settings {
background: #e0e0e0;
color: #000000;
}
'''
# NOTE(review): STYLE is registered twice — appended to pn.config.raw_css
# AND passed to pn.extension(raw_css=[...]). One of the two looks
# redundant; confirm against the Panel version in use before removing.
pn.config.raw_css.append(STYLE)
pn.extension(raw_css=[STYLE])
if __name__ == '__main__':
    # Build the dashboard and serve it on port 5006; websocket connections
    # are only accepted from the listed Flask-app origins, and no browser
    # window is opened automatically (show=False).
    dash = Dashboard()
    pn.serve(dash.panel().servable(), port=5006, allow_websocket_origin=["localhost:5000","127.0.0.1:5000"], show=False)
"andrea.ongaro1995@gmail.com"
] | andrea.ongaro1995@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.