blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ca7d8dcde9b57240cc9f9fd08d1837aa7c915a8 | 30b1d37cf5b46de7131257ae3862ad3fa4eb3952 | /photo_face.py | 5b0c25919639bb4a812d889cede7fc47a8b01f50 | [] | no_license | YOURSyym/Special-person-face-snap-alarm-system | e353f64a6fa830a999941c4f920fc4d856364959 | e4df1017cf89d23e652fe8cd6d77a4b03811132a | refs/heads/master | 2021-04-16T01:03:05.991450 | 2020-03-23T02:14:20 | 2020-03-23T02:14:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,585 | py | import os
import cv2
import time
import shutil
def getAllPath(dirpath, *suffix):
    """Recursively collect paths of all files under *dirpath* whose
    extension (including the leading dot, e.g. '.jpg') appears in *suffix*.

    Returns the matches in os.walk order as a list of full paths.
    """
    return [
        os.path.join(root, filename)
        for root, _dirs, filenames in os.walk(dirpath)
        for filename in filenames
        if os.path.splitext(filename)[1] in suffix
    ]
def readPicSaveFace_1(sourcePath, targetPath, invalidPath, *suffix):
    """Detect faces in every image under sourcePath and save each face crop
    as a JPEG into targetPath; images without any detected face are moved
    to invalidPath.

    Args:
        sourcePath: directory scanned recursively for images.
        targetPath: directory that receives the cropped face images.
        invalidPath: directory that receives images with no detected face.
        *suffix: accepted file extensions, including the dot (e.g. '.jpg').
    """
    try:
        ImagePaths = getAllPath(sourcePath, *suffix)
        # 对list中图片逐一进行检查,找出其中的人脸然后写到目标文件夹下
        count = 1
        # haarcascade_frontalface_alt.xml为库训练好的分类器文件,下载opencv,安装目录中可找到
        face_cascade = cv2.CascadeClassifier('C:\\Users\\ASUS\\Desktop\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_alt.xml')
        for imagePath in ImagePaths:
            try:
                img = cv2.imread(imagePath)
                # BUGFIX: cv2.imread returns an ndarray, or None on failure --
                # never a str -- so the old `type(img) != str` test was always
                # True and unreadable files crashed into the except below.
                if img is None:
                    continue
                faces = face_cascade.detectMultiScale(img, 1.1, 5)
                if len(faces):
                    for (x, y, w, h) in faces:
                        # 设置人脸宽度大于16像素,去除较小的人脸
                        if w >= 16 and h >= 16:
                            # 以时间戳和读取的排序作为文件名称
                            fileName = str(int(time.time())) + str(count)
                            # Clamp the crop to the image bounds.
                            X = int(x)
                            W = min(int(x + w), img.shape[1])
                            Y = int(y)
                            H = min(int(y + h), img.shape[0])
                            f = cv2.resize(img[Y:H, X:W], (W - X, H - Y))
                            cv2.imwrite(targetPath + os.sep + '%s.jpg' % fileName, f)
                            count += 1
                            print(imagePath + "have face")
                else:
                    # No face found: move the picture out of the way.
                    shutil.move(imagePath, invalidPath)
            except Exception:
                # Best-effort batch job: skip images that fail to process.
                continue
    except IOError:
        print("Error")
    else:
        print('Find ' + str(count - 1) + ' faces to Destination ' + targetPath)
if __name__ == '__main__':
    invalidPath = r'C:\Users\ASUS\Desktop\data\invalid'
    sourcePath = r'C:\Users\ASUS\Desktop\data\web'
    targetPath1 = r'C:\Users\ASUS\Desktop\data\new'
    # BUGFIX: os.path.splitext yields extensions WITH the leading dot, so the
    # bare 'png'/'PNG' entries could never match; they must be '.png'/'.PNG'.
    readPicSaveFace_1(sourcePath, targetPath1, invalidPath, '.jpg', '.JPG', '.png', '.PNG')
| [
"3315371762@qq.com"
] | 3315371762@qq.com |
b47dea3d469f4570fbb4a8f588f8cf4f4b16c852 | 6df2e5efb3071597e57e02f534abdd858a2d5c77 | /config/urls.py | 7c8e35b90ba30f629a3deedf08c3b1afc160fbe1 | [
"MIT"
] | permissive | Naxaes/DH2413-website | c240d72daf1edc9977ba994a6696d92c7d51294f | 9cc61f453ab7d6ceeec471ec72058c2ce1a2346a | refs/heads/master | 2022-12-07T19:34:06.380258 | 2020-09-01T22:48:15 | 2020-09-01T22:48:15 | 292,038,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework.authtoken.views import obtain_auth_token
# Root URL configuration (cookiecutter-django layout): home page plus media
# serving, then API routes, then DEBUG-only error-page previews.
urlpatterns = [
    path("", TemplateView.as_view(template_name="fourier_series.html"), name="home"),
    # Django Admin, use {% url 'admin:index' %}
    # path(settings.ADMIN_URL, admin.site.urls),
    # User management
    # path("users/", include("dh2413_advanced_graphics_and_interaction.users.urls", namespace="users")),
    # path("accounts/", include("allauth.urls")),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Static file serving when using Gunicorn + Uvicorn for local web socket development
    urlpatterns += staticfiles_urlpatterns()
# API URLS
urlpatterns += [
    # API base url
    path("api/", include("config.api_router")),
    # DRF auth token
    path("auth-token/", obtain_auth_token),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        path(
            "400/",
            default_views.bad_request,
            kwargs={"exception": Exception("Bad Request!")},
        ),
        path(
            "403/",
            default_views.permission_denied,
            kwargs={"exception": Exception("Permission Denied")},
        ),
        path(
            "404/",
            default_views.page_not_found,
            kwargs={"exception": Exception("Page not Found")},
        ),
        path("500/", default_views.server_error),
    ]
    # Mount django-debug-toolbar only when it is installed/enabled.
    if "debug_toolbar" in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| [
"ted@zerebra.com"
] | ted@zerebra.com |
5e985531ad45f5f550230f2d78adf377df29ec40 | b8e823a52e2a7a30fda6b4ff35725275dcd76288 | /mysite/blog/migrations/0001_initial.py | 3f5fa485e616ab3b54b3caa2f41b0f15a9f8387b | [] | no_license | nihalmishra/django-tutorials | 5487a407b6fd27d05a0cdb214e7a6e6b2e2ed576 | 675329af010d87006d23fe8b77b4cb085079b985 | refs/heads/master | 2020-03-22T21:25:16.736775 | 2018-09-10T12:32:53 | 2018-09-10T12:32:53 | 140,686,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-27 10:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the blog app: creates the Post table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            # Auto PK plus a 140-char title, free-form body and a timestamp.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=140)),
                ('body', models.TextField()),
                ('date', models.DateTimeField()),
            ],
        ),
    ]
| [
"nihal.mishra95@gmail.com"
] | nihal.mishra95@gmail.com |
4a32f86eaacf0ac184b3d541a5e54a0402bdb228 | 2132d79e8ade83a89cd8388164cc43e61f8d82ff | /model_relationships_part1/model_relationships/urls.py | 2408084d9aaf2e5d620d90eac8a83cdb02183ea6 | [] | no_license | nipa04/BootCamp-Works | ca2721495a948134ed31b020216e5826a77b2597 | 5cae671d5208b7cc8c8f858a7c316c59f64230f0 | refs/heads/master | 2023-01-07T21:54:36.577506 | 2020-01-30T16:58:53 | 2020-01-30T16:58:53 | 232,620,796 | 0 | 0 | null | 2023-01-06T03:47:26 | 2020-01-08T17:33:55 | PLSQL | UTF-8 | Python | false | false | 761 | py | """model_relationships URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the Django admin is routed in this project.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"nipa.farjana04@gmail.com"
] | nipa.farjana04@gmail.com |
156d14bdae8a0e9a71a2d8974b69ecfec3a41281 | 3859695d6322d39c047b129a6098b2645de9e972 | /mysite/mysite/urls.py | 4ba8ebe28430e4a8155243981a10a0243eaf0aeb | [] | no_license | Davy971/TD1genieLogiciel | cfe9ec7e3ca99162dd28d9d95f3a87b2de326080 | d4f573ddfb741efb2982b3d51e7bb692237c8bd9 | refs/heads/master | 2020-03-30T18:46:23.266367 | 2018-10-04T04:19:11 | 2018-10-04T04:19:11 | 150,511,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include,path
# Delegate /ifap/ to the ifap app's URLconf; keep the admin at /admin/.
urlpatterns = [
    path('ifap/', include('ifap.urls')),
    path('admin/', admin.site.urls),
]
| [
"regalade.davy@gmail.com"
] | regalade.davy@gmail.com |
22d441f62a586e355c35efc8f171c07e88700048 | d4811f4ba6b20d9cba2ae63736e58fe8d1de198a | /tests/loading/primitives/test_uint32.py | b8b6effc660668a183b78462679a7647450550ba | [
"MIT"
] | permissive | mlga/schematics-proto3 | f6de009cff5ac6ff096dbe784618001b3c3bd5fb | 588fe5bc212e203688166638a1c52dfeda931403 | refs/heads/master | 2020-09-11T06:31:21.146154 | 2020-08-19T17:32:45 | 2020-08-19T17:32:45 | 221,972,314 | 0 | 0 | MIT | 2020-08-19T17:32:47 | 2019-11-15T17:28:45 | Python | UTF-8 | Python | false | false | 673 | py | # -*- coding:utf-8 -*-
from schematics.types import IntType
from tests.loading.primitives import CommonPrimitivesTests
from tests import schematics_proto3_tests_pb2 as pb2
from tests.utils.randoms import value_for_primitive
from tests.utils.wire import mimic_protobuf_wire_transfer
class TestUInt32(CommonPrimitivesTests):
    """Loading tests for the protobuf uint32 primitive mapped to IntType."""
    field_type_class = IntType
    protobuf_msg_class = pb2.UInt32
    def get_msg_all_set(self):
        # Message with the value field populated by a random uint32,
        # round-tripped through the wire format.
        msg = self.protobuf_msg_class()
        msg.value = value_for_primitive('uint32_field')
        return mimic_protobuf_wire_transfer(msg)
    def get_msg_unsets(self):
        # Default-constructed (unset) message round-tripped through the wire.
        return mimic_protobuf_wire_transfer(self.protobuf_msg_class())
| [
"github@mlga.io"
] | github@mlga.io |
a24231af24f186f570d3b622abe248a6ea2645f9 | 91e867dfb15df3e3cc78604335013a06d2b6d6a0 | /tests/unit/tests_standard_lib/test_fields_tests/test_test_generator.py | 12a6578a609c6889a37e828402b4806185b9a181 | [
"Apache-2.0"
] | permissive | monishshah18/pytest-splunk-addon | fd0cccce51f15ae2baac8aadb9ad13827cb7260d | 1600f2c7d30ec304e9855642e63511780556b406 | refs/heads/main | 2023-06-14T16:36:54.600276 | 2021-07-14T21:40:43 | 2021-07-14T21:40:43 | 326,932,988 | 0 | 0 | NOASSERTION | 2021-01-05T08:20:50 | 2021-01-05T08:20:50 | null | UTF-8 | Python | false | false | 12,356 | py | import pytest
from unittest.mock import patch, MagicMock
from pytest_splunk_addon.standard_lib.fields_tests.test_generator import (
FieldTestGenerator,
)
def field_1():
    pass
def field_2():
    pass
def field_3():
    pass
# Give each stub a "name" attribute so they mimic the field objects the
# generator reads `.name` from in the parametrized cases below.
field_1.__dict__.update({"name": "field_1"})
field_2.__dict__.update({"name": "field_2"})
field_3.__dict__.update({"name": "field_3"})
@pytest.fixture()
def addon_parser_mock(monkeypatch):
    # Replace AddonParser with a MagicMock whose constructor returns itself,
    # so FieldTestGenerator.addon_parser is the mock we configure in tests.
    ap = MagicMock()
    ap.return_value = ap
    monkeypatch.setattr(
        "pytest_splunk_addon.standard_lib.fields_tests.test_generator.AddonParser", ap
    )
    return ap
@pytest.fixture()
def field_bank_mock(monkeypatch):
    # Replace the FieldBank used by the module under test with a MagicMock.
    fb = MagicMock()
    monkeypatch.setattr(
        "pytest_splunk_addon.standard_lib.fields_tests.test_generator.FieldBank", fb
    )
    return fb
def test_field_test_generator_instantiation(addon_parser_mock):
    """The constructor stores field_bank and builds an AddonParser(app_path)."""
    addon_parser_mock.return_value = "ADDON_PARSER_RETURN_VALUE"
    ftg = FieldTestGenerator("app_path", "field_bank")
    assert ftg.field_bank == "field_bank"
    assert ftg.addon_parser == "ADDON_PARSER_RETURN_VALUE"
    addon_parser_mock.assert_called_once_with("app_path")
@pytest.mark.parametrize(
"fixture_name, expected_ouptput",
[
("splunk_searchtime_fields_positive", "GENERATE_FILED_TESTS_RETURN_VALUE"),
(
"splunk_searchtime_fields_negative",
"GENERATE_FILED_TESTS_RETURN_VALUE",
),
(
"splunk_searchtime_fields_tags",
"GENERATE_TAG_TESTS_RETURN_VALUE",
),
(
"splunk_searchtime_fields_eventtypes",
"GENERATE_EVENTTYPE_TESTS_RETURN_VALUE",
),
(
"splunk_searchtime_fields_savedsearches",
"GENERATE_SAVEDSEARCHES_TESTS_RETURN_VALUE",
),
],
)
def test_generate_tests(addon_parser_mock, fixture_name, expected_ouptput):
    """generate_tests should dispatch to the generator matching fixture_name.

    All four generator methods are patched; only the one selected by the
    fixture name should contribute to the output.
    """
    with patch.object(
        FieldTestGenerator,
        "generate_field_tests",
        return_value=(["GENERATE_FILED_TESTS_RETURN_VALUE"]),
    ), patch.object(
        FieldTestGenerator,
        "generate_tag_tests",
        return_value=(["GENERATE_TAG_TESTS_RETURN_VALUE"]),
    ), patch.object(
        FieldTestGenerator,
        "generate_eventtype_tests",
        return_value=(["GENERATE_EVENTTYPE_TESTS_RETURN_VALUE"]),
    ), patch.object(
        FieldTestGenerator,
        "generate_savedsearches_tests",
        return_value=(["GENERATE_SAVEDSEARCHES_TESTS_RETURN_VALUE"]),
    ):
        assert list(
            FieldTestGenerator("app_path", "field_bank").generate_tests(fixture_name)
        ) == [expected_ouptput]
def test_generate_tag_tests(addon_parser_mock):
    """Each parsed tag becomes a pytest.param with id '<stanza>::tag::<tag>'."""
    tags = [
        {
            "stanza": 'eventtype="fiction_for_tags_positive"',
            "tag": "tags_positive_event",
            "enabled": True,
        },
        {
            "stanza": 'source="/opt/splunk/var/log/splunk/splunkd.log"',
            "tag": "tags_disabled_event",
            "enabled": False,
        },
    ]
    addon_parser_mock.get_tags.side_effect = lambda: (tag for tag in tags)
    # pytest.param is patched to return its arguments so the ids are visible.
    with patch.object(pytest, "param", side_effect=lambda x, id: (x, id)) as param_mock:
        out = list(FieldTestGenerator("app_path", "field_bank").generate_tag_tests())
    assert out == [
        (tags[0], f"{tags[0]['stanza']}::tag::{tags[0]['tag']}"),
        (tags[1], f"{tags[1]['stanza']}::tag::{tags[1]['tag']}"),
    ]
    assert param_mock.call_count == len(tags)
def test_generate_eventtype_tests(addon_parser_mock):
    """Each parsed eventtype becomes a pytest.param with id 'eventtype::<stanza>'."""
    eventtypes = [
        {"stanza": "fiction_is_splunkd"},
        {"stanza": "fiction_for_tags_positive"},
        {"stanza": "fiction_is_splunkd-%host%"},
    ]
    addon_parser_mock.get_eventtypes.side_effect = lambda: (
        event for event in eventtypes
    )
    with patch.object(pytest, "param", side_effect=lambda x, id: (x, id)) as param_mock:
        out = list(
            FieldTestGenerator("app_path", "field_bank").generate_eventtype_tests()
        )
    assert out == [
        (eventtypes[0], f"eventtype::{eventtypes[0]['stanza']}"),
        (eventtypes[1], f"eventtype::{eventtypes[1]['stanza']}"),
        (eventtypes[2], f"eventtype::{eventtypes[2]['stanza']}"),
    ]
    assert param_mock.call_count == len(eventtypes)
def test_generate_savedsearches_tests(addon_parser_mock):
    """Each parsed saved search becomes a pytest.param with its stanza as id."""
    savedsearches = [
        {
            "stanza": "basic_search",
            "search": "index = _internal | stats count by sourcetype",
        },
        {"stanza": "empty_search", "search": 'index = "main"'},
    ]
    addon_parser_mock.get_savedsearches.side_effect = lambda: (
        savedsearch for savedsearch in savedsearches
    )
    with patch.object(pytest, "param", side_effect=lambda x, id: (x, id)) as param_mock:
        out = list(
            FieldTestGenerator("app_path", "field_bank").generate_savedsearches_tests()
        )
    assert out == [
        (savedsearches[0], savedsearches[0]["stanza"]),
        (savedsearches[1], savedsearches[1]["stanza"]),
    ]
    assert param_mock.call_count == len(savedsearches)
@pytest.mark.parametrize(
    "fields_group, criteria, expected_result",
    [
        ({"classname": "valid_classname"}, ["valid_class", "valid_classname"], True),
        ({"classname": "invalid_classname"}, ["valid_class", "valid_classname"], False),
    ],
)
def test_contains_classname(fields_group, criteria, expected_result):
    """_contains_classname is True only when the group's classname is listed."""
    assert (
        FieldTestGenerator("app_path", "field_bank")._contains_classname(
            fields_group, criteria
        )
        is expected_result
    )
@pytest.mark.parametrize(
"is_positive, contains_classname, field_bank, prpos_fields, expected_output",
[
(
False,
[False, False],
[
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [field_2, field_3],
}
],
[
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [field_1],
}
],
[
(
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [{"name": "field_2"}],
},
f"sourcetype::splunkd::field_bank_1::{field_2}",
),
(
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [{"name": "field_3"}],
},
f"sourcetype::splunkd::field_bank_2::{field_3}",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [{"name": "field_1"}],
},
f"snow:incident::field::{field_1}",
),
],
),
(
True,
[False, False],
[
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [field_2],
}
],
[
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [field_1],
}
],
[
(
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [],
},
"sourcetype::splunkd",
),
(
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [{"name": "field_2"}],
},
f"sourcetype::splunkd::field_bank_1::{field_2}",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [],
},
"snow:incident",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [{"name": "field_1"}],
},
f"snow:incident::field::{field_1}",
),
],
),
(
False,
[False, True],
[
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [field_2],
}
],
[
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [field_1, field_3],
}
],
[
(
{
"stanza": "sourcetype::splunkd",
"stanza_type": "sourcetype",
"classname": "field_bank",
"fields": [{"name": "field_2"}],
},
f"sourcetype::splunkd::field_bank_1::{field_2}",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [{"name": "field_1"}, {"name": "field_3"}],
},
"snow:incident::REPORT::transform_string",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [{"name": "field_1"}],
},
f"snow:incident::field::{field_1}",
),
(
{
"stanza": "snow:incident",
"stanza_type": "sourcetype",
"classname": "REPORT::transform_string",
"fields": [{"name": "field_3"}],
},
f"snow:incident::field::{field_3}",
),
],
),
],
)
def test_generate_field_tests(
    addon_parser_mock,
    field_bank_mock,
    is_positive,
    contains_classname,
    field_bank,
    prpos_fields,
    expected_output,
):
    """generate_field_tests merges field-bank and props fields into pytest params.

    The parametrized cases (decorator above) cover the is_positive flag and
    the _contains_classname short-circuit, which is patched per-case here.
    """
    addon_parser_mock.get_props_fields.return_value = prpos_fields
    field_bank_mock.init_field_bank_tests.return_value = field_bank
    with patch.object(
        FieldTestGenerator, "_contains_classname", side_effect=contains_classname
    ), patch.object(pytest, "param", side_effect=lambda x, id: (x, id)) as param_mock:
        out = list(
            FieldTestGenerator("app_path", "field_bank").generate_field_tests(
                is_positive
            )
        )
    assert out == expected_output
    assert param_mock.call_count == len(expected_output)
| [
"noreply@github.com"
] | noreply@github.com |
9ae82b5750f77a253ffc1e56ff84b70c2023d0ad | dc36dd2b74d9db4e5b8fcd3d65514fb7275b39ba | /Nikita_Trynus/7_collections/practise/ex_1.py | 85eeb97e25f11f4604d43e3624e8243d9334fdc5 | [] | no_license | YuriiKhomych/ITEA_course | 917fd0b3bc83fc574ef5620b0eba89f5b8f169e4 | 56bf9c67e535b3d521d7f0dd45f1c737a25b6ce7 | refs/heads/master | 2020-06-25T19:00:31.801788 | 2019-09-17T06:14:59 | 2019-09-17T06:14:59 | 199,396,197 | 0 | 15 | null | 2019-11-20T19:49:30 | 2019-07-29T06:55:35 | Python | UTF-8 | Python | false | false | 181 | py | pizza = ['1', '2', '3']
# Demo loop: one line per pizza entry, then a few fixed messages.
for i in pizza:
    print(f'{i} is cool')
print(f'i like {pizza[0]}')
print(f'i love {pizza[1]}')
# NOTE(review): repeats pizza[0]; pizza[2] may have been intended -- confirm.
print(f'i am ok with {pizza[0]}')
print('I like pizza')
| [
"trynuspoc@gmail.com"
] | trynuspoc@gmail.com |
6af093b41a50353b636b1226ae653d4697195ed3 | b83f9df3dc8a4e430cd88076a87cf87c064f0942 | /src/train.py | 70983ee9fc127f999eb5035233668218cf03e62f | [] | no_license | BassirouNdao/GAN2Vec | 40867512d75690b9fa175ae06edc2c0f0cae4619 | 44960dd89369cf3ebb1f53ff96c8351a60d79695 | refs/heads/master | 2023-02-04T19:59:58.342601 | 2020-12-21T21:49:06 | 2020-12-21T21:49:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | import torch
import pickle
import os
import time
from torch import nn
from torch.autograd import Variable
from random import randint
from torch.optim import Adam
from gan2vec import Discriminator, Generator
#from gan2vec_conv import ConvGenerator
from torch.nn.utils.rnn import pack_padded_sequence
from gensim.models import Word2Vec
DATA_DIR = 'data'
#DATA_DIR = 'code/GAN2Vec/data' # For debugger
IN_TEXT = 'cleaned_haiku.data'
IN_W2V = 'w2v_haiku.model'
text = encoder = None
def get_data():
    """Lazily load the corpus (first 256 lines) and the word2vec encoder
    into the module-level globals `text` and `encoder`; no-op if loaded."""
    global text, encoder
    if text:
        return
    with open(os.path.join(DATA_DIR, IN_TEXT), 'rb') as f:
        text = pickle.load(f)[:256]
    encoder = Word2Vec.load(os.path.join(DATA_DIR, IN_W2V))
def get_lines(start,end):
    """Vectorize corpus lines [start, end) with word2vec.

    Returns a (PackedSequence, start_words) pair: the zero-padded word-vector
    batch packed with the true sequence lengths, and each sentence's first
    word vector (fed to the generator).
    """
    get_data()
    seq_lens = []
    sentences = []
    longest = 0
    for l in text[start:end]:
        seq_lens.append(len(l))
        longest = len(l) if len(l) > longest else longest
        sentence = []
        for w in l:
            sentence.append(torch.tensor(encoder.wv[w]))
        # Shape (1, seq_len, embed_dim) so sentences can be concatenated below.
        sentences.append(torch.stack(sentence).unsqueeze(0))
    # Pad input
    d_size = sentences[0].size(2)
    for i in range(len(sentences)):
        sl = sentences[i].size(1)
        if sl < longest:
            sentences[i] = torch.cat(
                [sentences[i], torch.zeros(1,longest-sl,d_size)],
                dim=1
            )
    # Need to squish sentences into [0,1] domain
    seq = torch.cat(sentences, dim=0)
    #seq = torch.sigmoid(seq)
    start_words = seq[:, 0:1, :]
    packer = pack_padded_sequence(
        seq,
        seq_lens,
        batch_first=True,
        enforce_sorted=False
    )
    return packer , start_words
def get_closest(sentences):
    """Snap each generated word vector to the vector of its most-similar
    vocabulary word (per word2vec), returning a tensor of the same shape.
    Used to inspect generator output as real words."""
    scores = []
    wv = encoder.wv
    for s in sentences.detach().numpy():
        st = [
            wv[wv.most_similar([s[i]], topn=1)[0][0]]
            for i in range(s.shape[0])
        ]
        scores.append(torch.tensor(st))
    return torch.stack(scores, dim=0)
def train(epochs, batch_size=256, latent_size=256, K=1):
    """Adversarially train the Generator against the Discriminator.

    Args:
        epochs: number of passes over the corpus.
        batch_size: sentences per batch.
        latent_size: unused in this implementation (kept for API compatibility).
        K: generator updates per discriminator update.

    Saves the generator to 'generator.model' every 10 epochs and at the end.
    """
    get_data()
    num_samples = len(text)
    G = Generator(64, 64)
    D = Discriminator(64)
    l2 = nn.MSELoss()  # NOTE(review): defined but never used below
    loss = nn.BCELoss()
    opt_d = Adam(D.parameters(), lr=0.002, betas=(0.5, 0.999))
    opt_g = Adam(G.parameters(), lr=0.002, betas=(0.5, 0.999))
    for e in range(epochs):
        i = 0
        while batch_size*i < num_samples:
            stime = time.time()
            start = batch_size*i
            end = min(batch_size*(i+1), num_samples)
            bs = end-start
            # Use lable smoothing
            tl = torch.full((bs, 1), 0.9)
            fl = torch.full((bs, 1), 0.1)
            # Train descriminator
            opt_d.zero_grad()
            real, greal = get_lines(start, end)
            fake = G(greal)
            r_loss = loss(D(real), tl)
            f_loss = loss(D(fake), fl)
            r_loss.backward()
            f_loss.backward()
            d_loss = (r_loss.mean().item() + f_loss.mean().item()) / 2
            opt_d.step()
            # Train generator
            for _ in range(K):
                opt_g.zero_grad()
                # GAN fooling ability
                fake = G(greal)
                g_loss = loss(D(fake), tl)
                g_loss.backward()
                opt_g.step()
            g_loss = g_loss.item()
            print(
                '[%d] D Loss: %0.3f G Loss %0.3f (%0.1fs)' %
                (e, d_loss, g_loss, time.time()-stime)
            )
            i += 1
        # Periodic checkpoint of the generator only.
        if e % 10 == 0:
            torch.save(G, 'generator.model')
    torch.save(G, 'generator.model')
# Cap intra-op CPU parallelism for torch.
torch.set_num_threads(16)
if __name__ == '__main__':
    train(1000, batch_size=256)
| [
"zazyzaya@gmail.com"
] | zazyzaya@gmail.com |
4097c12e5e32ddb612f8e316b6cc081ec845b219 | a2218b9627f403b0482198cebc4a32f4c5f1da95 | /console_version.py | fd5d9cc0ac3afc51da7639e6417e5caba16a2f85 | [] | no_license | VladimirZorin1986/finite-automation | 9c6a3186a61e35b2e5a947578f9f3480f5e2354b | e2fc9ecb385abb5bc29b1586fcffafe4404a93cb | refs/heads/main | 2022-12-28T02:17:47.019093 | 2020-10-08T09:31:40 | 2020-10-08T09:31:40 | 302,371,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from typing import Set, Dict, Tuple
# Определение константных величин
# Transition table, start state and accepting states of the automaton.
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S',
                                    ('H', 'b'): 'SE',
                                    ('S', 'b'): 'SE',
                                    ('SE', 'a'): 'SE',
                                    ('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}
def finite_automate(word: str) -> str:
    """Run the finite automaton over *word*, tracing each step, and report
    whether the string belongs to the language."""
    state: str = INITIAL_STATE
    index = 0
    while index < len(word):
        print(f'{word[index:]} --> {state}')
        state = RULE.get((state, word[index]))
        if not state:
            # No transition for this (state, symbol) pair: reject early.
            break
        index += 1
    return ('Цепочка принадлежит языку'
            if state in FINAL_STATE
            else 'Цепочка не принадлежит языку')
| [
"pls.github@gmail.com"
] | pls.github@gmail.com |
66a2df763ed13eb2629342a4a870fa3e9b2f607e | b53782b56116547571a60c20c421908b0110ee79 | /multi.py | 4741b6ffb41bedc2b7b15f0ada20948c2b69f7fa | [] | no_license | phungvankhanh/JPEGImageCompressAlgor | 16eeb2790a11cf2a3a4a62ae4b7263decb37ba90 | 86fb44add45f0406a94cf1993603439e174f07ac | refs/heads/master | 2021-04-09T13:12:30.020741 | 2018-04-01T17:53:06 | 2018-04-01T17:53:06 | 125,459,397 | 0 | 1 | null | 2018-03-26T16:57:49 | 2018-03-16T03:38:06 | Python | UTF-8 | Python | false | false | 2,097 | py | from tkinter import Tk,Scale,Canvas,IntVar, Frame,Menu, BOTH, Label,NW, Text,X,LEFT,Button,RIGHT
from PIL import Image,ImageTk
from tkinter.filedialog import Open
import tkinter.messagebox as mbox
class Example(Frame):
    """Tkinter window for a JPEG-compression demo: menus, two image labels,
    a quality slider and a Start button."""
    def __init__(self,parent):
        Frame.__init__(self,parent)
        self.parent=parent
        self.initUI()
    def initUI(self):
        # Build the menu bar, text area and static widgets.
        self.parent.title("Multimedia")
        self.pack(fill=BOTH,expand=1)
        menuBar=Menu(self.parent)
        self.parent.config(menu=menuBar)
        fileMenu=Menu(menuBar)
        fileMenu.add_command(label="Open",command=self.onOpen)
        menuBar.add_cascade(label="File",menu=fileMenu)
        menuBar.add_command(label="About",command=self.onInfo)
        menuBar.add_command(label="Exit",command=self.quit)
        #self.txt=Text(self)
        self.txt=Text(self)
        self.txt.pack(fill=BOTH,expand=1)
        lbl1=Label(self,text="Original Image",width=20)
        lbl1.place(x=20,y=50)
        lbl2=Label(self,text="Output Image",width=20)
        lbl2.place(x=350,y=50)
        # Quality slider 0-100; value mirrored into self.var by onScale.
        scale=Scale(self,from_=0, to=100,command=self.onScale)
        scale.place(x=250,y=50)
        self.var=IntVar()
        start=Button(self,text="Start")
        start.place(x=250,y=170)
        # Flag: 0 until an image has been opened at least once.
        self.a=0
    def onInfo(self):
        # About dialog listing the project authors.
        mbox.showinfo("Information","Phung Van Khanh-20142895\nNguyen Thai Phuong-\nNguyen Ngoc Dong-20141072\nCu Tuan Minh-20142895")
    def onScale(self,val):
        # Slider callback: store the integer slider value.
        v=int(float(val))
        self.var.set(v)
    def onOpen(self):
        # On first open, a placeholder label is created from "1.png" and
        # immediately destroyed; afterwards the previous label is destroyed.
        # NOTE(review): the first branch's create-then-destroy looks like a
        # workaround for the destroy() in the else branch -- confirm intent.
        if self.a==0:
            self.liv=Image.open("1.png")
            self.liv.thumbnail((200, 200),Image.ANTIALIAS)
            liverpool=ImageTk.PhotoImage(self.liv)
            self.lable2=Label(self,image=liverpool)
            self.lable2.image=liverpool
            self.lable2.place(x=20,y=80)
            self.lable2.destroy()
        else:
            self.lable2.destroy()
        # NOTE(review): filter label says 'Python files' but matches *.jpg.
        ftypes=[('Python files','*.jpg'),('All files','*')]
        dlg=Open(self,filetypes=ftypes)
        fl=dlg.show()
        if fl!='':
            self.liv=Image.open(fl)
            self.liv.thumbnail((200, 200),Image.ANTIALIAS)
            liverpool=ImageTk.PhotoImage(self.liv)
            # Keep a reference on the label so the PhotoImage is not GC'd.
            self.lable2=Label(self,image=liverpool)
            self.lable2.image=liverpool
            self.lable2.place(x=20,y=80)
            self.a=1
# Application bootstrap: fixed-size window, then the Tk event loop.
root=Tk()
root.geometry("600x300+200+100")
Example(root)
root.mainloop()
| [
"caubeuudam@gmail.com"
] | caubeuudam@gmail.com |
f5ddaea2443db0d624de8fa5030142c3e1cf8114 | a67d00cb2ffb9c370d4fea400d6b78a1d3620b7c | /test_server.py | 349ccb7020ed49e212f2460c7928266364bb3312 | [] | no_license | xinli94/Ground-Station-for-UAV | 11895e71b0e12d13675b10aa46b65155646d8cbd | de493c886fa23a72044f5c63e2aab31221261f2b | refs/heads/master | 2021-01-11T15:57:36.022720 | 2017-01-29T21:30:17 | 2017-01-29T21:30:17 | 79,967,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
import sys,socket,struct,time
# NOTE(review): Python 2 code (uses the `print` statement below).
if __name__=='__main__':
    # Connect as a TCP client to a local server on port 8000.
    s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1',8000))
    # NOTE(review): SO_REUSEADDR set after connect() has no effect here --
    # presumably copied from server-side code; confirm intent.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    while True:
        #connection, address = s.accept()
        # Read a fixed 9-byte header and unpack it as nine unsigned bytes.
        recv_bytes=s.recv(9)
        head=struct.unpack("BBBBBBBBB", recv_bytes)
        print head
| [
"noreply@github.com"
] | noreply@github.com |
4bc85639ab45b77f2d7f1c5dd8ac2b6b51aeefe4 | c2fca7ddfd8f7dac47fa65ec291d7ea619c324d4 | /venv/bin/pip | a46f09231f77c81bc4e63c9be65641b793837241 | [] | no_license | ashenafiteame/mlwebsite | c177f2af64118f5b72213550da34c395e1723eb1 | 3b08c64bbbbb69d65ab7c950f90b7c69696e6cee | refs/heads/master | 2022-12-10T15:29:38.391511 | 2020-03-05T17:39:58 | 2020-03-05T17:39:58 | 245,230,707 | 0 | 0 | null | 2022-12-08T01:06:01 | 2020-03-05T17:48:41 | Python | UTF-8 | Python | false | false | 271 | #!/home/ashu/1POLIMI/projects/FlaskIntroduction/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Auto-generated console-script shim: strip the script suffix from
    # argv[0], then delegate to pip's CLI entry point and exit with its code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ashenafitm@gmail.com"
] | ashenafitm@gmail.com | |
3e7dd28ec82a965c62c46b51c6ce8138182cd2d5 | b6f28e641deea883f6d8ac320de41288eb5eae89 | /front/templatetags/startups.py | 95c702a83f96c4f793b630b7187afee79415387a | [] | no_license | gabrielhora/startupforme | eb27d6ec11fa70be39c4d70c9710e7ce48051867 | 67818fdc2fc8f60e00fc25e547c57c45d7332d1a | refs/heads/master | 2021-01-10T19:55:39.554134 | 2014-07-09T17:17:35 | 2014-07-09T17:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django import template
from front.models import Startup
from django.core.urlresolvers import reverse
from front.templatetags.utils import remove_http
register = template.Library()
def get_other_startups(parser, token):
    """Template tag: {% get_other_startups N %} renders N random startups.

    Raises TemplateSyntaxError unless the tag has exactly one argument.
    """
    try:
        func, count = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError('bad arguments for %r' % token.split_contents()[0])
    return OtherStartupsNode(count)
# Register the tag under its function name.
get_other_startups = register.tag(get_other_startups)
class OtherStartupsNode(template.Node):
    """Renders `count` random startups as a small HTML sidebar widget."""
    def __init__(self, count):
        self.count = count
    def render(self, context):
        from html import escape  # stdlib; used to neutralize user input
        random_startups = Startup.objects.select_related().order_by('?')[:self.count]
        items = u''
        item_html = u"""
        <div>
            <div><a href="%s" style="color:#34495e">%s</a></div>
            <a href="%s" target="_blank">%s</a>
        </div>
        <hr style="margin:10px 0;">
        """
        for startup in random_startups:
            # SECURITY: startup name/site/username are user-supplied; escape
            # them so they cannot inject markup or break out of attributes.
            items += item_html % (
                escape(reverse('startups_details', args=[startup.owner.username])),
                escape(startup.name),
                escape(startup.site),
                escape(remove_http(startup.site, 25))
            )
        return u'<div class="other_jobs">%s</div>' % items
| [
"gabrielhora@gmail.com"
] | gabrielhora@gmail.com |
20f8a261e10d05bf4e666f2e18c7161d85acd7b2 | 69b0aaa84cc5ab4f86a5506e91725830d17135fd | /func.py | d65f2435895ea16f9912bdb8cd95678612676a93 | [] | no_license | Danielideal/ChineseChess-AI | 2a46e0ef640fde4fe81db7a55d34653e59a1c856 | 202c5ecc95288466ac1b9c628801d9873c190835 | refs/heads/main | 2023-03-23T10:02:22.575252 | 2021-03-12T06:36:21 | 2021-03-12T06:36:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,626 | py | # -*-coding:utf-8-*-
"""
@author cc
@date 2021/2/17
"""
import config
import rule
import json
class Board:
    def __init__(self):
        # Flat list of squares; presumably index = x + y * 9 (see getMove) -- confirm.
        self._board = []
        # Game-over flag; canMove refuses to move while it is True.
        self.status = False
        self.roundGame = 0
        self.boardInit()
        # Move list and per-move board snapshots (filled by move()).
        self.moveSet = []
        self.boardRecoder = []
    def boardInit(self):
        # Reset to the opening position; copy so config's template is not mutated.
        self._board = config.__BOARD_INIT__.copy()
        # No stone selected; red moves first.
        self.checkedStoneID = config.__UNCKECKED__
        self.currentPlayer = 'RED'
    def getBoard(self):
        """Return the internal board list (a reference, not a copy)."""
        return self._board
def getStoneFromID(self, id):
stone = self._board[id]
if stone == config.b_兵:
return "卒"
elif stone == config.b_炮:
return "炮"
elif stone == config.b_车:
return "车"
elif stone == config.b_马:
return "马"
elif stone == config.b_相:
return "象"
elif stone == config.b_士:
return "士"
elif stone == config.b_将:
return "将"
elif stone == config.r_兵:
return "兵"
elif stone == config.r_炮:
return "炮"
elif stone == config.r_车:
return "车"
elif stone == config.r_马:
return "马"
elif stone == config.r_相:
return "相"
elif stone == config.r_士:
return "仕"
elif stone == config.r_将:
return "帅"
    def setChecked(self, id):
        # Remember which stone the player has currently selected.
        self.checkedStoneID = id
def changePlayer(self):
# print("换边")
if 'red' in self.currentPlayer.lower():
self.currentPlayer = 'BLACK'
else:
self.currentPlayer = 'RED'
def whichPlayer(self, stone):
if stone == config.__BLANK__:
return 'BLANK'
# return 'BLACK' if 'red' in self.currentPlayer else 'RED'
elif config.b_兵 <= stone <= config.b_将:
return "BLACK"
else:
return "RED"
def move(self, ido, idd):
self.boardRecoder.append(self._board.copy())
if self.whichPlayer(self._board[idd]) != self.whichPlayer(self._board[ido]) and 'BLANK' not in self.whichPlayer(self._board[ido]):
self._board[ido], self._board[idd] = config.__BLANK__, self._board[ido]
else:
self._board[ido], self._board[idd] = self._board[idd], self._board[ido]
self.checkedStoneID = config.__UNCKECKED__
self.changePlayer()
def canMove(self, id):
if self.status:
return
if self._board[self.checkedStoneID] == config.r_兵 or self._board[self.checkedStoneID] == config.b_兵:
# print("移动兵")
# TODO 兵走法
return rule.canMoveBING(self.checkedStoneID, id, self)
elif self._board[self.checkedStoneID] == config.r_炮 or self._board[self.checkedStoneID] == config.b_炮:
# TODO 炮走法
# print("移动炮")
return rule.canMovePAO(self.checkedStoneID, id, self)
elif self._board[self.checkedStoneID] == config.r_车 or self._board[self.checkedStoneID] == config.b_车:
# print("移动车")
return rule.canMoveCHE(self.checkedStoneID, id, self)
# TODO 车走法
elif self._board[self.checkedStoneID] == config.r_相 or self._board[self.checkedStoneID] == config.b_相:
# print("移动相")
return rule.canMoveXIANG(self.checkedStoneID, id, self)
# TODO 相走法
elif self._board[self.checkedStoneID] == config.r_士 or self._board[self.checkedStoneID] == config.b_士:
# print("移动士")
return rule.canMoveSHI(self.checkedStoneID, id, self)
# TODO 士走法
elif self._board[self.checkedStoneID] == config.r_将 or self._board[self.checkedStoneID] == config.b_将:
# print("移动将")
return rule.canMoveJIANG(self.checkedStoneID, id, self)
# TODO 将走法
elif self._board[self.checkedStoneID] == config.r_马 or self._board[self.checkedStoneID] == config.b_马:
# print("移动马")
return rule.canMoveMA(self.checkedStoneID, id, self)
# TODO 马走法
def getMove(self, move):
ido = (int(move[0])) + (int(move[1])) * 9
idd = (int(move[2])) + (int(move[3])) * 9
return ido, idd
def returnHash(self):
return ''.join([str(i) for i in self._board])
| [
"noreply@github.com"
] | noreply@github.com |
151f926afd5731a0defde7101ace9b5153e33146 | 32a56cf1f4764adc75b6fab32d6e14bfecdeaf97 | /Django Level 5/learning_users_5/basic_app/urls.py | be5be1c709656d305bd31aa0d13049742f310f03 | [] | no_license | sanchit-zeus/Workspace_html | 52dfef7881e3c1f309bc6904a8887dcbc593728c | 4ab344a151be2b426ecd9271ba7d877d64ab8808 | refs/heads/master | 2020-05-09T13:19:41.981502 | 2019-04-13T09:19:25 | 2019-04-13T09:19:25 | 181,147,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from django.conf.urls import url
from basic_app import views
# Template URLs, namespaced under 'basic_app' so templates can reverse them
# as 'basic_app:register' and 'basic_app:user_login'.
app_name = 'basic_app'
urlpatterns = [
    url(r'^register/$',views.register,name='register'),
    url(r'^user_login/$',views.user_login,name='user_login'),
]
| [
"35419687+sanchit-zeus@users.noreply.github.com"
] | 35419687+sanchit-zeus@users.noreply.github.com |
6e4156e4e3069e970ae4e345581cbee3c0abf56f | d74d31455943004326de199f365240fd8f0ec97c | /poiJacSim.py | a9387675f842edc57c73b5c0ee245ec2d4ff2bff | [] | no_license | clussificate/DataAnalysis | 9dda21f99b0ef1798e0898025419fb38395a1b29 | a997cfdc88e8e4682579c715c9ade7fa2460f845 | refs/heads/master | 2020-03-28T23:05:16.703313 | 2018-09-18T11:17:18 | 2018-09-18T11:17:18 | 149,275,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,484 | py | # -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
import collections
import sys
import json
reload(sys)
sys.setdefaultencoding('utf8')
class poiTagSim(object):
    """Compute Jaccard similarity between POIs based on their shared tags.

    The input file is tab separated; column 2 holds the POI name and
    column 5 the tag name (the first line is a header and is skipped).
    """

    def __init__(self, filename):
        self.filename = filename
        self.poi_set = []          # distinct POI names, first-seen order
        self.tags_set = []         # distinct tag names, first-seen order
        self.poi_tags = {}         # poi -> list of its tags
        self.poi_tags_matrix = []  # 0/1 POI x tag incidence matrix
        self.tags_poi = {}         # tag -> set of POIs carrying it (inverted index)

    def read_file(self):
        """Yield the lines of the input file; stop at the first falsy line."""
        with open(self.filename, 'r') as f:
            for line in f:
                if not line:
                    return
                yield line

    def poi_tags_dic(self):
        """Build the poi -> tags mapping, skipping the header row."""
        header = True
        for line in self.read_file():
            if header:
                header = False
                continue
            fields = line.split('\t')
            # fields[2] = POI name, fields[5] = tag name
            self.poi_tags.setdefault(fields[2], []).append(fields[5])

    def listToSet(self, mylist):
        """Return *mylist* with duplicates removed, preserving order.

        Membership is tracked in an auxiliary set (O(1) per check); the
        original scanned the growing result list, making this O(n^2).
        """
        seen = set()
        myset = []
        for item in mylist:
            if item not in seen:
                seen.add(item)
                myset.append(item)
        return myset

    def gene_set(self):
        """Fill poi_set and tags_set from the poi_tags mapping."""
        poi_list = []
        tags_list = []
        for key, items in self.poi_tags.items():
            poi_list.append(key)
            tags_list.extend(items)
        self.poi_set = self.listToSet(poi_list)
        self.tags_set = self.listToSet(tags_list)

    def gene_mat(self):
        """Build the POI x tag 0/1 incidence matrix.

        Row/column positions are precomputed in dicts; the original called
        list.index() inside the loops, which was quadratic overall.
        """
        poi_index = {poi: i for i, poi in enumerate(self.poi_set)}
        tag_index = {tag: j for j, tag in enumerate(self.tags_set)}
        matrix = np.zeros((len(self.poi_set), len(self.tags_set)))
        for poi, tags in self.poi_tags.items():
            row = poi_index[poi]
            for tag in tags:
                matrix[row, tag_index[tag]] = 1
        self.poi_tags_matrix = matrix

    def reverse(self):
        """Build the inverted tag -> {POIs} index used by caclJacSim."""
        for key, items in self.poi_tags.items():
            for item in items:
                self.tags_poi.setdefault(item, set()).add(key)

    def caclJacSim(self):
        """Return {poi: {other_poi: jaccard}} over the tag sets of each POI.

        One pass over the inverted index: N[p] counts p's tags, C[p][q]
        counts tags shared by p and q, and the Jaccard similarity is
        common / (|tags(p)| + |tags(q)| - common).
        """
        C = {}
        N = {}
        for tag, pois in self.tags_poi.items():
            for poi1 in pois:
                if poi1 in N:
                    N[poi1] += 1
                else:
                    N[poi1] = 1
                    C[poi1] = {}
                for poi2 in pois:
                    if poi1 == poi2:
                        continue
                    C[poi1][poi2] = C[poi1].get(poi2, 0) + 1
        poi_sim = collections.defaultdict(dict)
        for poi, related_pois in C.items():
            for related_poi, comm in related_pois.items():
                poi_sim[poi][related_poi] = comm / (N[poi] + N[related_poi] - comm)
        return poi_sim

    def write_sim(self, poi_sim, filename):
        """Write similarities as 'poi<TAB>other_poi<TAB>score' lines."""
        if poi_sim:
            with open(filename, 'w') as f:
                for key, items in poi_sim.items():
                    for key2, value in items.items():
                        f.write(str(key) + '\t' + str(key2) + '\t' + str(value) + '\n')

    def run(self):
        """Load the file, build every index and return the similarity dict."""
        self.poi_tags_dic()
        print(u"...生成poi_tags词典")
        self.gene_set()
        print(u"...生成poi集合、tags集合....")
        self.reverse()
        print(u"...生成tag_poi反转词典....")
        poi_sim = self.caclJacSim()
        print(u"...相似度计算完毕...")
        return poi_sim
# Command-line entry point: poiJacSim.py <input_tsv> <output_file>
if __name__ == '__main__':
    filename = sys.argv[1]
    myPoitagSimTest = poiTagSim(filename)
    poi_sim = myPoitagSimTest.run()
    outPutFile = sys.argv[2]
    myPoitagSimTest.write_sim(poi_sim, outPutFile)
    print("finished....")
| [
"noreply@github.com"
] | noreply@github.com |
a8c5f8fe733b1263b9e715e46f656c1827f702d7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2843/60723/275315.py | 68bd3c530b53af09ba7366a8b979d4690d44f3fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | num=int(input())
# Read the `num` space-separated integers announced on the previous line.
a=input().split()
for i in range(num):
    a[i]=int(a[i])
# b[i] = a[i] + a[i+1] (sum with the right neighbour); the last element has
# no right neighbour and is appended unchanged.
b=[]
for i in range(num-1):
    b.append(str(a[i]+a[i+1]))
b.append(str(a[num-1]))
print(' '.join(b)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
3a99c1df86fc3b5baa8741113b27ecc0f6f454c9 | aed70e07b568a842177bcc475876dced8c496c07 | /Tutorial_of_Deep_Learning/generator/train.py | 0c497c9009c812a9084ce3fc8d6d18e53baa5184 | [] | no_license | chobit19968535/NKUST | f73b087eb26a85fd13765b59eb1f5a952d453bd5 | 1358509e098c3909974e9351a19f83a93a51bf47 | refs/heads/master | 2022-11-27T21:28:54.938596 | 2020-08-10T06:58:21 | 2020-08-10T06:58:21 | 285,553,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,199 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 11:36:45 2020
@author: Lycoris
"""
"""
Utils
"""
from PIL import Image
"""
Core
"""
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.applications import MobileNetV2
from keras.layers import Dense , Dropout, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import adam, SGD
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
"""
Callbacks
"""
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.callbacks import TensorBoard
# --- Training callbacks --------------------------------------------------
log_dir = 'logs/'
# NOTE(review): this name shadows the stdlib `logging` module within this file.
logging = TensorBoard(log_dir=log_dir)
#ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-train_loss{train_loss:.3f}.h5',
#    monitor='train_loss', save_weights_only=True, save_best_only=True, period=3)
# Save the full model whenever val accuracy improves / val loss hits a new minimum.
checkpoint_acc = ModelCheckpoint(log_dir + 'best_acc_Ep{epoch:03d}.h5',
                             monitor='val_accuracy', save_weights_only=False, save_best_only=True, period=1)
checkpoint_loss = ModelCheckpoint(log_dir + 'min_loss.h5',
                             monitor='val_loss', save_weights_only=False, save_best_only=True, period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=7, verbose=1)
print('Starting')
# --- Hyperparameters -----------------------------------------------------
stage_1_epoch = 15
dropout_rate = 0.2  # NOTE(review): not referenced anywhere in this script
input_shape = (224, 224)
n_classes = 5
batch_size = 4
train_annotation_path = 'train_data.txt'  # one "image_path label" line per sample
test_annotation_path = 'test_data.txt'
"""
Modify Model to Custom data
"""
# Swap MobileNetV2's ImageNet head for GAP + softmax over n_classes.
base_model = MobileNetV2(weights='imagenet', include_top=True)
# model = Xception(weights='imagenet', include_top=True)
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(n_classes, activation='softmax', name='probs')(x)
model = Model(inputs=base_model.input, outputs=predictions)
base_model.summary()
"""
Data Prepocessing
"""
# Read the annotation lists; only the training list is shuffled.
with open (train_annotation_path, 'r') as f:
    train_lines = f.readlines()
with open (test_annotation_path, 'r') as f:
    test_lines = f.readlines()
#lines = lines[:25]
import random
random.shuffle(train_lines)
num_train = len(train_lines)
num_val = len(test_lines)
def data_generator(annotation_lines, batch_size, input_shape, random):
    """Endlessly yield (image batch, one-hot label batch) tuples for fit_generator.

    annotation_lines: list of "image_path label" strings, reshuffled at the
    start of every pass over the data.  `random` toggles augmentation inside
    get_random_data.
    """
    from keras.utils import to_categorical  # hoisted: was re-imported every batch
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        label_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            # BUG FIX: `random` used to be passed positionally, where it bound
            # to get_random_data's `proc_img` parameter — so augmentation was
            # effectively always on, even for the validation generator.  Pass
            # it by keyword so it reaches the intended `random` flag.
            image, label = get_random_data(annotation_lines[i], input_shape, random=random)
            image_data.append(image)
            label_data.append(label)
            i = (i + 1) % n
        image_data = np.array(image_data)
        label_data = to_categorical(label_data, num_classes=n_classes)
        yield image_data, label_data
def data_generator_wrapper(annotation_lines, batch_size, input_shape, random):
    """Return a batch generator, or None for empty data / non-positive batch size."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, random)
def rand(a=0, b=1):
    """Uniform random float in [a, b)."""
    return a + (b - a) * np.random.rand()
def get_random_data(annotation_lines, input_shape, proc_img = True, random =True,
                    flip = False, hue = .1, jitter = .1, sat = .25, val = .25):
    """Load one "image_path label" annotation line and return (image_data, label).

    With random=False the image is letterboxed into input_shape on a grey
    background and scaled to [0, 1]; with random=True a randomly scaled,
    shifted, optionally flipped and HSV-distorted version is returned.
    NOTE(review): the third positional argument is `proc_img`, not `random` —
    callers should pass the augmentation flags by keyword.
    """
    line = annotation_lines.split()
    image = Image.open(line[0])
    label = line[1]
    iw, ih = image.size          # source size
    h, w = input_shape           # target size (height, width)
    if not random:
        # resize image, preserving aspect ratio (letterbox onto grey canvas)
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data=0             # returned as-is when proc_img is False
        if proc_img:
            image = image.resize((nw,nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w,h), (128,128,128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.
        return (image_data, label)
    else:
        """
        Data-Augmentation
        """
        # =============================================================================
        # Scale(parameter is 'jitter')
        # =============================================================================
        # resize image: random aspect-ratio jitter plus a random overall scale
        new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
        scale = rand(.25, 2)
        if new_ar < 1:
            nh = int(scale*h)
            nw = int(nh*new_ar)
        else:
            nw = int(scale*w)
            nh = int(nw/new_ar)
        image = image.resize((nw,nh), Image.BICUBIC)
        # =============================================================================
        # Shift
        # =============================================================================
        # place image at a random offset on the grey canvas
        dx = int(rand(0, w-nw))
        dy = int(rand(0, h-nh))
        new_image = Image.new('RGB', (w,h), (128,128,128))
        new_image.paste(image, (dx, dy))
        image = new_image
        # =============================================================================
        # Flip
        # =============================================================================
        # flip image or not (only when the flip flag is set by the caller)
        # flip = True
        if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
        # =============================================================================
        # Color Augmentation
        # RGB --> HSV(parameter is hue) --> RGB
        # =============================================================================
        # distort image: random hue rotation and sat/val gains (or their inverses)
        hue = rand(-hue, hue)
        sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
        val = rand(1, val) if rand()<.5 else 1/rand(1, val)
        x = rgb_to_hsv(np.array(image)/255.)
        x[..., 0] += hue
        x[..., 0][x[..., 0]>1] -= 1    # wrap hue back into [0, 1]
        x[..., 0][x[..., 0]<0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x>1] = 1                     # clip all channels into [0, 1]
        x[x<0] = 0
        image_data = hsv_to_rgb(x) # numpy array, 0 to 1
        return (image_data, label)
#aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
#    width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
#    horizontal_flip=True, fill_mode="nearest")
# Unfreeze every layer so the whole network is fine-tuned in this stage.
for layer in model.layers[:]:
    layer.trainable = True
"""
Two-Stage training trick
"""
# Stage 1
model.compile(optimizer=adam(lr=1e-3), loss='categorical_crossentropy', metrics = ['accuracy'])
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
# Train on the augmented generator, validate on the non-augmented one.
history_freeze = model.fit_generator(data_generator_wrapper(train_lines, batch_size, input_shape, random=True),
                    steps_per_epoch=max(1, num_train//batch_size),
                    validation_data=data_generator_wrapper(test_lines, batch_size, input_shape, random=False),
                    validation_steps=max(1, num_val//batch_size),
                    epochs=stage_1_epoch,
                    initial_epoch=0,
                    callbacks=[checkpoint_acc, reduce_lr, early_stopping])
model.save(log_dir + 'trained_model_stage_1.h5')
| [
"41323791+chobit19968535@users.noreply.github.com"
] | 41323791+chobit19968535@users.noreply.github.com |
c2768ff9359b864188bdb1b45b120dba311db26c | 21e25a8188021805a0d58c38ef66253d4be6fd4a | /8-1 fibonacci.py | dd368f8a08a6065b7cd66a41b271711b5a8603ed | [] | no_license | greenloper/Dynamic-Programming | e1c01a5e1d95aaa96dba8034ba71e727a398911a | 6b758a9dd4e5cb81474f25cc3a8e0111a6c320cd | refs/heads/main | 2023-03-25T08:52:29.232370 | 2021-03-22T14:07:17 | 2021-03-22T14:07:17 | 335,989,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | d=[0]*100
def fibo(x):
    """Return the x-th Fibonacci number (fibo(1) == fibo(2) == 1).

    Rewritten iteratively: the original memoized into the fixed-size global
    list ``d`` and recursed, which capped x at 99 (IndexError beyond that)
    and risked the recursion limit; this version uses O(1) memory, works for
    any positive x, and no longer mutates shared global state.
    """
    if x <= 2:
        return 1
    prev, cur = 1, 1
    for _ in range(x - 2):
        prev, cur = cur, prev + cur
    return cur
print(fibo(99)) | [
"noreply@github.com"
] | noreply@github.com |
4ffc37db89cf4224b27343cde48d98ea8f146c60 | fff7d26485659fb32585abb868d572b73cff4c0f | /ElevatorUI.py | f978d6eacb2151a6dae74fea7faac68b9d8e4ebf | [] | no_license | zb2313/OS_Elevator_Dispatch | 6f08c9b4172cd0867c21602393be7ea3d5ed9ef6 | 4b61c00b6c7e5622eba6defd00873403781d1f8b | refs/heads/main | 2023-06-10T02:43:08.854050 | 2021-07-04T09:32:21 | 2021-07-04T09:32:21 | 382,812,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,309 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from scheduling import MyScheduling
from PyQt5.QtCore import *
# Elevator motion states
STANDSTILL = 0 # idle
RUNNING_UP = 1 # moving up
RUNNING_DOWN = 2 # moving down
# Animation states
NOPE = 0 # no animation playing
READYSTART = 1 # about to start moving
READYSTOP = 2 # about to stop
# Hall-call directions
GOUP = 1 # up call
GODOWN = 2 # down call
# Door states
OPEN = 0 # doors open
CLOSED = 1 # doors closed
class Ui_MainWindow(object):
    """Qt UI for a 5-elevator / 20-floor dispatch simulator.

    Builds all widgets in setupUi and forwards every user action (alarms,
    door buttons, in-car number keys, hall up/down calls) to the scheduler.
    """
    def __init__(self):
        self.schedule = MyScheduling(self) # link to the scheduling module
    def setupUi(self, MainWindow):
        """Create and lay out every widget and wire the signal handlers."""
        self.upbtn_style = "QPushButton{border-image: url(Resources/up_hover.png)}" \
                           "QPushButton:hover{border-image: url(Resources/up.png)}" \
                           "QPushButton:pressed{border-image: url(Resources/up_pressed.png)}"
        self.downbtn_style = "QPushButton{border-image: url(Resources/down_hover.png)}" \
                             "QPushButton:hover{border-image: url(Resources/down.png)}" \
                             "QPushButton:pressed{border-image: url(Resources/down_pressed.png)}"
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1400, 700)
        MainWindow.setStyleSheet("")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.elevEnabled = [True] * 5 # per-elevator enabled/disabled flags
        self.doorState = [CLOSED] * 5 # per-elevator door state (OPEN/CLOSED)
        self.elevState = [STANDSTILL] * 5 # per-elevator motion state (up/down/idle)
        self.animState = [NOPE] * 5 # per-elevator animation state (none/starting/stopping)
        self.elevNow = [1] * 5 # current floor of each elevator
        # region shaft walls
        boundPos = [10, 180, 360, 540, 720, 890]
        self.bounder = [] # elevator shaft side walls
        self.level = [] # floor separators (only initialized here)
        for i in range(0, len(boundPos)):
            self.bounder.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.bounder[i].setGeometry(QtCore.QRect(boundPos[i], 120, 1, 700))
            self.bounder[i].setAutoFillBackground(False)
            self.bounder[i].setStyleSheet("background-color: rgb(0, 0, 0);")
            self.bounder[i].setObjectName("wall" + str(i))
        # endregion
        # region elevator cars
        self.elevator_back = [] # car backgrounds
        self.elevator_front = [] # car door leaves (two per car)
        self.elevator_Anim = [] # door open/close animations
        elevator_pos = [30, 200, 380, 560, 740]
        for i in range(0, len(elevator_pos)):
            # car background
            self.elevator_back.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.elevator_back[i].setGeometry(QtCore.QRect(elevator_pos[i], 610, 131, 161))
            self.elevator_back[i].setStyleSheet("background-color: rgb(87, 87, 87);")
            self.elevator_back[i].setObjectName("elevator_back" + str(i))
            # the two door leaves
            self.elevator_front.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.elevator_front[2 * i].setGeometry(QtCore.QRect(elevator_pos[i], 610, 64, 161))
            self.elevator_front[2 * i].setStyleSheet("background-color: rgb(160, 160, 160);")
            self.elevator_front[2 * i].setObjectName("elevator_front" + str(2 * i))
            self.elevator_Anim.append(QPropertyAnimation(self.elevator_front[2 * i], b"geometry"))
            self.elevator_Anim[2 * i].setDuration(1000) # animation duration (ms)
            self.elevator_Anim[2 * i].setStartValue(QtCore.QRect(elevator_pos[i], 610, 64, 161)) # start geometry (closed)
            self.elevator_Anim[2 * i].setEndValue(QtCore.QRect(elevator_pos[i], 610, 8, 161)) # end geometry (open)
            self.elevator_front.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.elevator_front[2 * i + 1].setGeometry(QtCore.QRect(elevator_pos[i] + 67, 610, 64, 161))
            self.elevator_front[2 * i + 1].setStyleSheet("background-color: rgb(160, 160, 160);")
            self.elevator_front[2 * i + 1].setObjectName("elevator_front" + str(2 * i + 1))
            self.elevator_Anim.append(QPropertyAnimation(self.elevator_front[2 * i + 1], b"geometry"))
            self.elevator_Anim[2 * i + 1].setDuration(1000)
            self.elevator_Anim[2 * i + 1].setStartValue(QtCore.QRect(elevator_pos[i] + 67, 610, 64, 161))
            self.elevator_Anim[2 * i + 1].setEndValue(QtCore.QRect(elevator_pos[i] + 123, 610, 8, 161))
        # endregion
        # region elevator name labels
        font = QtGui.QFont()
        font.setFamily("AcadEref")
        font.setPointSize(10)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.label = []
        label_pos = [70, 240, 420, 600, 780]
        for i in range(0, len(label_pos)):
            self.label.append(QtWidgets.QLabel(self.centralwidget))
            self.label[i].setGeometry(QtCore.QRect(label_pos[i], 800, 51, 21))
            self.label[i].setFont(font)
            self.label[i].setStyleSheet("font: 10pt \"AcadEref\";\n"
"background-color: rgb(160, 160, 160);")
            self.label[i].setObjectName("label" + str(i))
        self.label2 = []
        label_pos2 = [170+i*40 for i in range(0,5)]
        for i in range(0, len(label_pos2)):
            self.label2.append(QtWidgets.QLabel(self.centralwidget))
            self.label2[i].setGeometry(QtCore.QRect(label_pos2[i]+765, 810, 21, 21))
            self.label2[i].setFont(font)
            self.label2[i].setStyleSheet("font: 10pt \"AcadEref\";\n"
"background-color: rgb(160, 160, 160);")
            self.label2[i].setObjectName("label2" + str(i))
        # endregion
        # region per-elevator floor displays
        self.lcdNumber = [] # LCD floor indicators
        lcdNumber_pos = [50, 220, 400, 580, 760]
        for i in range(0, len(lcdNumber_pos)):
            self.lcdNumber.append(QtWidgets.QLCDNumber(self.centralwidget))
            self.lcdNumber[i].setGeometry(QtCore.QRect(lcdNumber_pos[i]-10, 110, 51, 41))
            self.lcdNumber[i].setDigitCount(2)
            self.lcdNumber[i].setProperty("value", 1.0) # every elevator starts on floor 1
            self.lcdNumber[i].setObjectName("lcdNumber" + str(i))
        # endregion
        # region direction indicators
        self.stateshow = [] # up/down state icons
        stateshow_pos = [95, 265, 445, 625, 805]
        for i in range(0, len(stateshow_pos)):
            self.stateshow.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.stateshow[i].setGeometry(QtCore.QRect(stateshow_pos[i], 100, 71, 61))
            self.stateshow[i].setStyleSheet("QGraphicsView{border-image: url(Resources/state.png)}")
            self.stateshow[i].setObjectName("stateshow" + str(i))
        # endregion
        # region out-of-service alarms
        self.warnbtn = [] # alarm buttons
        warnbtn_pos = [90, 260, 440, 620, 800]
        for i in range(0, len(warnbtn_pos)):
            self.warnbtn.append(QtWidgets.QPushButton(self.centralwidget))
            self.warnbtn[i].setGeometry(QtCore.QRect(warnbtn_pos[i] + 10, 60, 56, 31))
            self.warnbtn[i].setStyleSheet("background-color: rgb(180, 0, 0);")
            self.warnbtn[i].setObjectName("warnbtn" + str(i))
        # wire up the alarm listeners
        for i in range(0, len(self.warnbtn)):
            self.warnbtn[i].clicked.connect(MainWindow.connectStopListener)
        # endregion
        # region in-car floor number keypads
        self.gridLayoutWidget = []
        self.gridLayout = []
        gridLayoutWidget_pos = [30, 200, 380, 560, 740]
        for i in range(0, len(gridLayoutWidget_pos)):
            self.gridLayoutWidget.append(QtWidgets.QWidget(self.centralwidget))
            self.gridLayoutWidget[i].setGeometry(QtCore.QRect(gridLayoutWidget_pos[i] + 10, 120, 120, 451))
            self.gridLayoutWidget[i].setObjectName("gridLayoutWidget" + str(i))
            self.gridLayout.append(QtWidgets.QGridLayout(self.gridLayoutWidget[i]))
            self.gridLayout[i].setContentsMargins(0, 0, 0, 0)
            self.gridLayout[i].setObjectName("gridLayout" + str(i))
        # Button labels laid out so high floors appear on the top rows.
        num = ['17', '18', '19', '20', '13', '14', '15', '16', '9', '10', '11', '12', '5', '6', '7', '8', '1', '2',
               '3', '4']
        positions = [(i, j) for i in range(5) for j in range(4)]
        for i in range(0, len(gridLayoutWidget_pos)):
            for position, name in zip(positions, num):
                button = QtWidgets.QPushButton(name)
                # object name encodes "<elevator> <floor>" for the listener
                button.setObjectName("button " + str(i) + ' ' + name)
                button.setStyleSheet("")
                button.clicked.connect(MainWindow.connectNumListener)
                self.gridLayout[i].addWidget(button, *position)
        # endregion
        # region in-car door open/close buttons
        self.openbtn = []
        self.closebtn = []
        openbtn_pos = [60, 230, 410, 590, 770]
        closebtn_pos = [110, 280, 460, 640, 820]
        for i in range(0, len(openbtn_pos)):
            self.openbtn.append(QtWidgets.QPushButton(self.centralwidget))
            self.openbtn[i].setGeometry(QtCore.QRect(openbtn_pos[i]-15, 560, 31, 31))
            self.openbtn[i].setStyleSheet("QPushButton{border-image: url(Resources/open.png)}"
                                          "QPushButton:hover{border-image: url(Resources/open_hover.png)}"
                                          "QPushButton:pressed{border-image: url(Resources/open_pressed.png)}")
            self.openbtn[i].setObjectName("openbtn" + str(i))
            self.closebtn.append(QtWidgets.QPushButton(self.centralwidget))
            self.closebtn[i].setGeometry(QtCore.QRect(closebtn_pos[i] +10, 560, 31, 31))
            self.closebtn[i].setStyleSheet("QPushButton{border-image: url(Resources/close.png)}"
                                           "QPushButton:hover{border-image: url(Resources/close_hover.png)}"
                                           "QPushButton:pressed{border-image: url(Resources/close_pressed.png)}")
            self.closebtn[i].setObjectName("closebtn" + str(i))
            self.openbtn[i].clicked.connect(MainWindow.connectDoorListener) # bind the door open/close slot
            self.closebtn[i].clicked.connect(MainWindow.connectDoorListener)
        # endregion
        # region passenger figure
        self.figure = [] # passenger sprites (hidden until used)
        self.figure_Anim = []
        figure_pos = [10, 180, 360, 540, 720]
        for i in range(0, len(figure_pos)):
            self.figure.append(QtWidgets.QGraphicsView(self.centralwidget))
            self.figure[i].setGeometry(QtCore.QRect(figure_pos[i] , 690, 71, 71))
            self.figure[i].setStyleSheet("QGraphicsView{border-image: url(Resources/people.png)}")
            self.figure[i].setVisible(False)
            self.figure[i].setObjectName("figure" + str(i))
            self.figure_Anim.append(QPropertyAnimation(self.figure[i], b"geometry"))
            self.figure_Anim[i].setDuration(1500)
            self.figure_Anim[i].setStartValue(QtCore.QRect(figure_pos[i] - 20, 690, 71, 71))
            self.figure_Anim[i].setEndValue(QtCore.QRect(figure_pos[i] + 10, 610, 111, 121))
        # endregion
        # region per-floor number plates
        # Originally meant to hold one sub-list per elevator; only one is
        # created here, with a dummy entry at index 0 so plate j maps to floor j.
        self.number_btn = [[] for i in range(1)]
        self.number_btn[0].append(0) # dummy so list index matches the 1-based floor number
        for j in range(1, 21):
            self.number_btn[0].append(QtWidgets.QGraphicsView(MainWindow)) # create the widget and attach it to MainWindow
            # NOTE(review): 810.5 is a float passed to QRect, which expects ints — confirm on the targeted PyQt5 version.
            self.number_btn[0][j].setGeometry(QtCore.QRect(900, 810.5 - j * 40, 35, 35))
            self.number_btn[0][j].setStyleSheet("QGraphicsView{border-image: url(Resources/number/" + str(j) + "_hover.png)}")
        # endregion
        #region per-floor up/down call buttons
        self.up_btn = {}
        for i in range(1, 20):
            self.up_btn[i] = QtWidgets.QPushButton(MainWindow)
            self.up_btn[i].setGeometry(QtCore.QRect(1120, 810 - i * 40, 35, 35))
            self.up_btn[i].setStyleSheet(self.upbtn_style)
            self.up_btn[i].setObjectName("upbtn"+str(i))
            self.up_btn[i].clicked.connect(MainWindow.connectDirListener)
        self.down_btn = {}
        for i in range(2, 21):
            self.down_btn[i] = QtWidgets.QPushButton(MainWindow)
            self.down_btn[i].setGeometry(QtCore.QRect(1170, 810 - i * 40, 35, 35))
            self.down_btn[i].setStyleSheet(self.downbtn_style)
            self.down_btn[i].setObjectName("downbtn" + str(i))
            self.down_btn[i].clicked.connect(MainWindow.connectDirListener)
        #endregion
        #region mini elevator icons
        self.elevator_label = {}
        for i in range(0, 5):
            self.elevator_label[i] = QtWidgets.QLabel(MainWindow)
            self.elevator_label[i].setPixmap(QtGui.QPixmap("Resources/elevator.png"))
            self.elevator_label[i].setGeometry(QtCore.QRect((i+1)* 40 +900, 770, 10, 35))
            self.elevator_label[i].setScaledContents(True)
        #endregion
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1400, 18))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply translatable texts to the window title and labels."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        for i in range(0, len(self.label)):
            self.label[i].setText(_translate("MainWindow", "ele" + str(i)))
            self.label2[i].setText(_translate("MainWindow", str(i)))
            self.warnbtn[i].setText(_translate("MainWindow", "alarm"))
    def connectStopListener(self):
        """Alarm button slot: warn the user and take that elevator out of service."""
        which_warnbtn = int(self.sender().objectName()[-1])
        print("点击了{0}号报警器".format(which_warnbtn))
        self.warnbtn[which_warnbtn].setStyleSheet("background-color: rgb(255, 255, 255);")
        self.MessBox = QtWidgets.QMessageBox.information(self.warnbtn[int(which_warnbtn)], "warning", # pop up a warning box
                                                         "第" + str(which_warnbtn) + "号电梯停止使用")
        self.warnbtn[which_warnbtn].setStyleSheet("background-color: rgb(180, 0, 0);")
        self.schedule.stopUsingListen(which_warnbtn) # hand off to the scheduler (warn handling)
    def connectDoorListener(self):
        """Open/close button slot: decode elevator id and command from the sender's name."""
        objectName = self.sender().objectName()
        whichelev = int(objectName[-1])
        whichcommand = 0 if objectName[0] == 'o' else 1 # 0 => open, 1 => close
        print("{0}号电梯, 命令是{1}".format(whichelev, whichcommand))
        self.schedule.doorListen(whichelev, whichcommand) # hand off to the scheduler (door handling)
    def connectNumListener(self):
        """In-car keypad slot: decode elevator id and requested floor, then disable the key."""
        whichbtn = self.sender()
        btn_name = whichbtn.objectName()
        buf = [int(s) for s in btn_name.split() if s.isdigit()] # pull the digits out of "button <elev> <floor>"
        whichelev = buf[0]
        whichfloor = buf[1]
        print("{0}号电梯, {1}按键被按".format(whichelev, whichfloor))
        whichbtn.setStyleSheet("background-color: rgb(255, 150, 3);") # highlight to show the pressed state
        whichbtn.setEnabled(False) # disabled until the request is served
        self.schedule.insideNumListen(whichelev, whichfloor) # hand off to the scheduler (car move handling)
    def connectDirListener(self):
        """Hall-call slot: decode floor and direction from names 'upbtnN' / 'downbtnN'."""
        whichbtn = self.sender().objectName()
        if whichbtn[0] == 'd':# 'downbtnN'
            choice = GODOWN
            whichfloor=int(whichbtn[7:])
        else:
            choice = GOUP
            whichfloor=int(whichbtn[5:])
        print("用户选择了 {0} {1}".format(whichfloor, choice))
        self.schedule.outsideDirListen(whichfloor, choice) # hand off to the scheduler (call handling)
| [
"2224105295@qq.com"
] | 2224105295@qq.com |
cefe8a0dbed46bfb84982cf82ac57a6dc7c9e70b | c82ddbaa7a54525ac344d768cdfaa7d044f833b5 | /IOClasses.py | 94ea2775755a1d9ea658870fa302e5b82b50d68b | [] | no_license | hitakshi01/IntroToProg-Python-Mod09 | 4e6c821a05d984ed11e0b182c88a0c449665ddf0 | 615c6f940447cb15425ae7283d4e41ee2ee0d731 | refs/heads/master | 2022-12-09T19:03:22.713135 | 2020-09-07T16:32:48 | 2020-09-07T16:32:48 | 293,572,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | # ---------------------------------------------------------- #
# Title: IO Classes
# Description: A module of IO classes
# ChangeLog (Who,When,What):
# RRoot,1.1.2030,Created started script
# Hitakshi,09.06.2020,Modified code to complete assignment 9
# ---------------------------------------------------------- #
if __name__ == "__main__":
    raise Exception("This file is not meant to ran by itself")
else:
    import DataClasses as DC

    class EmployeeIO:
        """ A class for performing Employee Input and Output

        methods:
            print_menu_items():
            input_menu_options():
            print_current_list_items(list_of_rows):
            input_employee_data():
        changelog: (When,Who,What)
            RRoot,1.1.2030,Created Class:
        """
        @staticmethod
        def print_menu_items():
            """ Print a menu of choices to the user """
            print('''
            Menu of Options
            1) Show current employee data
            2) Add new employee data
            3) Save employee data to File
            4) Exit program
            ''')
            print()  # Add an extra line for looks

        @staticmethod
        def input_menu_options():
            """ Gets the menu choice from a user

            :return: string
            """
            choice = str(input("Which option would you like to perform? [1 to 4] - ")).strip()
            print()  # Add an extra line for looks
            return choice

        @staticmethod
        def print_current_list_items(list_of_rows: list):
            """ Print the current items in the list of Employee rows

            :param list_of_rows: (list) of rows you want to display
            """
            print("******* The current employees are: *******")
            for row in list_of_rows:
                print(str(row.employee_id)
                      + ","
                      + row.first_name
                      + ","
                      + row.last_name)
            print("*******************************************")
            print()  # Add an extra line for looks

        @staticmethod
        def input_employee_data():
            """ Gets data for a employee object

            :return: (Employee) object with input data, or None when the
                prompts/constructor failed.  (Bug fix: the original left
                `emp` unassigned on the error path and then crashed with
                UnboundLocalError at the return statement.)
            """
            emp = None  # defined up front so the except path can still return
            try:
                employee_id = (input("What is the employee Id? - ").strip())
                first_name = str(input("What is the employee First Name? - ").strip())
                last_name = str(input("What is the employee Last Name? - ").strip())
                print()  # Add an extra line for looks
                emp = DC.Employee(employee_id, first_name, last_name)
            except Exception as e:
                print(e)
            return emp
| [
"hitakshigulati00@gmail.com"
] | hitakshigulati00@gmail.com |
0b2217cb7663d2b1afefcb03e7dbbe62c5871b2a | f05c0be43076cc041ee59b982ba2fa0ed47f85ea | /python/util.py | 0269ec56c30e1fada96a60c7b9438a8bcfcc679f | [] | no_license | richardsonian/beatboard | c65f4dd4e99020a4f3ba204f2e151074187ae650 | bd67b78ea307d6d85619844b56feaa3e09437b58 | refs/heads/main | 2023-01-31T05:45:24.853192 | 2020-12-17T19:59:36 | 2020-12-17T19:59:36 | 319,755,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | def scale(num, oldMin, oldMax, newMin, newMax):
return (((num - oldMin) * (newMax - newMin)) / (oldMax - oldMin)) + newMin
def get_list_neighbor(item, list_, shift, wrap):
    """Return the element `shift` positions away from `item` in `list_`.

    With wrap=True the position wraps around the ends of the list; otherwise
    it is clamped to the list bounds.
    """
    target = list_.index(item) + shift
    if wrap:
        return list_[target % len(list_)]
    return list_[clamp(target, 0, len(list_) - 1)]
def clamp(num, min_, max_): #inclusive
return max(min_, min(num, max_)) | [
"ian.ross.richardson@gmail.com"
] | ian.ross.richardson@gmail.com |
ffa4f6924746bc1110d1bb3403a60147af30b96a | cdf69c06df441ee4a4c17d6604060094dc6ef87e | /english.py | 0684e14fa30898303980436492380441b7f5f3b3 | [] | no_license | Soulmate7/HelloFlask | 0c8dafbea3eb2ea6738d3f2d7ce25701a220283d | 9334b2da8b817ad2a16b41c58ca2eff87b45347c | refs/heads/master | 2023-05-31T03:38:45.475952 | 2021-06-16T08:36:15 | 2021-06-16T08:36:15 | 371,363,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | import pymysql
from flask import Flask,render_template,request
app=Flask(__name__)
@app.route('/count')
def count():
    """Count dictionary words per initial letter (a-z) and render the chart.

    The optional ``wanted`` query argument is accepted for parity with
    /search but is not used by the per-letter aggregation below.
    """
    wanted = request.args.get("wanted", type=str)
    if wanted is None:
        wanted = 'pineapple'
    db = pymysql.connect(host='localhost', user='root', passwd="z1012194891",
                         db="DataVisual", charset="utf8")
    counts = []  # renamed from `count`, which shadowed this view function
    try:
        cursor = db.cursor()
        print('-----')
        for i in range(26):
            letter = chr(ord('a') + i)
            print(letter)
            # Parameterized LIKE pattern instead of building SQL by concatenation.
            sql = "select count(*) from map_enword where english like %s"
            cursor.execute(sql, (letter + '%',))
            rs = cursor.fetchall()
            counts.append(rs[0][0])
        print(counts)
    except Exception:
        # Keep the best-effort behavior: log and render whatever was counted.
        print('py-db-error')
    finally:
        db.close()  # the original leaked this connection on every request
    return render_template('count.html', rs=counts)
@app.route('/search')
def search():
    """Search dictionary entries whose english field contains ``wanted``."""
    wanted = request.args.get("wanted", type=str)
    if wanted is None:
        wanted = 'pineapple'
    db = pymysql.connect(host='localhost', user='root', passwd="z1012194891",
                         db="DataVisual", charset="utf8")
    try:
        cursor = db.cursor()
        # Parameterized query -- the original concatenated ``wanted`` straight
        # into the SQL string, which is an injection vector.
        sql = "SELECT * FROM DataVisual.map_enword where english like %s"
        cursor.execute(sql, ('%' + wanted + '%',))
        rs = list(cursor.fetchall())
        print(rs)
    except Exception:
        rs = 'db-error'
        print('py-db-error')
    finally:
        db.close()  # close even when the query above raises
    return render_template('english.html', rs=rs)
# Start the Flask development server (debug mode; not for production use).
if __name__=='__main__':
    app.run(debug=True)
"1012194891@qq.com"
] | 1012194891@qq.com |
d9043528e80fd5f56fa7df9cced920dcb7b5c268 | 313a627208d34b0ac3f22ff96ebe53dadb021bac | /pyaccumulo/simple_pool.py | f16332ac386f37c680e08c3369cc3615772e1df9 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"CC-BY-SA-3.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | nestorsokil/pyaccumulo | 9ec59bafd7eb6909abfc56fafbce76bfb87179ec | 1aef8327cfccb1f87746ca3a19e0b70262bd63e2 | refs/heads/master | 2020-05-23T02:36:17.128786 | 2019-05-20T09:26:23 | 2019-05-20T09:26:23 | 186,608,351 | 0 | 0 | Apache-2.0 | 2019-05-14T11:28:30 | 2019-05-14T11:28:30 | null | UTF-8 | Python | false | false | 991 | py | from contextlib import contextmanager
from threading import RLock
from pyaccumulo.accumulo import Accumulo
class AccumuloPool:
    """A simple bounded pool of Accumulo connections.

    Connections are handed out via the ``get()`` context manager and
    returned to the pool afterwards; once the pool is at capacity the
    oldest idle connection is dropped to make room.
    """

    def __init__(self, host, port, user, password, size=10):
        self._conn_params = {'host': host, 'port': port, 'user': user, 'password': password}
        self._lock = RLock()          # guards _pool; pool may be shared across threads
        self._pool = []               # idle connections, oldest first
        self._max_size = size

    @contextmanager
    def get(self):
        """Yield a pooled connection, returning it to the pool on exit.

        The connection is checked back in even when the ``with`` body
        raises -- the original implementation leaked it in that case.
        """
        conn: Accumulo = self._check_out()
        try:
            yield conn
        finally:
            self._check_in(conn)

    def _new_conn(self):
        # Open a fresh connection with the parameters captured at pool creation.
        return Accumulo(**self._conn_params)

    def _check_in(self, conn):
        # Return a connection, evicting the oldest idle ones past capacity.
        with self._lock:
            while len(self._pool) >= self._max_size:
                self._pool.pop(0)
            self._pool.append(conn)

    def _check_out(self):
        # Reuse the oldest idle connection, or open a new one if none is idle.
        with self._lock:
            conn = None
            if len(self._pool) > 0:
                conn = self._pool.pop(0)
            if not conn:
                conn = self._new_conn()
            return conn
| [
"nsokil@kountable.com"
] | nsokil@kountable.com |
add0836df218250b62407c0f08330ba06dab4197 | 8962f8a8c69d2fc5e31de6e976ef4823bc9f956f | /TRPLib/pgmconstrict.py | 628b778345cdbd8fc70948cf7d9062b04e8cd729 | [] | no_license | btownshend/pyTecan | 28dd23a6b4a51afdf0dff811c53f4b2fe1b7c124 | 47087c41d7d01598eab6d45696f8c3beedd2310f | refs/heads/master | 2021-08-09T23:18:06.137339 | 2021-03-31T19:12:17 | 2021-03-31T19:12:17 | 9,189,758 | 8 | 3 | null | 2017-09-06T19:46:33 | 2013-04-03T08:05:45 | Python | UTF-8 | Python | false | false | 11,277 | py | import math
from ..Experiment import reagents, clock, worklist
from ..Experiment.concentration import Concentration
from ..Experiment.sample import Sample
from .QSetup import QSetup
from .TRP import TRP
from . import trplayout
reagents.add("BT5310", well="D1", conc=Concentration(20, 20, "pM"))
reagents.add("MKapa", well='A1', conc=Concentration(2.5, 1, 'x'), extraVol=30,
ingredients={'glycerol': 1, 'Water': 39})
reagents.add("MConstrict", well='A6', conc=Concentration(100.0 / 98.0, 1, 'x'), extraVol=30,
ingredients={'glycerol': 1, 'Water': 97})
reagents.add("P-End", well="C1", conc=4)
class Constrict(TRP):
# Mix constriction inputs, constrict, PCR, remove barcodes
pcreff = 1.98
def __init__(self, inputs, nmolecules, nconstrict, vol):
super(Constrict, self).__init__()
self.inputs = inputs
self.nmolecules = nmolecules
self.nconstrict = nconstrict
self.qconc = 20e-12 # Target qPCR concentration
self.qprimers = ["End"]
self.mix_conc = 100e-9 # Concentration of mixdown
self.con_dilvol = 100 # Volume to use for constriction dilutions
self.con_maxdilperstage = 100 / 3.0 # Maximum dilution/stage
self.con_pcr1vol = 100
self.con_pcr1inputvol = 2
self.con_pcr1tgtconc = self.qconc * 4 # Enough to take qPCR without dilutiojn
self.con_pcr2dil = 4
self.con_pcr2vol = 50
self.con_pcr2tgtconc = 10e-9
self.regen_predilvol = 100
self.regen_predil = 25
self.regen_dil = 25
self.regen_vol = 100
self.regen_cycles = 10
self.rsrc = [reagents.add("%s-%s-%s" % (inputs[i]['name'], inputs[i]['left'], inputs[i]['right']),
trplayout.SAMPLEPLATE,
well=inputs[i]['well'] if 'well' in inputs[i] else None,
conc=Concentration(stock=inputs[i]['bconc'], units="nM"),
initVol=vol, extraVol=0)
for i in range(len(inputs))]
self.q = None # Defined in pgm()
def pgm(self):
self.q = QSetup(self, maxdil=16, debug=False, mindilvol=60)
# Don't start idler (to minimize tip cross-contamination); last PCR allows plenty of time for doing dilutions without any effect on run time
# Will start after first constriction PCR is running
#self.q.debug = True
# self.e.addIdleProgram(self.q.idler)
self.q.addReferences(dstep=10, primers=self.qprimers, ref=reagents.getsample("BT5310"),nreplicates=2)
samps=[r.getsample() for r in self.rsrc]
for s in samps:
self.q.addSamples([s],needDil=max(10,s.conc.stock*1e-9/self.qconc),primers=self.qprimers)
print("### Mixdown #### (%.0f min)" % (clock.elapsed() / 60.0))
if len(samps)>1:
mixdown = self.mix(samps, [x['weight'] for x in self.inputs])
else:
mixdown=samps[0]
self.q.addSamples(mixdown, needDil=max(1.0,mixdown.conc.stock * 1e-9 / self.qconc), primers=self.qprimers)
print("Mixdown final concentration = %.0f pM" % (mixdown.conc.stock * 1000))
print("### Constriction #### (%.1f min)" % (clock.elapsed() / 60.0))
constricted = self.constrict(mixdown, mixdown.conc.stock * 1e-9)
print("### Regeneration #### (%.0f min)" % (clock.elapsed() / 60.0))
prefixes = set([x['left'][0] for x in self.inputs])
self.regenerate(constricted * len(prefixes), [p for p in prefixes for _ in constricted])
print("### qPCR #### (%.0f min)" % (clock.elapsed() / 60.0))
self.q.run(confirm=False, enzName='EvaGreen', waitForPTC=True)
print("### qPCR Done #### (%.0f min)" % (clock.elapsed() / 60.0))
worklist.userprompt("qPCR done -- only need to complete final PCR", 300)
self.e.waitpgm()
print("### Final PCR Done #### (%.0f min)" % (clock.elapsed() / 60.0))
def mix(self, inp, weights,mixvol=100,tgtconc=None,maxinpvol=20):
"""Mix given inputs according to weights (by moles -- use conc.stock of each input)"""
vol = [weights[i] *1.0 / inp[i].conc.stock for i in range(len(inp))]
scale = mixvol / sum(vol)
conc=sum([inp[i].conc.stock * scale * vol[i] for i in range(len(inp))]) / mixvol
if tgtconc is not None and conc>tgtconc:
scale*=tgtconc*1.0/conc
if max(vol)*scale<4.0:
scale=4.1/max(vol) # At least one input with 4ul input
vol = [x * scale for x in vol] # Mix to make planned total without water
for i in range(len(vol)):
# Check if this would require more than available of any input
newscale= min(maxinpvol,inp[i].volume-inp[i].plate.unusableVolume()-2)/vol[i]
if newscale<1:
vol = [x * 1.0 * newscale for x in vol]
if tgtconc is not None:
mixvol *= newscale # Maintain same target concentration by reducing total volume
if min(vol) < 4.0:
# Some components are too small; split mixing
lowvol=[i for i in range(len(inp)) if vol[i]<4.0]
highvol=[i for i in range(len(inp)) if i not in lowvol]
assert len(highvol)>0
assert len(lowvol)>0
lowtgtconc=sum([inp[i].conc.stock *1.0/ weights[i] for i in highvol])/len(highvol)*sum([weights[i] for i in lowvol])
print("Running premix of samples "+",".join(["%d"%ind for ind in lowvol])+" with target concentration of %.4f"%lowtgtconc)
mix1=self.mix([inp[i] for i in lowvol],[weights[i] for i in lowvol],tgtconc=lowtgtconc,mixvol=mixvol,maxinpvol=maxinpvol)
wt1=sum([weights[i] for i in lowvol])
mix2=self.mix([inp[i] for i in highvol]+[mix1],[weights[i] for i in highvol]+[wt1],tgtconc=tgtconc,mixvol=mixvol,maxinpvol=maxinpvol)
return mix2
print("Mixing into %.0ful with tgtconc of %s, dil=%.2f"%(mixvol,"None" if tgtconc is None else "%.4f"%tgtconc,mixvol/sum(vol)))
for i in range(len(inp)):
print("%-30.30s %6.3fnM wt=%5.2f v=%5.2ful"%(inp[i].name,inp[i].conc.stock,weights[i],vol[i]))
watervol = mixvol - sum(vol)
#print "Mixdown: vols=[", ",".join(["%.2f " % v for v in vol]), "], water=", watervol, ", total=", mixvol, " ul"
mixdown = Sample('mixdown', plate=trplayout.SAMPLEPLATE)
if watervol < -0.1:
print("Total mixdown is %.1f ul, more than planned %.0f ul" % (sum(vol), mixvol))
assert False
elif watervol >= 4.0: # Omit if too small
self.e.transfer(watervol, trplayout.WATER, mixdown, (False, False))
else:
pass
ordering=sorted(list(range(len(inp))),key=lambda i: vol[i],reverse=True)
for i in ordering:
inp[i].conc.final = inp[i].conc.stock * vol[i] / mixvol # Avoid warnings about concentrations not adding up
self.e.transfer(vol[i], inp[i], mixdown, (False, False))
self.e.shakeSamples([mixdown])
if not mixdown.wellMixed:
self.e.mix(mixdown)
mixdown.conc = Concentration(stock=sum([inp[i].conc.stock * vol[i] for i in range(len(inp))]) / mixvol,
final=None, units='nM')
print("Mix product, %s, is in well %s with %.1ful @ %.2f nM"%(mixdown.name,mixdown.plate.wellname(mixdown.well),mixdown.volume,mixdown.conc.stock))
print("----------")
return mixdown
def constrict(self, constrictin, conc):
"""Constrict sample with concentration given by conc (in M)"""
# noinspection PyPep8Naming
AN = 6.022e23
dil = conc * (self.con_pcr1inputvol * 1e-6) * AN / self.nmolecules
nstages = int(math.ceil(math.log(dil) / math.log(self.con_maxdilperstage)))
dilperstage = math.pow(dil, 1.0 / nstages)
print("Diluting by %.0fx in %.0f stages of %.1f" % (dil, nstages, dilperstage))
s = [trplayout.WATER] + [constrictin] * self.nconstrict + [trplayout.SSDDIL]
self.e.sanitize(3, 50) # Heavy sanitize
for j in range(nstages):
print("Stage ", j, ", conc=", conc)
if conc <= self.qconc * 1e-9:
self.q.addSamples(s, needDil=1.0, primers=self.qprimers, save=False)
s = self.runQPCRDIL(s, self.con_dilvol, dilperstage, dilPlate=True)
conc /= dilperstage
cycles = int(
math.log(self.con_pcr1tgtconc / conc * self.con_pcr1vol / self.con_pcr1inputvol) / math.log(self.pcreff) + 0.5)
pcr1finalconc = conc * self.con_pcr1inputvol / self.con_pcr1vol * self.pcreff ** cycles
print("Running %d cycle PCR1 -> %.1f pM" % (cycles, pcr1finalconc * 1e12))
s = s + [trplayout.WATER] # Extra control of just water added to PCR mix
pcr = self.runPCR(primers=None, src=s, vol=self.con_pcr1vol,
srcdil=self.con_pcr1vol * 1.0 / self.con_pcr1inputvol,
ncycles=cycles, master="MConstrict", kapa=True)
for p in pcr:
p.conc = Concentration(stock=pcr1finalconc * 1e9, final=pcr1finalconc / self.con_pcr2dil, units='nM')
self.e.addIdleProgram(self.q.idler) # Now that constriction is done, can start on qPCR setup
needDil = max(4, pcr1finalconc / self.qconc)
print("Running qPCR of PCR1 products using %.1fx dilution" % needDil)
self.q.addSamples(pcr, needDil=needDil, primers=self.qprimers, save=True)
pcr = pcr[1:-2] # Remove negative controls
cycles2 = int(math.log(self.con_pcr2tgtconc / pcr1finalconc * self.con_pcr2dil) / math.log(self.pcreff) + 0.5)
pcr2finalconc = pcr1finalconc / self.con_pcr2dil * self.pcreff ** cycles2
if cycles2 > 0:
print("Running %d cycle PCR2 -> %.1f nM" % (cycles2, pcr2finalconc * 1e9))
pcr2 = self.runPCR(primers="End", src=pcr, vol=self.con_pcr2vol, srcdil=self.con_pcr2dil,
ncycles=cycles2, master="MKapa", kapa=True)
self.q.addSamples(pcr2, needDil=pcr2finalconc / self.qconc, primers=self.qprimers, save=True)
for p in pcr2:
p.conc = Concentration(stock=pcr2finalconc * 1e9, units='nM')
self.e.waitpgm()
return pcr2
else:
return pcr
def regenerate(self, inp, prefix):
"""Regenerate T7 templates without barcodes with each of the given prefixes"""
print("Regen Predilute: %.1f nM by %.1fx to %.2f nM" % (
inp[0].conc.stock, self.regen_predil, inp[0].conc.stock / self.regen_predil))
d1 = self.runQPCRDIL(inp, self.regen_predilvol, self.regen_predil, dilPlate=True)
inconc = inp[0].conc.stock / self.regen_predil / self.regen_dil
print("Regen PCR: %.3f nM with %d cycles -> %.1f nM" % (
inconc, self.regen_cycles, inconc * self.pcreff ** self.regen_cycles))
res = self.runPCR(src=d1, srcdil=self.regen_dil, vol=self.regen_vol,
ncycles=self.regen_cycles,
primers=["T7%sX" % p for p in prefix], fastCycling=False, master="MKapa", kapa=True)
return res
| [
"bst@tc.com"
] | bst@tc.com |
bab6213702161494b8ba05d3ddcaf9d1faf292fb | a0f218eafbc459f9ba6603219a8d18366c79453a | /Filip/GTSRB/CNN_3/Main3.py | 4d608e932ee8770c38606db35445217e13211f5c | [] | no_license | PerJNilsson/Deep-learning-image-recognition | 634d48976bae16ec6e07e21c7f00e6fd979d6f67 | 28b4e267996f5f37f09a6eb4d6b26b464c263b5a | refs/heads/master | 2022-12-05T22:12:00.229546 | 2018-05-07T07:58:07 | 2018-05-07T07:58:07 | 118,593,152 | 4 | 2 | null | 2022-11-29T12:02:48 | 2018-01-23T10:12:50 | Python | UTF-8 | Python | false | false | 6,620 | py |
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
from readData import readData
from readData import readValidationData
from readData import oneHotEncode
from readData import normaliseImage
import Testcases3.testcase1
import Testcases3.testcase2
import Testcases3.testcase3
import Testcases3.testcase4
import Testcases3.testcase5
import Testcases3.testcase6
import Testcases3.testcase7
import Testcases3.testcase8
import Testcases3.testcase9
import Testcases3.testcase10
import Testcases3.testcase11
import Testcases3.testcase12
import Testcases3.testcase13
import Testcases3.testcase14
import Testcases3.testcase15
import Testcases3.testcase16
# --- Run configuration (the commented values are the full-scale settings) ---
epochs = 1 #30
test_size = 5 #15000
training_size = 50 # 40000
num_classes = 43 #43
result_file = "test_run_results.txt"
# input image dimensions
img_x, img_y = 32, 32
# load data sets
#arr, labels, images = readData('C:/Users/Filip/Documents/Kandidat/GTSRB/Final_Training/Images', num_classes, (img_x, img_y))
#v_arr, v_labels, v_images = readValidationData('C:/Users/Filip/Documents/Kandidat/GTSRB/Final_Test/Images',
#(img_x, img_y), test_size)
#x_train = np.asarray(arr)
#x_test = np.asarray(v_arr)
#np.save("xtrain", arr)
#np.save("ytrain", labels)
#np.save("xtest", v_arr)
#np.save("ytest", v_labels)
# Load the pre-converted GTSRB arrays from .npy caches in the working directory
# (produced once by the commented-out readData/np.save block above).
x_train = np.load("xtrain.npy")
labels = np.load("ytrain.npy")
x_test = np.load("xtest.npy")
v_labels = np.load("ytest.npy")
print(x_train.shape)
# Shuffle images and labels with the same random permutation so they stay paired.
randomIndexMatrix = np.arange(x_train.shape[0])
np.random.shuffle(randomIndexMatrix)
x_train = x_train[randomIndexMatrix]
labels = labels[randomIndexMatrix]
# One-hot encode the class labels (43 GTSRB sign classes).
y_train = oneHotEncode(labels, num_classes)
y_test = oneHotEncode(v_labels, num_classes)
# Truncate to the configured subset sizes for this smoke-test sized run.
x_train = x_train[:training_size]
y_train = y_train[:training_size]
x_test = x_test[:test_size]
y_test = y_test[:test_size]
# reshape the data into a 4D tensor - (sample_number, x_img_size, y_img_size, num_channels)
# GTSRB sign images are RGB colour, so num_channels is 3 (a greyscale set would use 1)
x_train = x_train.reshape(x_train.shape[0], img_x, img_y, 3)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 3)
input_shape = (img_x, img_y, 3)
# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Second preprocessing variant: normaliseImage comes from readData --
# presumably per-image contrast/brightness normalisation (TODO confirm).
x_train_processed = normaliseImage(x_train)
x_test_processed = normaliseImage(x_test)
# First variant: plain scaling of the raw pixel values into [0, 1].
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('============================')
print('x_train shape', x_train.shape)
print('x_test shape', x_test.shape)
print('y_train shape', y_train.shape)
print('y_test shape', y_test.shape)
# Visualize data
# NOTE: each plt.show() blocks until its window is closed.
plt.imshow(x_train[0])
plt.show()
plt.imshow(x_train_processed[0])
plt.show()
plt.imshow(x_train[1])
plt.show()
plt.imshow(x_train_processed[1])
plt.show()
# --- Hyper-parameters shared by every architecture in the sweep ---
batch_size = 10
lr = 0.01
verbose_value = 1
train_data = (x_train, x_train_processed)
test_data = (x_test, x_test_processed)
drop_outs = (0.2, 0.4, 0.6)
# All architecture variants under test, run in order for every configuration.
# Replaces 16 copy-pasted identical call sites with a single loop.
testcases = [
    Testcases3.testcase1, Testcases3.testcase2, Testcases3.testcase3,
    Testcases3.testcase4, Testcases3.testcase5, Testcases3.testcase6,
    Testcases3.testcase7, Testcases3.testcase8, Testcases3.testcase9,
    Testcases3.testcase10, Testcases3.testcase11, Testcases3.testcase12,
    Testcases3.testcase13, Testcases3.testcase14, Testcases3.testcase15,
    Testcases3.testcase16,
]
# Truncate any previous results file; the original left this handle open.
open(result_file, "w").close()
for j in range(0, 2):  # j = 0: raw [0,1]-scaled images, j = 1: normalised images
    x1 = train_data[j]
    x2 = test_data[j]
    for i in range(0, 3):  # one pass per dropout rate
        with open(result_file, "a") as myfile:
            myfile.write("======================================\n"
                         "=========== TEST RUN " + str(j+1) + ", " + str(i+1) + " ============\n"
                         "======================================\n\n")
        do = drop_outs[i]
        for tc in testcases:
            tc.cnn(x1, y_train, x2, y_test, batch_size, epochs,
                   num_classes, input_shape, do, lr, verbose_value, result_file)
with open(result_file, "a") as myfile:
    myfile.write("======================================\n"
                 "======= TEST ENDED SUCCESSFULLY ======\n"
                 "======================================\n\n")
#file = open("results.txt", "w")
#file.write()
#file.close() | [
"filhei@student.chalmers.se"
] | filhei@student.chalmers.se |
7900f11b145d7a10b7f90ff997eec52f7b404b99 | 3beb32a470f411278cbc2619f21fb94a542e468c | /main.py | 227c9e9a736ec8e5ccabb184611978cc6de4ac16 | [
"MIT"
] | permissive | cassiofb-dev/pandasscript | 5d65f0f8fba91a11367da321e39bfd07116d8dd5 | aa708e3d94efd434efdf2db7731c32d2a534c289 | refs/heads/master | 2023-04-28T01:58:43.513833 | 2021-05-21T02:04:09 | 2021-05-21T02:04:09 | 367,987,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | import sys
from lang.ps import start
def main():
    """Entry point: hand the raw CLI arguments to the pandasscript interpreter."""
    start(sys.argv)
# Only run when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
"cassiofb.souza@gmail.com"
] | cassiofb.souza@gmail.com |
3e781df8255b5996389edf5779bd5da42cd892e9 | 000c243b4c30bd089867f73ca1bcfede1c3ef801 | /catkin_ws/devel/lib/python2.7/dist-packages/mapviz/srv/_AddMapvizDisplay.py | 51a8c74fc59d389bcea1f3394153276017a0544d | [] | no_license | dangkhoa1210/SLAM-AND-NAVIGATION-FOR-MOBILE-ROBOT-OUTDOOR-INDOOR- | b4d9bf2757d839d9766d512c2272731300320925 | 7273ea9e966353440d3993dcba112bc0a2262b98 | refs/heads/master | 2023-07-15T14:07:17.123812 | 2021-09-02T10:12:30 | 2021-09-02T10:12:30 | 402,361,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,142 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mapviz/AddMapvizDisplayRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import marti_common_msgs.msg
class AddMapvizDisplayRequest(genpy.Message):
_md5sum = "d99db34575927545707e7081858716f3"
_type = "mapviz/AddMapvizDisplayRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# Add or updates a mapviz display.
string name # The name of the display.
string type # The plugin type.
int32 draw_order # The display order. 1 corresponds
# to the first displayed, 2 to the
# second, -1 to last, and -2 to the
# second to last, etc. 0 will keep
# the current display order of an
# existing display and give a new
# display the last display order.
bool visible # If the display should be visible.
marti_common_msgs/KeyValue[] properties # Configuration properties.
================================================================================
MSG: marti_common_msgs/KeyValue
# An arbitrary key-value pair
string key
string value
"""
__slots__ = ['name','type','draw_order','visible','properties']
_slot_types = ['string','string','int32','bool','marti_common_msgs/KeyValue[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name,type,draw_order,visible,properties
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddMapvizDisplayRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
if self.type is None:
self.type = ''
if self.draw_order is None:
self.draw_order = 0
if self.visible is None:
self.visible = False
if self.properties is None:
self.properties = []
else:
self.name = ''
self.type = ''
self.draw_order = 0
self.visible = False
self.properties = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.type
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_iB().pack(_x.draw_order, _x.visible))
length = len(self.properties)
buff.write(_struct_I.pack(length))
for val1 in self.properties:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.properties is None:
self.properties = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.type = str[start:end].decode('utf-8', 'rosmsg')
else:
self.type = str[start:end]
_x = self
start = end
end += 5
(_x.draw_order, _x.visible,) = _get_struct_iB().unpack(str[start:end])
self.visible = bool(self.visible)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.properties = []
for i in range(0, length):
val1 = marti_common_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.properties.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.type
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_iB().pack(_x.draw_order, _x.visible))
length = len(self.properties)
buff.write(_struct_I.pack(length))
for val1 in self.properties:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.properties is None:
self.properties = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.type = str[start:end].decode('utf-8', 'rosmsg')
else:
self.type = str[start:end]
_x = self
start = end
end += 5
(_x.draw_order, _x.visible,) = _get_struct_iB().unpack(str[start:end])
self.visible = bool(self.visible)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.properties = []
for i in range(0, length):
val1 = marti_common_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.properties.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_iB = None
def _get_struct_iB():
global _struct_iB
if _struct_iB is None:
_struct_iB = struct.Struct("<iB")
return _struct_iB
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mapviz/AddMapvizDisplayResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class AddMapvizDisplayResponse(genpy.Message):
_md5sum = "937c9679a518e3a18d831e57125ea522"
_type = "mapviz/AddMapvizDisplayResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """
bool success # indicate successful run of triggered service
string message # informational, e.g. for error messages
"""
__slots__ = ['success','message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddMapvizDisplayResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.message is None:
self.message = ''
else:
self.success = False
self.message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    This message has no array fields, so the body mirrors
    :meth:`deserialize`; *numpy* is accepted for API symmetry only.
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      # success: one unsigned byte, coerced to bool.
      start = end
      end += 1
      (self.success,) = _get_struct_B().unpack(str[start:end])
      self.success = bool(self.success)
      # message: uint32 length prefix, then that many payload bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.message = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.message = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared Struct for uint32 ('<I') fields, provided ready-made by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor kept for symmetry with the lazily-built per-type struct getters.
    global _struct_I
    return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class AddMapvizDisplay(object):
  """Service metadata container pairing request and response classes.

  Used by rospy's service machinery to dispatch mapviz/AddMapvizDisplay
  calls; it holds no state of its own.
  """
  _type = 'mapviz/AddMapvizDisplay'
  # MD5 of the service definition; must match the peer's checksum at runtime.
  _md5sum = '7bd51d7a9d8469fae51039cf79b96d10'
  _request_class  = AddMapvizDisplayRequest
  _response_class = AddMapvizDisplayResponse
| [
"dangkhoaphamdang1210@gmail.com"
] | dangkhoaphamdang1210@gmail.com |
1b7eedeedc6292e24cbbcbc5343e0ff72b5ffc32 | 0cfb7a13e55724b3b0f5594052e26123112ac5e7 | /study/output-format.py | 7c733e659fda68166caa4b9b75afc803a792e1c9 | [
"BSD-3-Clause"
] | permissive | 1GHL/PythonLearn | f6806bda338e1eaa103f8ba425127e5e94b6d591 | d25f0a23d1a76879e62aefae9b2ed99aa8f10d31 | refs/heads/master | 2016-09-06T14:14:52.082741 | 2014-02-22T16:51:39 | 2014-02-22T16:51:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #output format
s = 'ABC'
print str(s)
print repr(s)
print str(0.1)
print repr(0.1)
a = '1'
b = 1
print a == b
print a == repr(b)
print '10'.rjust(10)
print '10'.ljust(10) ,'|'
print '10'.zfill(10)
print '-10'.zfill(10)
print '10.001'.zfill(2)
print int(a) == b
| [
"yhsponder@gmail.com"
] | yhsponder@gmail.com |
e6d07f2c01c4c1d39ab0dfea9c3f656698ce7b9b | f90c26a082803dd2eeacd3545f1dd1784040e628 | /utils/ps.py | 2a4f52aa4398b063271a08ff9018f0b0f50e273e | [] | no_license | knek-little-projects/smartlock | 37a9fb635a30b30fb5180abfd1be686cf2a3284c | 86a0d2a3b07677238d4e6f2f420ca4715fcefb09 | refs/heads/main | 2023-08-30T17:21:58.938866 | 2021-11-17T11:39:29 | 2021-11-17T11:39:29 | 377,664,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | #!python3.8
from typing import *
from psutil import Process
from utils.headhash import headhash
import psutil
import logging
import subprocess
def ps_info(exe, prop) -> bytes:
    """Query a file-version property of *exe* via PowerShell.

    Runs ``[System.Diagnostics.FileVersionInfo]::GetVersionInfo(exe).prop``
    and returns the raw stdout bytes of the PowerShell invocation.
    NOTE(review): *exe* is interpolated into the PowerShell command string
    unescaped — paths containing '"' would break the command; confirm inputs
    always come from psutil before hardening.
    """
    command = '[System.Diagnostics.FileVersionInfo]::GetVersionInfo("%s").%s' % (exe, prop)
    result = subprocess.run(["powershell", "-Command", command], capture_output=True)
    return result.stdout
def _transform_user_name(s):
s = s.strip()
if '\\' in s:
s = s.split('\\')[-1]
return s.lower()
def is_user_eq(a, b):
    """True when *a* and *b* name the same account after normalisation."""
    left = _transform_user_name(a)
    right = _transform_user_name(b)
    return left == right
def ps_bw_filter(
        user: str,
        path_wl: List[str],
        name_bl: List[str],
        cname_bl: List[str],
        hash_bl: List[str],
) -> Iterator[Process]:
    """Yield processes owned by *user* that match one of the blacklists.

    :param user: account name whose processes are inspected
    :param path_wl: executable paths to skip entirely (whitelist)
    :param name_bl: process names that are always flagged
    :param cname_bl: file-version CompanyName values that are flagged
    :param hash_bl: head-of-file hashes (see ``headhash``) that are flagged
    """
    # Normalise the lists into sets once up front for O(1) membership tests.
    path_wl = {path.strip().lower() for path in path_wl}
    name_bl = {name.strip().lower() for name in name_bl}
    # ps_info() returns raw bytes, so the CompanyName blacklist is encoded.
    cname_bl = {cname.strip().encode() for cname in cname_bl}
    for p in psutil.pids():
        try:
            p = Process(p)
            if not is_user_eq(p.username(), user):
                continue
            if not p.exe():
                continue
            # Whitelisted executables are never flagged.
            if p.exe().lower() in path_wl:
                continue
            if p.name().lower() in name_bl:
                yield p
                continue
            if headhash(p.exe()) in hash_bl:
                yield p
                continue
            # Company-name lookup spawns PowerShell, so it runs last.
            cname = ps_info(p.exe(), 'CompanyName').strip()
            if cname in cname_bl:
                yield p
                continue
        # Processes may vanish or deny access mid-scan; skip them silently.
        except psutil.AccessDenied:
            continue
        except psutil.NoSuchProcess:
            continue
        except OSError:
            continue
def killall(processes: Iterator[Process]):
    """Kill every process in *processes*, logging and skipping any failure."""
    for proc in processes:
        try:
            print("KILL", proc.exe())
            proc.kill()
        except Exception as err:
            # Best-effort sweep: a process that cannot be killed is logged
            # and the loop moves on to the next one.
            logging.error(err)
            continue
"sknek@ya.ru"
] | sknek@ya.ru |
5cff6d1e75311f6a39ff6edc9ee7a41307b16b8f | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /dashboard/dashboard/api/sheriffs.py | 6a66dcc1d8196a45035d18f819709bca37d2f30c | [
"BSD-3-Clause"
] | permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 591 | py | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.api import api_request_handler
from dashboard.models import sheriff
class SheriffsHandler(api_request_handler.ApiRequestHandler):
    """API handler returning the string ids of all Sheriff entities."""

    def _CheckUser(self):
        # No access restriction: any caller may list sheriffs.
        pass

    def Post(self):
        # keys_only avoids fetching full entities; only the ids are needed.
        keys = sheriff.Sheriff.query().fetch(keys_only=True)
        return [k.string_id() for k in keys]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
aeadd558d70fd182c63b90eaf2fd71d11de90b1a | a7821394b1f9817d2d8a33f7638ced65a9f60336 | /p11.py | 835f5a8bfaf8afdff081a43dc4bd102ebdcbd6f4 | [] | no_license | Azizz007/jenkins_python | 0a2f7c34ab6d9a58bcedf0b4ae3d611c1ca7c095 | ffc33bc0fcc980d05f52f12c1842e0ae0949e1fd | refs/heads/master | 2023-01-03T09:15:46.023053 | 2020-10-29T14:18:51 | 2020-10-29T14:18:51 | 308,250,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print ("hey, 2")
| [
"you@example.com"
] | you@example.com |
a3a84e6f96f32f5fa8d637410c09ba7d3dce5b21 | e5da380fb50748bd0032d2d121454abe2ae48ded | /source/Predictor_TFLite.py | aa2420e64f9681740620dda1d3504fb6b00c91d0 | [] | no_license | tranminhhieulkqn/APIInteriorDesign-Back-Flask | 65e24ff2c2251ca802b9dd8a74af6f23dbda5cf9 | 23dc77a8db65eb09b8b539041d5d240fe335efc8 | refs/heads/main | 2023-06-21T08:56:32.681905 | 2021-07-25T15:26:17 | 2021-07-25T15:26:17 | 382,636,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,517 | py | import os
import time
import math
import numpy as np
import imageio as io
import tensorflow.lite as tflite
# import tflite_runtime.interpreter as tflite
from PIL import Image, ImageOps
class Predictor_TFLite:
    """Singleton ensemble of TFLite classifiers for interior-style images.

    On first construction every ``.tflite`` model under ``__models_dir`` is
    loaded and kept in ``__models``. ``ensemble_predict`` fetches an image
    from a URL, cuts it into 224x224 crops, runs every model on every crop
    and soft-votes the results into one probability vector.
    """

    __instance = None
    # name (filename without .tflite) -> tflite.Interpreter
    __models = dict({})
    __models_dir = ''
    # Side length (pixels) of the square crops fed to the models.
    __target_size = 224

    def __init__(self, models_dir='models_tflite/'):
        # NOTE(review): models are loaded *before* the singleton check, so a
        # second construction attempt pays the load cost and then raises.
        Predictor_TFLite.__models_dir = models_dir
        self.__load_models()
        """ Virtually private constructor. """
        if Predictor_TFLite.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            Predictor_TFLite.__instance = self

    @staticmethod
    def getInstance():
        """ Static access method. """
        # NOTE(review): `is None` would be the idiomatic identity check here.
        if Predictor_TFLite.__instance == None:
            Predictor_TFLite()
        return Predictor_TFLite.__instance

    @classmethod
    def __load_models(self):
        """ Load all model """
        time_start = time.time()
        # Every *.tflite file in the models directory becomes one interpreter.
        list_model = [model for model in os.listdir(self.__models_dir) if ('.tflite' in model)]
        for model_name in list_model:
            # get model path
            model_path = os.path.join(self.__models_dir, model_name)
            # loading and active model
            model = tflite.Interpreter(model_path=model_path)
            model.allocate_tensors()
            # get name model
            name = str(model_name.split('.tflite')[0])
            # append to array models
            self.__models[name] = model
        time_end = time.time() - time_start
        print('Load model successfully! Load in: {}'.format(round(time_end, 3)))

    @classmethod
    def __get_image_from_url(self, image_url):
        """ Get image from URL """
        # use library imageio to fetch/decode the image
        image = io.imread(image_url)
        # wrap as a PIL RGB image for cropping/resizing
        image = Image.fromarray(image.astype('uint8'), 'RGB')
        return image

    @classmethod
    def __customize_size(self, original_size, target_size):
        """Scale *original_size* so its shorter side equals *target_size*."""
        # default ratio = 1
        ratio = 1
        # get size width, height of image
        width, height = original_size
        # divide by the shorter side's ratio so that side lands on target_size
        ratio = (width / target_size) if (width < height) else (height / target_size)
        # return new width, height size
        return int(width / ratio), int(height / ratio)

    @classmethod
    def __get_step_from_size(self, size):  # size >= target_size
        """ From the target size and size calculate the number of crops """
        # get variable
        target_size = self.__target_size
        # get frac and whole
        frac, whole = math.modf(size / target_size)
        # a remainder above 20% of a crop earns one extra (overlapping) crop
        if frac > 0.2:
            whole += 1
        # return result
        return int(whole)

    @classmethod
    def __crop_image(self, image, area):
        """Crop *area* from *image* and fit it to the target square size."""
        # get variable
        target_size = self.__target_size
        # crop image with area
        c_img = image.crop(area)
        # return with fit image
        return ImageOps.fit(c_img, (target_size, target_size), Image.ANTIALIAS)

    @classmethod
    def __soft_voting(self, output):
        """ Use soft voting for results """
        # element-wise mean across the first axis (per-crop / per-model scores)
        return np.sum(output, axis=0) / len(output)

    @classmethod
    def __data_processing(self, image):
        """Slice *image* into normalised 224x224 crops ready for inference."""
        # get variable
        target_size = self.__target_size
        # temporary array of images to return
        images = np.empty((0, target_size, target_size, 3), dtype='float32')
        # get size to resize
        w, h = self.__customize_size(image.size, target_size)
        # resize image
        image = image.resize((w, h))
        # get the number of images that can be taken in rows and columns
        noCol = self.__get_step_from_size(w)
        noRow = self.__get_step_from_size(h)
        if noCol == 1 and noRow == 1:  # if can get only 1 image, crop the image in the center
            # get position crop
            x_ = (w - target_size) // 2
            y_ = (h - target_size) // 2
            # crop image
            area = (x_, y_, x_ + target_size, y_ + target_size)
            croped_image = self.__crop_image(image, area)
            # normalise to [0, 1] and add a leading batch axis
            croped_image = np.array(croped_image) / 255
            croped_image = croped_image.reshape(1, target_size, target_size, 3).astype(np.float32)
            # add to array
            images = np.append(images, croped_image, axis=0)
        else:  # if can get multi image
            # get step and position max for crop
            x_max, y_max = np.array((w, h)) - target_size  # get max position
            # get step
            stepCol = (x_max // (noCol - 1)) if (noCol > 1) else 1
            stepRow = (y_max // (noRow - 1)) if (noRow > 1) else 1
            # slide a target_size window over the image at the computed steps
            for random_x in range(0, x_max + 1, stepCol):
                for random_y in range(0, y_max + 1, stepRow):
                    # crop image
                    area = (random_x, random_y, random_x + target_size, random_y + target_size)
                    croped_image = self.__crop_image(image, area)
                    # normalize and reshape
                    croped_image = np.array(croped_image) / 255
                    croped_image = croped_image.reshape(-1, target_size, target_size, 3).astype(np.float32)
                    # add to array
                    images = np.append(images, croped_image, axis=0)
        # return array
        return images

    @classmethod
    def __predict(self, model, images):
        """Run one interpreter over a batch of crops; return soft-voted scores."""
        # get variable
        target_size = self.__target_size
        # get number of images
        noImage = len(images)
        # get input and output of interpreter
        input_details = model.get_input_details()
        output_details = model.get_output_details()
        # if the interpreter's batch dimension differs, resize the tensors
        if noImage != input_details[0]['shape'][0]:
            model.resize_tensor_input(input_details[0]['index'], (noImage, target_size, target_size, 3))
            # NOTE(review): output width is hard-coded to 5 — presumably the
            # number of style classes; confirm against the trained models.
            model.resize_tensor_input(output_details[0]['index'], (noImage, 5))
            model.allocate_tensors()
        # set input images with input layer interpreter
        model.set_tensor(input_details[0]['index'], images)
        # invoke
        model.invoke()
        # get the result in the output layer
        output = model.get_tensor(output_details[0]['index'])
        # average the per-crop predictions into one vector
        output = self.__soft_voting(output)
        # return result
        return output

    @staticmethod
    def ensemble_predict(image_url):
        """Fetch the image at *image_url* and return ensemble class scores."""
        image = Predictor_TFLite.__get_image_from_url(image_url)
        images = Predictor_TFLite.__data_processing(image=image)
        # images = image.resize((224, 224))
        # croped_image = np.array(images) / 255
        # images = croped_image.reshape(-1, Predictor_TFLite.__target_size, Predictor_TFLite.__target_size, 3).astype(np.float32)
        predictions = []
        # every loaded model votes on the same batch of crops
        for model_name in Predictor_TFLite.__models:
            prediction = Predictor_TFLite.__predict(Predictor_TFLite.__models[model_name], images)
            predictions.append(prediction)
        # average the per-model vectors into the final prediction
        predictions = Predictor_TFLite.__soft_voting(predictions)
        return predictions
| [
"46083795+tranminhhieulkqn@users.noreply.github.com"
] | 46083795+tranminhhieulkqn@users.noreply.github.com |
9be99380551ceec66ed3fd4136ce3dc3cec4facf | d81e47cb813d2265d5d1cdfb9642560ce28ae9c9 | /AlphaBetaAI.py | f4dac9a9d804e821de3e7e10c597df128e60060f | [] | no_license | p-takagi-atilano/ChessAI | 26c22c26301c92ce632d1dfe25a55131b53e045b | 7cf6669c9cf8250269de3c653863fe18283b96da | refs/heads/master | 2020-11-25T18:47:28.432556 | 2019-12-18T09:01:59 | 2019-12-18T09:01:59 | 228,799,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,961 | py | # Paolo Takagi-Atilano, October 3rd
import chess
from heapq import heappush, heappop
from math import inf
class AlphaBetaAI:
    """Chess player using depth-limited minimax with alpha-beta pruning."""

    def __init__(self, heuristic_fn, depth):
        # Maximum search depth (plies) before the heuristic cutoff applies.
        self.depth = depth
        self.heuristic_fn = heuristic_fn
        # Count of max_value/min_value invocations for the last search.
        self.function_calls = 0
        # Passed to heuristic_fn — presumably compensates for evaluating
        # leaves on the opponent's turn at even depths; TODO confirm against
        # the heuristic implementation.
        self.depth_fix = (depth % 2 == 0)

    def choose_move(self, board):
        """Return the legal move with the best alpha-beta value for the mover."""
        self.function_calls = 0
        moves = list(board.legal_moves)
        # setup
        final_move = None  # temporarily set to None, must change to a move object
        final_value = float('-inf')
        # root level acts as the maximising player: try each move, let MIN reply
        for move in moves:
            board.push(move)
            move_value = self.min_value(0, board, float('-inf'), float('inf'))
            board.pop()
            #print("MOVE: ", move)
            #print("SCORE: ", move_value)
            if move_value > final_value:
                final_value = move_value
                final_move = move
        #print("FINAL MOVE: ", final_move)
        print("FINAL SCORE: ", final_value)
        print("AlphaBeta AI recommending move " + str(final_move) + " after " + str(self.function_calls) + " function calls")
        return final_move

    # simulate max player
    def max_value(self, depth, board, alpha, beta):
        # increment function calls count
        self.function_calls += 1
        # checks to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find maximum possible value assuming min plays optimally
        v = float('-inf')
        for move in board.legal_moves:
            board.push(move)
            v = max(v, self.min_value(depth + 1, board, alpha, beta))
            board.pop()
            # pruning: MIN above would never allow a value >= beta
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    # simulate min player
    def min_value(self, depth, board, alpha, beta):
        # increment function calls count
        self.function_calls += 1
        # checks to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find minimum possible value assuming max plays optimally
        v = float('inf')
        for move in board.legal_moves:
            board.push(move)
            v = min(v, self.max_value(depth + 1, board, alpha, beta))
            board.pop()
            # pruning: MAX above would never allow a value <= alpha
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    def cutoff_test(self, depth, board):
        # True once the depth limit is reached or the game has ended.
        return self.depth <= depth or board.is_game_over()
# Alpha Beta with basic move reordering
class ReorderAlphaBetaAI:
    """Alpha-beta search that first orders root moves by heuristic value.

    Trying heuristically better moves first tends to tighten the alpha-beta
    window sooner and prune more of the tree.
    """

    def __init__(self, heuristic_fn, depth):
        # Maximum search depth (plies) before the heuristic cutoff applies.
        self.depth = depth
        self.heuristic_fn = heuristic_fn
        self.function_calls = 0
        # Passed to heuristic_fn (see AlphaBetaAI) — TODO confirm semantics.
        self.depth_fix = (depth % 2 == 0)

    def choose_move(self, board):
        """Return the best root move, exploring moves in heuristic order."""
        self.function_calls = 0
        moves_list = list(board.legal_moves)
        # heap of MovePq entries; best heuristic score pops first
        moves_heap = self.reorder(moves_list, board)
        # setup
        final_move = None  # temporarily set to None, must change to a move object
        final_value = float('-inf')
        # pop root moves best-first and evaluate each with alpha-beta
        while moves_heap:
            move_pq = heappop(moves_heap)
            board.push(move_pq.move)
            move_value = self.min_value(0, board, float('-inf'), float('inf'))
            board.pop()
            #print("MOVE: ", move)
            #print("SCORE: ", move_value)
            if move_value > final_value:
                final_value = move_value
                final_move = move_pq.move
        #print("FINAL MOVE: ", final_move)
        print("FINAL SCORE: ", final_value)
        print("ReorderAlphaBeta AI recommending move " + str(final_move) + " after " + str(
            self.function_calls) + " function calls")
        return final_move

    # simulate max player
    def max_value(self, depth, board, alpha, beta):
        # increment function calls count
        self.function_calls += 1
        # checks to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find maximum possible value if min plays optimally
        v = float('-inf')
        for move in board.legal_moves:
            board.push(move)
            v = max(v, self.min_value(depth + 1, board, alpha, beta))
            board.pop()
            # pruning
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    # simulate min player
    def min_value(self, depth, board, alpha, beta):
        # increment function calls count
        self.function_calls += 1
        # checks to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find minimum possible value if max plays optimally
        v = float('inf')
        for move in board.legal_moves:
            board.push(move)
            v = min(v, self.max_value(depth + 1, board, alpha, beta))
            board.pop()
            # pruning
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # cutoff test, returns true if depth limit is reached or game is over, false otherwise
    def cutoff_test(self, depth, board):
        return self.depth <= depth or board.is_game_over()

    # uses provided heuristic to order moves
    def reorder(self, moves_list, board):
        # the empty heap
        ordered = []
        # score each move one ply ahead and push onto the (max-behaving) heap
        for move in moves_list:
            board.push(move)
            priority = self.heuristic_fn(board, self.depth_fix)
            heappush(ordered, MovePq(move, priority))
            board.pop()
        return ordered
# Alpha Beta AI with move reordering and transposition table
class TransAlphaBetaAI:
    """Alpha-beta search with root move reordering and a per-move-choice
    transposition table keyed on the board's string representation."""

    def __init__(self, heuristic_fn, depth):
        # Maximum search depth (plies) before the heuristic cutoff applies.
        self.depth = depth
        self.heuristic_fn = heuristic_fn
        self.function_calls = 0
        # Passed to heuristic_fn (see AlphaBetaAI) — TODO confirm semantics.
        self.depth_fix = (depth % 2 == 0)

    def choose_move(self, board):
        """Return the best root move; positions seen before reuse cached values."""
        self.function_calls = 0
        moves_list = list(board.legal_moves)
        moves_heap = self.reorder(moves_list, board)
        # fresh table per decision: board string -> last computed value
        transposition_table = {}
        # setup
        final_move = None  # temporarily set to None, must change to a move object
        final_value = float('-inf')
        # pop root moves best-first and evaluate each with alpha-beta
        while moves_heap:
            move_pq = heappop(moves_heap)
            board.push(move_pq.move)
            move_value = self.min_value(0, board, float('-inf'), float('inf'), transposition_table)
            board.pop()
            #print("MOVE: ", move)
            #print("SCORE: ", move_value)
            if move_value > final_value:
                final_value = move_value
                final_move = move_pq.move
        #print("FINAL MOVE: ", final_move)
        print("FINAL SCORE: ", final_value)
        print("TransAlphaBeta AI recommending move " + str(final_move) + " after " + str(
            self.function_calls) + " function calls")
        return final_move

    # simulate max player
    def max_value(self, depth, board, alpha, beta, transposition_table):
        # increment function calls count
        self.function_calls += 1
        # check to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find maximum possible value if min plays optimally
        v = float('-inf')
        for move in board.legal_moves:
            board.push(move)
            # transposition table checks
            # NOTE(review): the reuse condition `table[s] < v` is identical in
            # max_value and min_value — verify it is intentional for both.
            board_str = str(board)
            if board_str not in transposition_table.keys() or transposition_table[board_str] < v:
                v = max(v, self.min_value(depth + 1, board, alpha, beta, transposition_table))
                transposition_table[board_str] = v
            else:
                v = transposition_table[board_str]
            board.pop()
            # pruning
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    # simulate min player
    def min_value(self, depth, board, alpha, beta, transposition_table):
        # increment function calls count
        self.function_calls += 1
        # check to see if it is a cutoff
        if self.cutoff_test(depth, board):
            return self.heuristic_fn(board, self.depth_fix)
        # find minimum possible value if max plays optimally
        v = float('inf')
        for move in board.legal_moves:
            board.push(move)
            # transposition table checks (see NOTE in max_value)
            board_str = str(board)
            if board_str not in transposition_table.keys() or transposition_table[board_str] < v:
                v = min(v, self.max_value(depth + 1, board, alpha, beta, transposition_table))
                transposition_table[board_str] = v
            else:
                v = transposition_table[board_str]
            board.pop()
            # pruning
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # cutoff test, returns true if depth limit is reached or game is over, false otherwise
    def cutoff_test(self, depth, board):
        return self.depth <= depth or board.is_game_over()

    # uses provided heuristic to order moves
    def reorder(self, moves_list, board):
        ordered = []
        # score each move one ply ahead and push onto the (max-behaving) heap
        for move in moves_list:
            board.push(move)
            priority = self.heuristic_fn(board, self.depth_fix)
            heappush(ordered, MovePq(move, priority))
            board.pop()
        return ordered
# class used to rank nodes for move reordering
class MovePq:
    """Heap entry pairing a move with its heuristic priority.

    The comparison is deliberately inverted so that heapq's min-heap pops
    the HIGHEST-priority move first.
    """

    def __init__(self, move, priority):
        self.move = move
        self.priority = priority

    def __lt__(self, other):
        # "less than" means "higher priority" — turns heapq into a max-heap.
        return other.priority < self.priority
| [
"ptakagia@gmail.com"
] | ptakagia@gmail.com |
62e6f72ada9d3b0c41d832cb762d19674ddd8e33 | 26c4136facd4aaf0cad7b916a401d4961a9162de | /fmhomework.py | d74de0d150724e02b840d17411d227ef617cd907 | [] | no_license | armennmuradyan/python_classes | 3620b598cdb882ea10b371138b9c0f76c74f1724 | 607bedcb1f5b28876f140831f562e8e64bb10275 | refs/heads/master | 2023-09-01T02:07:24.509340 | 2021-10-29T20:10:51 | 2021-10-29T20:10:51 | 410,246,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import os
# Interactive cleanup script: after confirmation, removes a fixed three-level
# folder structure under every directory in the current working directory.
user_input = input('do you want to delete folder a and all folders in a? yes for yes \n')
if user_input == 'yes':
    cwd = os.getcwd()
    list_cwd = os.listdir(cwd)
    for i in list_cwd:
        if os.path.isdir(os.path.join(cwd, i)):
            # NOTE(review): assumes each subfolder holds >= 2 entries where
            # index 1 is itself a directory with one child, plus a sibling
            # named 'b' — any other layout raises; TODO confirm structure.
            cwd_1 = cwd + "\\{}".format(i)
            list_cwd_1 = os.listdir(cwd_1)
            cwd_2 = cwd_1 + f'\\{list_cwd_1[1]}'
            list_cwd_2 = os.listdir(cwd_2)
            cwd_3 = cwd_2 + f'\\{list_cwd_2[0]}'
            cwd_b = cwd_1 +'\\b'
            # os.rmdir only removes empty dirs, so deletion runs deepest-first.
            os.rmdir(cwd_3)
            os.rmdir(cwd_2)
            os.rmdir(cwd_b)
            os.rmdir(cwd_1)
"armennmuradyan@gmail.com"
] | armennmuradyan@gmail.com |
8244a071ba6c6c82eaa4b39145d65913bec92dbb | 8b11b4b533eb1e1baf27292096920e4201f6fec0 | /ExportTargets.py | e59bbc27f488a89fa10c3d98d7657313486c42d1 | [] | no_license | judysu1983/pythonExercise | e8c72eee723554a1d4e266a567d2a5bc85890681 | c3847d497a88fed078d9fb45cb9d0dd3fb900e22 | refs/heads/master | 2021-01-12T09:14:48.564284 | 2016-12-18T20:47:37 | 2016-12-18T20:47:37 | 76,805,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,693 | py | #! python2.7
#see video https://www.udemy.com/automate/learn/v4/t/lecture/3470590
#https://www.udemy.com/automate/learn/v4/t/lecture/3470614
#export target files by project ID,
#open the project and view full path to find the download path
#get the download URL by regular expression
from selenium import webdriver
import re
import sys
import pyautogui,time
import shutil
import os
#check if the download file with same name already exists and empty the unzip to folder
# Remove any leftover download and empty the unzip target before starting.
# NOTE(review): "C:\\7z\unzip" mixes escaped and raw backslashes — valid on
# Python 2 (this script's target, per the shebang) but a SyntaxError on
# Python 3, where \u starts a unicode escape.
if os.path.exists("C:\\Users\\sujudy\\Downloads\\assets.zip"):
    os.remove("C:\\Users\\sujudy\\Downloads\\assets.zip")
if os.path.exists("C:\\7z\unzip"):
    shutil.rmtree("C:\\7z\unzip")
def exporttarget(projectID):
    """Download and unzip the target files for one WorldServer project.

    Drives Firefox through login, derives the asset download URL from the
    project's full asset path via regex, downloads assets.zip (using
    pyautogui to drive the browser's save dialog), unzips it with 7-Zip and
    renames the extracted folder to include *projectID*.
    """
    # clear any stale download so the new assets.zip gets the expected name
    if os.path.exists("C:\\Users\\sujudy\\Downloads\\assets.zip"):
        os.remove("C:\\Users\\sujudy\\Downloads\\assets.zip")
    browser = webdriver.Firefox()
    baseURL="http://worldserver9.amazon.com/ws/assignments_tasks?&token=1416441266&project="
    url=baseURL+projectID
    browser.get(url)
    #login
    def login():
        # credentials are read from a two-line file: username, then password
        login=open('C:\\Python27\\login.txt')
        loginname=login.readlines()
        name, pw=loginname
        name=name.strip()
        pw=pw.strip()
        login.close()
        username=browser.find_element_by_id('username')
        username.send_keys(name)
        password=browser.find_element_by_id('password')
        password.send_keys(pw)
        login=browser.find_element_by_id('loginButton')
        login.click()
        time.sleep(1)
    login()
    #click View Full asset paths check box
    checkbox=browser.find_element_by_name('viewFullPathMode')
    if not checkbox.is_selected():
        checkbox.click()
    #---print(checkbox.is_selected())
    asset=browser.find_element_by_partial_link_text('samples') #samples is the root folder name of ocelot projects
    filepath=asset.text
    #print(asset.text)
    #regular experssion to match the taget file download path
    assetRegex=re.compile(r'''
    #/samples/tam - soafba/Projects/534956_Paramount_2 Workflows_Aug 4_Blurbs_DE/Source-English/Product Identifiers_blurbs_US_clean for translation.xml../Target-German/Product Identifiers_blurbs_US_clean for translation.xml
    #samples
    #client name
    #projects group number  project name
    /samples/.*?/\d{6}.*?/
    ''', re.VERBOSE)
    # the project folder itself starts with a six-digit group number
    foldernameRegex=re.compile(r'''
    /\d{6}.*?/
    ''', re.VERBOSE)
    foldername=foldernameRegex.findall(asset.text)
    foldernamestr=''.join(foldername) #convert list to string
    foldernamestr=foldernamestr.split('/')
    downloadFolder=foldernamestr[1]
    #print(downloadFolder)
    PartialDownloadPath = assetRegex.findall(asset.text)
    #convert list to string
    PartialDownloadPathStr=''.join(PartialDownloadPath)
    #print(PartialDownloadPathStr)
    #partialdownloadURL
    # percent-encode the path pieces the way WorldServer's download URL expects
    str1=PartialDownloadPathStr.replace("/","%2F")
    str2=str1.replace("(","%28")
    str3=str2.replace(")","%29")
    str4=str3.replace(" ","+")
    str5=str4.replace("%2FProjects%2F","%2FProjects&aisSP=%2F")
    #str5 is that path format required by WS
    downloadURL="http://worldserver9.amazon.com/ws/download_assets?&aisCF="+str5+"&token=937829789"
    print('Downloading '+projectID+'\n'+downloadURL+'\n')
    #open project group download page by webdriver
    browser.get(downloadURL)
    #login again:
    login()
    #click download button
    downloadButton=browser.find_element_by_id('__wsDialog_button_download')
    downloadButton.click()
    time.sleep(3)
    #press Save file and OK on the pop up window
    pyautogui.hotkey('alt', 's')
    time.sleep(1)
    pyautogui.press('enter')
    time.sleep(2)
    #unzip the download file to C:\7z\unzip
    os.system(r"C:\7z\7z e C:\Users\sujudy\Downloads\assets.zip -oC:\7z\unzip -spf -aos")
    browser.quit()
    #rename the folder from project groupID to projectID
    if os.path.exists(os.path.join('C:\\7z\\unzip',downloadFolder)):
        #print("Folder found, rename it.")
        newname1=projectID+'#'+downloadFolder
        os.rename(os.path.join('C:\\7z\\unzip',downloadFolder),os.path.join('C:\\7z\\unzip',newname1))
# Driver: read project IDs (one per line) and export each in turn.
PJlist=open('C:\\Python27\\ExportTargets.txt')
projectIDs=PJlist.readlines()
for p in projectIDs:
    p=p.strip()
    exporttarget(p)
    # brief pause between projects to let downloads/unzips settle
    time.sleep(2)
PJlist.close()
#rename C:\7z\unzip to a folder wtih todays date as folder name
def renameoutput():
    """Rename c:\\7z\\unzip to a 'Dumped_at_<timestamp>' folder."""
    basedir = 'c:\\7z'
    stamp = time.strftime("%m_%d_%Y_") + time.strftime("%H%M%S")
    os.rename(os.path.join(basedir, "unzip"), os.path.join(basedir, 'Dumped_at_' + stamp))
renameoutput()
| [
"noreply@github.com"
] | noreply@github.com |
083489b0aa7e59ee244bf586f19cc81b48dcdda1 | 8175b26c54021f66c85becd70b70e67f3f6536ef | /mongo_blast/getuniprottxt.py | 86b68f16cf958b228f56f20bc8dd4f71e7edf918 | [] | no_license | jiakangyuchi/mongoBlast | e138a2fcfe54e2d3aea4e0ceb8a70335137a9fc6 | 90e7cb1354e663796eb415d985ac769134f35407 | refs/heads/master | 2020-04-01T02:56:18.170947 | 2018-10-12T18:06:35 | 2018-10-12T18:06:35 | 152,802,503 | 0 | 0 | null | 2018-10-12T20:08:52 | 2018-10-12T20:08:52 | null | UTF-8 | Python | false | false | 116 | py | import functions
import os
if not os.path.exists("uniprotData"):
os.makedirs("uniprotData")
functions.getUniprot()
| [
"scn3d@mail.missouri.edu"
] | scn3d@mail.missouri.edu |
fa7f9e576cd87eeec1901d4b55fcce0fa603568c | f83f09f19035638acfb6fca1a0bfbbe3d0a8b193 | /python-package/learn2clean/regression/__init__.py | 7fe70d714a6696425436a4c17ff5557e467d40f5 | [
"BSD-3-Clause"
] | permissive | LaureBerti/Learn2Clean | ce847b97a7f61b1f4c59eab1fad09aff1115e1b7 | 8a83b3d0641c815b8dee4611a65a20877940fd3d | refs/heads/master | 2023-01-10T13:44:16.881474 | 2021-03-16T16:29:01 | 2021-03-16T16:29:01 | 178,432,205 | 42 | 11 | BSD-3-Clause | 2022-12-26T20:53:17 | 2019-03-29T15:35:49 | Python | UTF-8 | Python | false | false | 59 | py | from .regressor import Regressor
__all__ = ['Regressor', ]
| [
"LaureBerti@users.noreply.github.com"
] | LaureBerti@users.noreply.github.com |
12c1e9f39cad94697ac642a2b342136937d4f0fe | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py | 9751c236c69e44dbb07d504e8b417ae5707659af | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,737 | py | # 2017.08.29 21:45:24 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py
from gui.prb_control.entities.base.actions_validator import BaseActionsValidator, ActionsValidatorComposite, CurrentVehicleActionsValidator
from gui.prb_control.items import ValidationResult
class InQueueValidator(BaseActionsValidator):
"""
Is player in queue validator.
"""
def _validate(self):
if self._entity.isInQueue():
return ValidationResult(False)
return super(InQueueValidator, self)._validate()
class PreQueueActionsValidator(ActionsValidatorComposite):
"""
Pre queue actions validator base class. It has several parts:
- state validation
- vehicle validation
"""
def __init__(self, entity):
self._stateValidator = self._createStateValidator(entity)
self._vehiclesValidator = self._createVehiclesValidator(entity)
validators = [self._stateValidator, self._vehiclesValidator]
super(PreQueueActionsValidator, self).__init__(entity, validators)
def _createStateValidator(self, entity):
"""
Part of template method to build state validation part
"""
return InQueueValidator(entity)
def _createVehiclesValidator(self, entity):
"""
Part of template method to build vehicles validation part
"""
return CurrentVehicleActionsValidator(entity)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\base\pre_queue\actions_validator.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:45:25 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
437663f18609fe9bb8eb8a0899a1d6e2f1ff8691 | 81818e370c41f9ded6a4b85e90b24e33a7c1281e | /src/crawler/crawler/spiders/roma.py | 9f3eb37cf3f63a7f1b369344525146d369eeef64 | [] | no_license | havalakm/Updated-Project | 529b349023d2aee71e5039ded07078c29a77b38f | dffa48867320e67c2e41c274d319133903963995 | refs/heads/master | 2022-12-14T09:50:03.080482 | 2020-09-20T20:26:24 | 2020-09-20T20:26:24 | 295,202,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import colorama
import scrapy
class RomaSpider(scrapy.Spider):
    """Spider that collects product names from the roma.de main navigation."""
    name = 'roma'
    allowed_domains = ['roma.de']

    def __init__(self, *args, **kwargs):
        super(RomaSpider, self).__init__(*args, **kwargs)
        self.start_url = "http://roma.de/"
        self.start_parse = self.parse_products
        # carry over any caller-provided product list, otherwise start fresh
        if "products" in kwargs:
            self.products = kwargs["products"]
        else:
            self.products = []

    def start_requests(self):
        first_request = scrapy.Request(url=self.start_url, callback=self.start_parse)
        yield first_request

    def parse_products(self, response):
        """Collect product link texts from the first navigation submenu."""
        print(f"\t{colorama.Fore.CYAN}Crawling: {response.url}")
        nav = response.css("div.main-navigation-container")
        menu = nav.css("li.menu-item.has-subnavigation:first-child")
        # extend in place so a caller-supplied list keeps accumulating
        self.products += menu.css("li a ::text").getall()
        self.logger.info(f"Products: {self.products}")
| [
"dibyaranjan.sathua@gmail.com"
] | dibyaranjan.sathua@gmail.com |
108d277a97273fa561b7fce4d7e7a90615fe011f | 2bc95e428b262d1db456e1365e82a7babf254e71 | /rest_api_MTM/cust_adr_api/startapp.py | dca4048fb7b10c7d5111e538a11280804877e1f1 | [] | no_license | gaurshetty/rest_api_ORM | 43c91a11f41e0de1852aab1ebf1df560a4152ea1 | f0b8546cb4360abdad18927c43a482a6aaa26442 | refs/heads/master | 2020-11-28T11:35:19.834352 | 2019-12-28T05:45:21 | 2019-12-28T05:45:21 | 229,800,456 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from rest_api_MTM.cust_adr_api.address.adr_controller import *
from rest_api_MTM.cust_adr_api.customer.cust_controller import *
if __name__ == '__main__':
app.run(debug=True, port=5001)
| [
"gaurshetty@gmail.com"
] | gaurshetty@gmail.com |
d43ee75d816bf4ef9dfdffc51b74e863d683a569 | 4f7dddc0c99e59e9772db109ecae4bf94d1fce54 | /plugin.video.OTV_MEDIA/resources/lib/compat.py | 4b8323afca536ed0b58eeffab1685241ce7f95c9 | [] | no_license | krishnakumar34/otv_yeni | 1f7529a14c568a6abe0a37085050a53394bbce96 | 057578a80cab7e24558d0ce17410fa98f6de0cf9 | refs/heads/master | 2023-05-07T23:19:48.376003 | 2021-06-02T07:26:34 | 2021-06-02T07:26:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,887 | py | from sys import version_info
if version_info[0] == 2:
# Python 2
compat_str = unicode
from re import compile
from urllib import _hextochr
from urllib import urlencode as compat_urlencode
from urllib import quote as compat_quote
from urllib2 import urlopen as compat_urlopen
from urllib2 import Request as compat_Request
from urllib2 import HTTPError as compat_HTTPError
from urllib2 import URLError as compat_URLError
from urlparse import urljoin as compat_urljoin
from urlparse import urlparse as compat_urlparse
from urlparse import urlunparse as compat_urlunparse
def _unquote_to_bytes(string):
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, unicode):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
for item in bits[1:]:
try:
res.append(_hextochr[item[:2]])
res.append(item[2:])
except KeyError:
res.append(b'%')
res.append(item)
return b''.join(res)
def _unquote(string):
if '%' not in string:
string.split
return string
bits = compile(r'([\x00-\x7f]+)').split(string)
res = [bits[0]]
for i in range(1, len(bits), 2):
res.append(_unquote_to_bytes(bits[i]).decode('utf-8', 'replace'))
res.append(bits[i + 1])
return ''.join(res)
def _parse_qsl(qs):
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) == 2 and len(nv[1]):
name = unicode(_unquote(nv[0].replace('+', ' ')))
value = unicode(_unquote(nv[1].replace('+', ' ')))
r.append((name, value))
return r
def compat_parse_qs(qs):
parsed_result = {}
pairs = _parse_qsl(qs)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
else:
# Python 3
compat_str = str
from urllib.parse import urlencode as compat_urlencode
from urllib.parse import quote as compat_quote
from urllib.request import urlopen as compat_urlopen
from urllib.request import Request as compat_Request
from urllib.error import HTTPError as compat_HTTPError
from urllib.error import URLError as compat_URLError
from urllib.parse import urljoin as compat_urljoin
from urllib.parse import urlparse as compat_urlparse
from urllib.parse import parse_qs as compat_parse_qs
from urllib.parse import urlunparse as compat_urlunparse
# Disable certificate verification on python 2.7.9
sslContext = None
if version_info >= (2, 7, 9):
try:
import ssl
sslContext = ssl._create_unverified_context()
except Exception as e:
print('[YouTube] Error in set ssl context', e)
def compat_ssl_urlopen(url):
if sslContext:
return compat_urlopen(url, context=sslContext)
else:
return compat_urlopen(url)
| [
"orhantv@users.noreply.github.com"
] | orhantv@users.noreply.github.com |
38772a989f35297451fe429e94afdc2fa1e97c2d | d049c54f874df038385294a41250748802d07ff8 | /motion_detector.py | 3c363ef2cbda7c5541cd6ff6c6740f1adb877069 | [] | no_license | ribex/computer-vision | a6ee8fd50a5911dc54c086d78648936009663462 | 6bb1e3bd5fbcf51d19aeba7e6d8bb1a3dea6e347 | refs/heads/master | 2021-01-16T18:49:22.719034 | 2017-08-12T15:26:12 | 2017-08-12T15:26:12 | 100,121,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,977 | py | # USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
else:
camera = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
#save single frame when motion is detected
# k = cv2.waitKey(10)
# if k == 0x63 or k == 0x43:
print('capturing! jrtest.jpg')
cv2.imwrite("jrtest.jpg",frame)
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows() | [
"robertslafamilia@outlook.com"
] | robertslafamilia@outlook.com |
febfd5b2eafe78bde4218ddc057d9e3594551aba | 4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda | /python/oanda/models/client_configure_reject_transaction.py | 3288226fc72e0f163b859744219be769cb51631b | [
"MIT"
] | permissive | KoenBal/OANDA_V20_Client | ed4c182076db62ecf7a216c3e3246ae682300e94 | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | refs/heads/master | 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,328 | py | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClientConfigureRejectTransaction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'time': 'str',
'user_id': 'int',
'account_id': 'str',
'batch_id': 'str',
'request_id': 'str',
'type': 'str',
'alias': 'str',
'margin_rate': 'str',
'reject_reason': 'str'
}
attribute_map = {
'id': 'id',
'time': 'time',
'user_id': 'userID',
'account_id': 'AccountID',
'batch_id': 'batchID',
'request_id': 'requestID',
'type': 'type',
'alias': 'alias',
'margin_rate': 'marginRate',
'reject_reason': 'rejectReason'
}
def __init__(self, id=None, time=None, user_id=None, account_id=None, batch_id=None, request_id=None, type=None, alias=None, margin_rate=None, reject_reason=None): # noqa: E501
"""ClientConfigureRejectTransaction - a model defined in Swagger""" # noqa: E501
self._id = None
self._time = None
self._user_id = None
self._account_id = None
self._batch_id = None
self._request_id = None
self._type = None
self._alias = None
self._margin_rate = None
self._reject_reason = None
self.discriminator = None
if id is not None:
self.id = id
if time is not None:
self.time = time
if user_id is not None:
self.user_id = user_id
if account_id is not None:
self.account_id = account_id
if batch_id is not None:
self.batch_id = batch_id
if request_id is not None:
self.request_id = request_id
if type is not None:
self.type = type
if alias is not None:
self.alias = alias
if margin_rate is not None:
self.margin_rate = margin_rate
if reject_reason is not None:
self.reject_reason = reject_reason
@property
def id(self):
"""Gets the id of this ClientConfigureRejectTransaction. # noqa: E501
The Transaction's Identifier. # noqa: E501
:return: The id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ClientConfigureRejectTransaction.
The Transaction's Identifier. # noqa: E501
:param id: The id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._id = id
@property
def time(self):
"""Gets the time of this ClientConfigureRejectTransaction. # noqa: E501
The date/time when the Transaction was created. # noqa: E501
:return: The time of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this ClientConfigureRejectTransaction.
The date/time when the Transaction was created. # noqa: E501
:param time: The time of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._time = time
@property
def user_id(self):
"""Gets the user_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:return: The user_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this ClientConfigureRejectTransaction.
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:param user_id: The user_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: int
"""
self._user_id = user_id
@property
def account_id(self):
"""Gets the account_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the Account the Transaction was created for. # noqa: E501
:return: The account_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this ClientConfigureRejectTransaction.
The ID of the Account the Transaction was created for. # noqa: E501
:param account_id: The account_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def batch_id(self):
"""Gets the batch_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:return: The batch_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._batch_id
@batch_id.setter
def batch_id(self, batch_id):
"""Sets the batch_id of this ClientConfigureRejectTransaction.
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:param batch_id: The batch_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._batch_id = batch_id
@property
def request_id(self):
"""Gets the request_id of this ClientConfigureRejectTransaction. # noqa: E501
The Request ID of the request which generated the transaction. # noqa: E501
:return: The request_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ClientConfigureRejectTransaction.
The Request ID of the request which generated the transaction. # noqa: E501
:param request_id: The request_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def type(self):
"""Gets the type of this ClientConfigureRejectTransaction. # noqa: E501
The Type of the Transaction. Always set to \"CLIENT_CONFIGURE_REJECT\" in a ClientConfigureRejectTransaction. # noqa: E501
:return: The type of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ClientConfigureRejectTransaction.
The Type of the Transaction. Always set to \"CLIENT_CONFIGURE_REJECT\" in a ClientConfigureRejectTransaction. # noqa: E501
:param type: The type of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
allowed_values = ["CREATE", "CLOSE", "REOPEN", "CLIENT_CONFIGURE", "CLIENT_CONFIGURE_REJECT", "TRANSFER_FUNDS", "TRANSFER_FUNDS_REJECT", "MARKET_ORDER", "MARKET_ORDER_REJECT", "FIXED_PRICE_ORDER", "LIMIT_ORDER", "LIMIT_ORDER_REJECT", "STOP_ORDER", "STOP_ORDER_REJECT", "MARKET_IF_TOUCHED_ORDER", "MARKET_IF_TOUCHED_ORDER_REJECT", "TAKE_PROFIT_ORDER", "TAKE_PROFIT_ORDER_REJECT", "STOP_LOSS_ORDER", "STOP_LOSS_ORDER_REJECT", "TRAILING_STOP_LOSS_ORDER", "TRAILING_STOP_LOSS_ORDER_REJECT", "ORDER_FILL", "ORDER_CANCEL", "ORDER_CANCEL_REJECT", "ORDER_CLIENT_EXTENSIONS_MODIFY", "ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT", "TRADE_CLIENT_EXTENSIONS_MODIFY", "TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT", "MARGIN_CALL_ENTER", "MARGIN_CALL_EXTEND", "MARGIN_CALL_EXIT", "DELAYED_TRADE_CLOSURE", "DAILY_FINANCING", "RESET_RESETTABLE_PL"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def alias(self):
"""Gets the alias of this ClientConfigureRejectTransaction. # noqa: E501
The client-provided alias for the Account. # noqa: E501
:return: The alias of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this ClientConfigureRejectTransaction.
The client-provided alias for the Account. # noqa: E501
:param alias: The alias of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._alias = alias
@property
def margin_rate(self):
"""Gets the margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
The margin rate override for the Account. # noqa: E501
:return: The margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._margin_rate
@margin_rate.setter
def margin_rate(self, margin_rate):
"""Sets the margin_rate of this ClientConfigureRejectTransaction.
The margin rate override for the Account. # noqa: E501
:param margin_rate: The margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._margin_rate = margin_rate
@property
def reject_reason(self):
"""Gets the reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
The reason that the Reject Transaction was created # noqa: E501
:return: The reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._reject_reason
@reject_reason.setter
def reject_reason(self, reject_reason):
"""Sets the reject_reason of this ClientConfigureRejectTransaction.
The reason that the Reject Transaction was created # noqa: E501
:param reject_reason: The reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
allowed_values = ["INTERNAL_SERVER_ERROR", "INSTRUMENT_PRICE_UNKNOWN", "ACCOUNT_NOT_ACTIVE", "ACCOUNT_LOCKED", "ACCOUNT_ORDER_CREATION_LOCKED", "ACCOUNT_CONFIGURATION_LOCKED", "ACCOUNT_DEPOSIT_LOCKED", "ACCOUNT_WITHDRAWAL_LOCKED", "ACCOUNT_ORDER_CANCEL_LOCKED", "INSTRUMENT_NOT_TRADEABLE", "PENDING_ORDERS_ALLOWED_EXCEEDED", "ORDER_ID_UNSPECIFIED", "ORDER_DOESNT_EXIST", "ORDER_IDENTIFIER_INCONSISTENCY", "TRADE_ID_UNSPECIFIED", "TRADE_DOESNT_EXIST", "TRADE_IDENTIFIER_INCONSISTENCY", "INSUFFICIENT_MARGIN", "INSTRUMENT_MISSING", "INSTRUMENT_UNKNOWN", "UNITS_MISSING", "UNITS_INVALID", "UNITS_PRECISION_EXCEEDED", "UNITS_LIMIT_EXCEEDED", "UNITS_MIMIMUM_NOT_MET", "PRICE_MISSING", "PRICE_INVALID", "PRICE_PRECISION_EXCEEDED", "PRICE_DISTANCE_MISSING", "PRICE_DISTANCE_INVALID", "PRICE_DISTANCE_PRECISION_EXCEEDED", "PRICE_DISTANCE_MAXIMUM_EXCEEDED", "PRICE_DISTANCE_MINIMUM_NOT_MET", "TIME_IN_FORCE_MISSING", "TIME_IN_FORCE_INVALID", "TIME_IN_FORCE_GTD_TIMESTAMP_MISSING", "TIME_IN_FORCE_GTD_TIMESTAMP_IN_PAST", "PRICE_BOUND_INVALID", "PRICE_BOUND_PRECISION_EXCEEDED", "ORDERS_ON_FILL_DUPLICATE_CLIENT_ORDER_IDS", "TRADE_ON_FILL_CLIENT_EXTENSIONS_NOT_SUPPORTED", "CLIENT_ORDER_ID_INVALID", "CLIENT_ORDER_ID_ALREADY_EXISTS", "CLIENT_ORDER_TAG_INVALID", "CLIENT_ORDER_COMMENT_INVALID", "CLIENT_TRADE_ID_INVALID", "CLIENT_TRADE_ID_ALREADY_EXISTS", "CLIENT_TRADE_TAG_INVALID", "CLIENT_TRADE_COMMENT_INVALID", "ORDER_FILL_POSITION_ACTION_MISSING", "ORDER_FILL_POSITION_ACTION_INVALID", "TRIGGER_CONDITION_MISSING", "TRIGGER_CONDITION_INVALID", "ORDER_PARTIAL_FILL_OPTION_MISSING", "ORDER_PARTIAL_FILL_OPTION_INVALID", "INVALID_REISSUE_IMMEDIATE_PARTIAL_FILL", "TAKE_PROFIT_ORDER_ALREADY_EXISTS", "TAKE_PROFIT_ON_FILL_PRICE_MISSING", "TAKE_PROFIT_ON_FILL_PRICE_INVALID", "TAKE_PROFIT_ON_FILL_PRICE_PRECISION_EXCEEDED", "TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_MISSING", "TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_INVALID", "TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_MISSING", "TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_IN_PAST", 
"TAKE_PROFIT_ON_FILL_CLIENT_ORDER_ID_INVALID", "TAKE_PROFIT_ON_FILL_CLIENT_ORDER_TAG_INVALID", "TAKE_PROFIT_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_MISSING", "TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_INVALID", "STOP_LOSS_ORDER_ALREADY_EXISTS", "STOP_LOSS_ORDER_GUARANTEED_REQUIRED", "STOP_LOSS_ORDER_GUARANTEED_PRICE_WITHIN_SPREAD", "STOP_LOSS_ORDER_GUARANTEED_NOT_ALLOWED", "STOP_LOSS_ORDER_GUARANTEED_HALTED_CREATE_VIOLATION", "STOP_LOSS_ORDER_GUARANTEED_HALTED_TIGHTEN_VIOLATION", "STOP_LOSS_ORDER_GUARANTEED_HEDGING_NOT_ALLOWED", "STOP_LOSS_ORDER_GUARANTEED_MINIMUM_DISTANCE_NOT_MET", "STOP_LOSS_ORDER_NOT_CANCELABLE", "STOP_LOSS_ORDER_NOT_REPLACEABLE", "STOP_LOSS_ORDER_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED", "STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_SPECIFIED", "STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_MISSING", "STOP_LOSS_ON_FILL_REQUIRED_FOR_PENDING_ORDER", "STOP_LOSS_ON_FILL_GUARANTEED_NOT_ALLOWED", "STOP_LOSS_ON_FILL_GUARANTEED_REQUIRED", "STOP_LOSS_ON_FILL_PRICE_MISSING", "STOP_LOSS_ON_FILL_PRICE_INVALID", "STOP_LOSS_ON_FILL_PRICE_PRECISION_EXCEEDED", "STOP_LOSS_ON_FILL_GUARANTEED_MINIMUM_DISTANCE_NOT_MET", "STOP_LOSS_ON_FILL_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED", "STOP_LOSS_ON_FILL_DISTANCE_INVALID", "STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED", "STOP_LOSS_ON_FILL_DISTANCE_PRECISION_EXCEEDED", "STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_SPECIFIED", "STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_MISSING", "STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING", "STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID", "STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING", "STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST", "STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID", "STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID", "STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING", "STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID", "TRAILING_STOP_LOSS_ORDER_ALREADY_EXISTS", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MISSING", 
"TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_INVALID", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_PRECISION_EXCEEDED", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MINIMUM_NOT_MET", "TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING", "TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID", "TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING", "TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "TRAILING_STOP_LOSS_ORDERS_NOT_SUPPORTED", "TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING", "TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID", "CLOSE_TRADE_TYPE_MISSING", "CLOSE_TRADE_PARTIAL_UNITS_MISSING", "CLOSE_TRADE_UNITS_EXCEED_TRADE_SIZE", "CLOSEOUT_POSITION_DOESNT_EXIST", "CLOSEOUT_POSITION_INCOMPLETE_SPECIFICATION", "CLOSEOUT_POSITION_UNITS_EXCEED_POSITION_SIZE", "CLOSEOUT_POSITION_REJECT", "CLOSEOUT_POSITION_PARTIAL_UNITS_MISSING", "MARKUP_GROUP_ID_INVALID", "POSITION_AGGREGATION_MODE_INVALID", "ADMIN_CONFIGURE_DATA_MISSING", "MARGIN_RATE_INVALID", "MARGIN_RATE_WOULD_TRIGGER_CLOSEOUT", "ALIAS_INVALID", "CLIENT_CONFIGURE_DATA_MISSING", "MARGIN_RATE_WOULD_TRIGGER_MARGIN_CALL", "AMOUNT_INVALID", "INSUFFICIENT_FUNDS", "AMOUNT_MISSING", "FUNDING_REASON_MISSING", "CLIENT_EXTENSIONS_DATA_MISSING", "REPLACING_ORDER_INVALID", "REPLACING_TRADE_ID_INVALID"] # noqa: E501
if reject_reason not in allowed_values:
raise ValueError(
"Invalid value for `reject_reason` ({0}), must be one of {1}" # noqa: E501
.format(reject_reason, allowed_values)
)
self._reject_reason = reject_reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientConfigureRejectTransaction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"koen.bal@gmail.com"
] | koen.bal@gmail.com |
37b2edbd5350fdafb3947c51bccff57d8b2a8f8c | a7b78ab632b77d1ed6b7e1fa46c33eda7a523961 | /src/foreign_if/python/UT/src/arima/test_017.py | f77b8c3cf0aef2ee13aca3bae84e6965e68e5dd3 | [
"BSD-2-Clause"
] | permissive | frovedis/frovedis | 80b830da4f3374891f3646a2298d71a3f42a1b2d | 875ae298dfa84ee9815f53db5bf7a8b76a379a6f | refs/heads/master | 2023-05-12T20:06:44.165117 | 2023-04-29T08:30:36 | 2023-04-29T08:30:36 | 138,103,263 | 68 | 13 | BSD-2-Clause | 2018-12-20T10:46:53 | 2018-06-21T01:17:51 | C++ | UTF-8 | Python | false | false | 1,108 | py | #!/usr/bin/env python
import sys
import numpy as np
from frovedis.exrpc.server import FrovedisServer
from frovedis.mllib.tsa.arima.model import ARIMA
desc = "Testing fitted_values_ attribute after performing fit() on numpy array: "
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if argc < 2:
print ('Please give frovedis_server calling command as the first argument \n'
'(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
quit()
FrovedisServer.initialize(argvs[1])
# sample numpy array of shape (36,)
data = np.asarray([266, 145.9, 183.1, 119.3, 180.3,
168.5, 231.8, 224.5, 192.8, 122.9,
336.5, 185.9, 194.3, 149.5, 210.1,
273.3, 191.4, 287, 226, 303.6, 289.9,
421.6, 264.5, 342.3, 339.7, 440.4,
315.9, 439.3, 401.3, 437.4, 575.5,
407.6, 682, 475.3, 581.3, 646.9])
arima = ARIMA(endog=data, order=(2, 1, 2)).fit()
try:
arima.fittedvalues
print(desc, "Passed")
except:
print(desc, "Failed")
FrovedisServer.shut_down()
| [
"takuya_araki@nec.com"
] | takuya_araki@nec.com |
ce1cb5865e9e911f38666fc19c24063b6ad6e073 | ba2acd97bdae6f38d6f96fe8b87d20b03dbb32da | /api/migrations/0005_article_author.py | 40c62241a967f1977f1960ee8d99f8a3b258fe83 | [] | no_license | tajaouart/civi2 | 6a79fc4603c9e021d4fb9e1d017969ec2ac0f893 | dca97825b5fab743d0d57b198841ad52093d2d53 | refs/heads/master | 2022-11-21T18:19:19.409733 | 2020-07-25T16:15:40 | 2020-07-25T16:15:40 | 266,332,112 | 0 | 0 | null | 2020-06-16T21:18:59 | 2020-05-23T12:29:41 | JavaScript | UTF-8 | Python | false | false | 1,121 | py | # Generated by Django 3.0.6 on 2020-05-29 20:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20200529_2004'),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('description', models.TextField()),
('body', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articles', to='api.Author')),
],
),
]
| [
"tajaouartm@gmail.com"
] | tajaouartm@gmail.com |
a5e3a6d1d03d65ce0789ad3074c1b7f23c4e7d5e | c481d2a41be5dfaad2cb6ddc1710e78af455bbd9 | /mobile/views.py | 139f37ff37425b71d3ba68bcc304b47fefe61334 | [] | no_license | Qamar41/Empty-Django-Project | 8b571a2ce5cf6580208cad979f4e3a60e1da8b58 | 8f63834f65dd0ae051bc645c967ef2fd4b27f9f0 | refs/heads/master | 2023-03-11T19:57:06.858323 | 2021-03-01T16:25:14 | 2021-03-01T16:25:14 | 299,848,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from django.shortcuts import render , HttpResponse
# Create your views here.
def home(request):
return render(request,'index.html') | [
"qamarashfaq41@gmail.com"
] | qamarashfaq41@gmail.com |
c52bc5cd19d2140298c65216a92c5de0be6e6b88 | 05d3038180001200fb2dffc403a84f31eda89abd | /aok/apps/utils/appdf/buildmaster.py | 4c80631d22f08fe0db40fed9efc7418f2c7cfc0b | [] | no_license | 1d20/DITBOG | 20d005b018e16238bd72431abd81940508677903 | 5a8b2f0e5acf45c3a2e81e74f5df5d3690ee540d | refs/heads/master | 2021-01-18T21:14:22.276501 | 2017-08-01T14:16:34 | 2017-08-01T14:16:34 | 87,014,061 | 0 | 0 | null | 2017-08-01T14:16:35 | 2017-04-02T20:29:31 | JavaScript | UTF-8 | Python | false | false | 11,550 | py | #-*- coding:utf-8 -*-
from enum import Enum
class ImgType(Enum):
Icon = "icon"
Promo = "promo"
Screenshot = "screenshot"
class BuilderMaster:
dataMaster = None
tmp_dir = "tmp\\"
work_dir = ""
res_dir = ""
apk_dir = ""
d_eng = ""
d_rus = ""
data_table = ""
default_data_file = ""
package = ""
name = ""
appdf_dir = ""
cancelbuild = false
string[] table_values
Dictionary<string, string> table_Dictionary
def BuilderMaster(string dir, string package):
dataMaster = new DataMaster()
this.package = package
Debug.WriteLine(package)
name = package.Split('.')[2]
work_dir = dir + "\\"
res_dir = dir + "\\res_appdf\\" + name + "\\"
if (!Directory.Exists(res_dir))
{
MessageBox.Show(res_dir + " not found")
FolderBrowserDialog fbd = new FolderBrowserDialog()
fbd.SelectedPath = dir + "\\res_appdf\\"
DialogResult dr = fbd.ShowDialog()
if (dr == DialogResult.OK)
{
DirectoryInfo di = new DirectoryInfo(fbd.SelectedPath)
di.MoveTo(res_dir)
}
}
apk_dir = work_dir + "apk\\"
d_eng = work_dir + "full_description_eng.txt"
d_rus = work_dir + "full_description_rus.txt"
data_table = work_dir + "table.csv"
default_data_file = work_dir + "appdf.txt"
appdf_dir = work_dir + "appdf\\"
def PrepareData():
Directory.CreateDirectory(tmp_dir)
foreach (string file in Directory.GetFiles(tmp_dir))
File.Delete(file)
FileInfo fi
#region icon
fi = dataMaster.description.images.icon
fi.CopyTo(tmp_dir + fi.Name)
fi = new FileInfo(tmp_dir + fi.Name)
if (!fi.Extension.Contains(".png"))
{
using (Image icon = Image.FromFile(fi.FullName))
{
icon.Save(tmp_dir + "icon.png", ImageFormat.Png)
icon.Dispose()
}
fi = new FileInfo(tmp_dir + "icon.png")
}
ResizeImage(fi.FullName, ImgType.Icon)
dataMaster.description.images.icon = fi
#endregion
#region promo
fi = dataMaster.description.images.promo
fi.CopyTo(tmp_dir + fi.Name)
fi = new FileInfo(tmp_dir + fi.Name)
if (!fi.Extension.Contains(".png"))
{
using (Image promo = Image.FromFile(fi.FullName))
{
promo.Save(tmp_dir + "promo.png", ImageFormat.Png)
promo.Dispose()
}
fi = new FileInfo(tmp_dir + "promo.png")
}
ResizeImage(fi.FullName, ImgType.Promo)
dataMaster.description.images.promo = fi
#endregion
#region screenshots
List<FileInfo> screenshots = dataMaster.description.images.screenshots
for (int i = 0 i < screenshots.Count i++ )
{
screenshots[i].CopyTo(tmp_dir + screenshots[i].Name)
screenshots[i] = new FileInfo(tmp_dir + screenshots[i].Name)
if (!screenshots[i].Extension.Contains(".png"))
{
try
{
using (Image screen = Image.FromFile(screenshots[i].FullName))
{
screen.Save(tmp_dir + "screen_" + (i + 1).ToString() + ".png", ImageFormat.Png)
screen.Dispose()
}
}
catch
{
cancelbuild = true
return
}
screenshots[i] = new FileInfo(tmp_dir + "screen_" + (i + 1).ToString() + ".png")
}
ResizeImage(screenshots[i].FullName, ImgType.Screenshot)
}
if (screenshots.Count == 0)
cancelbuild = true
else
for (int i = screenshots.Count screenshots.Count < 4 i++)
{
File.Copy(screenshots[i - screenshots.Count].FullName, tmp_dir + "screen_" + (i + 1).ToString() + ".png")
screenshots.Add(new FileInfo(tmp_dir + "screen_" + (i + 1).ToString() + ".png"))
}
dataMaster.description.images.screenshots = screenshots
#endregion
def FindInAppdfTXT(string key):
value = ""
StreamReader sr = new StreamReader(default_data_file)
while (!sr.EndOfStream)
{
line = sr.ReadLine()
if (line.IndexOf(key) == 0)
{
value = line.Split(new char[]{':'}, 2)[1]
break
}
}
sr.Close()
return value
def FindInTableCSV(int j):
string[] table_keys = null
if (table_values == null)
{
StreamReader sr = new StreamReader(data_table)
table_keys = sr.ReadLine().Split('')
while (!sr.EndOfStream)
{
string line = sr.ReadLine()
if (line.Contains("" + name + ""))
{
table_values = line.Split('')
break
}
}
sr.Close()
}
if (table_Dictionary == null)
{
table_Dictionary = new Dictionary<string, string>()
for (int i = 0 i < table_keys.Length i++)
table_Dictionary.Add(table_keys[i], table_values[i])
}
return table_values[j]
def DescriptionReplacer(string str):
foreach (string key in from key in table_Dictionary.Keys orderby key.Length descending select key)
{
str = str.Replace("$" + key, table_Dictionary[key])
}
return str
def CollectData():
dataMaster.version = "1"
dataMaster.platform = "android"
dataMaster.package = package
dataMaster.categorization.type = FindInAppdfTXT("type")
dataMaster.categorization.category = FindInAppdfTXT("category")
dataMaster.categorization.subcategory = FindInAppdfTXT("subcategory")
if (dataMaster.categorization.subcategory == "-")
dataMaster.categorization.subcategory = ""
dataMaster.description.texts.title = FindInTableCSV(0)
dataMaster.description.texts.keywords = (FindInAppdfTXT("keywords").Replace("\"","") + "," + FindInTableCSV(3)).Replace(",", ", ")
dataMaster.description.texts.full_description = DescriptionReplacer(new StreamReader(d_eng).ReadToEnd())
dataMaster.description.texts.short_description = dataMaster.description.texts.full_description.Remove(77) + "..."
dataMaster.description.texts.features.Add("-")
dataMaster.description.texts.features.Add("-")
dataMaster.description.texts.features.Add("-")
try
{
dataMaster.description.images.icon = new FileInfo(Directory.GetFiles(res_dir, FindInAppdfTXT("icon_name_tamplate") + ".*")[0])
dataMaster.description.images.promo = new FileInfo(Directory.GetFiles(res_dir, FindInAppdfTXT("big_image_template") + ".*")[0])
}
catch
{
cancelbuild = true
return
}
foreach (string str in Directory.GetFiles(res_dir, FindInAppdfTXT("screenshots_name_tamplate") + "*"))
dataMaster.description.images.screenshots.Add(new FileInfo(str))
///////////////////////////////////////////////ru///////////////////////
Description_localization description_localization = new Description_localization()
description_localization.texts.title = FindInTableCSV(1)
description_localization.texts.keywords = dataMaster.description.texts.keywords
description_localization.texts.full_description = DescriptionReplacer(new StreamReader(d_rus).ReadToEnd())
description_localization.texts.short_description = description_localization.texts.full_description.Remove(77) + "..."
description_localization.texts.features.Add("-")
description_localization.texts.features.Add("-")
description_localization.texts.features.Add("-")
dataMaster.description_localizations.Add(description_localization)
dataMaster.apk_files.apk_file = new FileInfo(apk_dir + dataMaster.description.texts.title.Replace("Memory:", "Memoria") + ".apk")
dataMaster.customer_support.phone = FindInAppdfTXT("phone")
dataMaster.customer_support.email = FindInAppdfTXT("email")
dataMaster.customer_support.website = FindInAppdfTXT("website")
def BuildDescriptionXML():
string description = tmp_dir + "description.xml"
StreamWriter sw = new StreamWriter(description)
sw.Write(dataMaster.ToXML())
sw.Close()
def PackFile(string sourceFile, string destFile):
try
{
//Проверяем файл на существование
if (!File.Exists(sourceFile))
return false
//Создаем объект для работы с архивом
//Encoding может быть и UTF8
using (ZipFile zip = new ZipFile(destFile, Encoding.Default))
{
//Устанавливаем уровень сжатия
zip.CompressionLevel = Ionic.Zlib.CompressionLevel.Level9
//Задаем системную директорию TEMP для временных файлов
zip.TempFileFolder = Path.GetTempPath()
//Добавляем файл и указываем где он будет располагаться в архиве
//В данном случае - в корне архива
zip.AddFile(sourceFile, "\\")
//Сохраняем архив
zip.Save()
}
return true
}
catch
{
return false
}
def BuildAppDF()
CollectData()
if(!cancelbuild)
PrepareData()
if (!cancelbuild)
BuildDescriptionXML()
if (cancelbuild)
return
Debug.WriteLine("BuildAppDF")
string appdf_file = tmp_dir + package + ".appdf"
/*
if (!File.Exists(tmp_dir + "description.xml"))
{
Debug.WriteLine("no description.xml file!!!!!!!!!!!!!!!!!!!!!!!!!! skiped")
return
}
if( !File.Exists(dataMaster.description.images.icon.FullName) )
{
Debug.WriteLine("no icon file!!!!!!!!!!!!!!!!!!!!!!!!!! skiped")
return
}
if( !File.Exists(dataMaster.description.images.promo.FullName) )
{
Debug.WriteLine("no promo file!!!!!!!!!!!!!!!!!!!!!!!!!! skiped")
return
}
if( !File.Exists(dataMaster.apk_files.apk_file.FullName))
{
Debug.WriteLine("no apk file!!!!!!!!!!!!!!!!!!!!!!!!!! skiped")
return
}
*/
PackFile(tmp_dir + "description.xml", appdf_file)
PackFile(dataMaster.description.images.icon.FullName, appdf_file)
PackFile(dataMaster.description.images.promo.FullName, appdf_file)
PackFile(dataMaster.apk_files.apk_file.FullName, appdf_file)
foreach (FileInfo screen in dataMaster.description.images.screenshots)
PackFile(screen.FullName, appdf_file)
File.Copy(appdf_file, appdf_file.Replace(tmp_dir, appdf_dir))
| [
"detonavomek@gmail.com"
] | detonavomek@gmail.com |
52cfbc0fa5e8e1fdece97464294b988aff291601 | 071de17518a2efb6c7f7e347c137ca749afece8c | /src/Speech recongtion/train_mspeech.py | 7f7cc369fda3b3d5564b7ee2d9f78857a6d4df05 | [
"MIT"
] | permissive | robot-python/robot | 2232c37aef0d4438d89c6ea361472a4a83362319 | cd962e17c0048351b59d21128b66682fef4a3362 | refs/heads/master | 2021-02-08T10:35:53.199155 | 2020-03-07T00:14:25 | 2020-03-07T00:14:25 | 244,142,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | import platform as plat
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from SpeechModel import ModelSpeech
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Configure TensorFlow to pre-allocate at most 95% of GPU memory.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
#config.gpu_options.allow_growth=True # alternative: grow GPU memory on demand instead of pre-allocating
set_session(tf.Session(config=config))
datapath = ''
modelpath = 'model_speech'
if(not os.path.exists(modelpath)): # check whether the model-output directory exists
    os.makedirs(modelpath) # create it now so saving the model later cannot fail
system_type = plat.system() # file-path conventions differ per OS, so branch on platform
if(system_type == 'Windows'):
    datapath = 'E:\\语音数据集'
    modelpath = modelpath + '\\'
elif(system_type == 'Linux'):
    datapath = 'dataset'
    modelpath = modelpath + '/'
else:
    print('*[Message] Unknown System\n')
    datapath = 'dataset'
    modelpath = modelpath + '/'
ms = ModelSpeech(datapath)
#ms.LoadModel(modelpath + 'speech_model251_e_0_step_327500.model')
ms.TrainModel(datapath, epoch = 50, batch_size = 16, save_step = 500)
| [
"noreply@github.com"
] | noreply@github.com |
0dde22a75e526322e57315e97858c0e68117a40a | 37ed7babe536279dc9882de938fb738efd1e1e57 | /tests/util.py | e1619acb28c573c42a1e7c7605821a7feec8dd28 | [
"Apache-2.0"
] | permissive | iRobotCorporation/ssm-ctl | 5edcefe3f81789cda8f725c582131da4af1c7db5 | e6b379e388ff0ac60954aac155ea8c179da67bbf | refs/heads/master | 2021-04-27T10:35:42.596206 | 2019-07-26T13:26:21 | 2019-07-26T13:26:21 | 122,541,879 | 13 | 1 | Apache-2.0 | 2018-02-27T17:57:50 | 2018-02-22T22:16:16 | Python | UTF-8 | Python | false | false | 808 | py | from __future__ import absolute_import, print_function
from textwrap import dedent
def load(s):
    """Return *s* with any common leading indentation stripped."""
    cleaned = dedent(s)
    return cleaned
class Prompter(object):
    """Callable test double that records every prompt and replays canned answers.

    One of three answer modes applies:
      * ``value``  -- the same answer for every prompt,
      * ``values`` as a dict -- answers looked up by prompt text,
      * ``values`` as a sequence -- answers consumed in order.
    """

    def __init__(self, value=None, values=None):
        self.value = value
        self.values = values
        self.prompts = []          # every prompt string seen, in order
        self.times_prompted = 0    # total number of calls
        self._index = 0            # cursor into a sequence-style ``values``

    def get_value(self, prompt):
        """Return the canned answer for *prompt* without recording it."""
        if self.value is not None:
            return self.value
        if isinstance(self.values, dict):
            return self.values[prompt]
        answer = self.values[self._index]
        self._index += 1
        return answer

    def __call__(self, prompt):
        """Record *prompt* and return the corresponding canned answer."""
        self.prompts.append(prompt)
        self.times_prompted += 1
        return self.get_value(prompt)
"bkehoe@irobot.com"
] | bkehoe@irobot.com |
cff47f16980cff35c6c45c8a8d3773a7ecea2c7a | 80275240fe67a9c5e4b2981fd041763d7c785cc3 | /libs/__init__.py | d45a2f2828d2fd4fe625c7a42b0ff6a6f9f95a32 | [] | permissive | francis-mujani/recognition_son | 8996a92b1a219f6e0ba4bf5c4efee34d8b7e2d5c | 900f5a5d1fae2a9980c253555302097335b3e310 | refs/heads/master | 2023-02-10T16:21:52.750096 | 2021-01-09T15:29:48 | 2021-01-09T15:29:48 | 303,012,206 | 0 | 0 | MIT | 2020-10-11T02:15:38 | 2020-10-11T00:16:37 | Python | UTF-8 | Python | false | false | 154 | py | import matplotlib
# matplotlib.use('Agg')
matplotlib.use('TkAgg')  # select the Tk GUI backend; must run before pyplot is imported
print('################### Helo ###########################')
def x():
    # Tiny smoke-test helper: prints a fixed marker string and returns None.
    print('XXX')
| [
"francisqo2002@yahoo.fr"
] | francisqo2002@yahoo.fr |
d0d7c4e1ac1cf56073d085772bab377f38fcb71e | 1b2d74c35420255781c867c95bc8c510e88d42db | /assignment1/chapter3_problem4.py | 347956d4dea89e9fcc41f946bc8f2f5b3bc41c42 | [] | no_license | BayoOlawumi/computer-applications-MTECH | fca62fec83337b8c3bf7b72e0637c57a34259ee3 | 57106cf9102a8cacd186f1af4fd22efed6a0b1d8 | refs/heads/master | 2023-07-19T05:31:44.616885 | 2021-09-13T21:22:59 | 2021-09-13T21:22:59 | 394,872,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import turtle
win = turtle.Screen()
win.bgcolor("lightgreen")
# NOTE(review): rebinding the name `turtle` shadows the module from here on,
# so the module's functions are no longer reachable below this line.
turtle = turtle.Turtle()
turtle.left(3645)  # 3645 degrees = 10 full revolutions plus 45 degrees counter-clockwise
win.mainloop()
"olawumiebay@gmail.com"
] | olawumiebay@gmail.com |
b072252a83ed9c9b0c1060b1badaa0369f8af2b4 | 67510dff509d4d2f54ab4dcdfa3ec1368f6f37b6 | /two_dimensional_js/python/task8.py | 997e244092804d828ac40dd8a469bddd97754f15 | [] | no_license | aleksei-stsigartsov/Old-Beginnings-In-Programming | 21c1de7628dcbf9ba1d4402d0a3b13db1592a6ff | d9bae08bc0a86070d5d1277c73a6988dd6c8cde2 | refs/heads/master | 2023-08-27T05:07:30.025292 | 2021-10-29T14:16:15 | 2021-10-29T14:16:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import os
import readFrom

# Load the two-dimensional array produced by the project's file helper.
arr = readFrom.fromfileToArr('input.txt')

# Average every element across all rows. The original used manual index-based
# while-loops; idiomatic sums compute the same total and element count.
total = sum(sum(row) for row in arr)
count = sum(len(row) for row in arr)

# Context manager guarantees the output file is flushed and closed on all
# paths (the original left the handle open if write() raised).
with open('task8.txt', 'w', encoding='utf-8') as file:
    file.write("Результат: " + str(total / count))
| [
"aleksei.stsigartsov@gmail.com"
] | aleksei.stsigartsov@gmail.com |
6d877c86baea3e70dd56186f2c654adf536915f5 | 6beade777642805d5e16e5926ef937d6e036e24e | /services/models.py | ba4ba1220473deb5a2b77be63b18e4013897348a | [] | no_license | LuisSepulveda21/La-Cafettiera | 0f80999c7a00388dbe8aa7d7fb8c48aad0f87126 | 7d6f4dcd414055f28a94f441676326648e5e280a | refs/heads/master | 2020-04-21T14:38:47.346881 | 2019-02-07T21:08:57 | 2019-02-07T21:08:57 | 169,641,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from django.db import models
class Service(models.Model):
    """A service offered on the site; verbose names are Spanish for the admin UI."""
    title = models.CharField(max_length=200, verbose_name="Titulo")
    subtitle = models.CharField(max_length=200, verbose_name="Subtitulo")
    content = models.TextField(verbose_name="contenido")
    image = models.ImageField(verbose_name="imagen", upload_to="services")
    created = models.DateTimeField(auto_now_add=True, verbose_name="Fecha de creacion")  # set once on insert
    updated = models.DateTimeField(auto_now=True, verbose_name="Fecha de actualizacion")  # refreshed on every save
    # metadata
    class Meta:
        verbose_name = "Servicio"
        verbose_name_plural = "Servicios"
        ordering = ["-created"]  # newest services first
    def __str__(self):
        # Human-readable representation used by the Django admin.
        return self.title
| [
"lesepulveda@uninorte.edu.co"
] | lesepulveda@uninorte.edu.co |
0849cdd4cde7d85d4c31400bd24249726c14f011 | aa8004de7c4d29a2f6a2bdecae8ab4e315448779 | /Operators/Arithmetic_Operators.py | 43fb1c9208238ac97e5af40e014a8d66f219ce6e | [] | no_license | manhar336/manohar_learning_python_kesav | 22b906babcf526c61c44ac1258c580c1cd724891 | 1230a3b6bdcbf077954ac1d8780d21d54d5e08e1 | refs/heads/master | 2023-01-03T16:32:55.659250 | 2020-10-26T08:26:34 | 2020-10-26T08:26:34 | 307,305,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | '''--------------------------------------------------------------'''
# 1. Arithmetic Operators: demo script printing the result of each operator.
'''--------------------------------------------------------------'''
'''
1. + = Addition
2. - = Subtraction
3. * = Multiplication
4. / = Division
5. % = Modulus
6. ** = Exponent
7. // = Floor Division
# Example #1: Arithmetic operators in Python
x = 15
y = 4
print(x,id(x),type(x))
print("")
print(y,id(y),type(y))
print("")
# Output: x + y = 19
print('x + y =',x+y)
# Output: x - y = 11
print('x - y =',x-y)
# Output: x * y = 60
print('x * y =',x*y)
# Output: x / y = 3.75
print('x / y =',x/y)
# Output: x // y = 3
print('x // y =',x//y)
# Output: x ** y = 50625
print('x ** y =',x**y)
'''
x= 10
y= 2
print(x,type(x),id(x))
print(y,type(y),id(y))
# Addition
print("addition of 2 numbers is :%i" %(x+y))
# Subtraction  (NOTE(review): output strings below contain the typo "numebrs")
print("Subtraction of 2 numebrs is:%i" %(x-y))
# Multiplication
print("Multiplication of 2 numbers is:",(x*y))
# True division (returns a float in Python 3)
print("Division of 2 numebrs is",(x/y))
# Modulus (remainder)
print("Module of 2 numebrs",(x%y))
# Exponent
print("exponent of 2 numebrs",(x**y))
# Floor division
print("Floor division",(x//y))
| [
"manhar336@gmail.com"
] | manhar336@gmail.com |
52dc307a311107094322bc70d3acd4111236bbde | 51618cf343bcc948da77e2085143a6110067b02f | /app.py | fc0ad89adeea44975203e10d9a0dc31e827c840f | [] | no_license | vilvainc/FlaskRESTSimple | bbc1cfc86014d63926926016645b28b91af8fcd6 | 19f081b087f51e9f9084f6032f24903774628f05 | refs/heads/master | 2020-12-06T10:35:45.344952 | 2020-01-08T00:05:47 | 2020-01-08T00:05:47 | 232,440,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from flask import Flask, jsonify, request
app = Flask(__name__)
# In-memory "database" of languages. NOTE(review): module-level mutable state;
# not persisted and not safe under concurrent requests.
languages = [{'name': 'JavaScript'}, {'name': 'Python'}, {'name': 'Ruby'}]
@app.route('/', methods=['GET'])
def test():
    # Health-check endpoint.
    return jsonify({'message': 'It works!'})
@app.route('/lang', methods=['GET'])
def return_all():
    # List every stored language.
    return jsonify({'languages': languages})
@app.route('/lang/<string:name>', methods=['GET'])
def return_one(name):
    # Fetch a single language by exact name.
    # NOTE(review): raises IndexError (HTTP 500) when the name is unknown;
    # consider returning a 404 instead.
    langs = [language for language in languages if language['name'] == name]
    return jsonify({'language': langs[0]})
@app.route('/lang', methods=['POST'])
def add_one():
    # Append a new language from the JSON body {"name": ...}.
    language = {'name' : request.json['name']}
    languages.append(language)
    return jsonify({'languages': languages})
@app.route('/lang/<string:name>', methods=['PUT'])
def edit_one(name):
    # Rename an existing language. Same unknown-name IndexError caveat as above.
    langs = [language for language in languages if language['name'] == name]
    langs[0]['name'] = request.json['name']
    return jsonify({'language': langs[0]})
@app.route('/lang/<string:name>', methods=['DELETE'])
def remove_one(name):
    # Delete a language by name. Same unknown-name IndexError caveat as above.
    lang = [language for language in languages if language['name'] == name]
    languages.remove(lang[0])
    return jsonify({'languages': languages})
if __name__ == '__main__':
    app.run(debug=True, port=8080)
"tranquilram@gmail.com"
] | tranquilram@gmail.com |
d5d199e83ae7039dce538234c4fd52c1271f01f4 | 4364fb1fec2ebda2cd240ddc19ef89243812c122 | /tensorflow_datasets/image/diabetic_retinopathy_detection_test.py | c6729f05bccf57a228449fa8db506e268ffc95fc | [
"Apache-2.0"
] | permissive | undeadinu/datasets | 67ebbe6c20462ed6f58713ccd8dc1d67db89f4d9 | a6f1bce86404d534b7343fb90f0ebfd6d098c346 | refs/heads/master | 2020-04-16T03:31:37.564934 | 2019-01-11T10:12:42 | 2019-01-11T10:13:12 | 165,234,637 | 0 | 0 | Apache-2.0 | 2019-01-11T11:44:44 | 2019-01-11T11:41:26 | Python | UTF-8 | Python | false | false | 1,329 | py | # coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for diabetic_retinopathy_detection dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets.image import diabetic_retinopathy_detection
from tensorflow_datasets.testing import dataset_builder_testing
class DiabeticRetinopathyDetectionTest(dataset_builder_testing.TestCase):
  """Checks the builder against fake data with known per-split example counts."""
  DATASET_CLASS = diabetic_retinopathy_detection.DiabeticRetinopathyDetection
  SPLITS = {  # Expected number of examples on each split.
      "sample": 4,
      "train": 12,
      "test": 12,
  }
  OVERLAPPING_SPLITS = ["sample"]  # contains examples from other examples
if __name__ == "__main__":
  dataset_builder_testing.main()
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
8cec22947180520850dc5942e0a76fd487114a6b | c75d56c5485fce9605cd48d87b798fd5a7559834 | /transfer.py | 87964d25ee2422176d7675cd4540c59ea9fc77b0 | [] | no_license | nickwareing/file-transfer-server | 37a76d3d5af19f99a68688fe5ecd3ac03a3395a6 | 486ba69495c441b59c662c142b8d2daa8a60c450 | refs/heads/master | 2022-03-09T16:44:39.211503 | 2013-11-03T09:48:54 | 2013-11-03T09:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py | from struct import *
# Packet-type identifiers: the 2-byte type field at the front of every packet.
OPEN_REQUEST = 33
OPEN_RESPONSE = 44
READ_REQUEST = 55
READ_RESPONSE = 66
CLOSE_REQUEST = 77
CLOSE_RESPONSE = 88
MAX_HEADER_SIZE = 176 #IP + UDP + read_response headers.
CHUNK_SIZE = 1324 #Ethernet upper bound (1500 bytes) minus MAX_HEADER_SIZE.
#Reads each 16 bit (2 byte) packet field individually.
def read_2bytes(data_buffer):
    """Consume two chars from the front of *data_buffer* and return them
    as a little-endian 16-bit integer."""
    low = ord(data_buffer.pop(0))
    high = ord(data_buffer.pop(0))
    return low | (high << 8)
#Reads a 32 bit (4 byte) packet field.
def read_4bytes(data_buffer):
    """Consume four chars from the front of *data_buffer* and return them
    as a little-endian 32-bit integer."""
    value = 0
    for shift in (0, 8, 16, 24):
        value |= ord(data_buffer.pop(0)) << shift
    return value
#Reads the rest of an open request packet, returning the filename.
def open_request(data_buffer):
    """Consume the remainder of *data_buffer* and return it as the requested
    filename (the buffer is left empty, matching the consuming readers)."""
    filename = ''.join(data_buffer)
    del data_buffer[:]
    return filename
#Reads in the remaining fields for an open response packet.
def open_response(data_buffer):
    """Parse an open-response body: the first three fields are 16-bit, every
    remaining field is 32-bit (status, file length and file handle, per the
    original trailing note)."""
    fields = []
    field_index = 0
    while data_buffer:
        reader = read_2bytes if field_index <= 2 else read_4bytes
        fields.append(reader(data_buffer))
        field_index += 1
    return fields
#Reads in the remaining fields for an read request packet.
def read_request(data_buffer):
    """Parse a read-request body: the first two fields are 16-bit, every
    remaining field is 32-bit (file handle, start position and byte count,
    per the original trailing note)."""
    fields = []
    position = 0
    while data_buffer:
        if position < 2:
            fields.append(read_2bytes(data_buffer))
        else:
            fields.append(read_4bytes(data_buffer))
        position += 1
    return fields
#Reads in the remaining fields for an read response packet, including a string of the downloaded file.
def read_response(data_buffer):
    # Parses a read-response body: three 16-bit fields, two 32-bit fields,
    # then the raw file payload one character at a time. Note `i` counts loop
    # iterations, so once i > 4 every remaining char lands in the payload.
    i = 0
    info = []
    data = ''
    while data_buffer != []:
        if i <= 2:
            info.append(read_2bytes(data_buffer))
        elif 2 < i <= 4 :
            info.append(read_4bytes(data_buffer))
        else:
            # NOTE(review): building the payload with += is O(n^2) for large
            # chunks; ''.join over the remaining buffer would be linear.
            data += data_buffer.pop(0)
        i += 1
    info.append(data)
    return info #This includes the status, file handle, start position, number of bytes and the file data itself.
def close_request(data_buffer):
    """Parse the remaining body as a sequence of 16-bit fields."""
    fields = []
    while data_buffer:
        fields.append(read_2bytes(data_buffer))
    return fields
def packet_type(data_buffer):
    """Read the leading 2-byte type field and parse the rest of the buffer
    with the matching parser.

    Returns (type, parsed_info); unknown types yield (-1, "Invalid packet").
    """
    p_type = read_2bytes(data_buffer)
    parsers = {
        OPEN_REQUEST: open_request,
        OPEN_RESPONSE: open_response,
        READ_REQUEST: read_request,
        READ_RESPONSE: read_response,
        CLOSE_REQUEST: close_request,
        CLOSE_RESPONSE: close_request,  # close responses share the 16-bit-field layout
    }
    parser = parsers.get(p_type)
    if parser is None:
        return (-1, "Invalid packet")
    return (p_type, parser(data_buffer))
| [
"nwareing@gmail.com"
] | nwareing@gmail.com |
c618026c9962936fdc9c07d9881c1e5b4d611e77 | 99351753f51b2a585f3a0bb1dc11b8c6eebd76df | /setup.py | f547ea175656df3ebba7efc860cec92119a0174e | [] | no_license | FND/tiddlywebplugins.imaker | 6ef680e76145f9f954a66ba2d1cabd15cc0b4637 | bcaeca5a4f2b44d9e48414f48cfa5cae468f6c4c | refs/heads/master | 2021-01-15T18:30:52.466042 | 2013-07-13T10:51:54 | 2013-07-13T10:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = 'cdent@peermore.com'
NAME = 'tiddlywebplugins.imaker'
DESCRIPTION = 'Make TiddlyWeb instances'
VERSION = '0.1.3'
import os
from setuptools import setup, find_packages
# You should carefully review the below (install_requires especially).
setup(
    namespace_packages = ['tiddlywebplugins'],
    name = NAME,
    version = VERSION,
    description = DESCRIPTION,
    long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),  # README file doubles as the PyPI long description
    author = AUTHOR,
    author_email = AUTHOR_EMAIL,
    url = 'http://pypi.python.org/pypi/%s' % NAME,
    platforms = 'Posix; MacOS X; Windows',
    packages = find_packages(exclude=['test', 'testpackage']),
    install_requires = ['tiddlyweb',
        'tiddlywebplugins.utils',
        'tiddlywebplugins.pkgstore',
        ],
    zip_safe = False
    )
| [
"chris.dent@gmail.com"
] | chris.dent@gmail.com |
cde3346e90bf0b24b91ea9df9de7d3821dc8a338 | d850f5f7cc09a8379c04d38f5c26c2e6b73f3484 | /kimai_python/models/project_rate.py | 76b7342f2bc64b749686f63d73e8f0362a61bf71 | [
"MIT"
] | permissive | MPW1412/kimai-python | 8d78e3df3036ab11573e800dce96011552aa6946 | 7c89b0866b85fbc4b1092b30eca21f1be48db533 | refs/heads/master | 2022-10-12T17:24:50.522103 | 2020-04-24T06:21:57 | 2020-04-24T06:21:57 | 264,545,139 | 0 | 0 | MIT | 2020-05-16T23:14:13 | 2020-05-16T23:14:12 | null | UTF-8 | Python | false | false | 5,828 | py | # coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProjectRate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'rate': 'float',
'internal_rate': 'float',
'is_fixed': 'bool',
'user': 'User'
}
attribute_map = {
'id': 'id',
'rate': 'rate',
'internal_rate': 'internalRate',
'is_fixed': 'isFixed',
'user': 'user'
}
def __init__(self, id=None, rate=None, internal_rate=None, is_fixed=None, user=None): # noqa: E501
"""ProjectRate - a model defined in Swagger""" # noqa: E501
self._id = None
self._rate = None
self._internal_rate = None
self._is_fixed = None
self._user = None
self.discriminator = None
if id is not None:
self.id = id
if rate is not None:
self.rate = rate
if internal_rate is not None:
self.internal_rate = internal_rate
self.is_fixed = is_fixed
if user is not None:
self.user = user
@property
def id(self):
"""Gets the id of this ProjectRate. # noqa: E501
:return: The id of this ProjectRate. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProjectRate.
:param id: The id of this ProjectRate. # noqa: E501
:type: int
"""
self._id = id
@property
def rate(self):
"""Gets the rate of this ProjectRate. # noqa: E501
:return: The rate of this ProjectRate. # noqa: E501
:rtype: float
"""
return self._rate
@rate.setter
def rate(self, rate):
"""Sets the rate of this ProjectRate.
:param rate: The rate of this ProjectRate. # noqa: E501
:type: float
"""
self._rate = rate
@property
def internal_rate(self):
"""Gets the internal_rate of this ProjectRate. # noqa: E501
:return: The internal_rate of this ProjectRate. # noqa: E501
:rtype: float
"""
return self._internal_rate
@internal_rate.setter
def internal_rate(self, internal_rate):
"""Sets the internal_rate of this ProjectRate.
:param internal_rate: The internal_rate of this ProjectRate. # noqa: E501
:type: float
"""
self._internal_rate = internal_rate
@property
def is_fixed(self):
"""Gets the is_fixed of this ProjectRate. # noqa: E501
:return: The is_fixed of this ProjectRate. # noqa: E501
:rtype: bool
"""
return self._is_fixed
@is_fixed.setter
def is_fixed(self, is_fixed):
"""Sets the is_fixed of this ProjectRate.
:param is_fixed: The is_fixed of this ProjectRate. # noqa: E501
:type: bool
"""
if is_fixed is None:
raise ValueError("Invalid value for `is_fixed`, must not be `None`") # noqa: E501
self._is_fixed = is_fixed
@property
def user(self):
"""Gets the user of this ProjectRate. # noqa: E501
:return: The user of this ProjectRate. # noqa: E501
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this ProjectRate.
:param user: The user of this ProjectRate. # noqa: E501
:type: User
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProjectRate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProjectRate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"kajetan.bancerz@gmail.com"
] | kajetan.bancerz@gmail.com |
3f6ec3c498e1b7f2e86b5eb7a081a044566d4b30 | 626fb6bd71433319eb20cbaa7ba110158289a168 | /reducer.py | 8b047f06cac8a291c1fa4bffdc78a24e1dbea252 | [] | no_license | skalyanmoguloju/HadoopShortestPath | b63d79fbb46bf4913a022da34358bee22962c0dc | 90aa3b7e788276d1b33616fbbb0624ebde2f5b57 | refs/heads/master | 2021-01-18T18:43:26.020875 | 2017-04-19T08:30:08 | 2017-04-19T08:30:08 | 86,872,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import sys
# Hadoop-streaming reducer for shortest-path: input lines are key-sorted
# "<node> NODES <adjacency>" or "<node> <tag> <distance>" records; for each
# node we emit "<node> <min distance> <adjacency minus trailing newline>".
prev = -1             # key of the group currently being accumulated (-1 = none yet)
lower = 125           # sentinel "infinite" distance for this assignment's graphs
nodes = "UNMODED"     # adjacency string; "UNMODED" means none seen for this key
for line in sys.stdin:
    inputData = line.split(' ')
    key = int(inputData[0])
    if prev == -1:
        # First record overall: open the first group.
        prev = key
        lower = 125
    if key != prev:
        # Key changed: emit the finished group, then reset for the new key.
        print(str(prev) + " " + str(lower) + " " + nodes[:-1])
        prev = key
        lower = 125
        nodes = "UNMODED"
    if inputData[1] == "NODES":
        nodes = inputData[2]  # trailing '\n' is dropped by the [:-1] slice on output
    else:
        lower = min(int(inputData[2]), lower)
# Bug fix: the original never emitted the final group after stdin was
# exhausted, silently dropping the last node's result.
if prev != -1:
    print(str(prev) + " " + str(lower) + " " + nodes[:-1])
| [
"noreply@github.com"
] | noreply@github.com |
ef49801866725976796fa02217a1c0d54dc5e425 | b0dd0d20fa4f6ff33965a1608679aecff8f00367 | /Webapp/appBase.py | 9adb9519c1fab9c07c47b0642684a65628157c72 | [] | no_license | elit-altum/Reddit-Flair-Detector-Indian-Subreddit | d5779c9666aad25dae7049786afd29f56a0cefd4 | 8ddc7da89777a6c80a8481de15128c198a415fc0 | refs/heads/master | 2022-04-24T01:47:17.860515 | 2020-04-26T17:51:50 | 2020-04-26T17:51:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | import pickle
import praw
from praw.models import MoreComments
from bs4 import BeautifulSoup
import re
#import nltk
from nltk.corpus import stopwords
import gensim
from flask import Flask, render_template, request
import json
# SECURITY NOTE(review): live Reddit API credentials are hard-coded below.
# Move them to environment variables / a config file and rotate the exposed
# client secret and account password.
reddit = praw.Reddit(client_id="ffKcEa2xKfnhyg", client_secret="IJqQkTrDio0xKsKYKYmgeWSoOLM",
                    user_agent="flair_predication", username="ASingh1206",
                    password="g5gh#4$iQFGNBad")
replace_by_space = re.compile('[/(){}\[\]\|@,;]') # characters converted to spaces during cleaning
replace_symbol = re.compile('[^0-9a-z #+_]') # anything outside this whitelist is deleted
STOPWORDS = set(stopwords.words('english'))  # requires the NLTK 'stopwords' corpus to be downloaded
def clean_text(text):
    """Normalize free text for the classifier: lowercase, strip punctuation
    via the module-level regexes, and drop English stopwords."""
    text = text.lower() # lowercase text
    text = replace_by_space.sub(' ', text) # replace certain symbols by space in text
    text = replace_symbol.sub('', text) # delete symbols from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS) # remove STOPWORDS from text
    return text
def get_data(link):
    """Fetch the Reddit submission at *link* and return its cleaned
    title + comments + body concatenated into one feature string."""
    submission = reddit.submission(url = link)
    data = {}
    data["title"] = str(submission.title)
    data["url"] = str(submission.url)
    data["body"] = str(submission.selftext)
    submission.comments.replace_more(limit=None)  # expand all "MoreComments" stubs
    comment = ''
    count = 0
    for top_level_comment in submission.comments:
        comment = comment + ' ' + top_level_comment.body
        count+=1
        if(count > 10):
            break  # stop after 11 top-level comments to bound request size
    data["comment"] = str(comment)
    data['title'] = clean_text(str(data['title']))
    data['body'] = clean_text(str(data['body']))
    data['comment'] = clean_text(str(data['comment']))
    combined_features = data["title"] + data["comment"] + data["body"]
    return combined_features
app = Flask(__name__, template_folder='templates')
# NOTE(review): unpickling executes arbitrary code if RM.pkl is untrusted, and
# the file handle opened inline here is never explicitly closed.
model = pickle.load(open('RM.pkl','rb'))
@app.route('/', methods=['GET', 'POST'])
def main():
    # GET renders the form; POST predicts the flair for the submitted post URL.
    if request.method == 'GET':
        return render_template('main.html')
    if request.method == 'POST':
        link = request.form['link']
        cb_features = get_data(link)
        result = model.predict([cb_features])
        print(link , result)
        return render_template('main.html',original_input={'URl of post':link}, result=result)
@app.route('/automated_testing', methods=['GET', 'POST'])
def automated_testing():
    # Batch endpoint: accepts an uploaded text file of post URLs (one per
    # line) and returns a JSON object mapping each URL to its predicted flair.
    if request.method == 'POST':
        file = request.files['upload_file']
        #content = str(file.read())
        links = file.readlines()
        #print(links)
        json_dict = {}
        for url in links:
            url = str(url)
            url = url[2:-3]  # presumably strips the b'...' repr wrapper and trailing \n — verify against the uploaded file format
            #print(url)
            cb_features = get_data(str(url))
            result = str(model.predict([cb_features]))
            result = result[2:-2]  # unwraps the ['...'] text of the stringified prediction array
            #print(result)
            json_dict[url] = result
        json_dict = json.dumps(json_dict)
        return json.loads(json_dict)
    if request.method == 'GET':
        return "Please send a post request with a text file containing links to r/india."
if __name__ == '__main__':
    app.run(debug = False)
| [
"adhirajsingh1206@gmail.com"
] | adhirajsingh1206@gmail.com |
def2f9542d47fd2055869a485e738ece66c185bf | 859fc6793e1c2e019e0ec119a367ea43beeefcdc | /python/ccxt/bitget.py | 127d3af30c18385dab501a01211f45a8f3eafdb4 | [
"MIT"
] | permissive | siwenHT/ccxt | 638b8cee929c2638e7b742eae9c7ac13fdb026ec | d6da2a081f8d66f82509bb720e3f23b124016a5b | refs/heads/master | 2023-09-04T07:54:44.408565 | 2021-11-06T14:17:06 | 2021-11-06T14:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139,831 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitget(Exchange):
def describe(self):
return self.deep_extend(super(bitget, self).describe(), {
'id': 'bitget',
'name': 'Bitget',
'countries': ['SG'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'cancelOrder': True,
'cancelOrders': True,
'CORS': None,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
'hostname': 'bitget.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/88317935-a8a21c80-cd22-11ea-8e2b-4b9fac5975eb.jpg',
'api': {
'data': 'https://api.{hostname}',
'api': 'https://api.{hostname}',
'capi': 'https://capi.{hostname}',
'swap': 'https://capi.{hostname}',
},
'www': 'https://www.bitget.com',
'doc': [
'https://bitgetlimited.github.io/apidoc/en/swap',
'https://bitgetlimited.github.io/apidoc/en/spot',
],
'fees': 'https://www.bitget.cc/zh-CN/rate?tab=1',
'test': {
'rest': 'https://testnet.bitget.com',
},
'referral': 'https://www.bitget.com/expressly?languageType=0&channelCode=ccxt&vipCode=tg9j',
},
'api': {
'data': {
'get': [
'market/history/kline', # Kline data
'market/detail/merged', # Get aggregated ticker
'market/tickers', # Get all trading tickers
'market/allticker', # Get all trading market method 2
'market/depth', # Get Market Depth Data
'market/trade', # Get Trade Detail Data
'market/history/trade', # Get record of trading
'market/detail', # Get Market Detail 24h Volume
'common/symbols', # Query all trading pairs and accuracy supported in the station
'common/currencys', # Query all currencies supported in the station
'common/timestamp', # Query system current time
],
},
'api': {
'get': [
'account/accounts', # Get all accounts of current user(即account_id)。
'accounts/{account_id}/balance', # Get the balance of the specified account
'order/orders', # Query order, deprecated
'order/orders/openOrders',
'order/orders/history',
'order/deposit_withdraw', # Query assets history
],
'post': [
'order/orders/place', # Place order
'order/orders/{order_id}/submitcancel', # Request to cancel an order request
'order/orders/batchcancel', # Bulk order cancellation
'order/orders/{order_id}', # Query an order details
'order/orders/{order_id}/matchresults', # Query the transaction details of an order
'order/matchresults', # Query current order, order history
],
},
'capi': {
'get': [
'market/time',
'market/contracts',
'market/depth',
'market/tickers',
'market/ticker',
'market/trades',
'market/candles',
'market/index',
'market/open_count',
'market/open_interest',
'market/price_limit',
'market/funding_time',
'market/mark_price',
'market/open_count',
'market/historyFundRate',
],
},
'swap': {
'get': [
'account/accounts',
'account/account',
'account/settings',
'position/allPosition',
'position/singlePosition',
'position/holds',
'order/detail',
'order/orders',
'order/fills',
'order/current',
'order/currentPlan', # conditional
'order/history',
'order/historyPlan', # conditional
'trace/closeTrack',
'trace/currentTrack',
'trace/historyTrack',
'trace/summary',
'trace/profitSettleTokenIdGroup',
'trace/profitDateGroupList',
'trace/profitDateList',
'trace/waitProfitDateList',
],
'post': [
'account/leverage',
'account/adjustMargin',
'account/modifyAutoAppendMargin',
'order/placeOrder',
'order/batchOrders',
'order/cancel_order',
'order/cancel_batch_orders',
'order/plan_order',
'order/cancel_plan',
'position/changeHoldModel',
'trace/closeTrackOrder',
],
},
},
'fees': {
'spot': {
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
'swap': {
'taker': self.parse_number('0.0006'),
'maker': self.parse_number('0.0004'),
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30015, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeError, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in self website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "self function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "self account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': AccountSuspended, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting self order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time Stringerval of orders should set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in self setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for self position will fall short of the required margin in self setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support self action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of self asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for self asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during self period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for self pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for self pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for self token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "self parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of self leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, self token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': ExchangeError, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': ExchangeError, # {"code": 35022, "message": "Contract status error"}
'35024': ExchangeError, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': ExchangeError, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': AccountSuspended, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': AccountSuspended, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for oder cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
# --------------------------------------------------------
# swap
'400': BadRequest, # Bad Request
'401': AuthenticationError, # Unauthorized access
'403': PermissionDenied, # Access prohibited
'404': BadRequest, # Request address does not exist
'405': BadRequest, # The HTTP Method is not supported
'415': BadRequest, # The current media type is not supported
'429': DDoSProtection, # Too many requests
'500': ExchangeNotAvailable, # System busy
'1001': RateLimitExceeded, # The request is too frequent and has been throttled
'1002': ExchangeError, # {0} verifications within 24 hours
'1003': ExchangeError, # You failed more than {0} times today, the current operation is locked, please try again in 24 hours
# '00000': ExchangeError, # success
'40001': AuthenticationError, # ACCESS_KEY cannot be empty
'40002': AuthenticationError, # SECRET_KEY cannot be empty
'40003': AuthenticationError, # Signature cannot be empty
'40004': InvalidNonce, # Request timestamp expired
'40005': InvalidNonce, # Invalid ACCESS_TIMESTAMP
'40006': AuthenticationError, # Invalid ACCESS_KEY
'40007': BadRequest, # Invalid Content_Type
'40008': InvalidNonce, # Request timestamp expired
'40009': AuthenticationError, # sign signature error
'40010': AuthenticationError, # sign signature error
'40011': AuthenticationError, # ACCESS_PASSPHRASE cannot be empty
'40012': AuthenticationError, # apikey/password is incorrect
'40013': ExchangeError, # User status is abnormal
'40014': PermissionDenied, # Incorrect permissions
'40015': ExchangeError, # System is abnormal, please try again later
'40016': PermissionDenied, # The user must bind the phone or Google
'40017': ExchangeError, # Parameter verification failed
'40018': PermissionDenied, # Invalid IP
'40102': BadRequest, # Contract configuration does not exist, please check the parameters
'40103': BadRequest, # Request method cannot be empty
'40104': ExchangeError, # Lever adjustment failure
'40105': ExchangeError, # Abnormal access to current price limit data
'40106': ExchangeError, # Abnormal get next settlement time
'40107': ExchangeError, # Abnormal access to index price data
'40108': InvalidOrder, # Wrong order quantity
'40109': OrderNotFound, # The data of the order cannot be found, please confirm the order number
'40200': OnMaintenance, # Server upgrade, please try again later
'40201': InvalidOrder, # Order number cannot be empty
'40202': ExchangeError, # User information cannot be empty
'40203': BadRequest, # The amount of adjustment margin cannot be empty or negative
'40204': BadRequest, # Adjustment margin type cannot be empty
'40205': BadRequest, # Adjusted margin type data is wrong
'40206': BadRequest, # The direction of the adjustment margin cannot be empty
'40207': BadRequest, # The adjustment margin data is wrong
'40208': BadRequest, # The accuracy of the adjustment margin amount is incorrect
'40209': BadRequest, # The current page number is wrong, please confirm
'40300': ExchangeError, # User does not exist
'40301': PermissionDenied, # Permission has not been obtained yet. If you need to use it, please contact customer service
'40302': BadRequest, # Parameter abnormality
'40303': BadRequest, # Can only query up to 20,000 data
'40304': BadRequest, # Parameter type is abnormal
'40305': BadRequest, # Client_oid length is not greater than 50, and cannot be Martian characters
'40306': ExchangeError, # Batch processing orders can only process up to 20
'40308': OnMaintenance, # The contract is being temporarily maintained
'40309': BadSymbol, # The contract has been removed
'40400': ExchangeError, # Status check abnormal
'40401': ExchangeError, # The operation cannot be performed
'40402': BadRequest, # The opening direction cannot be empty
'40403': BadRequest, # Wrong opening direction format
'40404': BadRequest, # Whether to enable automatic margin call parameters cannot be empty
'40405': BadRequest, # Whether to enable the automatic margin call parameter type is wrong
'40406': BadRequest, # Whether to enable automatic margin call parameters is of unknown type
'40407': ExchangeError, # The query direction is not the direction entrusted by the plan
'40408': ExchangeError, # Wrong time range
'40409': ExchangeError, # Time format error
'40500': InvalidOrder, # Client_oid check error
'40501': ExchangeError, # Channel name error
'40502': ExchangeError, # If it is a copy user, you must pass the copy to whom
'40503': ExchangeError, # With the single type
'40504': ExchangeError, # Platform code must pass
'40505': ExchangeError, # Not the same as single type
'40506': AuthenticationError, # Platform signature error
'40507': AuthenticationError, # Api signature error
'40508': ExchangeError, # KOL is not authorized
'40509': ExchangeError, # Abnormal copy end
'40600': ExchangeError, # Copy function suspended
'40601': ExchangeError, # Followers cannot be KOL
'40602': ExchangeError, # The number of copies has reached the limit and cannot process the request
'40603': ExchangeError, # Abnormal copy end
'40604': ExchangeNotAvailable, # Server is busy, please try again later
'40605': ExchangeError, # Copy type, the copy number must be passed
'40606': ExchangeError, # The type of document number is wrong
'40607': ExchangeError, # Document number must be passed
'40608': ExchangeError, # No documented products currently supported
'40609': ExchangeError, # The contract product does not support copying
'40700': BadRequest, # Cursor parameters are incorrect
'40701': ExchangeError, # KOL is not authorized
'40702': ExchangeError, # Unauthorized copying user
'40703': ExchangeError, # Bill inquiry start and end time cannot be empty
'40704': ExchangeError, # Can only check the data of the last three months
'40705': BadRequest, # The start and end time cannot exceed 90 days
'40706': InvalidOrder, # Wrong order price
'40707': BadRequest, # Start time is greater than end time
'40708': BadRequest, # Parameter verification is abnormal
'40709': ExchangeError, # There is no position in self position, and no automatic margin call can be set
'40710': ExchangeError, # Abnormal account status
'40711': InsufficientFunds, # Insufficient contract account balance
'40712': InsufficientFunds, # Insufficient margin
'40713': ExchangeError, # Cannot exceed the maximum transferable margin amount
'40714': ExchangeError, # No direct margin call is allowed
# spot
'invalid sign': AuthenticationError,
'invalid currency': BadSymbol, # invalid trading pair
'invalid symbol': BadSymbol,
'invalid period': BadRequest, # invalid Kline type
'invalid user': ExchangeError,
'invalid amount': InvalidOrder,
'invalid type': InvalidOrder, # {"status":"error","ts":1595700344504,"err_code":"invalid-parameter","err_msg":"invalid type"}
'invalid orderId': InvalidOrder,
'invalid record': ExchangeError,
'invalid accountId': BadRequest,
'invalid address': BadRequest,
'accesskey not None': AuthenticationError, # {"status":"error","ts":1595704360508,"err_code":"invalid-parameter","err_msg":"accesskey not null"}
'illegal accesskey': AuthenticationError,
'sign not null': AuthenticationError,
'req_time is too much difference from server time': InvalidNonce,
'permissions not right': PermissionDenied, # {"status":"error","ts":1595704490084,"err_code":"invalid-parameter","err_msg":"permissions not right"}
'illegal sign invalid': AuthenticationError, # {"status":"error","ts":1595684716042,"err_code":"invalid-parameter","err_msg":"illegal sign invalid"}
'user locked': AccountSuspended,
'Request Frequency Is Too High': RateLimitExceeded,
'more than a daily rate of cash': BadRequest,
'more than the maximum daily withdrawal amount': BadRequest,
'need to bind email or mobile': ExchangeError,
'user forbid': PermissionDenied,
'User Prohibited Cash Withdrawal': PermissionDenied,
'Cash Withdrawal Is Less Than The Minimum Value': BadRequest,
'Cash Withdrawal Is More Than The Maximum Value': BadRequest,
'the account with in 24 hours ban coin': PermissionDenied,
'order cancel fail': BadRequest, # {"status":"error","ts":1595703343035,"err_code":"bad-request","err_msg":"order cancel fail"}
'base symbol error': BadSymbol,
'base date error': ExchangeError,
'api signature not valid': AuthenticationError,
'gateway internal error': ExchangeError,
'audit failed': ExchangeError,
'order queryorder invalid': BadRequest,
'market no need price': InvalidOrder,
'limit need price': InvalidOrder,
'userid not equal to account_id': ExchangeError,
'your balance is low': InsufficientFunds, # {"status":"error","ts":1595594160149,"err_code":"invalid-parameter","err_msg":"invalid size, valid range: [1,2000]"}
'address invalid cointype': ExchangeError,
'system exception': ExchangeError, # {"status":"error","ts":1595711862763,"err_code":"system exception","err_msg":"system exception"}
'50003': ExchangeError, # No record
'50004': BadSymbol, # The transaction pair is currently not supported or has been suspended
'50006': PermissionDenied, # The account is forbidden to withdraw. If you have any questions, please contact customer service.
'50007': PermissionDenied, # The account is forbidden to withdraw within 24 hours. If you have any questions, please contact customer service.
'50008': RequestTimeout, # network timeout
'50009': RateLimitExceeded, # The operation is too frequent, please try again later
'50010': ExchangeError, # The account is abnormally frozen. If you have any questions, please contact customer service.
'50014': InvalidOrder, # The transaction amount under minimum limits
'50015': InvalidOrder, # The transaction amount exceed maximum limits
'50016': InvalidOrder, # The price can't be higher than the current price
'50017': InvalidOrder, # Price under minimum limits
'50018': InvalidOrder, # The price exceed maximum limits
'50019': InvalidOrder, # The amount under minimum limits
'50020': InsufficientFunds, # Insufficient balance
'50021': InvalidOrder, # Price is under minimum limits
'50026': InvalidOrder, # Market price parameter error
'invalid order query time': ExchangeError, # start time is greater than end time; or the time interval between start time and end time is greater than 48 hours
'invalid start time': BadRequest, # start time is a date 30 days ago; or start time is a date in the future
'invalid end time': BadRequest, # end time is a date 30 days ago; or end time is a date in the future
'20003': ExchangeError, # operation failed, {"status":"error","ts":1595730308979,"err_code":"bad-request","err_msg":"20003"}
'01001': ExchangeError, # order failed, {"status":"fail","err_code":"01001","err_msg":"系统异常,请稍后重试"}
},
'broad': {
'invalid size, valid range': ExchangeError,
},
},
'precisionMode': TICK_SIZE,
'options': {
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': [
'spot',
'swap',
],
'parseOHLCV': {
'volume': {
'spot': 'amount',
'swap': 5,
},
},
'defaultType': 'spot', # 'spot', 'swap'
'accountId': None, # '1012838157',
'timeframes': {
'spot': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'2h': '120min',
'4h': '240min',
'6h': '360min',
'12h': '720min',
'1d': '1day',
'1w': '1week',
},
'swap': {
'1m': '60',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
},
},
},
})
def fetch_time(self, params={}):
response = self.dataGetCommonTimestamp(params)
#
# {
# "status":"ok",
# "data":"1595525139400"
# }
#
return self.safe_integer(response, 'data')
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
if not len(types):
types = [
self.options['defaultType'],
]
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
    def parse_market(self, market):
        """Parse a single raw market structure(spot or swap) into a unified market.

        spot markets look like:

            {
                "base_currency":"btc",
                "quote_currency":"usdt",
                "symbol":"btc_usdt",
                "tick_size":"2",
                "size_increment":"4",
                "status":"1",
                "base_asset_precision":"8"
            }

        swap markets look like:

            {
                "symbol":"btcusd",
                "underlying_index":"BTC",
                "quote_currency":"USD",
                "coin":"BTC",
                "contract_val":"1",
                "listing":null,
                "delivery":["07:00:00","15:00:00","23:00:00"],
                "size_increment":"0",
                "tick_size":"1",
                "forwardContractFlag":false,
                "priceEndStep":5
            }
        """
        id = self.safe_string(market, 'symbol')
        marketType = 'spot'
        spot = True
        swap = False
        # spot rows carry 'base_currency', swap rows carry 'coin' for the base id
        baseId = self.safe_string_2(market, 'base_currency', 'coin')
        quoteId = self.safe_string(market, 'quote_currency')
        # only swap(contract) markets have a contract_val field
        contractVal = self.safe_number(market, 'contract_val')
        if contractVal is not None:
            marketType = 'swap'
            spot = False
            swap = True
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = id.upper()
        if spot:
            symbol = base + '/' + quote
        # tick_size / size_increment are digit counts, converted via parse_precision
        tickSize = self.safe_string(market, 'tick_size')
        sizeIncrement = self.safe_string(market, 'size_increment')
        precision = {
            'amount': self.parse_number(self.parse_precision(sizeIncrement)),
            'price': self.parse_number(self.parse_precision(tickSize)),
        }
        minAmount = self.safe_number_2(market, 'min_size', 'base_min_size')
        status = self.safe_string(market, 'status')
        active = None
        if status is not None:
            active = (status == '1')
        # merge in the trading fees configured for this market type
        fees = self.safe_value_2(self.fees, marketType, 'trading', {})
        return self.extend(fees, {
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'info': market,
            'type': marketType,
            'spot': spot,
            'swap': swap,
            'active': active,
            'precision': precision,
            'limits': {
                'amount': {
                    'min': minAmount,
                    'max': None,
                },
                'price': {
                    'min': precision['price'],
                    'max': None,
                },
                'cost': {
                    'min': precision['price'],
                    'max': None,
                },
            },
        })
    def fetch_markets_by_type(self, type, params={}):
        """Fetch and parse the raw markets for one market type.

        :param str type: 'spot' or 'swap'
        :raises NotSupported: for any other market type

        spot response:

            {
                "status":"ok",
                "ts":1595526622408,
                "data":[
                    {
                        "base_currency":"btc",
                        "quote_currency":"usdt",
                        "symbol":"btc_usdt",
                        "tick_size":"2",
                        "size_increment":"4",
                        "status":"1",
                        "base_asset_precision":"8"
                    },
                ]
            }

        swap response(documented shape):

            {
                "data":{
                    "contractApis":[
                        {
                            "instrument_id":"btcusd",
                            "underlying_index":"BTC",
                            "quote_currency":"USD",
                            "coin":"BTC",
                            "contract_val":"1",
                            "delivery":["07:00:00","15:00:00","23:00:00"],
                            "size_increment":"0",
                            "tick_size":"1",
                            "forwardContractFlag":false,
                            "priceEndStep":"5"
                        },
                    ]
                },
                "status":"ok",
                "err_code":"00000"
            }
        """
        if type == 'spot':
            response = self.dataGetCommonSymbols(params)
            data = self.safe_value(response, 'data', [])
            return self.parse_markets(data)
        elif type == 'swap':
            response = self.capiGetMarketContracts(params)
            # NOTE(review): the whole response is passed to parse_markets, which
            # iterates a list - presumably the live endpoint returns a bare list
            # rather than the wrapped shape documented above; confirm against the API
            return self.parse_markets(response)
        else:
            raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
response = self.dataGetCommonCurrencys(params)
#
# {
# "status":"ok",
# "ts":1595537740466,
# "data":[
# "btc",
# "bft",
# "usdt",
# "usdt-omni",
# "usdt-erc20"
# ]
# }
#
result = {}
data = self.safe_value(response, 'data', [])
for i in range(0, len(data)):
id = data[i]
code = self.safe_currency_code(id)
result[code] = {
'id': id,
'code': code,
'info': id,
'type': None,
'name': None,
'active': None,
'fee': None,
'precision': None,
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {'min': None, 'max': None},
},
}
return result
    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a symbol.

        Spot uses the market-depth endpoint(with a merge 'type' step); swap uses
        the contract depth endpoint(with a 'limit', max 100).

        spot response:

            {
                "status":"ok",
                "ch":"market.btc_usdt.depth.step0",
                "ts":1595607628197,
                "data":{
                    "id":"1595607628197",
                    "ts":"1595607628197",
                    "bids":[["9534.99","15.36160000000000000000"], ...],
                    "asks":[["9535.02","7.37160000000000000000"], ...]
                }
            }

        swap response:

            {
                "asks":[["9579.0","119865",1], ...],
                "bids":[["9578.5","2417",1], ...],
                "timestamp":"1595664767349"
            }
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
        }
        method = None
        if market['spot']:
            method = 'dataGetMarketDepth'
            request['type'] = 'step0'  # step0, step1, step2, step3, step4, step5, do not merge depth if step0
        elif market['swap']:
            method = 'capiGetMarketDepth'
            request['limit'] = 100 if (limit is None) else limit  # max 100
        response = getattr(self, method)(self.extend(request, params))
        # spot wraps the book in 'data', swap returns it at the top level
        data = self.safe_value(response, 'data', response)
        timestamp = self.safe_integer_2(data, 'timestamp', 'ts')
        nonce = self.safe_integer(data, 'id')
        orderbook = self.parse_order_book(data, symbol, timestamp)
        orderbook['nonce'] = nonce
        return orderbook
    def parse_ticker(self, ticker, market=None):
        """Parse a raw ticker(spot or swap) into a unified ticker structure.

        spot fetchTicker:

            {
                "id":"1595538241113",
                "bid":["0.028474000000","1.139400000000"],
                "ask":["0.028482000000","0.353100000000"],
                "amount":"2850.6649",
                "count":"818",
                "open":"0.02821",
                "close":"0.028474",
                "low":"0.02821",
                "high":"0.029091",
                "vol":"79.4548693404"
            }

        spot fetchTickers:

            {
                "amount":"30086.8095",
                "count":"22450",
                "open":"9525.11",
                "close":"9591.81",
                "low":"9510.68",
                "high":"9659.7",
                "vol":"286239092.250461",
                "symbol":"btc_usdt"
            }

        swap:

            {
                "instrument_id":"btcusd",
                "last":"9574.5",
                "best_ask":"9575.0",
                "best_bid":"9574.0",
                "high_24h":"9672",
                "low_24h":"9512",
                "volume_24h":"567697050",
                "timestamp":"1595538450096"
            }
        """
        timestamp = self.safe_integer_2(ticker, 'timestamp', 'id')
        symbol = None
        marketId = self.safe_string_2(ticker, 'instrument_id', 'symbol')
        # resolve the unified symbol: known market id first, then split on '_'
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
        elif marketId is not None:
            parts = marketId.split('_')
            numParts = len(parts)
            if numParts == 2:
                baseId, quoteId = parts
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
            else:
                symbol = marketId
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
        last = self.safe_number_2(ticker, 'last', 'close')
        open = self.safe_number(ticker, 'open')
        bidVolume = None
        askVolume = None
        # spot 'bid'/'ask' are [price, volume] pairs; swap has scalar best_bid/best_ask
        bid = self.safe_value(ticker, 'bid')
        if bid is None:
            bid = self.safe_number(ticker, 'best_bid')
        else:
            bidVolume = self.safe_number(bid, 1)
            bid = self.safe_number(bid, 0)
        ask = self.safe_value(ticker, 'ask')
        if ask is None:
            ask = self.safe_number(ticker, 'best_ask')
        else:
            askVolume = self.safe_number(ask, 1)
            ask = self.safe_number(ask, 0)
        baseVolume = self.safe_number_2(ticker, 'amount', 'volume_24h')
        quoteVolume = self.safe_number(ticker, 'vol')
        vwap = self.vwap(baseVolume, quoteVolume)
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_number_2(ticker, 'high', 'high_24h'),
            'low': self.safe_number_2(ticker, 'low', 'low_24h'),
            'bid': bid,
            'bidVolume': bidVolume,
            'ask': ask,
            'askVolume': askVolume,
            'vwap': vwap,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }, market)
    def fetch_ticker(self, symbol, params={}):
        """Fetch a single ticker, using the merged-detail endpoint for spot
        markets and the contract ticker endpoint for swap markets.

        spot response:

            {
                "status":"ok",
                "ch":"market.eth_btc.detail.merged",
                "ts":1595538241474,
                "data":{
                    "id":"1595538241113",
                    "bid":["0.028474000000","1.139400000000"],
                    "ask":["0.028482000000","0.353100000000"],
                    "amount":"2850.6649",
                    "count":"818",
                    "open":"0.02821",
                    "close":"0.028474",
                    "low":"0.02821",
                    "high":"0.029091",
                    "vol":"79.4548693404"
                }
            }

        swap response:

            {
                "symbol":"btcusd",
                "last":"9575.5",
                "best_ask":"9576.0",
                "best_bid":"9575.0",
                "high_24h":"9646",
                "low_24h":"9516",
                "volume_24h":"516656839",
                "timestamp":"1595664217405"
            }
        """
        self.load_markets()
        market = self.market(symbol)
        method = None
        if market['spot']:
            method = 'dataGetMarketDetailMerged'
        elif market['swap']:
            method = 'capiGetMarketTicker'
        request = {
            'symbol': market['id'],
        }
        response = getattr(self, method)(self.extend(request, params))
        # spot wraps the ticker in 'data', swap returns it at the top level
        data = self.safe_value(response, 'data', response)
        return self.parse_ticker(data, market)
    def fetch_tickers_by_type(self, type, symbols=None, params={}):
        """Fetch all tickers of one market type('spot' or 'swap') and filter by symbols.

        spot response:

            {
                "status":"ok",
                "ts":1595542893250,
                "data":[
                    {
                        "amount":"30086.8095",
                        "count":"22450",
                        "open":"9525.11",
                        "close":"9591.81",
                        "low":"9510.68",
                        "high":"9659.7",
                        "vol":"286239092.250461",
                        "symbol":"btc_usdt"
                    }
                ]
            }

        swap response(a bare list):

            [
                {
                    "symbol":"btcusd",
                    "last":"9572",
                    "best_ask":"9571.5",
                    "best_bid":"9570.5",
                    "high_24h":"9646",
                    "low_24h":"9516",
                    "volume_24h":"515401635",
                    "timestamp":"1595664479952"
                }
            ]
        """
        self.load_markets()
        method = None
        if type == 'spot':
            method = 'dataGetMarketTickers'
        elif type == 'swap':
            method = 'capiGetMarketTickers'
        response = getattr(self, method)(params)
        data = self.safe_value(response, 'data', response)
        timestamp = None
        # spot tickers carry no per-row timestamp, so inject the response-level 'ts'
        if not isinstance(response, list):
            timestamp = self.safe_integer(response, 'ts')
        result = {}
        for i in range(0, len(data)):
            ticker = self.parse_ticker(self.extend({
                'timestamp': timestamp,
            }, data[i]))
            symbol = ticker['symbol']
            result[symbol] = ticker
        return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
    def parse_trade(self, trade, market=None):
        """Parse a public or private trade into a unified trade structure.

        Handles several raw formats:

        fetchTrades(public) spot:

            {"id":"1","price":"9533.81","amount":"0.7326","direction":"sell","ts":"1595604964000"}

        fetchTrades(public) swap:

            {"trade_id":"670581881367954915","price":"9553.00","size":"20","side":"sell","timestamp":"1595605100004","symbol":"btcusd"}

        spot fetchMyTrades(private):

            {
                "id": 29555,
                "order_id": 59378,
                "match_id": 59335,
                "symbol": "eth_usdt",
                "type": "buy-limit",
                "source": "api",
                "price": "100.1000000000",
                "filled_amount": "0.9845000000",
                "filled_fees": "0.0019690000",
                "created_at": 1494901400487
            }

        fetchOrderTrades(private) spot:

            {
                "id":"614164775",
                "created_at":"1596298860602",
                "filled_amount":"0.0417000000000000",
                "filled_fees":"0.0000834000000000",
                "match_id":"673491702661292033",
                "order_id":"673491720340279296",
                "price":"359.240000000000",
                "source":"接口",
                "symbol":"eth_usdt",
                "type":"buy-market"
            }

        fetchOrderTrades(private) swap:

            {
                "trade_id":"6667390",
                "symbol":"cmt_btcusdt",
                "order_id":"525946425993854915",
                "price":"9839.00",
                "order_qty":"3466",
                "fee":"-0.0000528407360000",
                "timestamp":"1561121514442",
                "exec_type":"M",
                "side":"3"
            }
        """
        symbol = None
        marketId = self.safe_string(trade, 'symbol')
        base = None
        quote = None
        # resolve the unified symbol from the raw market id when possible
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
            base = market['base']
            quote = market['quote']
        elif marketId is not None:
            # spot ids look like 'btc_usdt' - derive the symbol from the parts
            parts = marketId.split('_')
            numParts = len(parts)
            if numParts == 2:
                baseId, quoteId = parts
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
            else:
                symbol = marketId.upper()
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
            base = market['base']
            quote = market['quote']
        timestamp = self.safe_integer(trade, 'created_at')
        timestamp = self.safe_integer_2(trade, 'timestamp', 'ts', timestamp)
        priceString = self.safe_string(trade, 'price')
        # amount field name varies by endpoint: filled_amount / order_qty / size / amount
        amountString = self.safe_string_2(trade, 'filled_amount', 'order_qty')
        amountString = self.safe_string_2(trade, 'size', 'amount', amountString)
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        cost = self.parse_number(Precise.string_mul(priceString, amountString))
        # 'M' = maker, 'T' = taker
        takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
        if takerOrMaker == 'M':
            takerOrMaker = 'maker'
        elif takerOrMaker == 'T':
            takerOrMaker = 'taker'
        orderType = self.safe_string(trade, 'type')
        side = None
        type = None
        if orderType is not None:
            # private trades encode side and type together, e.g. 'buy-limit'
            side = self.safe_string(trade, 'type')
            type = self.parse_order_type(side)
            side = self.parse_order_side(side)
        else:
            side = self.safe_string_2(trade, 'side', 'direction')
            type = self.parse_order_type(side)
            side = self.parse_order_side(side)
        feeCostString = self.safe_string(trade, 'fee')
        if feeCostString is None:
            feeCostString = self.safe_string(trade, 'filled_fees')
        else:
            feeCostString = Precise.string_neg(feeCostString)
        feeCost = self.parse_number(feeCostString)
        fee = None
        if feeCost is not None:
            feeCurrency = base if (side == 'buy') else quote
            fee = {
                # fee is either a positive number(invitation rebate)
                # or a negative number(transaction fee deduction)
                # therefore we need to invert the fee
                # more about it https://github.com/ccxt/ccxt/issues/5909
                'cost': feeCost,
                'currency': feeCurrency,
            }
        orderId = self.safe_string(trade, 'order_id')
        id = self.safe_string_2(trade, 'trade_id', 'id')
        return {
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'id': id,
            'order': orderId,
            'type': type,
            'takerOrMaker': takerOrMaker,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_trades(self, symbol, limit=None, since=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
if market['spot']:
method = 'dataGetMarketHistoryTrade'
elif market['swap']:
method = 'capiGetMarketTrades'
if market['spot']:
if limit is not None:
request['size'] = limit # default 1, max 2000
elif market['swap']:
if limit is None:
limit = 100 # default 20, max 100
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ch":"market.btc_usdt.trade.detail",
# "ts":1595604968430,
# "data":{
# "ts":"1595604964000",
# "data":[
# {"id":"1","price":"9533.81","amount":"0.7326","direction":"sell","ts":"1595604964000"},
# {"id":"2","price":"9533.67","amount":"1.1591","direction":"buy","ts":"1595604961000"},
# {"id":"3","price":"9533.67","amount":"1.5022","direction":"sell","ts":"1595604959000"},
# ]
# }
# }
#
# swap
#
# [
# {"trade_id":"670833198971748613","price":"9578.50","size":"5412","side":"sell","timestamp":"1595665018790","symbol":"btcusd"},
# {"trade_id":"670833194240574915","price":"9579.00","size":"3972","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
# {"trade_id":"670833194240573915","price":"9579.00","size":"1227","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
# ]
#
trades = None
if isinstance(response, list):
trades = response
else:
data = self.safe_value(response, 'data', {})
trades = self.safe_value_2(data, 'data', [])
return self.parse_trades(trades, market, since, limit)
    def parse_ohlcv(self, ohlcv, market=None, timeframe='1m'):
        """Parse one candle into the unified [timestamp, open, high, low, close, volume] list.

        spot candles arrive as dicts:

            {
                "id":"1594694700000",
                "amount":"283.6811",
                "count":"234",
                "open":"9230.00",
                "close":"9227.15",
                "low":"9206.66",
                "high":"9232.33",
                "vol":"2618015.032504000000"
            }

        swap candles arrive as lists:

            ["1594693800000","9240","9241","9222","9228.5","3913370","424.003616350563"]
        """
        # options['parseOHLCV']['volume'] maps market type -> volume field name/index
        options = self.safe_value(self.options, 'parseOHLCV', {})
        volume = self.safe_value(options, 'volume', {})
        if isinstance(ohlcv, list):
            volumeIndex = self.safe_string(volume, market['type'], 'amount')
            return [
                self.safe_integer(ohlcv, 0),         # timestamp
                self.safe_number(ohlcv, 1),          # Open
                self.safe_number(ohlcv, 2),          # High
                self.safe_number(ohlcv, 3),          # Low
                self.safe_number(ohlcv, 4),          # Close
                # self.safe_number(ohlcv, 5),        # Quote Volume
                # self.safe_number(ohlcv, 6),        # Base Volume
                self.safe_number(ohlcv, volumeIndex),  # Volume, bitget will return base volume in the 7th element for future markets
            ]
        else:
            volumeIndex = self.safe_value(volume, market['type'], 6)
            return [
                self.safe_integer(ohlcv, 'id'),
                self.safe_number(ohlcv, 'open'),     # Open
                self.safe_number(ohlcv, 'high'),     # High
                self.safe_number(ohlcv, 'low'),      # Low
                self.safe_number(ohlcv, 'close'),    # Close
                self.safe_number(ohlcv, volumeIndex),  # Base Volume
            ]
    def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a symbol.

        Spot uses the history-kline endpoint with a 'period' and optional 'size';
        swap uses the candles endpoint with a 'granularity' and an iso8601
        start/end window derived from since and limit.

        spot response:

            {
                "status":"ok",
                "ch":"market.btc_usdt.kline.15min",
                "ts":1595594183874,
                "data":[
                    {"id":"1594694700000","amount":"283.6811","count":"234","open":"9230.00","close":"9227.15","low":"9206.66","high":"9232.33","vol":"2618015.032504000000"},
                ]
            }

        swap response(a bare list of lists):

            [
                ["1594764900000","9255.5","9261","9251","9255.5","3958946","427.742307964305"],
            ]
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
        }
        method = None
        type = market['type']
        # translate the unified timeframe through the per-type tables in options
        options = self.safe_value(self.options, 'timeframes', {})
        intervals = self.safe_value(options, type, {})
        interval = self.safe_value(intervals, self.timeframes[timeframe])
        if market['spot']:
            method = 'dataGetMarketHistoryKline'
            request['period'] = interval
            if limit is not None:
                request['size'] = limit  # default 150, max 1000
        elif market['swap']:
            duration = self.parse_timeframe(timeframe)
            method = 'capiGetMarketCandles'
            request['granularity'] = interval
            now = self.milliseconds()
            if since is None:
                if limit is None:
                    limit = 1000
                # no since given: request the trailing window of `limit` candles
                request['start'] = self.iso8601(now - limit * duration * 1000)
                request['end'] = self.iso8601(now)
            else:
                request['start'] = self.iso8601(since)
                if limit is None:
                    request['end'] = self.iso8601(now)
                else:
                    request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
        response = getattr(self, method)(self.extend(request, params))
        # spot wraps the candles in 'data', swap returns a bare list
        candles = response
        if not isinstance(response, list):
            candles = self.safe_value(response, 'data', [])
        return self.parse_ohlcvs(candles, market, timeframe, since, limit)
    def parse_spot_balance(self, response):
        """Parse the spot account balance response into a unified balance structure.

        Sample response:

            {
                "status":"ok",
                "ts":1595681450932,
                "data":{
                    "list":[
                        {"balance":"0.0000000000000000","currency":"BTC","type":"trade"},
                        {"balance":"0.0000000000000000","currency":"BTC","type":"frozen"},
                        {"balance":"0.0000000000000000","currency":"BTC","type":"lock"},
                    ],
                    "id":"7420922606",
                    "type":"spot",
                    "state":"working"
                }
            }
        """
        result = {'info': response}
        data = self.safe_value(response, 'data')
        balances = self.safe_value(data, 'list')
        for i in range(0, len(balances)):
            balance = balances[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            if not (code in result):
                account = self.account()
                result[code] = account
            type = self.safe_value(balance, 'type')
            # 'trade' rows are the free balance; 'frozen' and 'lock' rows sum into used
            if type == 'trade':
                result[code]['free'] = self.safe_string(balance, 'balance')
            elif (type == 'frozen') or (type == 'lock'):
                used = self.safe_string(result[code], 'used')
                result[code]['used'] = Precise.string_add(used, self.safe_string(balance, 'balance'))
        return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# swap
#
# [
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"bchusd","margin_frozen":"0","timestamp":"1595673431547","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"ethusd","margin_frozen":"0","timestamp":"1595673431573","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"cmt_btcsusdt","margin_frozen":"0","timestamp":"1595673431577","margin_mode":"fixed","forwardContractFlag":true},
# ]
#
#
result = {}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'symbol')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
account['free'] = self.safe_string(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_accounts(self, params={}):
request = {
'method': 'accounts',
}
response = self.apiGetAccountAccounts(self.extend(request, params))
#
# {
# "status":"ok",
# "ts":1595679591824,
# "data":[
# {"id":"7420922606","type":"spot","state":"working"}
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
account = data[i]
accountId = self.safe_string(account, 'id')
type = self.safe_string_lower(account, 'type')
result.append({
'id': accountId,
'type': type,
'currency': None,
'info': account,
})
return result
def find_account_by_type(self, type):
self.load_markets()
self.load_accounts()
accountsByType = self.group_by(self.accounts, 'type')
accounts = self.safe_value(accountsByType, type)
if accounts is None:
raise ExchangeError(self.id + " findAccountByType() could not find an accountId with type '" + type + "', specify the 'accountId' parameter instead") # eslint-disable-line quotes
numAccounts = len(accounts)
if numAccounts > 1:
raise ExchangeError(self.id + " findAccountByType() found more than one accountId with type '" + type + "', specify the 'accountId' parameter instead") # eslint-disable-line quotes
return accounts[0]
def get_account_id(self, params):
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_string(self.options, 'accountId')
accountId = self.safe_string(params, 'accountId', defaultAccountId)
if accountId is not None:
return accountId
defaultType = self.safe_string(self.options, 'defaultType', 'margin')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
if type is None:
raise ArgumentsRequired(self.id + " getAccountId() requires an 'accountId' parameter")
account = self.find_account_by_type(type)
return account['id']
    def fetch_balance(self, params={}):
        """Fetch the balance of either the spot or the swap account.

        The account type is read from params['type'], falling back to
        self.options['fetchBalance'] or self.options['defaultType'].

        :param dict params: extra endpoint parameters; 'type' must resolve to 'spot' or 'swap'
        :returns dict: a unified balance structure
        :raises ArgumentsRequired: if no account type can be resolved
        """
        self.load_markets()
        self.load_accounts()
        defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
        type = self.safe_string(params, 'type', defaultType)
        if type is None:
            raise ArgumentsRequired(self.id + " fetchBalance() requires a 'type' parameter, one of 'spot', 'swap'")
        method = None
        query = self.omit(params, 'type')
        if type == 'spot':
            # spot balances are per-account, so an account id must be resolved first
            accountId = self.get_account_id(params)
            method = 'apiGetAccountsAccountIdBalance'
            query['account_id'] = accountId
            query['method'] = 'balance'
        elif type == 'swap':
            method = 'swapGetAccountAccounts'
        # dispatch by method name — the endpoint differs per account type
        response = getattr(self, method)(query)
        #
        # spot
        #
        #     {
        #         "status":"ok",
        #         "ts":1595681450932,
        #         "data":{
        #             "list":[
        #                 {"balance":"0.0000000000000000","currency":"BTC","type":"trade"},
        #                 {"balance":"0.0000000000000000","currency":"BTC","type":"frozen"},
        #                 {"balance":"0.0000000000000000","currency":"BTC","type":"lock"},
        #             ],
        #             "id":"7420922606",
        #             "type":"spot",
        #             "state":"working"
        #         }
        #     }
        #
        # swap
        #
        #     [
        #         {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"bchusd","margin_frozen":"0","timestamp":"1595673431547","margin_mode":"fixed","forwardContractFlag":false},
        #         {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"ethusd","margin_frozen":"0","timestamp":"1595673431573","margin_mode":"fixed","forwardContractFlag":false},
        #         {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"cmt_btcsusdt","margin_frozen":"0","timestamp":"1595673431577","margin_mode":"fixed","forwardContractFlag":true},
        #     ]
        #
        return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if type == 'spot':
return self.parse_spot_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
def parse_order_status(self, status):
statuses = {
'submitted': 'open',
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'buy-market': 'buy',
'sell-market': 'sell',
'buy-limit': 'buy',
'sell-limit': 'sell',
'1': 'long', # open long
'2': 'short', # open short
'3': 'long', # close long
'4': 'short', # close short
}
return self.safe_string(sides, side, side)
def parse_order_type(self, type):
types = {
'buy-market': 'market',
'sell-market': 'market',
'buy-limit': 'limit',
'sell-limit': 'limit',
'1': 'open', # open long
'2': 'open', # open short
'3': 'close', # close long
'4': 'close', # close short
}
return self.safe_string(types, type, type)
    def parse_order(self, order, market=None):
        """Convert a raw exchange order payload into a unified order structure.

        Handles the payload shapes of createOrder, cancelOrder, fetchOrder,
        fetchOpenOrders and fetchClosedOrders for both spot and swap markets
        (see the sample responses below).

        :param dict order: raw order payload from the exchange
        :param dict|None market: market the order belongs to, if known
        :returns dict: a unified order structure
        """
        #
        # createOrder
        #
        #     spot
        #
        #     {
        #         "status":"ok",
        #         "ts":1595792596056,
        #         "data":671368296142774272
        #     }
        #
        #     swap
        #
        #     {
        #         "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
        #         "order_id":"671757564085534713"
        #     }
        #
        # cancelOrder
        #
        #     spot
        #
        #     {
        #         "status": "ok",
        #         "ts": 1595818631279,
        #         "data": 671368296142774272
        #     }
        #
        #     swap
        #
        #     {
        #         "order_id":"671757564085534713",
        #         "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
        #         "symbol":"cmt_ethusdt",
        #         "result":true,
        #         "err_code":null,
        #         "err_msg":null
        #     }
        #
        # fetchOpenOrders, fetchClosedOrders, fetchOrder
        #
        #     spot
        #
        #     {
        #         "account_id":"7420922606",
        #         "amount":"0.1000000000000000",
        #         "canceled_at":"1595872129618",
        #         "created_at":"1595872089525",
        #         "filled_amount":"0.000000000000",
        #         "filled_cash_amount":"0.000000000000",
        #         "filled_fees":"0.000000000000",
        #         "finished_at":"1595872129618",
        #         "id":"671701716584665088",
        #         "price":"150.000000000000",
        #         "source":"接口",
        #         "state":"canceled",
        #         "symbol":"eth_usdt",
        #         "type":"buy-limit"
        #     }
        #
        #     swap
        #
        #     {
        #         "symbol":"cmt_ethusdt",
        #         "size":"1",
        #         "timestamp":"1595885546770",
        #         "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
        #         "createTime":"1595885521200",
        #         "filled_qty":"0",
        #         "fee":"0.00000000",
        #         "order_id":"671758053112020913",
        #         "price":"150.00",
        #         "price_avg":"0.00",
        #         "status":"0",
        #         "type":"1",
        #         "order_type":"0",
        #         "totalProfits":null
        #     }
        #
        # id fallback chain: swap 'order_id' → spot 'id' → spot createOrder 'data'
        id = self.safe_string(order, 'order_id')
        id = self.safe_string_2(order, 'id', 'data', id)
        timestamp = self.safe_integer_2(order, 'created_at', 'createTime')
        # the raw 'type' field encodes both side and type(e.g. 'buy-limit' or a swap code)
        type = self.safe_string(order, 'type')
        side = self.parse_order_side(type)
        type = self.parse_order_type(type)
        # if (side != 'buy') and (side != 'sell'):
        #     side = self.parse_order_side(type)
        # }
        # if (type != 'limit') and (type != 'market'):
        #     if 'pnl' in order:
        #         type = 'futures'
        #     else:
        #         type = 'swap'
        #     }
        # }
        symbol = None
        marketId = self.safe_string(order, 'symbol')
        if marketId is not None:
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
            else:
                # unknown market id — fall back to the uppercased raw id
                symbol = marketId.upper()
        if (symbol is None) and (market is not None):
            symbol = market['symbol']
        amount = self.safe_string_2(order, 'amount', 'size')
        filled = self.safe_string_2(order, 'filled_amount', 'filled_qty')
        cost = self.safe_string(order, 'filled_cash_amount')
        price = self.safe_string(order, 'price')
        average = self.safe_string(order, 'price_avg')
        status = self.parse_order_status(self.safe_string_2(order, 'state', 'status'))
        feeCost = self.safe_number_2(order, 'filled_fees', 'fee')
        fee = None
        if feeCost is not None:
            # the fee currency is not reported by the exchange
            feeCurrency = None
            fee = {
                'cost': feeCost,
                'currency': feeCurrency,
            }
        clientOrderId = self.safe_string(order, 'client_oid')
        return self.safe_order2({
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': None,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': None,
            'average': average,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': None,
            'status': status,
            'fee': fee,
            'trades': None,
        })
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order on a spot or swap market.

        :param str symbol: unified market symbol
        :param str type: 'limit' or 'market'
        :param str side: 'buy' or 'sell'(spot); for swap the direction is
            taken from params['type'] instead('1'-'4', see below)
        :param float amount: order amount(for spot market buys this is the
            quote-currency cost unless a price is supplied)
        :param float|None price: order price, required for limit orders
        :param dict params: extra exchange-specific parameters
        :returns dict: a unified order structure
        :raises InvalidOrder: for a spot market buy without a resolvable cost
        :raises ArgumentsRequired: for a swap order without params['type']
        """
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        #
        # spot
        #
        #     account_id true string Account ID, obtained using the accounts method. Currency transactions use the accountid of the'spot' account; for loan asset transactions, please use the accountid of the'margin' account
        #     amount true string A limit order indicates the quantity of the order, when a market price buy order indicates how much money to buy, and when a market price sell order indicates how much currency to sell
        #     price false string Order price, market order does not pass self parameter
        #     source false string Order source api
        #     symbol true string Trading pair btc_usdt, eth_btc ...
        #     type true string Order Type buy-market: buy at market price, sell-market: sell at market price, buy-limit: buy at limit price, sell-limit: sell at limit price
        #
        # swap
        #
        #     symbol String Yes Contract ID
        #     client_oid String Yes customize order IDs to identify your orders.(Less than 50 characters without special characters,
        #     size String Yes Quantity to buy or sell(value not equal to 0 or negative)
        #     type String Yes 1 Open long 2Open short 3 Close long 4 Close short
        #     order_type String Yes 0: Normal order(Unfilled and 0 imply normal limit order) 1: Post only 2: Fill or Kill 3: Immediate Or Cancel
        #     match_price String Yes 0 Limit price 1 market price
        #     price String No Price of each contract
        #
        request = {
            'symbol': market['id'],
        }
        # generate a client order id up front; only the swap endpoint sends it
        clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId', self.uuid())
        params = self.omit(params, ['client_oid', 'clientOrderId'])
        method = None
        if market['spot']:
            accountId = self.get_account_id({
                'type': market['type'],
            })
            method = 'apiPostOrderOrdersPlace'
            request['account_id'] = accountId
            request['method'] = 'place'
            # spot encodes side and type into a single field, e.g. 'buy-limit'
            request['type'] = side + '-' + type
            if type == 'limit':
                request['amount'] = self.amount_to_precision(symbol, amount)
                request['price'] = self.price_to_precision(symbol, price)
            elif type == 'market':
                # for market buy it requires the amount of quote currency to spend
                if side == 'buy':
                    cost = self.safe_number(params, 'amount')
                    createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
                    if createMarketBuyOrderRequiresPrice:
                        if price is not None:
                            if cost is None:
                                # derive the quote cost from amount * price
                                cost = amount * price
                        elif cost is None:
                            raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'amount' extra parameter(the exchange-specific behaviour)")
                    else:
                        # when the price requirement is disabled, 'amount' IS the cost
                        cost = amount if (cost is None) else cost
                    request['amount'] = self.cost_to_precision(symbol, cost)
                elif side == 'sell':
                    request['amount'] = self.amount_to_precision(symbol, amount)
            # ...
        elif market['swap']:
            request['order_type'] = '0'  # '0' = Normal order, None and 0 imply a normal limit order, '1' = Post only, '2' = Fill or Kill, '3' = Immediate Or Cancel
            request['client_oid'] = clientOrderId
            orderType = self.safe_string(params, 'type')
            if orderType is None:
                raise ArgumentsRequired(self.id + " createOrder() requires a type parameter, '1' = open long, '2' = open short, '3' = close long, '4' = close short for " + market['type'] + ' orders')
            request['size'] = self.amount_to_precision(symbol, amount)
            request['type'] = orderType
            # if match_price is set to '1', the price parameter will be ignored for market orders
            if type == 'limit':
                request['match_price'] = '0'
                request['price'] = self.price_to_precision(symbol, price)
            elif type == 'market':
                request['match_price'] = '1'
            method = 'swapPostOrderPlaceOrder'
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     {
        #         "status":"ok",
        #         "ts":1595792596056,
        #         "data":"671368296142774272"
        #     }
        #
        # swap
        #
        #     {
        #         "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
        #         "order_id":"671757564085534713"
        #     }
        #
        return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = None
type = None
if symbol is None:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument for spot orders')
else:
market = self.market(symbol)
type = market['type']
query = self.omit(params, 'type')
method = None
request = {}
if type == 'spot':
method = 'apiPostOrderOrdersOrderIdSubmitcancel'
request['order_id'] = id
request['method'] = 'submitcancel'
elif type == 'swap':
method = 'swapPostOrderCancelOrder'
request['orderId'] = id
request['symbol'] = market['id']
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {"status": "ok", "ts": 1595818631279, "data": 671368296142774272}
#
# swap
#
# {
# "order_id":"671757564085534713",
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "symbol":"cmt_ethusdt",
# "result":true,
# "err_code":null,
# "err_msg":null
# }
#
return self.parse_order(response, market)
def cancel_orders(self, ids, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
if type is None:
raise ArgumentsRequired(self.id + " cancelOrders() requires a type parameter(one of 'spot', 'swap').")
params = self.omit(params, 'type')
request = {}
method = None
if type == 'spot':
method = 'apiPostOrderOrdersBatchcancel'
request['method'] = 'batchcancel'
jsonIds = self.json(ids)
parts = jsonIds.split('"')
request['order_ids'] = ''.join(parts)
elif type == 'swap':
method = 'swapPostOrderCancelBatchOrders'
request['symbol'] = market['id']
request['ids'] = ids
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": {
# "success": [
# "673451224205135872",
# ],
# "failed": [
# {
# "err-msg": "invalid record",
# "order-id": "673451224205135873",
# "err-code": "base record invalid"
# }
# ]
# }
# }
#
# swap
#
# {
# "result":true,
# "symbol":"cmt_btcusdt",
# "order_ids":[
# "258414711",
# "478585558"
# ],
# "fail_infos":[
# {
# "order_id":"258414711",
# "err_code":"401",
# "err_msg":""
# }
# ]
# }
#
return response
    def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by id(or by client order id for spot).

        :param str id: order id; ignored for spot when params['client_oid'] is given
        :param str symbol: unified market symbol(required)
        :param dict params: extra parameters; 'type' overrides the market type,
            'client_oid' switches the spot lookup to client-order-id mode
        :returns dict: a unified order structure
        :raises ArgumentsRequired: when symbol or type is missing
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = self.safe_string(params, 'type', market['type'])
        if type is None:
            raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'swap').")
        method = None
        request = {}
        if type == 'spot':
            clientOid = self.safe_string(params, 'client_oid')
            if clientOid is not None:
                # look the order up by client order id instead of exchange id
                method = 'apiPostOrderOrdersClientOid'
                request['client_oid'] = clientOid
            else:
                method = 'apiPostOrderOrdersOrderId'
                request['order_id'] = id
            request['method'] = 'getOrder'
        elif type == 'swap':
            method = 'swapGetOrderDetail'
            request['symbol'] = market['id']
            request['orderId'] = id
        query = self.omit(params, 'type')
        response = getattr(self, method)(self.extend(request, query))
        #
        # spot
        #
        #     {
        #         "status":"ok",
        #         "ts":1595897886717,
        #         "data":{
        #             "account_id":"7420922606",
        #             "amount":"0.1000000000000000",
        #             "canceled_at":"1595818631541",
        #             "created_at":"1595792595897",
        #             "filled_amount":"0.000000000000",
        #             "filled_cash_amount":"0.000000000000",
        #             "filled_fees":"0.000000000000",
        #             "finished_at":"1595818631541",
        #             "id":"671368296142774272",
        #             "price":"150.000000000000",
        #             "source":"接口",
        #             "state":"canceled",
        #             "symbol":"eth_usdt",
        #             "type":"buy-limit"
        #         }
        #     }
        #
        #
        # swap
        #
        #     {
        #         "symbol":"cmt_ethusdt",
        #         "size":"1",
        #         "timestamp":"1595896459890",
        #         "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
        #         "createTime":"1595885404607",
        #         "filled_qty":"0",
        #         "fee":"0",
        #         "order_id":"671757564085534713",
        #         "price":"150",
        #         "price_avg":"0",
        #         "status":"-1",
        #         "type":"1",
        #         "order_type":"0",
        #         "totalProfits":"0"
        #     }
        #
        # spot wraps the order in 'data'; swap returns the order at top level
        data = self.safe_value(response, 'data', response)
        return self.parse_order(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
request = {
'symbol': market['id'],
}
method = None
if type == 'spot':
method = 'apiGetOrderOrdersOpenOrders'
# request['from'] = self.safe_string(params, 'from') # order id
# request['direct'] = 'next' # or 'prev'
request['method'] = 'openOrders'
if limit is None:
request['size'] = limit # default 100, max 1000
elif type == 'swap':
method = 'swapGetOrderOrders'
request['status'] = '3' # 0 Failed, 1 Partially Filled, 2 Fully Filled 3 = Open + Partially Filled, 4 Canceling
request['from'] = '1'
request['to'] = '1'
if limit is None:
request['limit'] = 100 # default 100, max 100
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
#
# {
# "status":"ok",
# "ts":1595875165865,
# "data":[
# {
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595872129618",
# "created_at":"1595872089525",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595872129618",
# "id":"671701716584665088",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
# ]
# }
#
# swap
#
# [
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595885546770",
# "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
# "createTime":"1595885521200",
# "filled_qty":"0",
# "fee":"0.00000000",
# "order_id":"671758053112020913",
# "price":"150.00",
# "price_avg":"0.00",
# "status":"0",
# "type":"1",
# "order_type":"0",
# "totalProfits":null
# }
# ]
#
data = response
if not isinstance(response, list):
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, None, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
request = {
'symbol': market['id'],
}
method = None
if type == 'spot':
method = 'apiGetOrderOrdersHistory'
# Value range [((end_time) – 48h),(end_time)]
# the query window is 48 hours at most
# the window shift range is the last 30 days
if since is not None:
request['start_time'] = since
# request['end_time'] = self.safe_integer(params, 'end_time')
# request['from'] = self.safe_string(params, 'from') # order id
# request['direct'] = 'next' # or 'prev'
request['method'] = 'openOrders'
if limit is None:
request['size'] = limit # default 100, max 1000
elif type == 'swap':
method = 'swapGetOrderOrders'
request['status'] = '2' # 0 Failed, 1 Partially Filled, 2 Fully Filled 3 = Open + Partially Filled, 4 Canceling
request['from'] = '1'
request['to'] = '1'
if limit is None:
request['limit'] = 100 # default 100, max 100
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
#
# {
# "status":"ok",
# "ts":1595875165865,
# "data":[
# {
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595872129618",
# "created_at":"1595872089525",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595872129618",
# "id":"671701716584665088",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
# ]
# }
#
# swap
#
# [
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595885546770",
# "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
# "createTime":"1595885521200",
# "filled_qty":"0",
# "fee":"0.00000000",
# "order_id":"671758053112020913",
# "price":"150.00",
# "price_avg":"0.00",
# "status":"0",
# "type":"1",
# "order_type":"0",
# "totalProfits":null
# }
# ]
#
data = response
if not isinstance(response, list):
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, None, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'method': 'deposit_withdraw',
'type': 'deposit',
'size': 12,
}
response = self.apiGetOrderDepositWithdraw(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "id": 1171,
# "type": "deposit",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'method': 'deposit_withdraw',
'type': 'withdraw',
'size': 12,
}
response = self.apiGetOrderDepositWithdraw(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "id": 1171,
# "type": "withdraw",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
# withdrawals
'WaitForOperation': 'pending', # 等待提现
'OperationLock': 'pending', # 初审锁定成功
'OperationSuccess': 'ok', # 提现成功
'Cancel': 'canceled', # 用户撤销
'Sure': 'ok', # 复审锁定成功
'Fail': 'failed', # 出币异常
'WaitForChainSure': 'ok', # 等待链上确认
# deposits
'WAIT_0': 'pending', # 待确认
'WAIT_1': 'pending', # 待确认
'DATA_CHANGE': 'pending', # 待确认中
'SUCCESS': 'ok', # 充值成功
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits, fetchWithdrawals
#
# {
# "id": 1171,
# "type": "withdraw",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'address_tag')
tagFrom = None
tagTo = tag
addressFrom = None
addressTo = address
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
elif type == 'deposit':
type = 'deposit'
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
txid = self.safe_string(transaction, 'tx_hash')
timestamp = self.safe_integer(transaction, 'created_at')
updated = self.safe_integer(transaction, 'updated_at')
feeCost = self.safe_number(transaction, 'fee')
fee = None
if feeCost is not None:
fee = {
'currency': code,
'cost': feeCost,
}
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': tagFrom,
'tagTo': tagTo,
'tag': tag,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own trades on a spot market.

        Only spot markets are supported — swap raises.

        :param str symbol: unified market symbol(required)
        :param int|None since: earliest trade timestamp in milliseconds; the
            query window sent to the exchange is since + 2 days
        :param int|None limit: maximum number of trades(max 100)
        :param dict params: extra parameters; 'type' overrides the market type
        :returns list: unified trade structures
        :raises ArgumentsRequired: when symbol is missing or type is 'swap'
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = self.safe_string(params, 'type', market['type'])
        query = self.omit(params, 'type')
        if type == 'swap':
            raise ArgumentsRequired(self.id + ' fetchMyTrades() is not supported for ' + type + ' type')
        #
        # spot
        #
        #     POST /api/v1/order/matchresults Query current order, order history
        #     symbol true string trading pair  btc_usdt, eth_btc ...
        #     types false string Query order type combination  buy-market, sell-market, buy-limit, sell-limit
        #     start_date false string Query start date, date format yyyy-mm-dd -61 days [-61day, end-date]
        #     end_date false string Query end date, date format yyyy-mm-dd Now [start-date, now]
        #     from false string Query start ID order record id
        #     direct false string Query direction ‘next’ is default , the transaction record ID is sorted from large to small prev,next
        #     size false string Query record size 100 <=100
        #
        request = {
            'symbol': market['id'],
            'method': 'matchresults',
            # 'types': 'buy-market,sell-market,buy-limit,sell-limit',
            # 'start_date': self.ymd(since),
            # 'end_date': self.ymd(self.milliseconds()),
            # 'size': 100,
            # 'direct': 'next',
        }
        if since is not None:
            request['start_date'] = self.ymd(since)
            # the exchange limits the query window to 48 hours
            end = self.sum(since, 2 * 24 * 60 * 60 * 1000)
            request['end_date'] = self.ymd(end)
        if limit is not None:
            request['size'] = limit  # default 100, max 100
        response = self.apiPostOrderMatchresults(self.extend(request, query))
        #
        # {
        #     "status": "ok",
        #     "data": [
        #         {
        #             "id": 29555,
        #             "order_id": 59378,
        #             "match_id": 59335,
        #             "symbol": "eth_usdt",
        #             "type": "buy-limit",
        #             "source": "api",
        #             "price": "100.1000000000",
        #             "filled_amount": "0.9845000000",
        #             "filled_fees": "0.0019690000",
        #             "created_at": 1494901400487
        #         }
        #     ]
        # }
        #
        data = self.safe_value(response, 'data', [])
        return self.parse_trades(data, market, since, limit)
    def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
        """Fetch the fills(trades) belonging to a single order.

        :param str id: order id
        :param str symbol: unified market symbol(required)
        :param int|None since: earliest trade timestamp(applied client-side)
        :param int|None limit: maximum number of trades(applied client-side)
        :param dict params: extra parameters; 'type' overrides the market type
        :returns list: unified trade structures
        :raises ArgumentsRequired: when symbol is missing
        """
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = self.safe_string(params, 'type', market['type'])
        params = self.omit(params, 'type')
        method = None
        request = {}
        if type == 'spot':
            request['order_id'] = id
            request['method'] = 'matchresults'
            method = 'apiPostOrderOrdersOrderIdMatchresults'
        elif type == 'swap':
            request['orderId'] = id
            request['symbol'] = market['id']
            method = 'swapGetOrderFills'
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        #     {
        #         "status":"ok",
        #         "ts":1596298917277,
        #         "data":[
        #             {
        #                 "id":"614164775",
        #                 "created_at":"1596298860602",
        #                 "filled_amount":"0.0417000000000000",
        #                 "filled_fees":"0.0000834000000000",
        #                 "match_id":"673491702661292033",
        #                 "order_id":"673491720340279296",
        #                 "price":"359.240000000000",
        #                 "source":"接口",
        #                 "symbol":"eth_usdt",
        #                 "type":"buy-market"
        #             }
        #         ]
        #     }
        #
        # swap
        #
        #
        #     [
        #         {
        #             "trade_id":"6667390",
        #             "symbol":"cmt_btcusdt",
        #             "order_id":"525946425993854915",
        #             "price":"9839.00",
        #             "order_qty":"3466",
        #             "fee":"-0.0000528407360000",
        #             "timestamp":"1561121514442",
        #             "exec_type":"M",
        #             "side":"3"
        #         }
        #     ]
        #
        # swap returns a bare list; spot wraps the list in 'data'
        data = response
        if not isinstance(data, list):
            data = self.safe_value(response, 'data', [])
        return self.parse_trades(data, market, since, limit)
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.swapGetPositionSinglePosition(self.extend(request, params))
#
# {
# "margin_mode":"fixed", # Margin mode: crossed / fixed
# "holding":[
# {
# "symbol":"cmt_btcusdt", # Contract name
# "liquidation_price":"0.00", # Estimated liquidation price
# "position":"0", # Position Margin, the margin for holding current positions
# "avail_position":"0", # Available position
# "avg_cost":"0.00", # Transaction average price
# "leverage":"2", # Leverage
# "realized_pnl":"0.00000000", # Realized Profit and loss
# "keepMarginRate":"0.005", # Maintenance margin rate
# "side":"1", # Position Direction Long or short, Mark obsolete
# "holdSide":"1", # Position Direction Long or short
# "timestamp":"1557571623963", # System timestamp
# "margin":"0.0000000000000000", # Used margin
# "unrealized_pnl":"0.00000000", # Unrealized profit and loss
# }
# ]
# }
return response
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.swapGetPositionAllPosition(params)
#
# [
# {
# "margin_mode":"fixed",
# "holding":[
# {
# "liquidation_price":"0.00",
# "position":"0",
# "avail_position":"0",
# "avg_cost":"0.00",
# "symbol":"btcusd",
# "leverage":"20",
# "keepMarginRate":"0.005",
# "realized_pnl":"0.00000000",
# "unrealized_pnl":"0",
# "side":"long",
# "holdSide":"1",
# "timestamp":"1595698564915",
# "margin":"0.0000000000000000"
# },
# ]
# },
# ]
#
# todo unify parsePosition/parsePositions
return response
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, headers, and body for a request, signing it when required.

        Four API sections are handled:
        - 'data'/'capi': public, query string only, no signature
        - 'swap': HMAC-SHA256(base64) over timestamp + method + path(+ body or query),
          sent via ACCESS-* headers
        - 'api': HMAC-MD5 keyed with SHA1(secret) over the sorted query string,
          appended as sign/req_time/accesskey parameters

        :returns dict: {'url', 'method', 'body', 'headers'}
        """
        request = '/' + self.implode_params(path, params)
        if (api == 'capi') or (api == 'swap'):
            request = '/api/swap/' + self.version + request
        else:
            request = '/' + api + '/v1' + request
        query = self.omit(params, self.extract_params(path))
        url = self.implode_hostname(self.urls['api'][api]) + request
        if (api == 'data') or (api == 'capi'):
            # public endpoints — no authentication
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'swap':
            self.check_required_credentials()
            timestamp = str(self.milliseconds())
            # prehash string: timestamp + HTTP method + request path
            auth = timestamp + method + request
            if method == 'POST':
                body = self.json(params)
                auth += body
            else:
                if params:
                    # the query string must be sorted so the signature is reproducible
                    query = self.urlencode(self.keysort(params))
                    url += '?' + query
                    auth += '?' + query
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
            headers = {
                'ACCESS-KEY': self.apiKey,
                'ACCESS-SIGN': signature,
                'ACCESS-TIMESTAMP': timestamp,
                'ACCESS-PASSPHRASE': self.password,
            }
            if method == 'POST':
                headers['Content-Type'] = 'application/json'
        elif api == 'api':
            timestamp = str(self.milliseconds())
            auth = ''
            # sort parameters, then sign the raw-encoded string
            query = self.keysort(query)
            auth = self.rawencode(query)
            # the HMAC key is the hex SHA1 of the secret, not the secret itself
            hash = self.hash(self.encode(self.secret), 'sha1')
            signed = auth
            signature = self.hmac(self.encode(auth), self.encode(hash), hashlib.md5)
            if len(auth) > 0:
                signed += '&'
            signed += 'sign=' + signature + '&req_time=' + timestamp + '&accesskey=' + self.apiKey
            if method == 'GET':
                if query:
                    url += '?' + signed
            elif method == 'POST':
                # for POST the signature travels in the URL, the params in the body
                url += '?sign=' + signature + '&req_time=' + timestamp + '&accesskey=' + self.apiKey
                body = auth
                headers = {
                    'Content-Type': 'application/x-www-form-urlencoded',
                }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map error payloads from spot/swap responses onto typed exceptions.

        Does nothing when *response* is falsy, deferring to the generic
        HTTP-level error handler.  Otherwise the matching order is:
        exact then broad match on the error message, exact match on the
        error code, and finally a generic ExchangeError if either field
        indicated an error that no mapping recognised.
        """
        if not response:
            return  # fallback to default error handler
        #
        # spot
        #
        #     {"status":"fail","err_code":"01001","err_msg":"系统异常,请稍后重试"}
        #     {"status":"error","ts":1595594160149,"err_code":"invalid-parameter","err_msg":"invalid size, valid range: [1,2000]"}
        #     {"status":"error","ts":1595684716042,"err_code":"invalid-parameter","err_msg":"illegal sign invalid"}
        #     {"status":"error","ts":1595700216275,"err_code":"bad-request","err_msg":"your balance is low!"}
        #     {"status":"error","ts":1595700344504,"err_code":"invalid-parameter","err_msg":"invalid type"}
        #     {"status":"error","ts":1595703343035,"err_code":"bad-request","err_msg":"order cancel fail"}
        #     {"status":"error","ts":1595704360508,"err_code":"invalid-parameter","err_msg":"accesskey not null"}
        #     {"status":"error","ts":1595704490084,"err_code":"invalid-parameter","err_msg":"permissions not right"}
        #     {"status":"error","ts":1595711862763,"err_code":"system exception","err_msg":"system exception"}
        #     {"status":"error","ts":1595730308979,"err_code":"bad-request","err_msg":"20003"}
        #
        # swap
        #
        #     {"code":"40015","msg":"","requestTime":1595698564931,"data":null}
        #     {"code":"40017","msg":"Order id must not be blank","requestTime":1595702477835,"data":null}
        #     {"code":"40017","msg":"Order Type must not be blank","requestTime":1595698516162,"data":null}
        #     {"code":"40301","msg":"","requestTime":1595667662503,"data":null}
        #     {"code":"40017","msg":"Contract code must not be blank","requestTime":1595703151651,"data":null}
        #     {"code":"40108","msg":"","requestTime":1595885064600,"data":null}
        #     {"order_id":"513468410013679613","client_oid":null,"symbol":"ethusd","result":false,"err_code":"order_no_exist_error","err_msg":"订单不存在!"}
        #
        message = self.safe_string(response, 'err_msg')
        errorCode = self.safe_string_2(response, 'code', 'err_code')
        feedback = self.id + ' ' + body
        # Both a missing field and an empty string count as "no message".
        nonEmptyMessage = ((message is not None) and (message != ''))
        if nonEmptyMessage:
            self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
        # '00000' is treated as the "no error" code; anything else is an error.
        nonZeroErrorCode = (errorCode is not None) and (errorCode != '00000')
        if nonZeroErrorCode:
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
        if nonZeroErrorCode or nonEmptyMessage:
            raise ExchangeError(feedback)  # unknown message
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
f0883ebedea52ca4ef555b7902a40b1994d8d001 | bd4a7b5c70043d3029d31d9846283afa49fa0833 | /miscutil/files.py | 47aff12b65a9ed652daa2ee23c27da69a739725d | [] | no_license | koji8y/miscutil | a868dfdf05c0680883574997da09bd191806f77a | b434a2b412657f7af6e53cf5da652e3bc0f47372 | refs/heads/master | 2023-03-03T01:30:20.091811 | 2021-02-18T17:21:11 | 2021-02-18T17:21:11 | 268,417,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | """File related uttilities."""
from typing import Any
from typing import Callable
from typing import Union
from contextlib import contextmanager
from contextlib import redirect_stderr
from contextlib import redirect_stdout
import gzip
from io import StringIO
from os import devnull
from pathlib import Path
import pickle
import sys
def pickledump(obj: Any, filename: Union[str, Path]) -> None:
    """Serialize *obj* to *filename* with pickle.

    Names ending in ".gz" are written gzip-compressed; everything else is a
    plain binary pickle file.
    """
    target = str(filename)
    opener = gzip.open if target.endswith(".gz") else open
    with opener(target, 'wb') as stream:
        pickle.dump(obj, stream)
def pickleload(filename: Union[str, Path]) -> Any:
    """Deserialize and return the object pickled in *filename*.

    Names ending in ".gz" are transparently gunzipped before unpickling.
    """
    source = str(filename)
    opener = gzip.open if source.endswith(".gz") else open
    with opener(source, 'rb') as stream:
        return pickle.load(stream)
@contextmanager
def suppress_stdout_stderr():
    """Silence both stdout and stderr for the duration of the block.

    Yields the (stderr, stdout) pair returned by the redirect managers;
    both point at the devnull sink while the block runs.
    """
    with open(devnull, 'w') as sink:
        with redirect_stderr(sink) as err:
            with redirect_stdout(sink) as out:
                yield (err, out)
@contextmanager
def suppress_stderr_only():
    """Silence stderr (stdout is left alone) for the duration of the block."""
    with open(devnull, 'w') as sink:
        with redirect_stderr(sink) as err:
            yield err
@contextmanager
def filter_stderr(is_line_to_be_suppressed: Callable[[str, str], bool]):
    """Capture stderr and, on exit, re-emit only the lines the predicate keeps.

    *is_line_to_be_suppressed* receives (line, previous_line) and returns True
    for lines that should be dropped.  Lines that survive are printed back to
    the real stderr after the block finishes (even if it raised).
    """
    output = StringIO()
    try:
        with redirect_stderr(output) as err:
            yield err
    finally:
        prev = ''
        # BUG FIX: the original iterated over split('\n'), which always yields
        # a trailing '' (and [''] for empty output), so an extra blank line was
        # written to stderr on every exit.  splitlines() has no such artifact.
        for line in output.getvalue().splitlines():
            try:
                if is_line_to_be_suppressed(line, prev):
                    continue
                print(line, file=sys.stderr)
            finally:
                # Track the previous line even when it was suppressed.
                prev = line
| [
"kouji@acm.org"
] | kouji@acm.org |
102d157c0f373f982a69fa38185b325003677893 | d18d643763956958b56b84d492106c9d1ccf53a9 | /aula7_calculadora1.py | 35fabd6b88d62d1ee6aa7733274ee461db8e9814 | [] | no_license | josrenato132/LearningPython | ddbed736b0bbb1b87d15eaa3620b774c0eee55f0 | 5a5755eb6cce93b9eed6474b75fd9fae8ff4f8ee | refs/heads/main | 2023-03-21T01:22:51.894575 | 2021-03-19T03:47:37 | 2021-03-19T03:47:37 | 295,542,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | class Calculadora:
    def __init__(self, num1, num2):
        """Store the two operands used by every arithmetic method."""
        # a = left operand, b = right operand
        self.a = num1
        self.b = num2
def soma(self):
return self.a + self.b
def sub(self):
return self.a - self.b
def mult(self):
return self.a * self.b
def div(self):
return self.a / self.b
# Interactive demo: read two integers and print all four operations.
c = int(input("Digite um número: "))
d = int(input("Digite um número"))
calculadora = Calculadora(c, d)
print("A soma é: " + str(calculadora.soma()))
print("A subtração é: " + str(calculadora.sub()))
print("A multiplicação é: " + str(calculadora.mult()))
print("A divisão é: " + str(calculadora.div())) | [
"noreply@github.com"
] | noreply@github.com |
b42413b133a14aafa23e322797c0046dceedd8c6 | 56bcaadc74d685edb6ef7fdd97a7c32708b9b007 | /python1019.py | b0bc3bb9fe5c726b10471100094631dceee76124 | [] | no_license | Diamond346/HelloPython1019 | fa345ceb9c09757a4ca0b5c03cc3f434330b15f0 | 9452f24ecb305ec6b9ccc527f0f51f91c12c4634 | refs/heads/master | 2020-04-01T22:55:11.603006 | 2018-10-19T13:19:55 | 2018-10-19T13:19:55 | 153,732,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | def get_date():
print("天天向上")
if __name__ == '__main__':
get_date() | [
"15930669346@163.com"
] | 15930669346@163.com |
54d7ed874c67f126e1d7efec7c5ed4f4e8df7548 | 0815463443f2d2f1a3c62c4efd833c06704c692c | /blog/migrations/0002_userinfo.py | 80898b6073ac2b90098cfd11a8bd72f5c11d1f92 | [] | no_license | thinkthango/mysite | 84f8284fe6144268d67d4edd2954f150e4e1c70c | 2d0ef32a6b06ea9788439183ec94db63edf5ac39 | refs/heads/master | 2020-03-26T08:26:31.616725 | 2018-10-31T05:28:44 | 2018-10-31T05:28:44 | 144,702,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # Generated by Django 2.1 on 2018-08-14 05:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the blog.UserInfo table (id, user, pwd)."""

    # Must be applied after the app's initial migration.
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=32)),
                # NOTE(review): 'pwd' is a plain CharField — confirm passwords
                # are hashed before storage elsewhere in the app.
                ('pwd', models.CharField(max_length=32)),
            ],
        ),
    ]
| [
"sky_wangjz@163.com"
] | sky_wangjz@163.com |
45e82330b7a0652c44e04e218aaf3df3adc06d33 | 450965a6ec1b109d60421ea368daa0c5b89a1731 | /sendmessage.py | 6aad113e872072d9082fe85d52ae8b3d9d81c08a | [] | no_license | Aravinda-Harithsa/Smart-Intermediate-Node-for-Rural-Banking- | f06a3446141693efee7a0f084ad4b33c07053ec5 | dfca05e6c6cdf73fa64ea4c35a8b74951eeffd16 | refs/heads/master | 2022-12-25T01:10:16.341725 | 2020-10-02T11:32:54 | 2020-10-02T11:32:54 | 300,594,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | from firebase import firebase
import RPi.GPIO as GPIO
import serial
import time,sys
from gtrans import translate_text, translate_html
def getInfo(mail):
    """Fetch a user's phone number and language preference from Firebase.

    The lookup key is the second dot-separated token of *mail* (matching how
    records are keyed under /Userdatabase — TODO confirm against the writer).

    Returns a (number, language) tuple.
    """
    fb = firebase.FirebaseApplication("https://athena-f1dc4.firebaseio.com/")
    userdb = fb.get('/Userdatabase', None)
    mail = mail.split(".")[1]
    number = userdb[mail]["phone"]
    # BUG FIX: the original read userdb[lang]["lang"], but `lang` is never
    # defined in this module (guaranteed NameError).  The language preference
    # lives under the same per-user key as the phone number.
    language = userdb[mail]["lang"]
    return number, language
def sendmessage(phno,string):
    """Send *string* as an SMS to *phno* through a serial GSM modem.

    The text is converted to a hex code-point payload for the modem's HEX
    character-set mode and pushed over /dev/ttyAMA0 with AT commands.
    Blocking: sleeps between commands to give the modem time to respond.
    """
    #string=str(string,encoding="utf-8")
    # Each character becomes its code point as zero-padded 3-digit hex,
    # joined with a leading '0' so the modem sees 4-hex-digit units.
    convert = lambda x:"0".join([hex(ord(c))[2:].zfill(3) for c in x])
    msg="0"+convert(string)
    # NOTE(review): purpose of this byte-sequence substitution is unclear —
    # presumably a fix for a specific Unicode conjunct encoding; confirm.
    msg=msg.replace("0ccd0200c","0ccd")
    SERIAL_PORT="/dev/ttyAMA0"
    ser=serial.Serial(SERIAL_PORT, baudrate=9600,timeout=5)
    ser.write(str.encode("AT+CMGF=1\r"))  # CMGF=1: text mode
    time.sleep(3)
    ser.write(str.encode('AT+CSCS="HEX"\r'))  # hex character set
    time.sleep(10)
    ser.write(str.encode("AT+CSMP=17,167,0,8\r"))  # DCS 8 = UCS2 coding
    time.sleep(10)
    #b=str('AT+CMGS="9449191976"\r',encoding='utf-8')
    ser.write(str.encode('AT+CMGS="'+str(phno)+'"\r'))  # begin message to recipient
    time.sleep(3)
    # chr(26) is Ctrl-Z — terminates the payload and submits the SMS.
    ser.write((str(msg)+chr(26)).encode())
    time.sleep(3)
    print("sentt!")
def changeLang(message,language):
    """Translate *message* from English ('en') into *language* via gtrans."""
    translated = translate_text(message, 'en', language)
    return translated
#########################################
# Code to be written to interface GSM #
# module and send the message #
#########################################
| [
"50070233+Aravinda-Harithsa@users.noreply.github.com"
] | 50070233+Aravinda-Harithsa@users.noreply.github.com |
96b859cda0f5857246eb10753713581c1b1cd783 | fba7d8e900a8a9a949fff49fd416fb690aa7cac9 | /country_info/forms.py | 63c55588b592e257e972402664c8c36ec961a8cd | [] | no_license | Vladiverev/django_test2 | 57c4a81a81734fbfb698b54cf4dae393eba3171a | 9dbf9d6352f53029dc2f040173ecf9d00acc6528 | refs/heads/master | 2020-03-30T19:59:29.093932 | 2018-10-07T16:15:26 | 2018-10-07T16:15:26 | 151,568,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django import forms
from .models import Country, City
class CityForm(forms.ModelForm):
    """ModelForm for creating/editing City records (title and desc only)."""
    class Meta:
        model = City
        fields = ('title', 'desc',)
| [
"vladiverev@gmail.com"
] | vladiverev@gmail.com |
702bb9f9e7fd04e202ac4996bebb2a4e2a3b70af | 9a8164be8f097f4c6af1412d0556f575d91f7365 | /app.py | 44007885daa1abef68abad7b5ea3f171bf95e4a6 | [] | no_license | Rocky12312/Recommender_system | a8a9d408a59ffedc567eabe4356c304b309b2272 | de77ebc375c48a8408c5fa6575fc1b2872f1c6ad | refs/heads/master | 2023-01-24T00:13:18.348212 | 2020-11-06T10:40:51 | 2020-11-06T10:40:51 | 310,564,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | import io
import re
import os
import time
#import nltk
#import pickle
import string
import base64
import joblib
import zipfile
#import logging
import warnings
import numpy as np
import pandas as pd
from flask import jsonify
#from nltk.corpus import stopwords
#from healthcheck import HealthCheck
from flask_bootstrap import Bootstrap
from flask import Flask, make_response, request, render_template, url_for
warnings.filterwarnings("ignore")
app = Flask(__name__)
#logging.basicConfig(filename="flask.log", level=logging.DEBUG, format="%(asctime)s %(levelname)s %(name)s %(threadName)s:%(message)s")
#logging.info("All useful files loaded")
#health = HealthCheck(app, "/hcheck")
#def app_ab():
#return True, "i am good"
#health.add_check(app_ab)
# Precomputed recommender artifacts, loaded once at import time so request
# handlers don't pay the deserialization cost per call:
# - Indices.pkl: mapping from movie title to its row index
# - Cosine_similarity.pkl: pairwise similarity matrix over all movies
joblib_file1 = "Indices.pkl"
joblib_file2 = "Cosine_similarity.pkl"
indices = joblib.load(joblib_file1)
cosine_similarity = joblib.load(joblib_file2)
def zipFiles(file_List, name_list):
    """Bundle a sequence of DataFrames into an in-memory ZIP archive.

    Each frame is serialized with to_csv() and stored under the matching
    entry of *name_list*.  Returns the raw bytes of the finished archive.
    """
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w') as archive:
        for entry_name, frame in zip(name_list, file_List):
            archive.writestr(entry_name, frame.to_csv())
    return buffer.getvalue()
Bootstrap(app)
@app.route('/')
def index():
    """Render the landing page with an alphabetized movie-title dropdown."""
    movies_titles = pd.read_csv("data/movies.csv")
    movies_titles.sort_values('title',inplace=True)
    movies_list = movies_titles['title'].values.tolist()
    return render_template('index.html',movies = movies_list)
@app.route('/predict', methods=['POST'])
# Uses the precomputed similarity matrix: takes the chosen movie title from the
# form and renders a table of the most similar movies.
def predict():
    """Recommend movies similar to the title posted in the 'choice' field.

    Relies on the module-level `indices` (title -> row) and
    `cosine_similarity` (pairwise similarity matrix) artifacts.
    Falls back to the Neutral.csv list when fewer than 2 matches survive.
    """
    df = pd.read_csv("data/movies.csv")
    df_neutral = pd.read_csv("data/Neutral.csv")
    # Movie title selected by the user.
    argument = request.form['choice']
    # Row index of the movie that matches the title.
    idx = indices[argument]
    # Pairwise similarity scores of all movies against that movie.
    sim_scores = list(enumerate(cosine_similarity[idx]))
    # Sort by similarity score, best first.
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    # Drop the movie itself (position 0) and keep the next most similar ones.
    # NOTE(review): [1:20] yields 19 entries, not the 20 the original comment claimed.
    sim_scores = sim_scores[1:20]
    # Extract the movie row indices.
    movie_indices = [i[0] for i in sim_scores]
    movies = pd.DataFrame()
    # Retrieve the matching rows.
    movies = df.iloc[movie_indices]
    # Rank recommendations by average vote.
    movies = movies.sort_values(by=['vote_average'], ascending=False)
    movies = movies[["title"]]
    # Reset to a clean 0..n-1 index for templating.
    movies.reset_index(drop=True,inplace=True)
    if len(movies)<2:
        movies = pd.DataFrame()
        movies = df_neutral
    #return render_template('view.html',tables=[movies.to_html(classes='recommend')],titles = ['Recommendations'])
    return render_template("view.html", column_names=movies.columns.values, row_data=list(movies.values.tolist()), zip=zip)
if __name__ == '__main__':
app.run(debug=True)
| [
"sourabhchoudharysml810gmail.com"
] | sourabhchoudharysml810gmail.com |
873c063fe0ebb90c4077cc4cf2fa52446bffcf72 | 40e726189392289e5528c42a70003ffea3fccb93 | /45_pep.py | 4bac0d0ab8aa70a727444b8a466df0ea1b9bc4d6 | [] | no_license | Grzegorz72/50_pytan | 9d6f498cee64fa32e2bc17e0222ff5a1b2d466a0 | 1712d63cecf96c687570a7dca4c5176709b89019 | refs/heads/master | 2022-11-30T20:14:09.418954 | 2020-08-10T19:37:51 | 2020-08-10T19:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # Pytanie 46 - w jaki sposób zadbasz o to, aby twój kod był czytelny
# i łatwy do zrozumienia dla innych programistów?
# PEP8 ! # link do PEP8 w materiałach dodatkowych
import os # piszemy po jednym imporcie na linijkę
import math # w bloku najwyżej importujemy moduły bibliteki standardowej
import modul_zewnetrzny # w bloku niżej importujemy moduły zewnętrzne
import moj_fajny_modul # w bloku najniżej importujemy moduły wewnętrzne (prywatne, firmowe)
class KlasyDuzymiLiterami: # two blank lines should surround a class (above and below)
    pass
zmienne_malymi_literami = 'snake case'
def funkcje_rowniez_malymi(): # one blank line should surround a function (above and below)
    pass
A = [1, 2, 2] # zapisując sekwencje dajemy spację po przecinku
B = ['tak','jest','zle'] # czyli ta linijka zapisana jest niepoprawnie
| [
"marta.m.slusarczyk@gmail.com"
] | marta.m.slusarczyk@gmail.com |
9d5aafb14738f910a84f9fa615fc45a6cd8f3cc2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /es6qJTs5zYf8nEBkG_10.py | 6084216bb61e092527df14f88ae710259e20fc5d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
import re
def is_rectangle(lst):
    """Return True when the four coordinate strings contain exactly four
    distinct integer values between them, False otherwise.

    Note: this (deliberately simple) rule rejects axis-aligned squares whose
    x and y values coincide, since they produce fewer than four distinct ints.
    """
    if len(lst) != 4:
        return False
    values = []
    for point in lst:
        values.extend(int(token) for token in re.findall(r'-?\d+', point))
    distinct = []
    for value in values:
        if value not in distinct:
            distinct.append(value)
    return len(distinct) == 4
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
280fb641865f250ad939f99abca5599f5e6ab59b | f870ec4b9065805f38c50568439f4eb385f18e01 | /basic-app/main.py | 303cd7630c42f78cff3a22699a5852f68621890a | [] | no_license | learn-co-curriculum/cssi-7-app-engine-review | 5398c5881bbf125de1d77e2b5b7c8b60a07e242d | be7a14090caebef73fceb9234ccf9e20064f0be9 | refs/heads/master | 2020-12-24T17:45:15.337331 | 2016-01-29T16:05:55 | 2016-01-29T16:05:55 | 39,812,644 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | import webapp2
class MainHandler(webapp2.RequestHandler):
    """Handler for the site root: writes a minimal hand-built HTML page."""
    def get(self):
        """Respond to GET with a hard-coded 'Hello, World!' page."""
        self.response.write('<!DOCTYPE html>')
        self.response.write('<title>My First Web Page</title>')
        self.response.write('<p>My First Web Page</p>')
        self.response.write('<p>Hello, World!</p>')
# WSGI application: the route table maps the site root to MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
], debug=True)
| [
"vrk@chromium.org"
] | vrk@chromium.org |
6409a5f5135bc863b8fc843babc1f6b74c5626e8 | 7b534aa525270553b0f4bf3c5d4f71bed4d4bc87 | /main.py | 3e39896ef0758c0ad8fe2d1b0d45da34dfc0a066 | [] | no_license | rahulgitx/grand-project | 1582e6a5b6622630a75f08063bd5d07b9074a239 | 5c4cb6e54ef016ab3fdc3c1e6cd8f9a38628a4a3 | refs/heads/main | 2023-05-07T21:09:25.395178 | 2021-05-31T05:43:57 | 2021-05-31T05:43:57 | 371,939,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | def main():
x='start'
while x=='start':
import os
from Bigdatafolder import Bigdata
#clearing screen
if os.name=='nt':
os.system("cls")
else:
os.system('clear')
print("\n\n\n\n\n")
print("Welcome To The Future".center(180))
print()
print("Which of the following services do you want to work one:\n\n\n".center(180))
print("MENU".center(180))
print("\t\t\t\t\t\t\t\t\t\t1. Bigdata (Hadoop)")
print("\t\t\t\t\t\t\t\t\t\t2. Ansible")
print("\t\t\t\t\t\t\t\t\t\t3. Cloud ")
print("\t\t\t\t\t\t\t\t\t\t4. Docker")
print("\t\t\t\t\t\t\t\t\t\t5. Kubernetes ")
print("\t\t\t\t\t\t\t\t\t\t6. Jenkins")
print()
print()
x=input("\t\t\t\t\t\t\t\t\tChoose any of the above number : ")
while x=='1':
x=Bigdata.frontpage()
print('done')
if x=='2':
print('This section is under maintenance please come back another time : '.center(180), x)
x='start'
input()
if x !='1' and x !='2' and x != 'start':
print('\t\t\t\t\t\t\t\tPlease give an appropriate input(press enter to try again)')
x='start'
input()
main()
| [
"rahul18bhardwaj.23@gmail.com"
] | rahul18bhardwaj.23@gmail.com |
85c24b945001b9beefecb64f4372162b82d060a3 | 3e4c52381e225bb65f10a920106cc52dc0e87394 | /metaspace/engine/sm/engine/annotation_lithops/utils.py | b4d04c7f633ecbee50c236ffd4527ac822b34ac9 | [
"Apache-2.0"
] | permissive | keshava/metaspace | 207aa8da96cc736519cb2727d3676e7ba0231e57 | d8926b86d745f5387b1825a1bd317002b0cd306b | refs/heads/master | 2023-04-26T01:08:41.816261 | 2021-05-18T12:31:02 | 2021-05-18T12:31:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | from __future__ import annotations
import json
import logging
from base64 import urlsafe_b64encode
from hashlib import blake2b
import numpy as np
logger = logging.getLogger('annotation-pipeline')
def ds_dims(coordinates):
    """Return (nrows, ncols) of the pixel grid spanned by the x/y coordinates.

    Only the first two columns (x, y) are considered; extra columns, if any,
    are ignored.
    """
    coords_min = np.amin(coordinates, axis=0)[:2]
    coords_max = np.amax(coordinates, axis=0)[:2]
    nrows = coords_max[1] - coords_min[1] + 1
    ncols = coords_max[0] - coords_min[0] + 1
    return nrows, ncols
def get_pixel_indices(coordinates):
    """Convert spectrum x/y coordinates into flat output-image pixel indices.

    Original spectrum coordinates may be out of order or sparse; they are
    shifted to start at (0, 0) and flattened row-major, so that
    `y, x = divmod(sp_i, width)` recovers the pixel position.
    Returns a uint32 array.
    """
    xy = np.array(coordinates, dtype=np.int64)[:, :2]
    xy = xy - np.amin(xy, axis=0)
    width = np.max(xy[:, 0]) + 1
    flat_indices = xy[:, 1] * width + xy[:, 0]
    return flat_indices.astype(np.uint32)
def jsonhash(obj) -> str:
    """Return a short, deterministic, URL-safe hash for a JSON-stringifiable object.

    Dict key order does not affect the result (keys are sorted before
    hashing).  Meant for compacting large parameter sets into cache keys:
    collision-resistant, but NOT cryptographically secure — there is no
    hidden key, and the input space is typically small enough to reverse.
    """
    canonical = json.dumps(obj, sort_keys=True).encode()
    digest = blake2b(canonical, digest_size=12).digest()
    return urlsafe_b64encode(digest).decode('utf-8')
| [
"noreply@github.com"
] | noreply@github.com |
62218ce3a76c02cd63910daf051bdfbd6986d54d | 3a7ada7fd8b6d940053605a138d4dfeeabe05cc4 | /wap/urls.py | 29f4251bc29f0d5b5d66e020888d1176f1bfb812 | [] | no_license | sandeep9illinois/weather | b0446e08d693792930b19cbbafa7c389b46e00c6 | 167908e99c7c67ade99fe779830fe9167364461c | refs/heads/master | 2023-06-17T16:17:56.142248 | 2021-07-26T05:08:22 | 2021-07-26T05:08:22 | 389,489,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from django.urls import path, include
from .views import index
# URL table: the app root ('') is served by the index view.
urlpatterns = [
    path('', index),
]
| [
"sayadav@zynga.com"
] | sayadav@zynga.com |
daf1d6bccbbbb2fb67704a2af9670dc1e17ed8ea | 11f119a32e5c8d7934dbec612cf2fb7ecafee733 | /Python/ejercicios/leer_archivo.py | d5d2411719d92d3c60d9799321de62f416125d8f | [] | no_license | blasmorelbarboza/Cuaderno | 625ad88731fcc6c9b46a6eabe42ff1479fb1e0dd | db8c5b46e9263a09720fb8047829b57ed9a19951 | refs/heads/master | 2020-04-02T23:59:28.611920 | 2018-10-25T03:37:21 | 2018-10-25T03:37:21 | 154,885,418 | 1 | 0 | null | 2018-10-26T19:37:26 | 2018-10-26T19:37:25 | null | UTF-8 | Python | false | false | 325 | py | # -*- unicode: utf-8 -*-
def run():
    """Count how many times 'Beatriz' appears in aleph.txt and print the total."""
    occurrences = 0
    with open('aleph.txt') as source:
        # Streaming line by line avoids loading the whole file into memory.
        for line in source:
            occurrences += line.count('Beatriz')
    print('Beatriz se encuentra {} en el texto'.format(occurrences))


if __name__ == '__main__':
    run()
| [
"rodolfougaldeochoa@gmail.com"
] | rodolfougaldeochoa@gmail.com |
4e276f619075e92f420d8ff66b66225873af6a9a | 86ac68017449707798964ad939c31ccc2dcc4056 | /tune.py | b6a4fb267ea0f9fec50059bc01c0673a829d9eae | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | soorix/openpilot_075_opkr | 5db0be286cf6606ce2e0c8b57ca6e6f1f19f017f | e711fd259d713e170d36d262fda708d926d65852 | refs/heads/master | 2022-11-23T13:15:11.801141 | 2020-07-28T11:13:23 | 2020-07-28T11:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,021 | py | from selfdrive.kegman_conf import kegman_conf
import subprocess
import os
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
letters = { "a":[ "###", "# #", "###", "# #", "# #"], "b":[ "###", "# #", "###", "# #", "###"], "c":[ "###", "#", "#", "#", "###"], "d":[ "##", "# #", "# #", "# #", "##"], "e":[ "###", "#", "###", "#", "###"], "f":[ "###", "#", "###", "#", "#"], "g":[ "###", "# #", "###", " #", "###"], "h":[ "# #", "# #", "###", "# #", "# #"], "i":[ "###", " #", " #", " #", "###"], "j":[ "###", " #", " #", " #", "##"], "k":[ "# #", "##", "#", "##", "# #"], "l":[ "#", "#", "#", "#", "###"], "m":[ "# #", "###", "###", "# #", "# #"], "n":[ "###", "# #", "# #", "# #", "# #"], "o":[ "###", "# #", "# #", "# #", "###"], "p":[ "###", "# #", "###", "#", "#"], "q":[ "###", "# #", "###", " #", " #"], "r":[ "###", "# #", "##", "# #", "# #"], "s":[ "###", "#", "###", " #", "###"], "t":[ "###", " #", " #", " #", " #"], "u":[ "# #", "# #", "# #", "# #", "###"], "v":[ "# #", "# #", "# #", "# #", " #"], "w":[ "# #", "# #", "# #", "###", "###"], "x":[ "# #", " #", " #", " #", "# #"], "y":[ "# #", "# #", "###", " #", "###"], "z":[ "###", " #", " #", "#", "###"], " ":[ " "], "1":[ " #", "##", " #", " #", "###"], "2":[ "###", " #", "###", "#", "###"], "3":[ "###", " #", "###", " #", "###"], "4":[ "#", "#", "# #", "###", " #"], "5":[ "###", "#", "###", " #", "###"], "6":[ "###", "#", "###", "# #", "###"], "7":[ "###", " # ", " #", " #", "#"], "8":[ "###", "# #", "###", "# #", "###"], "9":[ "###", "# #", "###", " #", "###"], "0":[ "###", "# #", "# #", "# #", "###"], "!":[ " # ", " # ", " # ", " ", " # "], "?":[ "###", " #", " ##", " ", " # "], ".":[ " ", " ", " ", " ", " # "], "]":[ " ", " ", " ", " #", " # "], "/":[ " #", " #", " # ", "# ", "# "], ":":[ " ", " # ", " ", " # ", " "], "@":[ "###", "# #", "## ", "# ", "###"], "'":[ " # ", " # ", " ", " ", " "], "#":[ " # ", "###", " # ", "###", " # "], "-":[ " ", " ","###"," "," "] }
# letters stolen from here: http://www.stuffaboutcode.com/2013/08/raspberry-pi-minecraft-twitter.html
def print_letters(text):
  """Render *text* as a 5-row ASCII banner using the `letters` glyph table.

  Characters missing from the table fall back to the space glyph, glyph rows
  shorter than 5 cells are padded, and '#' pixels are shown as '@'.
  Returns the five rows joined with newlines.
  """
  glyphs = [letters.get(ch.lower(), letters[' ']) for ch in text]
  rows = [''] * 5
  for row_idx in range(5):
    for glyph in glyphs:
      cell = ' '
      try:
        cell = glyph[row_idx]
      except:
        pass
      cell += ' ' * (5 - len(cell))
      cell = cell.replace(' ', ' ')
      cell = cell.replace('#', '@')
      rows[row_idx] += cell
  return '\n'.join(rows)
import sys, termios, tty, os, time
def getch():
  """Read one raw keypress from stdin without waiting for Enter.

  Switches the terminal into raw mode for the single-character read and
  always restores the saved settings afterwards (TCSADRAIN lets pending
  output drain first).
  """
  fd = sys.stdin.fileno()
  old_settings = termios.tcgetattr(fd)
  try:
    tty.setraw(sys.stdin.fileno())
    ch = sys.stdin.read(1)
  finally:
    # Restore cooked mode even if the read raised.
    termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
  return ch
button_delay = 0.2
kegman = kegman_conf()
kegman.conf['tuneGernby'] = "1"
#kegman.write_config(kegman.conf)
param = ["cameraOffset", "Kp", "Ki", "Kf", "steerRatio", "sR_boost", "sR_BP0", \
"sR_BP1", "sR_time", "steerRateCost"]
#param = ["Kp", "Ki", "Kf", "steerRatio", "sR_boost", "sR_BP0", \
# "sR_BP1", "sR_time", "steerRateCost", "deadzone", "slowOnCurves", \
# "1barBP0", "1barBP1", "1barMax", "2barBP0", "2barBP1", \
# "2barMax", "3barBP0", "3barBP1", "3barMax", \
# "1barHwy", "2barHwy", "3barHwy"]
j = 0
while True:
print ("")
print (print_letters(param[j][0:9]))
print ("")
print (print_letters(kegman.conf[param[j]]))
print ("")
print ("w: +1 1: +0.1 3: +0.05 5: +0.01 7: +0.001 r: +0.00001")
print ("x: -1 a: -0.1 d: -0.05 g: -0.01 j: -0.001 v: -0.00001")
# print ("w,1,3,5,7,r to incr 1,0.1,0.05,0.01,0.001,0.00001")
# print ("x,a,d,g,j,v to decr 1,0.1,0.05,0.01,0.001,0.00001")
print ("0: 0 L: 1")
# print ("0 / L to make the value 0 / 1")
print ("SPACE: next m: prev")
print ("z: quit")
char = getch()
write_json = False
if (char == "v"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 0.00001),5))
write_json = True
if (char == "r"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 0.00001),5))
write_json = True
if (char == "7"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 0.001),5))
write_json = True
if (char == "5"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 0.01),5))
write_json = True
elif (char == "3"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 0.05),5))
write_json = True
elif (char == "1"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 0.1),5))
write_json = True
elif (char == "w"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) + 1),5))
write_json = True
elif (char == "j"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 0.001),5))
write_json = True
elif (char == "g"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 0.01),5))
write_json = True
elif (char == "d"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 0.05),5))
write_json = True
elif (char == "a"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 0.1),5))
write_json = True
elif (char == "x"):
kegman.conf[param[j]] = str(round((float(kegman.conf[param[j]]) - 1),5))
write_json = True
elif (char == "0"):
kegman.conf[param[j]] = "0"
write_json = True
elif (char == "l"):
kegman.conf[param[j]] = "1"
write_json = True
elif (char == " "):
if j < len(param) - 1:
j = j + 1
else:
j = 0
elif (char == "m"):
if j > 0:
j = j - 1
else:
j = len(param) - 1
elif (char == "z"):
process.kill()
break
if float(kegman.conf['tuneGernby']) != 1 and float(kegman.conf['tuneGernby']) != 0:
kegman.conf['tuneGernby'] = "1"
if float(kegman.conf['Ki']) < 0 and float(kegman.conf['Ki']) != -1:
kegman.conf['Ki'] = "0"
if float(kegman.conf['Ki']) > 2:
kegman.conf['Ki'] = "2"
if float(kegman.conf['Kp']) < 0 and float(kegman.conf['Kp']) != -1:
kegman.conf['Kp'] = "0"
if float(kegman.conf['Kp']) > 3:
kegman.conf['Kp'] = "3"
if kegman.conf['liveParams'] != "1" and kegman.conf['liveParams'] != "0":
kegman.conf['liveParams'] = "1"
if float(kegman.conf['steerRatio']) < 1 and float(kegman.conf['steerRatio']) != -1:
kegman.conf['steerRatio'] = "1"
if float(kegman.conf['steerRateCost']) < 0.01 and float(kegman.conf['steerRateCost']) != -1:
kegman.conf['steerRateCost'] = "0.01"
# if float(kegman.conf['deadzone']) < 0:
# kegman.conf['deadzone'] = "0"
# if float(kegman.conf['1barBP0']) < -0.5:
# kegman.conf['1barBP0'] = "-0.5"
# if float(kegman.conf['1barBP0']) > 0.5:
# kegman.conf['1barBP0'] = "0.5"
# if float(kegman.conf['1barBP1']) < 0.5:
# kegman.conf['1barBP1'] = "0.5"
# if float(kegman.conf['1barBP1']) > 8:
# kegman.conf['1barBP1'] = "8"
# if float(kegman.conf['1barMax']) < 0.9:
# kegman.conf['1barMax'] = "0.9"
# if float(kegman.conf['1barMax']) > 2.5:
# kegman.conf['1barMax'] = "2.5"
# if float(kegman.conf['2barBP0']) < -0.5:
# kegman.conf['2barBP0'] = "-0.5"
# if float(kegman.conf['2barBP0']) > 0.5:
# kegman.conf['2barBP0'] = "0.5"
# if float(kegman.conf['2barBP1']) < 0.5:
# kegman.conf['2barBP1'] = "0.5"
# if float(kegman.conf['2barBP1']) > 8:
# kegman.conf['2barBP1'] = "8"
# if float(kegman.conf['2barMax']) < 1.3:
# kegman.conf['2barMax'] = "1.3"
# if float(kegman.conf['2barMax']) > 2.5:
# kegman.conf['2barMax'] = "2.5"
# if float(kegman.conf['3barBP0']) < -0.5:
# kegman.conf['3barBP0'] = "-0.5"
# if float(kegman.conf['3barBP0']) > 0.5:
# kegman.conf['3barBP0'] = "0.5"
# if float(kegman.conf['3barBP1']) < 0.5:
# kegman.conf['3barBP1'] = "0.5"
# if float(kegman.conf['3barBP1']) > 8:
# kegman.conf['3barBP1'] = "8"
# if float(kegman.conf['3barMax']) < 1.8:
# kegman.conf['3barMax'] = "1.8"
# if float(kegman.conf['3barMax']) > 2.5:
# kegman.conf['3barMax'] = "2.5"
# if float(kegman.conf['1barHwy']) < 0:
# kegman.conf['1barHwy'] = "0"
# if float(kegman.conf['2barHwy']) < 0:
# kegman.conf['2barHwy'] = "0"
# if float(kegman.conf['3barHwy']) < 0:
# kegman.conf['3barHwy'] = "0"
# if float(kegman.conf['1barHwy']) > 2:
# kegman.conf['1barHwy'] = "2"
# if float(kegman.conf['2barHwy']) > 2:
# kegman.conf['2barHwy'] = "2"
# if float(kegman.conf['3barHwy']) > 2:
# kegman.conf['3barHwy'] = "2"
if float(kegman.conf['Kf']) > 0.01:
kegman.conf['Kf'] = "0.01"
if float(kegman.conf['Kf']) < 0:
kegman.conf['Kf'] = "0"
if float(kegman.conf['sR_boost']) < 0:
kegman.conf['sR_boost'] = "0"
if float(kegman.conf['sR_BP0']) < 0:
kegman.conf['sR_BP0'] = "0"
if float(kegman.conf['sR_BP1']) < 0:
kegman.conf['sR_BP1'] = "0"
if float(kegman.conf['sR_time']) < 0.1:
kegman.conf['sR_time'] = "0.1"
#if float(kegman.conf['Kf']) < 0.00001:
kegman.conf['Kf'] = str("{:.5f}".format(float(kegman.conf['Kf'])))
# if float(kegman.conf['slowOnCurves']) > 0.00001:
# kegman.conf['slowOnCurves'] = "1"
# if float(kegman.conf['slowOnCurves']) <= 0.99999:
# kegman.conf['slowOnCurves'] = "0"
if write_json:
kegman.write_config(kegman.conf)
time.sleep(button_delay)
else:
process.kill()
| [
"device@comma.ai"
] | device@comma.ai |
b134c32851d82e25df959973cbeb9149ec080ea5 | 966fa5fb8ad51fa429210e92dac35fea6552298d | /cs410videosearchengine/cs410videosearchengine/celery.py | 71c63960d23bac5a39388afb8ab16592979b60b2 | [] | no_license | FASLADODO/cs410project | 90ca7e2f5edfa63805ed73fbff079f74b6e3454a | 76e54cc19bcec7fe3fe74d3e899a58d2f8bb7735 | refs/heads/master | 2022-03-20T18:22:27.247795 | 2018-12-17T02:02:10 | 2018-12-17T02:02:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | import os
from celery import Celery
from django.conf import settings
# Django must know its settings module before Celery reads any configuration.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cs410videosearchengine.settings")
app = Celery("cs410videosearchengine")
# Pull broker/result configuration from Django's settings object.
app.config_from_object("django.conf:settings")
# Discover task modules across the project's apps.
app.autodiscover_tasks()
| [
"cconnon11@gmail.com"
] | cconnon11@gmail.com |
10c90a1f2fcb3965f0547a6245ec075dbfa4e315 | 8d0c81d0905377244f4d864958bc789e881b85fd | /routes/yard_cluster_min_distance.py | 6068f76dc0a40c1d43487b9ae6c694fd4277cd37 | [] | no_license | usmanr149/jsprit_create_xml | 90f62b3ed8db988641357baea8a42d03d51186d8 | f2e003a8a5f2b076f325d0df1693e95b1dea673c | refs/heads/master | 2021-01-20T04:43:37.436517 | 2017-05-25T18:29:21 | 2017-05-25T18:29:21 | 89,724,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | import numpy as np
import pandas as pd
import requests
import pprint
import sys
# Greedy assignment of turf-maintenance services to their nearest yard,
# subject to each yard's capacity in vehicle work-hours; jobs a yard cannot
# absorb spill to the next-nearest yard, and anything no yard can take ends
# up in `unassigned`. Writes one CSV per yard plus a combined CSV.

depots = pd.read_csv("data/southside_depots.csv")
depots.columns = ['depot_name','team_lead','latitude','longitude','start_time','id']
# .values instead of DataFrame.as_matrix(): as_matrix() was deprecated in
# pandas 0.23 and removed in 1.0; .values has identical semantics here.
depots_mat = depots.values
# vehicles_72GM = pd.read_csv("data/72GM_vehicles.csv")
vehicles_72GM = pd.read_csv("data/580D_vehicles.csv")
serv_dur = pd.read_csv("service_duration.csv")
yard_distances = pd.read_csv("distance_yard_services.csv")
services = pd.read_csv("data/turf_services.csv")

# pk_site_id -> (latitude, longitude, pk_site_id), used when writing output.
service_points = list(zip(services['latitude'],services['longitude'],services['pk_site_id']))
service_dict = {}
for x in service_points:
    service_dict[x[2]] = x

# pk_site_id -> service duration (assumed to be in hours -- TODO confirm).
service_dur = dict(serv_dur.values)

# Count vehicles per yard; the yard code is the prefix of the vehicle id
# (e.g. "GBY-12" belongs to yard GBY). `vid` avoids shadowing builtin id().
num_vehicles = {'GBY':0,'RBV':0,'OKY':0,'ASY':0,'DON':0,'MWD':0}
labels = dict(enumerate(['GBY','RBV','OKY','ASY','DON','MWD']))
vehicle_yard = [vid.split('-')[0] for vid in list(vehicles_72GM['vehicle-id'])]
for x in vehicle_yard:
    num_vehicles[x]+=1
# Capacity per yard: 7.85 work-hours per vehicle.
load_yard = {k:v*7.85 for k,v in num_vehicles.items()}
print(load_yard)

serv_cluster = {0:set(),1:set(),2:set(),3:set(),4:set(),5:set()}  # yard -> accepted sites
serv_yard = {0:[],1:[],2:[],3:[],4:[],5:[]}                       # yard -> pending candidates
unassigned = []

# Each distance row is (site id, dist to yard 0, dist to yard 1, ...).
# Queue every site at its nearest yard and remember the remaining yards,
# sorted by distance, as fallbacks.
yard_dist = yard_distances.values
for s in yard_dist:
    sort_dist = sorted(enumerate(s[1:]), key=lambda tup: tup[1])
    yard = sort_dist[0]
    serv_yard[yard[0]].append((s[0],yard[1],sort_dist[1:]))

yard_order = [2,3,0,1,5,4]
finished = False
while not finished:
    for i in range(len(yard_order)):
        yard = yard_order[i]
        print("---- {} ----".format(yard))
        if len(serv_yard[yard])>0:
            # Process the closest candidates first.
            sorted_serv = sorted(serv_yard[yard], key=lambda tup: tup[1])
        else:
            continue
        for d in sorted_serv:
            if load_yard[labels[yard]]-service_dur[d[0]]>=0:
                # Yard still has capacity: accept the job.
                load_yard[labels[yard]] -= service_dur[d[0]]
                serv_cluster[yard].add(d[0])
            else:
                # Out of capacity: forward the job to the next-nearest yard,
                # or give up when no fallback yards remain.
                if len(d[2])>0:
                    next_yard = d[2][0]
                    serv_yard[next_yard[0]].append((d[0],d[2][0][1],d[2][1:]))
                else:
                    unassigned.append(d[0])
        serv_yard[yard] = []
    # Keep looping until every yard's pending queue has drained.
    finished = sum([len(x) for x in serv_yard.values()])==0

print(unassigned)
print(load_yard)

# One CSV per yard, named after the depot in the same row order as `depots`.
for y in range(6):
    fname="services_yard_{}.csv".format(depots_mat[y][0])
    with open(fname,"w") as fout:
        fout.write("pk_site_id,latitude,longitude,yard,distance\n")
        for s in serv_cluster[y]:
            info = service_dict[int(s)]
            # NOTE(review): the header declares 5 columns but only 4 values
            # are written (the yard column is missing) -- confirm intent.
            fout.write("{},{},{},{}\n".format(int(s),info[0],info[1],0))

# Combined CSV including the yard index for every accepted site.
fname="services_yard_all.csv"
with open(fname, "w") as fout:
    fout.write("pk_site_id,latitude,longitude,yard,distance\n")
    for y in range(6):
        for s in serv_cluster[y]:
            info = service_dict[int(s)]
            fout.write("{},{},{},{},{}\n".format(int(s),info[0],info[1],y,0))
print(unassigned)
"antbus@R90KY0F0.COE.ADS"
] | antbus@R90KY0F0.COE.ADS |
a452ea7ad0e3382704d855952aaedceaacd20385 | 13db66632f63fefbcf0582d49bba2a8148f9eadb | /src/2_baseline_fixedpoint/emnistl/train.py | 7300ddb8eb4418d0c856c06ed10152d291101b03 | [
"MIT"
] | permissive | malamleh93/lnsdnn | d8c1a767bb66491ebe4b0c3cb4d2db21fb57e998 | 702c90b4cc001d900365001ecbb54688ec7c9430 | refs/heads/master | 2022-04-02T08:32:35.328414 | 2020-01-29T03:53:32 | 2020-01-29T03:53:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,765 | py | ###################################################
#
# Author - Arnab Sanyal
# USC. Spring 2019
###################################################
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import argparse
class fixed_point:
    """Quantizer for signed fixed-point numbers with ``qi`` integer bits
    (sign included) and ``qf`` fractional bits.

    Values outside the representable range saturate; +/-inf inputs are
    passed through unchanged.
    """

    def __init__(self, qi, qf):
        # Scale factors for moving the fractional part into and out of
        # integer space, plus the saturation bounds of the integer part.
        self.lshifter = (1 << qf)
        self.rshifter = (2 ** -qf)
        self.intlim = (1 << (qi - 1))
        self.maxfrac = 1 - self.rshifter

    def quantize_array(self, _matrix_m1):
        """Round every element of *_matrix_m1* onto the fixed-point grid,
        saturating at the format limits; infinities survive untouched."""
        int_part = _matrix_m1.astype('int64')
        frac_part = np.round((_matrix_m1 - int_part) * self.lshifter) * self.rshifter
        over = (int_part >= self.intlim)
        under = (int_part < -self.intlim)
        int_part[over] = (self.intlim - 1)
        int_part[under] = -self.intlim
        frac_part[over] = self.maxfrac
        frac_part[under] = -self.maxfrac
        quantized = int_part + frac_part
        quantized[_matrix_m1 == -np.inf] = -np.inf
        quantized[_matrix_m1 == np.inf] = np.inf
        return quantized

    def quantize_array_p(self, _matrix_m1):
        """Range-check *_matrix_m1* against the integer limits without
        rounding; prints the offending values and exits on overflow,
        otherwise returns the input unchanged."""
        floored = np.floor(_matrix_m1)
        over = (floored >= self.intlim)
        under = (floored < -self.intlim)
        if(np.sum(over)):
            print(_matrix_m1[over])
            exit(0)
        if(np.sum(under)):
            print(_matrix_m1[under])
            exit(0)
        return _matrix_m1
def softmax(inp):
    """Row-wise numerically stable softmax of a 2-D array.

    The per-row maximum is subtracted before exponentiating so large logits
    cannot overflow; each row of the result sums to 1.
    """
    shifted = inp - np.max(inp, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
def main(main_params):
    """Train and/or evaluate a 784-100-26 leaky-ReLU MLP on EMNIST-letters
    with activations and weights quantized to fixed point Q(qi).(qf).

    All *main_params* values arrive as argparse defaults/strings and are
    coerced here. Training mode: load the dataset, train with plain SGD plus
    L2 decay, report train/val accuracy each epoch, evaluate on the test
    set, save weights and learning curves to an .npz, and plot accuracy.
    Otherwise: reload a previously saved model and evaluate on the test set.
    """
    # NOTE(review): bool() of any non-empty string is True, so passing
    # --is_training False on the command line still enables training.
    is_training = bool(main_params['is_training'])
    qi = int(main_params['bi'])    # integer bits (sign included)
    qf = int(main_params['bf'])    # fractional bits
    leaking_coeff = float(main_params['leaking_coeff'])
    batchsize = int(main_params['minibatch_size'])
    lr = float(main_params['learning_rate'])
    _lambda = float(main_params['lambda'])   # L2 weight-decay coefficient
    num_epoch = int(main_params['num_epoch'])
    ones = np.ones((batchsize, 1))  # bias column prepended to every batch
    fp = fixed_point(qi, qf)
    _step = 10  # only referenced by the commented-out lr-decay schedule below
    print('lambda: %f' %_lambda)
    print('bi: %d\tbf: %d' % (qi, qf))
    if is_training:
        # load mnist data and split into train and test sets
        # one-hot encoded target column
        file = np.load('./../../datasets/emnist_letters.npz', 'r') # dataset
        x_train = fp.quantize_array(file['train_data'])
        y_train = file['train_labels']
        x_test = fp.quantize_array(file['test_data'])
        y_test = file['test_labels']
        x_train, y_train = shuffle(x_train, y_train)
        x_test, y_test = shuffle(x_test, y_test)
        file.close()
        # Everything beyond `split` becomes the validation set.
        split = int(main_params['split'])
        x_val = x_train[split:]
        y_val = y_train[split:]
        y_train = y_train[:split]
        x_train = x_train[:split]
        # print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
        # Weights include the bias row (785 = 784 pixels + bias,
        # 101 = 100 hidden units + bias); init is quantized like the data.
        W1 = fp.quantize_array(np.random.normal(0, 0.1, (785, 100)))
        W2 = fp.quantize_array(np.random.normal(0, 0.1, (101, 26)))
        delta_W1 = np.zeros(W1.shape)
        delta_W2 = np.zeros(W2.shape)
        performance = {}
        performance['loss_train'] = np.zeros(num_epoch)
        performance['acc_train'] = np.zeros(num_epoch)
        performance['acc_val'] = np.zeros(num_epoch)
        accuracy = 0.0
        for epoch in range(num_epoch):
            print('At Epoch %d:' % (1 + epoch))
            # if (epoch % _step == 0) and (epoch != 0):
            #     lr = lr * 0.1
            loss = 0.0
            for mbatch in range(int(split / batchsize)):
                start = mbatch * batchsize
                x = x_train[start:(start + batchsize)]
                y = y_train[start:(start + batchsize)]
                # Forward pass: quantize after every matmul / activation.
                s1 = fp.quantize_array(np.hstack((ones, x)) @ W1)
                ###################################################
                # Leaky-ReLU slope per element: 1 where s1 > 0, else
                # leaking_coeff (exactly 0 where s1 == 0).
                mask = (s1 > 0) + (leaking_coeff * (s1 < 0))
                ###################################################
                a1 = fp.quantize_array(s1 * mask)
                s2 = fp.quantize_array(np.hstack((ones, a1)) @ W2)
                a2 = softmax(s2)
                # Categorical cross-entropy; log(0)*0 NaN terms are zeroed.
                cat_cross_ent = np.log(a2) * y
                cat_cross_ent[np.isnan(cat_cross_ent)] = 0
                loss -= np.sum(cat_cross_ent)
                # Backward pass; quantize_array_p only range-checks, it does
                # not round, so gradients stay full precision.
                grad_s2 = fp.quantize_array_p((a2 - y) / batchsize)
                ###################################################
                delta_W2 = fp.quantize_array_p(np.hstack((ones, a1)).T @ grad_s2)
                ###################################################
                grad_a1 = fp.quantize_array_p(grad_s2 @ W2[1:].T)
                grad_s1 = fp.quantize_array_p(mask * grad_a1)
                ###################################################
                delta_W1 = fp.quantize_array_p(np.hstack((ones, x)).T @ grad_s1)
                ###################################################
                # grad_x =
                # W2 -= fp.quantize_array(lr * (delta_W2 + (_lambda * W2)))
                # W1 -= fp.quantize_array(lr * (delta_W1 + (_lambda * W1)))
                # SGD step with L2 weight decay.
                W2 = fp.quantize_array_p(W2 - (lr * (delta_W2 + (_lambda * W2))))
                W1 = fp.quantize_array_p(W1 - (lr * (delta_W1 + (_lambda * W1))))
            loss /= split
            performance['loss_train'][epoch] = loss
            print('Loss at epoch %d: %f' %((1 + epoch), loss))
            # Training-set accuracy (argmax of logits; softmax unnecessary).
            correct_count = 0
            for mbatch in range(int(split / batchsize)):
                start = mbatch * batchsize
                x = x_train[start:(start + batchsize)]
                y = y_train[start:(start + batchsize)]
                s1 = fp.quantize_array(np.hstack((ones, x)) @ W1)
                ###################################################
                mask = (s1 > 0) + (leaking_coeff * (s1 < 0))
                ###################################################
                a1 = fp.quantize_array(s1 * mask)
                s2 = fp.quantize_array(np.hstack((ones, a1)) @ W2)
                correct_count += np.sum(np.argmax(y, axis=1) == np.argmax(s2, axis=1))
            accuracy = correct_count / split
            performance['acc_train'][epoch] = 100 * accuracy
            print("Train-set accuracy at epoch %d: %f" % ((1 + epoch), performance['acc_train'][epoch]))
            # Validation-set accuracy.
            correct_count = 0
            for mbatch in range(int(x_val.shape[0] / batchsize)):
                start = mbatch * batchsize
                x = x_val[start:(start + batchsize)]
                y = y_val[start:(start + batchsize)]
                s1 = fp.quantize_array(np.hstack((ones, x)) @ W1)
                ###################################################
                mask = (s1 > 0) + (leaking_coeff * (s1 < 0))
                ###################################################
                a1 = fp.quantize_array(s1 * mask)
                s2 = fp.quantize_array(np.hstack((ones, a1)) @ W2)
                correct_count += np.sum(np.argmax(y, axis=1) == np.argmax(s2, axis=1))
            accuracy = correct_count / x_val.shape[0]
            performance['acc_val'][epoch] = 100 * accuracy
            print("Val-set accuracy at epoch %d: %f\n" % ((1 + epoch), performance['acc_val'][epoch]))
        # Final test-set evaluation after training completes.
        correct_count = 0
        for mbatch in range(int(x_test.shape[0] / batchsize)):
            start = mbatch * batchsize
            x = x_test[start:(start + batchsize)]
            y = y_test[start:(start + batchsize)]
            s1 = fp.quantize_array(np.hstack((ones, x)) @ W1)
            ###################################################
            mask = (s1 > 0) + (leaking_coeff * (s1 < 0))
            ###################################################
            a1 = fp.quantize_array(s1 * mask)
            s2 = fp.quantize_array(np.hstack((ones, a1)) @ W2)
            correct_count += np.sum(np.argmax(y, axis=1) == np.argmax(s2, axis=1))
        accuracy = 100.0 * (correct_count / x_test.shape[0])
        print('Test-set performance: %f' % accuracy)
        # Persist weights and learning curves, tagged with the format bits.
        np.savez_compressed('./lin_model_EMNIST_letters_1_%d_%d.npz' %(qi - 1, qf), W1=W1, W2=W2, loss_train=performance['loss_train'], \
            acc_train=performance['acc_train'], acc_val=performance['acc_val'])
    else:
        # Inference-only path: reload a previously saved model and its
        # recorded learning curves, then score the test set.
        file = np.load('./lin_model_EMNIST_letters_1_%d_%d.npz' %(qi - 1, qf), 'r')
        W1 = file['W1']
        W2 = file['W2']
        performance = {}
        performance['loss_train'] = file['loss_train']
        performance['acc_train'] = file['acc_train']
        performance['acc_val'] = file['acc_val']
        file.close()
        file = np.load('./../../datasets/emnist_letters.npz', 'r') # dataset
        x_test = fp.quantize_array(file['test_data'])
        y_test = file['test_labels']
        x_test, y_test = shuffle(x_test, y_test)
        file.close()
        correct_count = 0
        for mbatch in range(int(x_test.shape[0] / batchsize)):
            start = mbatch * batchsize
            x = x_test[start:(start + batchsize)]
            y = y_test[start:(start + batchsize)]
            s1 = fp.quantize_array(np.hstack((ones, x)) @ W1)
            ###################################################
            mask = (s1 > 0) + (leaking_coeff * (s1 < 0))
            ###################################################
            a1 = fp.quantize_array(s1 * mask)
            s2 = fp.quantize_array(np.hstack((ones, a1)) @ W2)
            correct_count += np.sum(np.argmax(y, axis=1) == np.argmax(s2, axis=1))
        accuracy = 100.0 * (correct_count / x_test.shape[0])
        print('Test-set performance: %f' % accuracy)
    '''
    The model architecture that we trained is as follows
    _________________________________________________________________
       OPERATION            DATA DIMENSIONS   WEIGHTS(N)   WEIGHTS(%)
           Input   #####    784
      InputLayer     |   -------------------        0        0.0%
                   #####    784
           Dense   XXXXX -------------------    78500       96.8%
      Leaky relu   #####    100
           Dense   XXXXX -------------------     2626        3.2%
         softmax   #####    26
    =================================================================
    Total params: 81,126
    Trainable params: 81,126
    Non-trainable params: 0
    _________________________________________________________________
    '''
    # Plots for training accuracies
    if is_training:
        fig = plt.figure(figsize = (16, 9))
        ax = fig.add_subplot(111)
        x = range(1, 1 + performance['loss_train'].size)
        ax.plot(x, performance['acc_train'], 'r')
        ax.plot(x, performance['acc_val'], 'g')
        ax.set_xlabel('Number of Epochs')
        ax.set_ylabel('Accuracy')
        ax.set_title('Test-set Accuracy at %.2f%%' % accuracy)
        plt.suptitle('Validation and Training Accuracies', fontsize=14)
        ax.legend(['train', 'validation'])
        plt.grid(which='both', axis='both', linestyle='-.')
        plt.savefig('accuracy.png')
if __name__ == '__main__':
    # Hyperparameter defaults; most are exact powers of two so they are
    # representable without rounding in the fixed-point formats under test.
    parser = argparse.ArgumentParser()
    parser.add_argument('--is_training', default = False)
    parser.add_argument('--split', default = 104000)
    parser.add_argument('--learning_rate', default = 0.015625)
    parser.add_argument('--minibatch_size', default = 5)
    parser.add_argument('--num_epoch', default = 20)
    parser.add_argument('--lambda', default = 0.0009765625)
    parser.add_argument('--leaking_coeff', default = 0.0078125)
    parser.add_argument('--bi', default = 6)
    parser.add_argument('--bf', default = 18)
    args = parser.parse_args()
    main_params = vars(args)  # argparse Namespace -> plain dict
    main(main_params)
"arnabsan@usc.edu"
] | arnabsan@usc.edu |
24bc7ba76c15b98244ddf4f4063496f5e886948e | 84549233ae71593d3979e386221d4aec15fa24f3 | /admin_get.py | 5a0cebad9aff049654976372bb86019d735c04c8 | [] | no_license | mallamsripooja/StaffLeaveManagement | d0710de632ebddf48fc264e3b0b4f90c4608db2a | cc0c08d35103dfba36075382b0ec1b8deb77e086 | refs/heads/master | 2021-09-05T22:02:19.720532 | 2018-01-31T08:06:16 | 2018-01-31T08:06:16 | 119,653,203 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,252 | py | import cgi
import os
from http.cookies import *
import sqlite3
import datetime
import msvcrt
# CGI page for the admin "Get details / Edit details" screen: reads the
# session cookie, renders a staff-id lookup form, and on submit renders an
# editable form pre-filled from the staff_det table; a second submit writes
# the changes back. Any exception redirects to the generic error page.
try:
    global form
    global con
    global cur
    con=sqlite3.connect("F:\Tom\staffapp.db")
    cur=con.cursor()
    # Pull the logged-in username out of the HTTP cookie, if present.
    if 'HTTP_COOKIE' in os.environ:
        cookie_string=os.environ.get('HTTP_COOKIE')
        ck=SimpleCookie()
        ck.load(cookie_string)
        if 'username' in cookie_string:
            id=ck['username'].value
        else:
            id="Nil"
    else:
        id="None"
    form=cgi.FieldStorage()
    # Windows-only: put stdin/stdout into binary mode for CGI output.
    msvcrt.setmode(0, os.O_BINARY)
    msvcrt.setmode(1, os.O_BINARY)
    print("Content-Type:text/html\n\n")
    # Static page header: banner, navigation bar, and section titles.
    ht1='''
    <html>
    <head>
    <link rel="icon" href="../favicon.ico" type="image/x-icon">
    <link rel="stylesheet" type="text/css" href="../style.css" />
    <title>Admin</title>
    <table width="100%" bgcolor="black">
    <tr>
    <td width="10%"><img src="../logo.jpg" alt="img" height="130" width="148" ></td>
    <td width="90%"><img src="../clg4.jpg" alt="img" height="130" width="1170" ></td>
    </tr>
    </table>
    </head>
    <body>
    <table width="100%" class="top">
    <tr>
    <td align="right">
    <img src="../100.JPG" alt="No image" class="profile"></td>
    <td>Admin</td>
    <td align="center"><a href="admin_get.py">Get details</a> &nbsp;| &nbsp;
    <a href="admin_new.py">Add a record</a> &nbsp;| &nbsp;
    <a href="admin_del.py">Delete a record</a> &nbsp;| &nbsp;
    <a href="admin_sms.py">SMS leave</a> &nbsp;| &nbsp;
    <a href="record.py">Who's on leave?</a>&nbsp;
    </td>
    <td align="right">
    <a href="stafflogin.py">Logout</a>&nbsp;</td>
    </tr>
    </table>
    <br>
    <table align="center" height=55>
    <tr>
    <td align="center" style="font-family:Tahoma;font-size:24px;">Admin</td>
    </tr>
    <tr>
    <td align="center" style="font-family:Tahoma;font-size:18px;">Get details / Edit details</td>
    </tr></table>'''
    # Default body: the staff-id lookup form shown before any submission.
    ht2='''
    <form method=POST>
    <table align="center" class="lf1" width="15%">
    <tr>
    <td align="center"> <b>Id</b> </td>
    <td align="center"><input type="text" name="id" placeholder="Enter staff id" autofocus autocomplete="off"></td>
    </tr>
    <tr>
    <td colspan=2 align="center"><input type="submit" name="sub" value="Get details"><td>
    </tr>
    </table>
    </form>
    '''
    if "sub" in form:#submitting id to get details
        sid=form.getvalue('id')
        #print(sid)
        # NOTE(review): SQL is built by string concatenation from user input
        # throughout this handler -- vulnerable to SQL injection; should use
        # parameterized queries (cur.execute('... where id=?', (sid,))).
        cur.execute('select * from staff_det where id='+str(sid))
        data=cur.fetchall()[0]
        #print(data)
        # Static datalists offering dropdown suggestions for the edit form.
        htd='''<datalist id="dept-list">
        <option value="H&S">H&S</option>
        <option value="CSE">CSE</option>
        <option value="ECE">ECE</option>
        <option value="EIE">EIE</option>
        <option value="IT">IT</option>
        </datalist>
        <datalist id="desg-list">
        <option value="Professor">Professor</option>
        <option value="Asst.Prof">Assistant Professor</option>
        <option value="Asso.Prof">Associate Professor</option>
        <option value="Lab.In.">Lab Incharge</option>
        <option value="Lab.Asst">Lab Assistant</option>
        </datalist>
        <datalist id="role-list">
        <option value="staff">Staff</option>
        <option value="co-ord">Co-ordinator</option>
        <option value="admin">Admin</option>
        <option value="director">Director</option>
        </datalist>
        <datalist id="qual-list">
        <option value="Ph.D">Ph.D</option>
        <option value="M.Sc">M.Sc</option>
        <option value="M.Tech">M.Tech</option>
        <option value="B.Tech">B.Tech</option>
        </datalist>
        <datalist id="cord-list">'''
        cur.execute('select id,name,lastname from staff_det where role="co-ord"')#co-ord datalist
        spdata=cur.fetchall()
        #print(spdata)
        for ele in spdata:
            htd=htd+'''<option value="'''+str(ele[0])+'''">'''+ele[1]+'''</option>'''
        htd=htd+'''</datalist>
        <datalist id="yr-join">'''#datalist for yr of join from 2007 -- current year
        yr=2007
        cur_yr=datetime.date.today().year
        while yr!=(cur_yr+1):
            htd=htd+'''<option value="'''+str(yr)+'''">'''+str(yr)+'''</option>'''
            yr+=1
        htd=htd+'''</datalist>'''# readonly in input of id disables modification
        # Regex for a 10-digit Indian mobile number, spliced into the form
        # below via .format(**locals()) as {pat}.
        pat='[789][0-9]{9}'
        # Replace the lookup form with the edit form, pre-filled from `data`.
        ht2=htd+'''
        <form method=POST>
        <table align="center" class="lf1" border=0>
        <tr>
        <td> <b>Id</b>&nbsp;
        <input type="text" name="id" size=14 value='''+str(sid)+''' readonly></td>
        <th rowspan=4>
        <object data="../'''+str(sid)+'''.JPG" class="profile" height=100 width=100>
        <img src="../100.JPG" alt="No image" class="profile" height=100 width=100></object></th>
        </tr>
        <tr>
        <td> <b>Name </b></td></tr>
        <tr><td> <input type="text" name="fname" value="'''+data[1]+'''" autocomplete="off"></td>
        </tr>
        <tr>
        <td> <b>Address </b></td></tr>
        <tr><td colspan=2> <textarea name="addr" cols=47 rows=3>'''+data[4]+'''</textarea> </td>
        </tr>
        <tr>
        <td> <b>Email </b></td>
        <td><b>Mobile </b></td></tr>
        <tr><td> <input type="email" name="email" value='''+data[6]+''' autocomplete="off"></td>
        <td><input type="tel" name="mob" value='''+data[5]+''' autocomplete="off" pattern={pat}> </td>
        </tr>
        <tr>
        <td> <b>Department </b></td>
        <td><b>Designation </b></td></tr>
        <tr><td> <input type="text" name="dept" list="dept-list" value='''+data[2]+''' autocomplete="off"></td>
        <td><input type="text" name="desg" list="desg-list" value='''+data[3]+''' autocomplete="off"> </td>
        </tr>
        <tr>
        <td> <b>Qualification </b></td>
        <td><b>Role </b></td></tr>
        <tr><td> <input type="text" name="qual" list="qual-list" value='''+data[7]+''' autocomplete="off"></td>
        <td><input type="text" name="role" list="role-list" value='''+data[8]+''' autocomplete="off"> </td>
        </tr>
        <tr>
        <td> <b>Co-ordinator </b></td>
        <td><b>Year of join </b></td></tr>
        <tr><td> <input type="text" name="cord" list="cord-list" value='''+str(data[9])+''' autocomplete="off"></td>
        <td><input type="text" name="yr" list="yr-join" value='''+data[11]+''' autocomplete="off"> </td>
        </tr>
        <tr>
        <td colspan=2 align="center"><input type="submit" name="sub1" value="Save Changes"></td>
        </tr>
        </table>
        </form><br><br><br><br><br>
        '''
    if "sub1" in form:#to save changes in get details form
        # NOTE(review): the edit form above has no 'lname' input, so
        # form.getvalue('lname') is None and lastname is saved as "None" --
        # confirm whether a last-name field was meant to exist.
        cur.execute('update staff_det set name="'+str(form.getvalue('fname')).capitalize()+'" ,lastname="'+str(form.getvalue('lname')).capitalize()+'" ,addr="'+str(form.getvalue('addr'))+'"'
        ' ,mob="'+str(form.getvalue('mob'))+'" ,email="'+str(form.getvalue('email'))+'" ,dept="'+str(form.getvalue('dept'))+'" ,desgn="'+str(form.getvalue('desg'))+'"'
        ' ,qual="'+str(form.getvalue('qual'))+'" ,role="'+str(form.getvalue('role'))+'" ,cord="'+str(form.getvalue('cord'))+'" ,entry="'+str(form.getvalue('yr'))+'"'
        ' where id='+str(form.getvalue('id')))
        con.commit()
    # Emit the assembled page; {pat} (when present) is filled from locals().
    print((ht1+ht2).format(**locals()))
except Exception as err:
    # On any failure, bounce the browser to the shared error page.
    html='''
    <html>
    <body onload="window.location='http://localhost:8080/test/cgi-bin/staff_err.py'"></body>
    </html>
    '''
    print(html.format(**locals()))
    print(err)
"mallamsripooja@gmail.com"
] | mallamsripooja@gmail.com |
90e5046fc8476afe0d60d75cc0fe2675cc28a371 | 3ba96611e530fecb65ae653ec20ad450f1c8222f | /core/migrations/0015_auto_20201123_1637.py | af81ebb291a55088ca7ec3fe50bad44edbfc7b83 | [] | no_license | Hophoet/comii | 79becf122476a8935d1eac2b4638a2648d704cbe | 000f88ce995208ace81097bc6ac01671c985312e | refs/heads/master | 2023-04-15T22:33:58.408043 | 2021-04-28T10:24:54 | 2021-04-28T10:24:54 | 305,330,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 3.1 on 2020-11-23 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change for the ``core`` app."""

    # Must be applied after the previous core migration.
    dependencies = [
        ('core', '0014_auto_20200207_1608'),
    ]

    # Redefine Item.image as a CharField of up to 1000 characters --
    # presumably the image is stored as a URL/path string; confirm against
    # core.models.Item.
    operations = [
        migrations.AlterField(
            model_name='item',
            name='image',
            field=models.CharField(max_length=1000, verbose_name='image'),
        ),
    ]
| [
"hophoet@gmail.com"
] | hophoet@gmail.com |
5d7a1545020a74d4eaf42233b3640fa37992623e | 8d13236a9215ecd98b6364b6d0e9a16c61d5b313 | /regression_new_data.py | 68b38e52e3f22c07a3ecd358a02bc2dbe80b0096 | [] | no_license | jonona/modeling-week | de445b94e4c17a125fe684ac116f8c72c26752b5 | 15e8dcc0f9a97bdf5ddbd1a4c0fe68d30f6eb0c0 | refs/heads/master | 2022-11-30T11:45:16.701456 | 2020-08-21T17:52:35 | 2020-08-21T17:52:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,048 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 15:37:42 2020
@author: jonona
"""
import numpy as np
from loadData import *
import matplotlib.pyplot as plt
#from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import explained_variance_score as evs
from sklearn.metrics import r2_score as r_sq
import pandas as pd
from sklearn.preprocessing import normalize
import statsmodels.api as sm
Tensors, Eigenvectors, Eigenvalues, f_calc, f_eigval, g_calc, g_eigval, cn_calc, cn_eigval = loadData();
n = 500
r1 = 10 * np.ones(22)
phi1 = 0.35 * np.ones(22)
my_n = np.zeros(22)
num= 4
cn_calc_old = cn_calc[:]
cn_eigval_old = cn_eigval[:]
"""NEW DATA"""
Tensors = np.tile(Tensors,(num,1))
f_eigval = np.tile(f_eigval,(1, num)).squeeze()
g_eigval = np.tile(g_eigval,(1, num)).squeeze()
f_calc = np.tile(f_calc,(1, num)).squeeze()
g_calc = np.tile(g_calc,(1, num)).squeeze()
### add 3 more values of r and phi ###
r2 = np.concatenate((np.ones(22)*25, np.ones(22)*82, np.ones(22)*154))
phi2 = np.concatenate((np.ones(22)*0.22, np.ones(22)*0.4, np.ones(22)*0.58))
r = np.concatenate((r1,r2), axis=0)
phi = np.concatenate((phi1,phi2), axis=0)
### calculate new values exactly and with eigenvalue functions ###
cn_calc = 8/np.pi * phi * r * f_calc + 4 * phi * (g_calc + 1)
cn_eigval = 8/np.pi * phi * r * f_eigval + 4 * phi * (g_eigval + 1)
# assert (cn_calc_old == cn_calc[:22]).all()
# assert (cn_eigval_old == cn_eigval[:22]).all()
""" Input is vector with F and G functions calculated with eigenvalues + R and Phi values """
# X = np.stack((f_eigval, g_eigval, r, phi), axis=1)
""" Input is vector with flattened matrix A """
# X = Tensors.reshape((22*num,9))
# X = np.concatenate((X, np.stack((r, phi), axis=1)), axis=1)
""" Input is vector with flattened matrix A + F and G functions calculated with eigenvalues """
X = Tensors.reshape((22*4,9))
X = np.concatenate((X, np.stack((f_eigval, g_eigval, r, phi), axis=1)), axis=1)
""" Input is vector with diagonal elements of matrix A + F and G functions calculated with eigenvalues """
# X = np.zeros((22*4,3))
# for i in range(22):
# X[i,:] = np.diagonal(Tensors[i,:,:])
# X = np.concatenate((X, np.stack((f_eigval, g_eigval, r, phi), axis=1)), axis=1)
""" Target vector """
y = cn_calc
indices = np.arange(len(y))
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(
X, y, indices, test_size=0.33, random_state=25, stratify=X[:,12])
# p = np.random.permutation(len(y))
# X = X[p]
# y = y[p]
# indices = indices[p]
reg = LinearRegression(fit_intercept = True).fit(X, y)
#reg = sm.OLS(y_train, X_train).fit()
test_matrix = np.stack((y_test,reg.predict(X_test),cn_eigval[idx_test]), axis=1)
train_matrix = np.stack((y_train,reg.predict(X_train),cn_eigval[idx_train]), axis=1)
overall_matrix = np.concatenate((test_matrix, train_matrix), axis=0)
# adding indices to sort examples as in the presentation
overall_matrix = np.concatenate((overall_matrix,np.expand_dims(indices, axis=1)), axis=1)
overall_matrix = overall_matrix[overall_matrix[:, 3].argsort()]
overall_matrix = overall_matrix[:,:3]
print("\n==================== TESTING ==================")
print("Ground truth Regression Eigenvalue")
print(test_matrix)
print("\n\n==================== FITTED ==================")
print("Ground truth Regression Eigenvalue")
print(train_matrix)
print("\n\n==================== ERRORS OVERALL ==================")
print("MSE for regression: {:.04f}".format(mse(overall_matrix[:,0], overall_matrix[:,1])))
print("MSE for eigenvalues: {:.04f}".format(mse(overall_matrix[:,0], overall_matrix[:,2])))
print("\nExplained variance for regression: {:.04f}".format(evs(overall_matrix[:,0], overall_matrix[:,1])))
print("Explained variance for eigenvalues: {:.04f}".format(evs(overall_matrix[:,0], overall_matrix[:,2])))
print("\nR2 for regression: {:.04f}".format(r_sq(overall_matrix[:,0], overall_matrix[:,1])))
print("R2 for eigenvalues: {:.04f}".format(r_sq(overall_matrix[:,0], overall_matrix[:,2])))
print("\n\n==================== ERRORS TEST ==================")
print("MSE for regression: {:.04f}".format(mse(test_matrix[:,0], test_matrix[:,1])))
print("MSE for eigenvalues: {:.04f}".format(mse(test_matrix[:,0], test_matrix[:,2])))
print("\nExplained variance for regression: {:.04f}".format(evs(test_matrix[:,0], test_matrix[:,1])))
print("Explained variance for eigenvalues: {:.04f}".format(evs(test_matrix[:,0], test_matrix[:,2])))
print("\n\n==================== COEFFICIENTS ==================")
#df = pd.DataFrame({'Parameter': ['a11', 'a12', 'a13', 'a21', 'a22', 'a23', 'a31', 'a32', 'a33', 'f_eigval', 'g_eigval'], 'Coefficients': getattr(reg, 'coef_')})
#df = pd.DataFrame({'Parameter': ['a11', 'a22', 'a33', 'f_eigval', 'g_eigval'], 'Coefficients': getattr(reg, 'coef_')})
#print(df)
# fig, ax = plt.subplots(figsize=(15,10))
# x = np.arange(22)
# #rects1 = ax.bar(x - width/2, overall_matrix[:,1], width, label='Regression')
# #rects2 = ax.bar(x + width/2, overall_matrix[:,0], width, label='Calculated')
# ax.bar(x-0.25, overall_matrix[:,0], width=0.25, align='center', label='Exact', color='#1b9e77', edgecolor='k')
# ax.bar(x, overall_matrix[:,1], width=0.25, align='center', label='Regression', color='#7570b3', edgecolor='k')
# ax.bar(x+0.25, overall_matrix[:,2], width=0.25, align='center', label='Eigenvalue Method', color="#d95f02", edgecolor='k')
# fig.suptitle("Exact vs Regression vs Eigenvalue Method", size = 20, x=0.5, y=0.95)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0 + box.height * 0.1,
# box.width, box.height * 0.9])
# plt.ylabel('Number of contact points', size = 16)
# # Put a legend below current axis
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.03),
# fancybox=True, shadow=True, ncol=5, prop={"size": 14}) | [
"jonona@macbook-pro.localdomain"
] | jonona@macbook-pro.localdomain |
ae865f7b92ac80c10c1856ef9f7171239f85057a | 7cf06162efb87c19db2d04f23518e33b14015876 | /04/task/test.py | 53d3acb22c916fdf25aeba06262a1db166f5dd3d | [] | no_license | ivanmolchanov1988/client-server | d5fdfadcd6d6463f5787db5282b825d7b95b1127 | 58bf488df6e5a758adffffd6065eb480a336f15f | refs/heads/main | 2023-06-03T12:05:14.246932 | 2021-05-26T09:06:22 | 2021-05-26T09:06:22 | 362,454,753 | 0 | 0 | null | 2021-05-27T10:28:21 | 2021-04-28T12:12:01 | Python | UTF-8 | Python | false | false | 629 | py | import unittest
import server01
import client01
import argparse
class Test_client_server(unittest.TestCase):
    """Smoke tests: both CLI parsers default to host '' and port 7777.

    Fixes over the previous version: the unused local ``test_parser`` is
    removed, and each parser is built and parsed once per test instead of
    twice (parse_args() reads sys.argv, so repeating it was pure waste).
    """

    def test_server_parser(self):
        """Server parser yields the expected default address and port."""
        args = server01.createParser().parse_args()
        self.assertEqual((args.addr, args.port), ('', 7777))

    def test_client_parser(self):
        """Client parser yields the expected default address and port."""
        args = client01.createParser().parse_args()
        self.assertEqual((args.addr, args.port), ('', 7777))
# Standard unittest entry point when this module is run as a script.
if __name__ == '__main__':
    unittest.main()
"silencer1588@yandex.ru"
] | silencer1588@yandex.ru |
930c2c52d19f93eb89a1d6d1cd65fddba65c9851 | df126574e5fae32aa6ba8ae927942208107897b5 | /pyconll/load.py | 2cfdd19dd6edcbb04a5310f3f37bfb3799be6585 | [
"MIT"
] | permissive | ZmeiGorynych/pyconll | 865781a9ac2b5c0b9fe2a26d7d14fce60d4454a7 | 6784295db5fde769754e2b1ac46d6100484e45cc | refs/heads/master | 2020-04-14T11:38:14.167823 | 2018-12-28T22:12:38 | 2018-12-28T22:12:38 | 163,819,354 | 0 | 0 | null | 2019-01-02T09:15:40 | 2019-01-02T09:15:40 | null | UTF-8 | Python | false | false | 3,400 | py | """
A wrapper around the Conll class that allow for easy loading of treebanks from
multiple formats. This module also contains logic for iterating over treebank
data without storing Conll objects in memory.
"""
import requests
from pyconll._parser import iter_sentences
from pyconll.unit import Conll
def load_from_string(source):
    """Parse an in-memory CoNLL-U string into a Conll object.

    Args:
        source: The CoNLL-U formatted string.

    Returns:
        A Conll object equivalent to the provided source.

    Raises:
        ParseError: If the source cannot be parsed as CoNLL-U.
    """
    return Conll(source.splitlines())
def load_from_file(filename):
    """Load a CoNLL-U file given the filename where it resides.

    The file is decoded as UTF-8 — the encoding the CoNLL-U format
    mandates — instead of the platform default (e.g. cp1252 on Windows),
    which would corrupt or reject non-ASCII tokens.

    Args:
        filename: The location of the file.

    Returns:
        A Conll object equivalent to the provided file.

    Raises:
        IOError: If there is an error opening the given filename.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    with open(filename, encoding='utf-8') as f:
        c = Conll(f)

    return c
def load_from_url(url):
    """Download and parse the CoNLL-U file located at *url*.

    Args:
        url: The URL that points to the CoNLL-U file.

    Returns:
        A Conll object equivalent to the downloaded file.

    Raises:
        requests.exceptions.RequestException: If the url was unable to be
            properly retrieved and status was 4xx or 5xx.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    response = requests.get(url)
    response.raise_for_status()

    # CoNLL-U is always UTF-8; do not trust the server's charset header.
    response.encoding = 'utf-8'
    return Conll(response.text.splitlines())
def iter_from_string(source):
    """Iterate over the sentences of a CoNLL-U string.

    Prefer this over load_from_string when a single pass suffices and no
    Conll object needs to be created or stored.

    Args:
        source: The CoNLL-U string.

    Yields:
        Each sentence parsed from the source, in order.

    Raises:
        ParseError: If there is an error parsing the input into a Conll object.
    """
    yield from iter_sentences(source.splitlines())
def iter_from_file(filename):
    """Iterate over a CoNLL-U file's sentences.

    The file is decoded as UTF-8 — the encoding the CoNLL-U format
    mandates — instead of the platform default encoding, which would
    corrupt non-ASCII tokens on some systems.

    Args:
        filename: The name of the file whose sentences should be iterated over.

    Yields:
        The sentences that make up the CoNLL-U file.

    Raises:
        IOError: If there is an error opening the file.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    with open(filename, encoding='utf-8') as f:
        for sentence in iter_sentences(f):
            yield sentence
def iter_from_url(url):
    """Iterate over a CoNLL-U file that is pointed to by a given URL.

    Args:
        url: The URL that points to the CoNLL-U file.

    Yields:
        The sentences that make up the CoNLL-U file.

    Raises:
        requests.exceptions.RequestException: If the url was unable to be
            properly retrieved and status was 4xx or 5xx.
        ParseError: If there is an error parsing the input into a Conll object.
    """
    resp = requests.get(url)
    resp.raise_for_status()

    # Match load_from_url: force UTF-8 (the CoNLL-U encoding) rather than
    # letting requests guess from the HTTP headers, which produces mojibake
    # when the server omits or misreports the charset.
    resp.encoding = 'utf-8'

    lines = resp.text.splitlines()
    for sentence in iter_sentences(lines):
        yield sentence
| [
"matgrioni@gmail.com"
] | matgrioni@gmail.com |
80081ce8debb6e747b91f780c2308831bbe341eb | 5059dd81358e4a6acf5ec5c0acaac49f834b1726 | /functions/socketio/video.py | 20b5a3837d98800d6af3db1760c1887348c065b8 | [
"MIT"
] | permissive | VMAJSTER/openstreamingplatform | 4e41267f7f1b0f687733525d0e02e9bcc9e9d315 | f002246db922dab9a3f019f46001f3901326feaf | refs/heads/master | 2023-03-25T04:04:25.057061 | 2021-02-24T18:02:57 | 2021-02-24T18:02:57 | 350,442,822 | 0 | 0 | MIT | 2021-03-22T18:38:24 | 2021-03-22T18:08:25 | null | UTF-8 | Python | false | false | 9,262 | py | from flask import abort
from flask_security import current_user
from classes.shared import db, socketio
from classes import RecordedVideo
from classes import settings
from classes import notifications
from classes import subscriptions
from functions import system
from functions import webhookFunc
from functions import templateFilters
from functions import videoFunc
from functions import subsFunc
from app import r
@socketio.on('deleteVideo')
def deleteVideoSocketIO(message):
if current_user.is_authenticated:
videoID = int(message['videoID'])
result = videoFunc.deleteVideo(videoID)
if result is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('editVideo')
def editVideoSocketIO(message):
if current_user.is_authenticated:
videoID = int(message['videoID'])
videoName = system.strip_html(message['videoName'])
videoTopic = int(message['videoTopic'])
videoDescription = message['videoDescription']
videoAllowComments = False
if message['videoAllowComments'] == "True" or message['videoAllowComments'] == True:
videoAllowComments = True
result = videoFunc.changeVideoMetadata(videoID, videoName, videoTopic, videoDescription, videoAllowComments)
if result is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('createClip')
def createclipSocketIO(message):
if current_user.is_authenticated:
videoID = int(message['videoID'])
clipName = system.strip_html(message['clipName'])
clipDescription = message['clipDescription']
startTime = float(message['clipStart'])
stopTime = float(message['clipStop'])
result = videoFunc.createClip(videoID, startTime, stopTime, clipName, clipDescription)
if result[0] is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('moveVideo')
def moveVideoSocketIO(message):
if current_user.is_authenticated:
videoID = int(message['videoID'])
newChannel = int(message['destinationChannel'])
result = videoFunc.moveVideo(videoID, newChannel)
if result is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('togglePublished')
def togglePublishedSocketIO(message):
sysSettings = settings.settings.query.first()
if current_user.is_authenticated:
videoID = int(message['videoID'])
videoQuery = RecordedVideo.RecordedVideo.query.filter_by(owningUser=current_user.id, id=videoID).first()
if videoQuery is not None:
newState = not videoQuery.published
videoQuery.published = newState
if videoQuery.channel.imageLocation is None:
channelImage = (sysSettings.siteProtocol + sysSettings.siteAddress + "/static/img/video-placeholder.jpg")
else:
channelImage = (sysSettings.siteProtocol + sysSettings.siteAddress + "/images/" + videoQuery.channel.imageLocation)
if newState is True:
webhookFunc.runWebhook(videoQuery.channel.id, 6, channelname=videoQuery.channel.channelName,
channelurl=(sysSettings.siteProtocol + sysSettings.siteAddress + "/channel/" + str(videoQuery.channel.id)),
channeltopic=templateFilters.get_topicName(videoQuery.channel.topic),
channelimage=channelImage, streamer=templateFilters.get_userName(videoQuery.channel.owningUser),
channeldescription=str(videoQuery.channel.description), videoname=videoQuery.channelName,
videodate=videoQuery.videoDate, videodescription=str(videoQuery.description),
videotopic=templateFilters.get_topicName(videoQuery.topic),
videourl=(sysSettings.siteProtocol + sysSettings.siteAddress + '/play/' + str(videoQuery.id)),
videothumbnail=(sysSettings.siteProtocol + sysSettings.siteAddress + '/videos/' + str(videoQuery.thumbnailLocation)))
subscriptionQuery = subscriptions.channelSubs.query.filter_by(channelID=videoQuery.channel.id).all()
for sub in subscriptionQuery:
# Create Notification for Channel Subs
newNotification = notifications.userNotification(templateFilters.get_userName(videoQuery.channel.owningUser) + " has posted a new video to " + videoQuery.channel.channelName + " titled " + videoQuery.channelName, '/play/' + str(videoQuery.id), "/images/" + str(videoQuery.channel.owner.pictureLocation), sub.userID)
db.session.add(newNotification)
db.session.commit()
subsFunc.processSubscriptions(videoQuery.channel.id, sysSettings.siteName + " - " + videoQuery.channel.channelName + " has posted a new video", "<html><body><img src='" +
sysSettings.siteProtocol + sysSettings.siteAddress + sysSettings.systemLogo + "'><p>Channel " + videoQuery.channel.channelName + " has posted a new video titled <u>" +
videoQuery.channelName + "</u> to the channel.</p><p>Click this link to watch<br><a href='" + sysSettings.siteProtocol + sysSettings.siteAddress + "/play/" +
str(videoQuery.id) + "'>" + videoQuery.channelName + "</a></p>")
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('togglePublishedClip')
def togglePublishedClipSocketIO(message):
if current_user.is_authenticated:
clipID = int(message['clipID'])
clipQuery = RecordedVideo.Clips.query.filter_by(id=clipID).first()
if clipQuery is not None and current_user.id == clipQuery.recordedVideo.owningUser:
newState = not clipQuery.published
clipQuery.published = newState
if newState is True:
subscriptionQuery = subscriptions.channelSubs.query.filter_by(channelID=clipQuery.recordedVideo.channel.id).all()
for sub in subscriptionQuery:
# Create Notification for Channel Subs
newNotification = notifications.userNotification(templateFilters.get_userName(clipQuery.recordedVideo.owningUser) + " has posted a new clip to " +
clipQuery.recordedVideo.channel.channelName + " titled " + clipQuery.clipName,'/clip/' +
str(clipQuery.id),"/images/" + str(clipQuery.recordedVideo.channel.owner.pictureLocation), sub.userID)
db.session.add(newNotification)
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('editClip')
def changeClipMetadataSocketIO(message):
if current_user.is_authenticated:
clipID = int(message['clipID'])
clipName = message['clipName']
clipDescription = message['clipDescription']
result = videoFunc.changeClipMetadata(clipID, clipName, clipDescription)
if result is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401)
@socketio.on('deleteClip')
def deleteClipSocketIO(message):
if current_user.is_authenticated:
clipID = int(message['clipID'])
result = videoFunc.deleteClip(clipID)
if result is True:
db.session.commit()
db.session.close()
return 'OK'
else:
db.session.commit()
db.session.close()
return abort(500)
else:
db.session.commit()
db.session.close()
return abort(401) | [
"dave.r.lockwood@gmail.com"
] | dave.r.lockwood@gmail.com |
540ced66fe7f32f695d31c3c930aba3f968ccbf5 | 3ba73ee31160769d1736b1516024fdf40dd7492c | /4.String Manipulation/4.1MakingAnagrams.py | 3cdd00cdcaf06f25235574e8c790983e4affa50d | [] | no_license | chaerim-kim/Data-Structures-and-Algorithms | 86d4dfcd4948a08b16ead1492e9a825adc4deaad | 80d685ef2fcb54f3299225fa080df7d64e672735 | refs/heads/master | 2023-03-31T01:32:49.638962 | 2021-04-07T16:12:20 | 2021-04-07T16:12:20 | 290,717,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | import math
import os
import random
import re
import sys
from collections import Counter
# find the number of deletions to be made in order for two strings to be anagrams
def makeAnagram(a, b):
itsc = list((Counter(a) & Counter(b)).elements())
# print(itsc) #['r', 'x', 'w', 's', 'm', 'm', 'l', 'i', 'g', 'v']
sum = (len(a) - len(itsc)) + (len(b) - len(itsc))
# print(sum)
return sum
########## Driver code ##########
makeAnagram("fcrxzwscanmligyxyvym", "jxwtrhvujlmrpdoqbisbwhmgpmeoke")
# Output: 30
#################################
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
# a = input()
# b = input()
# res = makeAnagram(a, b)
# fptr.write(str(res) + '\n')
# fptr.close()
| [
"sc17crk@leeds.ac.uk"
] | sc17crk@leeds.ac.uk |
42e066146f1fa97f71238d54a52fa96707339fed | 0274f2c465f110598456624581f569331221068b | /impl/set_mode.py | 4e67d0d1e69ae5d21f0e2a6144f1fe0e173dbafa | [] | no_license | bluecube/thesis | 63e745076c86a3122e9c3d7ff42ff22e32921860 | 588db206e64de9b681372fea9a70d3fa2aa598df | refs/heads/master | 2016-09-06T00:01:03.840006 | 2013-05-27T09:36:51 | 2013-05-27T09:36:51 | 1,376,241 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #!/usr/bin/python
"""
set_mode.py
Set the GPS to SiRF or NMEA mode.
"""
from __future__ import division, print_function, unicode_literals
import gps
import logging
import sys
import argparse
from gps.sirf_messages import *
def setup_logging():
logging.basicConfig(
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level = logging.INFO
)
setup_logging()
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
arg_parser = argparse.ArgumentParser(description="Set the GPS to SiRF or NMEA mode.")
arg_parser.add_argument('gps',
help="Port with a GPS receiver.")
arg_parser.add_argument('--protocol',
help="To which mode to switch the receiver, protocol is either 'NMEA' or 'SIRF'",
default="SIRF")
arguments = arg_parser.parse_args()
x = gps.gps.Gps(arguments.gps)
x.set_protocol(arguments.protocol)
| [
"blue.cube@seznam.cz"
] | blue.cube@seznam.cz |
8745eab3a8a025abd42708022865113cd6d9859f | fd326562890d4f1987c384fc7c60374938231222 | /OOP/DefinningClasses/Spoopify/project/album.py | 55660f2a89fde3334a12f48e6c5ecfbc8cdc378d | [] | no_license | miro-lp/SoftUni | cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4 | 283d9328537919de49f7f6a301e58593bae9ca2a | refs/heads/main | 2023-08-23T21:22:07.856226 | 2021-08-25T15:10:18 | 2021-08-25T15:10:18 | 318,134,101 | 2 | 1 | null | 2021-08-10T12:51:54 | 2020-12-03T09:03:08 | Python | UTF-8 | Python | false | false | 1,675 | py | from .song import Song
class Album:
def __init__(self, name, *songs):
self.name = name
self.songs = list(songs)
self.published = False
def add_song(self, song: Song):
if self.published:
return "Cannot add songs. Album is published."
else:
if song.single:
return f"Cannot add {song.name}. It's a single"
else:
if song.name not in [i.name for i in self.songs]:
self.songs.append(song)
return f"Song {song.name} has been added to the album {self.name}."
else:
return "Song is already in the album."
def remove_song(self, song: str):
if self.published:
return "Cannot remove songs. Album is published."
else:
if song in [i.name for i in self.songs]:
for s in self.songs:
if s.name == song:
self.songs.remove(s)
break
return f"Removed song {song} from album {self.name}."
else:
return "Song is not in the album."
def publish(self):
if self.published:
return f"Album {self.name} is already published."
else:
self.published = True
return f"Album {self.name} has been published."
def details(self):
name_info = f"Album {self.name}"
album_info = "\n".join([f"== {s.get_info()}" for s in self.songs])
if len(self.songs) > 0:
return name_info + "\n" + album_info + "\n"
else:
return name_info + "\n"
| [
"miro_lp@abv.bg"
] | miro_lp@abv.bg |
a88e1a023d71d914356416a10dd34007022e69ff | 4fc8c10c6881f394387aebed9bb115d8df841f71 | /hw7/part1.py | a11e2021f482ac30cf6934107884be28ed1ba96a | [] | no_license | lolazz/python_geekbrains | 71e837162be1dfcbd3db934a73c4383072f22a16 | 2657aeb479830bdd71f9f894db985dd96b2b9c9a | refs/heads/main | 2023-02-21T23:40:45.655976 | 2021-01-25T20:53:31 | 2021-01-25T20:53:31 | 322,034,995 | 0 | 0 | null | 2021-01-26T14:54:12 | 2020-12-16T16:10:52 | Python | UTF-8 | Python | false | false | 980 | py | class Matrix:
def __init__(self, matrix):
self.matrix = matrix
def __str__(self):
string = ''
for i in self.matrix:
for j in i:
string += f' {j}'
string += '\n'
return string
def __add__(self, second):
result = []
numbers = []
for i in range(len(self.matrix)):
for j in range(len(self.matrix[0])):
summa = self.matrix[i][j] + second.matrix[i][j]
numbers.append(summa)
if len(numbers) == len(self.matrix[0]):
result.append(numbers)
numbers = []
return Matrix(result)
a = [[5, 55, 66], [3, 33, 44], [2, 22, 33]]
b = [[5, 45, 34], [-3, -33, -44], [8, 78, -33]]
m = Matrix(a)
mm = Matrix(b)
print("\nМатрица №1")
print(m.__str__(), "\n")
print("Матрица №2")
print(mm.__str__(), "\n")
print("Сумма матриц №1 и №2")
print(m + mm) | [
"ivan.kupreenkov@kofax.com"
] | ivan.kupreenkov@kofax.com |
fadd90032588d510a575c957674db5fd66300f15 | 75fd2cc26f5efc24f04a7ec7f59209ebfef5f88f | /Sprint-Challenge--Data-Structures-Python-master/names/names.py | 29498388eac98883bef151712cd33125e22de1db | [
"MIT"
] | permissive | RCTom168/Sprint-Challenge-Data-Structures | 026951dc1414d81ff0e279f3ef555bd5e6c1a95d | 54cbd2f6025bb88fbf5a5b0ec622b87310d94665 | refs/heads/master | 2022-10-15T23:06:41.134629 | 2020-06-12T17:31:30 | 2020-06-12T17:31:30 | 271,854,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,739 | py | import time
from binary_search_tree import BSTNode
# (Hint: You might try importing a data structure you built during the week)
# Our Binary Search Tree data structure from earlier in the week should be very helpful
### binary_search_tree.py BSTNode Code ###
# class BSTNode:
# def __init__(self, value):
# self.value = value
# self.left = None
# self.right = None
# # Insert the given value into the tree
# # Return statements technically aren't needed
# def insert(self, value):
# if value >= self.value:
# if self.right is None:
# self.right = BSTNode(value)
# return
# else:
# self = self.right
# return self.insert(value)
# elif value < self.value:
# if self.left is None:
# self.left = BSTNode(value)
# return
# else:
# self = self.left
# return self.insert(value)
# # Return True if the tree contains the value
# # False if it does not
# def contains(self, target):
# if self.value == target:
# return True
# elif target > self.value:
# if self.right is None:
# return False
# else:
# self = self.right
# return self.contains(target)
# else:
# if self.left is None:
# return False
# else:
# self = self.left
# return self.contains(target)
### Names.py Code ###
print("\n Original Names.py Code:\n")
start_time = time.time()
# f = open('names_1.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_1.txt', 'r')
names_1 = f.read().split("\n") # List containing 10000 names
f.close()
# f = open('names_2.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_2.txt', 'r')
names_2 = f.read().split("\n") # List containing 10000 names
f.close()
duplicates = [] # Return the list of duplicates in this data structure
# Replace the nested for loops below with your improvements
for name_1 in names_1:
for name_2 in names_2:
if name_1 == name_2:
duplicates.append(name_1)
end_time = time.time()
print (f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print (f"runtime: {end_time - start_time} seconds", '\n', "-"*85)
### Improved Names.py Code ###
print("\n Improved Names.py Code:\n")
start_time = time.time()
# f = open('names_1.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_1.txt', 'r')
names_1 = f.read().split("\n") # List containing 10000 names
f.close()
# f = open('names_2.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_2.txt', 'r')
names_2 = f.read().split("\n") # List containing 10000 names
f.close()
duplicates = [] # Return the list of duplicates in this data structure
# Replace the nested for loops below with your improvements
bst = BSTNode('names')
for name in names_1:
bst.insert(name)
for name in names_2:
if bst.contains(name):
duplicates.append(name)
end_time = time.time()
print (f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print (f"runtime: {end_time - start_time} seconds", '\n', "-"*85)
# ---------- Stretch Goal -----------
# Python has built-in tools that allow for a very efficient approach to this problem
# What's the best time you can accomplish? Thare are no restrictions on techniques or data
# structures, but you may not import any additional libraries that you did not write yourself.
### Stretch Goal Names.py Code ###
print("\n Stretch Goal Names.py Code:\n")
start_time = time.time()
# f = open('names_1.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_1.txt', 'r')
names_1 = f.read().split("\n") # List containing 10000 names
f.close()
# f = open('names_2.txt', 'r')
f = open(r'C:\Users\robtom\Desktop\Sprint-Challenge--Data-Structures-Python-master\names\names_2.txt', 'r')
names_2 = f.read().split("\n") # List containing 10000 names
f.close()
duplicates = [] # Return the list of duplicates in this data structure
# Replace the nested for loops below with your improvements
names = {}
for name in names_1 + names_2:
if name in names:
duplicates.append(name)
else:
names[name] = 1
end_time = time.time()
print (f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print (f"runtime: {end_time - start_time} seconds", '\n', "-"*85) | [
"noreply@github.com"
] | noreply@github.com |
a8c2d9a632e98e7653679b1e6c6322c07cb1ce1c | fe7e450eacbae170dbcda3a308daf0e90490dfec | /qsq/transfer/const.py~ | 1db2899a9e06ad2fe1cbb1473b11fcff2ad39aba | [] | no_license | buaaqsq/lingyunTest | c1fe3d047a67f41b3d1ea4ca7da485be90ee402b | acf4661457979e4854d9fdf2491558e3a97c710f | refs/heads/master | 2021-01-23T03:27:04.437897 | 2014-06-11T09:11:41 | 2014-06-11T09:11:41 | 20,719,647 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | #!/usr/bin/python
class HDFSUrl:
| [
"qiaoshaoqing@gmail.com"
] | qiaoshaoqing@gmail.com | |
1fe16eeaae768425b0033eb5c0e303ca9a59d626 | aafd0b42c393b1709bf07d71588559d41956cfa1 | /analysis/islamqa-categories.py | 719e752021a9d8c2caadebd313f7708fd6fab1f8 | [] | no_license | nicholashanoian/fatwa-website-analysis | 0a5490584c8333acd3f9357852a23b8714cf8b0f | bd801eafb3a61f816e1a8d29c233b1b0b51bdd14 | refs/heads/master | 2022-12-22T14:27:42.642500 | 2020-01-28T20:49:27 | 2020-01-28T20:49:27 | 185,402,113 | 0 | 1 | null | 2022-12-08T05:05:21 | 2019-05-07T12:56:19 | HTML | UTF-8 | Python | false | false | 1,137 | py | import requests
from bs4 import BeautifulSoup as bs
import random
# estimates the number of fatwas in each general category at islamqa.info
# (run-python "venv/bin/python -i")
def get_num_posts(url):
r = requests.get(url)
soup = bs(r.text)
last_page_a = soup.find("a", {"rel":"last"})
# extract 6 from "https://islamqa.info/en/categories/topics/3/basic-tenets-of-faith?page=6"
# print(last_page_a)
if last_page_a:
num_pages = int(str.split(str.split(last_page_a.get('href'), "?")[1], "=")[1])
# paginated to 15 per page, take a guess at how many are on last page
return 15 * (num_pages - 1) + random.randint(1,15)
else:
# just one page
return random.randint(1,15)
with open('sidebar.html', 'r') as file:
data = file.read().replace('\n', '')
sidebar = bs(data)
# i = 0
d = dict()
for topic in sidebar.select("#top > li"):
title = topic.select_one("a").text
num_fatwas = 0
for link in topic.find_all('a'):
print(link.get('href'))
num_fatwas += get_num_posts(link.get('href'))
d[title] = num_fatwas
# i += 1
print(d)
| [
"nicholashanoian@gmail.com"
] | nicholashanoian@gmail.com |
9bc5ca18a12c96f4e0ca190e8213607b463a5682 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/fsa.py | f6b272fc35d04a6d7b15c3cc189b3879a96524a4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 140 | py | ii = [('ShawHDE.py', 2), ('PettTHE.py', 71), ('ClarGE2.py', 2), ('GellWPT2.py', 1), ('CrokTPS.py', 2), ('BuckWGM.py', 1), ('RoscTTI.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
4593d1bbf98af0f8912fdc82c271b3dc609c2368 | 3467bd7ea78649ebfb39746cf05663b7913691b3 | /Tree.py | 06c6f53ecfb7bd034751ae315b272c629389fede | [] | no_license | anindyy/halma-ai | a04b8bc50a73945c43309eb810d9e2bdfabb2d59 | 7113e5d3c2497c1da0d5a6b284ce75bda3342d30 | refs/heads/main | 2023-02-24T14:27:35.838865 | 2021-01-22T03:49:28 | 2021-01-22T03:49:28 | 331,825,614 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,640 | py | import Board
import copy
from math import exp
import random
class Tree:
#Node (board)
#Listofchild (list of board)
def __init__(self, board):
self.node = board
def generateChild(self, id):
listChild = []
# somebody wins = terminal node
if (self.node.winCheck() != 0):
return listChild
# kalau id bot ini 2, list pion miliknya adalah listPionComp
if (id == 2):
listPion = self.node.listPionComp
# ketika bukan gilirannya, dia akan menjalankan pion lawan
if (self.node.playerTurn != id):
listPion = self.node.listPionUser
else:
listPion =self.node.listPionUser
if (self.node.playerTurn != id):
listPion = self.node.listPionComp
# generate semua kemungkinan langkah untuk pion di list
for p in listPion:
listMove = self.node.generateMove(p)
listJump = self.node.generateAllJump(p)
# append semua kemungkinan jump
for (x,y) in listJump:
if self.node.isMoveValid(p,x,y,False):
child = copy.deepcopy(self.node)
newP=child.getMatrix()[p.getX()][p.getY()]
child.move(newP, x, y)
listChild.append(child)
# append semua kemungkinan move
for (x,y) in listMove:
if self.node.isMoveValid(p,x,y,False):
child = copy.deepcopy(self.node)
newP = child.getMatrix()[p.getX()][p.getY()]
child.move(newP, x, y)
listChild.append(child)
if (self.node in listChild):
listChild.remove(self.node)
return listChild
def generateChildLocal(self, listSize, T, id):
listChild = []
if (self.node.winCheck()!=0):
return listChild
for i in range (listSize):
child=self.simulatedAnnealing(T, id)
while (child in listChild):
child=self.simulatedAnnealing(T, id)
listChild.append(child)
if (self.node in listChild):
listChild.remove(self.node)
return listChild
def simulatedAnnealing(self, T, id):
listNext = []
listPion = []
if (id == 2):
listPion = self.node.listPionComp
if (self.node.playerTurn!=id):
listPion = self.node.listPionUser
else:
listPion =self.node.listPionUser
if (self.node.playerTurn!=id):
listPion = self.node.listPionComp
# handle kl pion yg kepilih gabisa gerak
while len(listNext)==0:
pion = random.choice(listPion)
listNext = self.node.generateMove(pion)
for move in listNext:
x,y = move
if not self.node.isMoveValid(pion,x,y,False):
listNext.remove(move)
listJump = self.node.generateAllJump(pion)
for move in listJump:
x,y = move
if not self.node.isMoveValid(pion,x,y,True):
listJump.remove(move)
listNext.extend(listJump)
currentMove = random.choice(listNext)
listNext.remove(currentMove)
currentBoard = copy.deepcopy(self.node)
newP= currentBoard.getMatrix()[pion.getX()][pion.getY()]
x,y = currentMove
currentBoard.move(newP, x, y)
# T berkurang secara decrement
while True:
if (T==0 or len(listNext)==0):
return currentBoard
else:
nextMove = random.choice(listNext)
listNext.remove(nextMove)
nextBoard = copy.deepcopy(self.node)
newP=nextBoard.getMatrix()[pion.getX()][pion.getY()]
x,y = nextMove
nextBoard.move(newP,x,y)
dE = nextBoard.value(id) - currentBoard.value(id)
if dE>0:
currentBoard = nextBoard
else:
if (exp(dE/T)>0.5):
currentBoard = nextBoard
T-=1
| [
"pamsrewari@gmail.com"
] | pamsrewari@gmail.com |
f99cac074d7c49aa1c3afe7ac9fb13be90c2aeac | 415bbc28ffbf2fe0311efd900ea92fd7e977029a | /paybutton/tokenapi/migrations/0003_auto_20180318_1626.py | 4722dc5fbff1acce544b1b467d2ecc23e384ddfc | [] | no_license | rossi1/cashpay | dcf45f780ac1bf51ca585304885876dced847b69 | 648c66768b5880854db2a8e48e067cafae2650a6 | refs/heads/master | 2022-12-12T05:11:28.854952 | 2018-03-20T01:50:10 | 2018-03-20T01:50:10 | 125,932,899 | 0 | 0 | null | 2022-11-22T02:16:58 | 2018-03-19T23:20:55 | JavaScript | UTF-8 | Python | false | false | 489 | py | # Generated by Django 2.0 on 2018-03-18 23:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tokenapi', '0002_auto_20180318_1625'),
]
operations = [
migrations.AlterField(
model_name='merchantinfo',
name='merchant_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tokenapi.MerchantStore'),
),
]
| [
"emmanuelukwuegbu2016@gmail.com"
] | emmanuelukwuegbu2016@gmail.com |
7430d287ee9c99442de1b9dbdb27c9d6495a28b4 | cc48d2f4f0a04290b4f37038432d7e898658d50b | /day1/sumwhile!=1000.py | c37ef28a76dfa71051ecbfd055cdad455d06b878 | [] | no_license | RuslanZaremba/RuslanZaremba | 87bcff89f9cb86d6cd4b1a38415e9a606cac2f06 | cde555391ed0a07915aeff0155ebde051a75177e | refs/heads/master | 2022-11-22T22:48:12.709816 | 2020-07-22T19:02:26 | 2020-07-22T19:02:26 | 265,620,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # Счетчик до 1000 while
i = 0
while i != 1000:
i += 1
print('i = ', i)
# Счетчик до 1000 for
a = 0
for i in range(1000):
a += 1
print('a = ', a)
| [
"ruslanzaremba1990@gmail.com"
] | ruslanzaremba1990@gmail.com |
2b519425fc80b6a980b77f8685872dc03c6b8b2c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03547/s895509734.py | 521f80f68c31034aaefe1ca9b2e831263c85a6bb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | #ABC078A
x,y = input().split()
print("<" if x<y else "=" if x==y else ">") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.