blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d5cab9a4b1e7e1be3cf13dddebae13f6a4066b74 | 1d9a6406c859fda186f520bb4472c551fc572c7b | /src/hopla/hoplalib/zoo/petcontroller.py | b5f9e7d0243348d6b233bab73bbf200a2f76e11e | [
"Apache-2.0"
] | permissive | rickie/hopla | af21b794ce6719d402721550e1ee4091790410b6 | 24a422194e42c03d5877dc167b2b07147326a595 | refs/heads/main | 2023-08-13T17:33:03.612293 | 2021-10-12T12:13:25 | 2021-10-12T12:13:25 | 408,538,704 | 0 | 0 | Apache-2.0 | 2021-09-20T17:30:15 | 2021-09-20T17:30:15 | null | UTF-8 | Python | false | false | 2,211 | py | """
A module for performing feeding Pet HTTP requests.
"""
from typing import NoReturn, Optional, Union
import requests
from hopla.hoplalib.http import HabiticaRequest, UrlBuilder
from hopla.hoplalib.zoo.feed_clickhelper import get_feed_data_or_exit
from hopla.hoplalib.zoo.zoofeed_algorithms import FeedPlanItem
class FeedPostRequester(HabiticaRequest):
    """Sends the HTTP POST request that feeds a single pet.

    Note: the Habitica endpoint reads its arguments from query
    parameters rather than from a request body, even though the
    verb is HTTP POST.
    [APIDOCS](https://habitica.com/apidoc/#api-User-UserFeed)
    """
    _DEFAULT_FOOD_AMOUNT = 1

    def __init__(self, *,
                 pet_name: str,
                 food_name: str,
                 food_amount: Optional[int] = _DEFAULT_FOOD_AMOUNT):
        self.pet_name = pet_name
        self.food_name = food_name
        # A falsy amount (None or 0) falls back to the class default of 1.
        amount = food_amount or FeedPostRequester._DEFAULT_FOOD_AMOUNT
        self.query_params = {"amount": amount}

    @property
    def path(self) -> str:
        """Return the URL path extension used to feed this pet."""
        return "/user/feed/{}/{}".format(self.pet_name, self.food_name)

    @property
    def feed_pet_food_url(self) -> str:
        """Return the full url to feed this pet."""
        return UrlBuilder(path_extension=self.path).url

    def post_feed_request(self) -> requests.Response:
        """Perform the feed-pet POST request and return the raw response."""
        return requests.post(url=self.feed_pet_food_url,
                             headers=self.default_headers,
                             params=self.query_params)

    def post_feed_request_get_data_or_exit(self) -> Union[NoReturn, dict]:
        """Perform the feed-pet POST request and return the feed data
        when successful; otherwise exit via get_feed_data_or_exit.
        """
        feed_response: requests.Response = self.post_feed_request()
        return get_feed_data_or_exit(feed_response)

    @classmethod
    def build_from(cls, feed_item: FeedPlanItem) -> "FeedPostRequester":
        """Create a request from a feed plan item."""
        return FeedPostRequester(pet_name=feed_item.pet_name,
                                 food_name=feed_item.food_name,
                                 food_amount=feed_item.times)
| [
"31448155+melvio@users.noreply.github.com"
] | 31448155+melvio@users.noreply.github.com |
d519e664e95852fb901d5d6561d3e86e92f36e6e | 897632fe38e39bca8e067df435971079daed6be3 | /droid.py | cd37451097924b07b3662bc4c8d3178fe69f8251 | [] | no_license | ksuscella/GoKirkGo | 2c07710e0d6bd2a9eae8917862ef6890d9633b5d | 9394afdc2ce5ff430f9d2d0c203083cca352c004 | refs/heads/master | 2021-01-22T11:03:55.687410 | 2017-06-28T20:49:27 | 2017-06-28T20:49:27 | 51,276,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | from gopigo import *
import urllib
import urllib2
import json
import time
import logging
def main():
    """Poll the remote status service and act on the command it returns.

    'gopi' -> run the drive routine, reset the remote flag, poll again.
    'nope' -> wait 10 seconds and poll again.
    'quit' -> print a farewell and stop polling.
    NOTE(review): the loop is built from recursive main() calls, so a
    long-running session will eventually hit Python's recursion limit.
    """
    status = getStatus()
    if status == 'gopi':
        # Execute robot command
        print("execute robot")
        skynet()
        normal() #return back to waiting
        main() #recursive method
    if status == 'nope':
        time.sleep(10) # Delay 10 seconds before polling again (JUST WAITING AROUND)
        main()
    if status == 'quit':
        print "These are not the droids you were looking for"
def skynet():
    """Drive the rover forward, pause, then drive it back.

    enc_tgt(1, 1, 90) presumably arms both wheel encoders with a
    90-pulse target so the following fwd()/bwd() stops after that
    distance -- confirm against the gopigo library documentation.
    """
    enc_tgt(1,1,90)
    fwd()
    time.sleep(5)
    enc_tgt(1,1,90)
    bwd()
    # Older rotation/adjustment sequence kept for reference:
    # time.sleep(2)
    # enc_tgt(1,1,16.5)
    # left_rot()
    # #compass adjustment
    # time.sleep(1)
    # enc_tgt(1,1,90)
    # fwd()
def normal():
    """Reset the remote flag to 'nope' so the service reports idle again."""
    url_path = 'https://bluepen.herokuapp.com/?type=set&getValue=nope'
    # Send HTTP request (urllib2.Request with no data performs a GET)
    request = urllib2.Request(url_path)
    #Temp code - proxy
    #proxy = urllib2.ProxyHandler({'https': 'go-proxy.fpl.com:8080'})
    #opener = urllib2.build_opener(proxy)
    #urllib2.install_opener(opener)
    #Temp code - proxy
    response = urllib2.urlopen(request)
    # NOTE(review): parsed_json is never used; kept only to consume the body.
    parsed_json = json.load(response)
    #droid_status = parsed_json['status'];
    print "return to normal"
def getStatus():
    """Fetch the current command flag from the web service and return it.

    Returns the 'status' field of the JSON reply; observed values used by
    main() are 'gopi', 'nope' and 'quit'.
    """
    #module that calls a web service to see if its time to move forward
    #https://bluepen.herokuapp.com/?type=get
    #https://bluepen.herokuapp.com/?type=set&getValue=gopi
    #python -m pip install SomePackage --proxy go-proxy.fpl.com:8080
    url_path = 'https://bluepen.herokuapp.com/?type=get'
    # page id query
    query_args = { 'type':'get'}
    # urlencode data (need urllib)
    # NOTE(review): 'data' is never sent; the query is hard-coded in url_path.
    data = urllib.urlencode(query_args)
    # Send HTTP request (GET, since no body is attached)
    request = urllib2.Request(url_path)
    #Temp code - proxy
    #proxy = urllib2.ProxyHandler({'https': 'go-proxy.fpl.com:8080'})
    #opener = urllib2.build_opener(proxy)
    #urllib2.install_opener(opener)
    #Temp code - proxy
    response = urllib2.urlopen(request)
    parsed_json = json.load(response)
    droid_status = parsed_json['status'];
    print droid_status
    return droid_status
main() | [
"ksuscella@gmail.com"
] | ksuscella@gmail.com |
e213883df71c095995d36d7be412ea5743d098bc | ee853e2cdee4000b27c3fcfb2b54463c5466f5ee | /memory_tcp_ip.py | d8728f7b7eee024311fea196a71d81d745f347d8 | [] | no_license | foragingBRAIN/rPimemory | f0a76c878d41bd847ed5fe5c777643c041033888 | 3d8139fcacf82c77dfb3e0fe621d976fdc342002 | refs/heads/master | 2022-03-17T09:14:22.550462 | 2019-09-17T23:13:12 | 2019-09-17T23:13:12 | 170,197,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import socket, time
# Single-connection TCP echo server: accept one client, echo each
# received chunk back until the peer closes, then report the end time.
TCP_IP = '192.168.0.105' # pi IP
TCP_PORT = 1121
BUFFER_SIZE = 24
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)  # backlog of a single pending connection
conn, addr = s.accept()  # blocks until the client connects
print(time.time())
print('Connected to:', addr)
while 1:
    data = conn.recv(BUFFER_SIZE)
    if not data: break  # empty read == peer closed the connection
    print ("received data:", data)
    conn.send(data) # echo
print(time.time())
conn.close()
"caziot@bcm.edu"
] | caziot@bcm.edu |
466716ee755d12f65be44f4eeee168d6dee42986 | eee1f30ceb9908dce04aacb27c8ae1ea864d4022 | /part02-e01_integers_in_brackets/src/integers_in_brackets.py | 858fb722be7abb8788f19d8f048d03e480ed9f09 | [] | no_license | yodirh/Data-Analysis-with-python | d4f746f53c4e4cac38fbb638b1f3e06261da3208 | f30754ef62d9a869faf8e78c5be3267eb6b5385b | refs/heads/master | 2023-07-29T09:18:39.067595 | 2021-09-10T17:57:40 | 2021-09-10T17:57:40 | 282,915,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | #!/usr/bin/env python3
import re
'''def integers_in_brackets(s):
List =[]
a = re.sub(r"a[\d]+","ab", s)
b = re.sub(r"[\d]+[+]","ab", a)
c = re.sub(r"\+-[\d]+","ab", b)
#a = re.findall(r"[\w+]+", s)
d = re.findall(r"[-\d]+", c)
for i in d:
List.append(int(i))
return List'''
def integers_in_brackets(s):
    """Return all integers found inside square brackets in *s*.

    A match is an optional sign followed by digits, surrounded only by
    optional whitespace inside the brackets; any other bracketed
    content is ignored.
    """
    matches = re.findall(r"\[\s*([+-]?\d+)\s*\]", s)
    return [int(match) for match in matches]
def main():
    """Demonstrate integers_in_brackets on a sample string."""
    sample = " afd [128+] [47 ] [a34] [ +-43 ]tt [+12]xxx!"
    print(integers_in_brackets(sample))
main()
| [
"noreply@github.com"
] | yodirh.noreply@github.com |
520450d4d532245638edae0cf909e6a6c118d47b | 93ea27c42903aa991298b959d40a74f27724fd39 | /new_site/urls.py | 31a4b486228fd38cda322f42d8a53377f17cf484 | [] | no_license | ltlrn/meeting_room | 3a577ddb9943630118b037872457ed9b2a81ede9 | 47e5152bc978e196468bf3a9c27cf3a68cfc6799 | refs/heads/main | 2023-07-29T05:50:17.017293 | 2021-08-22T13:50:04 | 2021-08-22T13:50:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import path
from . import views
# Namespace used when reversing these urls, e.g. "new_site:index".
app_name = "new_site"
urlpatterns = [
    path("", views.index, name="index"),
    # Schedule for a single meeting room, selected by primary key.
    path("rooms/<int:room_id>", views.room_schedule, name="room_schedule"),
    path("coworkers/", views.coworkers, name="coworkers"),
    path("addroom/", views.add_room, name="add_room"),
    # NOTE(review): this route has no name=..., so it cannot be reversed;
    # confirm whether any template relies on a named login url.
    path(
        "login/",
        auth_views.LoginView.as_view(template_name="new_site/login.html"),
    ),
    path(
        "logout/",
        LogoutView.as_view(),
        # Extra kwargs dict passed through to the view.
        {"next_page": settings.LOGOUT_REDIRECT_URL},
        name="logout",
    ),
]
| [
"koro6@yandex.ru"
] | koro6@yandex.ru |
e1461c6411425ee974d36267c209f92b7be55c59 | f6188c0c27da8d2507e832908ba9de524f0b907d | /client_speed_test.py | b23314ac7daa63ea09593b18003d1800d1583546 | [] | no_license | lforet/repfibdigit | df1df2fe2ba40ede9662120ea94f7d529f5d4abc | 945ce755fd2526a1a3e242b909b93a79ac4e65fb | refs/heads/master | 2021-01-10T08:43:11.123781 | 2018-12-21T05:32:45 | 2018-12-21T05:32:45 | 8,490,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py | #!/usr/bin/python # This is client.py file
#System modules
import os
import time
import itertools
import cPickle as pickle
import numpy as np
#import cProfile
import timeit
import uuid
import sys
import fib
########################################################################
pgbreak = "-----------------------------------------------"
#----------------------------------------------------------------------
def is_repfibdigit( number_to_test):
    """Test number_to_test for the Keith (repfibdigit) property.

    A Keith number reappears in the Fibonacci-like sequence seeded with
    its own digits.  When the test succeeds, the sibling show_proof() is
    called to print the expansion; nothing is returned either way.
    """
    # list(...) keeps this portable: under Python 3, map() returns a lazy
    # iterator that cannot be indexed or sliced, which made the original
    # crash on n[0].  Under Python 2 the behavior is unchanged.
    n = list(map(int, str(number_to_test)))
    while number_to_test > n[0]:
        n = n[1:] + [sum(n)]
    # 'and' replaces the original bitwise '&'; for two bools the result is
    # identical, and 'and' short-circuits as intended.
    if (number_to_test == n[0]) and (number_to_test > 9):
        show_proof(number_to_test)
    return
def is_repfibdigit2( number_to_test):
    """Variant of is_repfibdigit that delegates the digit-sequence test
    to the compiled `fib` extension module and only handles reporting.
    """
    if fib.is_repfibdigit(number_to_test) == True:
        show_proof(number_to_test)
        #raw_input()
        #time.sleep(1)
    #else:
    #	print number_to_test, " is NOT a Keith Number"
    return
#this function is to get around the 32bit native int barrier
#not needed in 64 native systems
def my_xrange( start, stop, step):
    """Yield start, start+step, ... while the value stays below stop.

    Exists to dodge the 32-bit limit of Python 2's native xrange by
    using plain (arbitrary-precision) integers; the end is exclusive.
    """
    current = start
    while current < stop:
        yield current
        current = current + step
def show_proof(kn):
    """Print the digit-sequence expansion demonstrating kn is a Keith number.

    NOTE(review): Python 2 only (print statements, list-returning map).
    """
    print '---------------------------------------------'
    #print 'queue:', threading.current_thread()
    print kn, " is a Keith Number!"
    print "PROOF:"
    n=map(int,str(kn))
    while kn > sum(n):
        print n ," = ", sum(n)
        n=n[1:]+[sum(n)]
    print n ," = ", sum(n)
    #self.report_keith_num(number_to_test)
    #print "new keith number reported!!!!"
    print '---------------------------------------------'
    print "press ENTER to continue"
########################################################################
if __name__=="__main__":
    # Python 2 script entry: test every integer in [0, end_num) for the
    # Keith property via the compiled fib helper.
    if len(sys.argv) > 1:
        end_num = sys.argv[1]
    # NOTE(review): when no argument is given, end_num is never bound and
    # int(end_num) below raises NameError.
    nowtime = time.clock()
    # get num to work from
    start_num = 0
    print "Starting number:", start_num
    for x in xrange(start_num, int(end_num)):
        is_repfibdigit2(x)
    print
    print "completion time:", abs(nowtime - time.clock())
    print pgbreak
    #raw_input()
| [
"laird@isotope11.com"
] | laird@isotope11.com |
1f4f364904e2edd4c2fbb1ac8c89fc351c735e6a | 4e0f1e1992c98f2892eabd1d014afbc6d9fc0d20 | /blog/models.py | b60a657adcce1928c60f9d15f8da6921ce592b72 | [] | no_license | kevinyohe/djangoblogpacktpub | 1ec29794ddc67bd89d05ac8be640e874a63d5562 | e75f1129f95d2e464c3e59745770541feab31de5 | refs/heads/master | 2020-04-10T17:32:45.127967 | 2018-12-10T13:10:23 | 2018-12-10T13:10:23 | 161,177,055 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from django.db import models
# Create your models here.
class Blogpost(models.Model):
    """A single blog entry with a draft/publish flag and audit timestamps."""
    title = models.CharField(max_length=240)
    # Indexed to speed up lookups by author.
    author = models.CharField(max_length=240, db_index=True)
    body = models.TextField()
    # Posts start out as unpublished drafts.
    published = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
| [
"kevinyohe7@gmail.com"
] | kevinyohe7@gmail.com |
567df595e5f5b8287d586cb34b876c5bd83a6f21 | 0bb58d5ee536b92631053fb8e18840b462475722 | /PythonDjango/ajax_notes/venv/bin/easy_install | cca0d853c112ce269115402584d364d8eeb54260 | [] | no_license | tutfakulunto/codingDojo | a0868a46229cc3b74ff72e5a8cc5b2d18d18168b | d04ac8b49dbf5f440287ce9f73d357ca7ff274e5 | refs/heads/master | 2021-01-12T15:53:51.032396 | 2017-02-22T05:33:41 | 2017-02-22T05:33:41 | 69,322,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | #!/Users/scott/projects/CodingDojo/Python2/ajax_notes/venv/bin/python2.7
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools: normalize argv[0] by
# stripping a trailing "-script.pyw" / ".exe" suffix, then delegate to
# easy_install's main() and exit with its return code.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"c.scott.johnson@gmail.com"
] | c.scott.johnson@gmail.com | |
c2cd9e2eeb29a5ea4f14e0d08a2feeb22a11f158 | 88a39b8ec20b386400bd8b1d5fc1d5ad3314681d | /alembic/versions/2020110510_explicitly_set_autoincrement__4bac4855e710.py | f1ce52d0622ca39fc4eb31745264c74368d19d70 | [
"Apache-2.0"
] | permissive | codalab/codalab-worksheets | bb35681454a0d74903aaa7468e17303986793464 | 5be8cb3fa4b43c9e7e8f0a3b217644a7f0a39628 | refs/heads/master | 2023-08-18T10:16:01.766541 | 2023-08-06T20:02:30 | 2023-08-06T20:02:30 | 27,352,490 | 126 | 65 | NOASSERTION | 2023-09-14T14:54:07 | 2014-11-30T22:33:18 | Python | UTF-8 | Python | false | false | 2,416 | py | """Explicitly set autoincrement to True
Revision ID: 4bac4855e710
Revises: fcb22a612d2a
Create Date: 2020-11-05 10:27:43.989896
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bac4855e710'
down_revision = 'fcb22a612d2a'
TABLES = [
"bundle",
"bundle_metadata",
"bundle_dependency",
"worksheet",
"worksheet_item",
"worksheet_tag",
"group",
"user_group",
"group_bundle_permission",
"group_object_permission",
"user",
"user_verification",
"user_reset_code",
"oauth2_client",
"oauth2_token",
"oauth2_auth_code",
"chat",
]
def upgrade():
    """Re-declare every table's id column with autoincrement=True (MySQL)."""
    for table in TABLES:
        # This is necessary because the previous revision fcb22a612d2a suffers from a bug.
        # In case (1), when upgrading to fcb22a612d2a , the id column does not retain the auto_increment property.
        # In case (2), when starting from fcb22a612d2a , the id column has the auto_increment property.
        # In case (1), upgrading to this revision, alembic is unable to make the id column auto_incrementing
        # if it has a row with the id 0. The error is something like:
        # ERROR 1062 (23000): ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry
        # '1' for key 'PRIMARY'
        #
        # This happens because the AUTO_INCREMENT value starts at 1, so a column with AUTO_INCREMENT
        # cannot have a 0 row. As a result, we get around this by moving the 0 to the end of the table (the next ID),
        # before setting it to be auto-incrementing.
        # Note that this is MySQL-specific.
        op.execute(
            f'''
            SET @maxid = (SELECT MAX(id)+1 FROM `{table}`);
            UPDATE `{table}` SET id = @maxid WHERE id = 0;
            '''
        )
        op.alter_column(
            table,
            'id',
            type_=sa.BigInteger().with_variant(sa.Integer, "sqlite"),
            existing_type=sa.BigInteger().with_variant(sa.Integer, "sqlite"),
            nullable=False,
            autoincrement=True,
        )
def downgrade():
    """Revert step for 4bac4855e710.

    NOTE(review): this issues the same alter_column as upgrade() (minus
    the id-0 relocation), so downgrading does not actually restore the
    previous column definition.
    """
    for table in TABLES:
        op.alter_column(
            table,
            'id',
            type_=sa.BigInteger().with_variant(sa.Integer, "sqlite"),
            existing_type=sa.BigInteger().with_variant(sa.Integer, "sqlite"),
            nullable=False,
            autoincrement=True,
        )
| [
"noreply@github.com"
] | codalab.noreply@github.com |
942e2725c6d37214f3820468ac9cfc3e856fc7cf | c33fd38d2c79ca740b0bcde934034168a37698aa | /manage.py | 649d71afb79043b84f3062536aa7b37fac5e5095 | [] | no_license | kxnaylorCLE216/resumeSite | 7531da230b35e7abb057f53cb44d747e22c4c81a | 7aaa2aa59ae1fc4b8faf7c56c904c65b9fdf9b6e | refs/heads/master | 2022-10-06T04:47:28.667125 | 2019-06-28T02:59:02 | 2019-06-28T02:59:02 | 193,295,961 | 1 | 0 | null | 2022-09-23T22:26:15 | 2019-06-23T01:56:11 | Python | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django management entry point: select the default settings module
    and dispatch the command line to django.core.management."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'resumeSite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint about the usual causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"naylorkx@gmail.com"
] | naylorkx@gmail.com |
674dae6998f7cf6e22bb6b05bbc6cc4cb55e8218 | 1103df1a87e270aa4f1887cd1d73f3edbff0b87a | /opgg_champion_tier.py | 3d9f648aa362cae265e4235019285b05b0f854d2 | [] | no_license | rhdtka21/Crawling | 941f03941254e72cd826c8b53395d4dfbd9954f2 | 57d96c161248412d98029e5ff5ea5336553ed87d | refs/heads/master | 2022-09-16T13:41:21.060684 | 2020-06-05T10:10:22 | 2020-06-05T10:10:22 | 268,285,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | import time
from bs4 import BeautifulSoup
import telegram
import requests
import os
LOLALARMCHANNEL = ---your_telegram_channel_code---
bot = telegram.Bot(token='---your_telegram_token---')
hdr = {'Accept-Language': 'ko_KR,en;q=0.8', 'User-Agent': (
'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Mobile Safari/537.36')}
url = 'https://www.op.gg/champion/statistics'
req = requests.get(url, headers=hdr)
html = req.text
s = BeautifulSoup(html, 'html.parser')
message = ''
for lane in ['TOP', 'JUNGLE', 'MID', 'ADC', 'SUPPORT']:
message = lane + ' 상위 5 챔피언\n'
soup = s.find("div", {"class" : "detail-ranking__content detail-ranking__content--champ-list ChampionRankingList-WinRatio-{} tabItem".format(lane)})
names = soup.find_all("div", {"class" : "champion-ratio__name"})[:5]
infos = soup.find_all("div", {"class" : "champion-ratio__percent"})[:10]
for idx in range(5):
message += (str(idx+1) + '. ' + names[idx].text.strip() + '\n')
message += (' ' + infos[2*idx].text.strip().replace('\n', ' ') + ' ' + infos[2*idx+1].text.strip().replace('\n', ' ')) + '\n'
message += '\n'
bot.sendMessage(LOLALARMCHANNEL, message)
| [
"noreply@github.com"
] | rhdtka21.noreply@github.com |
b323ebfc79904933320dde343940702d84f5b41c | 1ea21334d3d925f11bf560fa056f3d7822d47791 | /Client/Client.py | 913a851dd2ebe867f41b9c7997587b7dd1520435 | [] | no_license | gabivel/Practica3 | 0a0d057d78f7a4d0f719f4079c89c22fe3cfe766 | 0d138690fc43db29c1857508f4100315c8acd45f | refs/heads/main | 2023-09-06T08:40:27.737576 | 2021-11-15T01:52:01 | 2021-11-15T01:52:01 | 428,089,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,086 | py | import socket
import sys
import os
import struct
IP = "192.168.1.79"
PORT = 1121
buffer_size = 1024
def user(user):
    """Send the FTP USER command and report whether the account exists.

    Returns True on status 331 (user exists, password required), False
    on 332 (unknown user), and None for any other reply or send failure.
    Relies on the module-level TCPClientSocket control connection.
    """
    try:
        # Send the USER command over the control connection.
        TCPClientSocket.send(str.encode(user))
    except Exception:  # narrowed from a bare except so Ctrl-C still works
        # Typo fix: "bene" -> "been".
        print("Couldn't make server request. Make sure a connection has been established.")
        return
    # Wait for the server status (does the user exist?).
    estado = TCPClientSocket.recv(buffer_size)
    estado = estado.decode('utf-8')
    print(estado)
    if estado[:3] == "331":
        return True
    elif estado[:3] == "332":
        return False
def passwrd(passwrd):
    """Send the FTP PASS command and report whether login succeeded.

    Returns True on status 230 (logged in), False on 332 (rejected),
    and None for any other reply or send failure.  Relies on the
    module-level TCPClientSocket control connection.
    """
    try:
        # Send the PASS command over the control connection.
        TCPClientSocket.send(str.encode(passwrd))
    except Exception:  # narrowed from a bare except so Ctrl-C still works
        # Typo fixes: "Makesure ... bene" -> "Make sure ... been",
        # matching the wording used by the other commands.
        print("Couldn't make server request. Make sure a connection has been established.")
        return
    # Wait for the server status (is the password correct?).
    estado = TCPClientSocket.recv(buffer_size)
    estado = estado.decode('utf-8')
    print(estado)
    if estado[:3] == "230":
        return True
    elif estado[:3] == "332":
        return False
def port_data(port_data):
    """Send a raw PORT command string over the control connection.

    Prints a diagnostic and returns None when the send fails.
    """
    try:
        # Send the PORT command over the control connection.
        TCPClientSocket.send(str.encode(port_data))
    except Exception:  # narrowed from a bare except so Ctrl-C still works
        # Typo fix: "bene" -> "been".
        print("Couldn't make server request. Make sure a connection has been established.")
        return
def retr(file_name,peticion):
    """Download `file_name` from the server using an active-mode transfer.

    `peticion` is the full RETR command line to send.  The operator is
    prompted for a "PORT n" command; a local data socket is bound on
    192.168.1.89:n, the server connects back to it, and the file body is
    received preceded by its size packed as a 4-byte int.
    """
    try:
        # Send the RETR command.
        TCPClientSocket.send(str.encode(peticion))
    except:
        print("Couldn't make server request. Make sure a connection has bene established.")
        return
    estado = TCPClientSocket.recv(buffer_size)
    estado = estado.decode('utf-8')
    print(estado)
    PORT_DATA = input('Ingrese comando PORT X: ')
    if PORT_DATA[:4].upper() == "PORT":
        DataSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # NOTE(review): the local data address is hard-coded; confirm it
        # matches this client's actual network interface.
        DataSocket.bind(("192.168.1.89", int(PORT_DATA[5:])))
        DataSocket.listen()
        port_data(PORT_DATA)
        conn, addr = DataSocket.accept()
        estado = TCPClientSocket.recv(buffer_size)
        estado = estado.decode('utf-8')
        print(estado)
        # First 4 bytes of the data stream carry the file size.
        file_size = struct.unpack("i", conn.recv(4))[0]
        output_file = open(file_name, "wb")
        bytes_recieved = 0
        print("\nDownloading...")
        # NOTE(review): the counter adds buffer_size even when recv()
        # returns fewer bytes, so the loop can stop before the whole
        # file has arrived.
        while bytes_recieved < file_size:
            l = conn.recv(buffer_size)
            output_file.write(l)
            bytes_recieved += buffer_size
        output_file.close()
        DataSocket.close()
        print("Successfully downloaded {}".format(file_name))
    else:
        print("comando incorrecto, operacion cancelada")
def stor(file_name,peticion):
    """Upload `file_name` to the server using an active-mode transfer.

    `peticion` is the full STOR command line to send.  Mirrors retr():
    the operator supplies a "PORT n" command, a local data socket is
    bound, the server connects back, and the file is sent preceded by
    its size packed as a 4-byte int.
    """
    try:
        # Send the STOR command.
        TCPClientSocket.send(str.encode(peticion))
    except:
        print("Couldn't make server request. Make sure a connection has bene established.")
        return
    estado = TCPClientSocket.recv(buffer_size)
    estado = estado.decode('utf-8')
    print(estado)
    PORT_DATA = input('Ingrese comando PORT X: ')
    if PORT_DATA[:4].upper() == "PORT":
        DataSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        DataSocket.bind(("192.168.1.89", int(PORT_DATA[5:])))
        DataSocket.listen()
        port_data(PORT_DATA)
        conn, addr = DataSocket.accept()
        estado = TCPClientSocket.recv(buffer_size)
        estado = estado.decode('utf-8')
        print(estado)
        try:
            # Check the file exists
            content = open(file_name, "rb")
        except:
            # NOTE(review): execution continues after this message, so a
            # missing file leads to a NameError on 'content' below.
            print("Couldn't open file. Make sure the file name was entered correctly.")
        # Announce the payload size as a 4-byte int, then stream the body.
        conn.send(struct.pack("i", os.path.getsize(file_name)))
        print("Sending file...")
        l = content.read(buffer_size)
        while l:
            conn.send(l)
            l = content.read(buffer_size)
        content.close()
        conn.close()
        DataSocket.close()
    else:
        print("comando incorrecto, operacion cancelada")
def quit():
    """Send QUIT, print the server farewell, and close the control socket.

    NOTE(review): shadows the builtin quit() within this module.
    """
    TCPClientSocket.send(str.encode("QUIT"))
    # Wait for server go-ahead
    print(TCPClientSocket.recv(buffer_size).decode('utf-8'))
    TCPClientSocket.close()
    print("Server connection ended")
print("\nBienvenido al cliente FTP para transferencia de datos. \n\nPara transferir archivos, ingrese usuario y contraseña\n\nComandos disponibles\nUSER : Connect to server\nPASS : Connect to server\nSTOR file_path : envia file\nRETR file_path : Download file\nPORT number : data_port\nQUIT : Exit")
TCPClientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPClientSocket.connect((IP, PORT))
estado = TCPClientSocket.recv(buffer_size)
estado = estado.decode('utf-8')
print(estado)
#while True:
# Listen for a command
prompt = input("\nComando> ")
if prompt[:4].upper() == "USER":
if user(prompt):
print("Se espera pass")
pwd= input("\nComando> ")
if pwd[:4].upper() == "PASS":
if passwrd(pwd):
os.chdir("C:/Users/Velasco/Documents/Redes2/FTP/"+prompt[5:])
while True:
print("\nPuede realizar tranferencia de archivos ")
command = input("Comando> ")
if command[:4].upper() == "RETR":
file_name = command[5:]#Se envia la peticion
retr(file_name,command)
elif command[:4].upper() == "STOR":
file_name = command[5:]
stor(file_name,command)
elif command.upper() == "QUIT":
quit()
break
else:
print("Command not recognised; please try again")
else:
print("Command not recognised; please try again")
elif prompt[:4].upper() == "QUIT":
quit()
# break
else:
print("Command not recognised; please try again") | [
"gabi.vellop@gmail.com"
] | gabi.vellop@gmail.com |
6bb7c1483d15b67fce22f86a548a65af22f97b86 | ec50be519921e5f16ad559b66fa037d8d7d0f09e | /wallet.py | 29a88cc092f071158f794958bbae758ae18df649 | [] | no_license | dingsky/mcjDemo | 097003bd4d5edf566182696e650611ee4a8fb512 | 49140de8e83e4f301a4ebb4f638d912c14ff24a5 | refs/heads/master | 2023-04-03T07:02:32.325653 | 2021-04-15T05:37:01 | 2021-04-15T05:37:01 | 357,154,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | import win32com.client
import time
# Desktop-automation script driven through the dm.dmsoft COM plugin:
# launches the target application, logs in as super/1, then walks the UI
# to create an "hsm-hd" wallet named "tesx".  All screen coordinates and
# image paths are environment-specific.
dm = win32com.client.Dispatch('dm.dmsoft')
# Move the cursor to the target program's icon
dm.MoveTo(275,9)
time.sleep(1)
# Double-click to launch it
dm.LeftDoubleClick()
time.sleep(3)
# Single-click to select the window
dm.LeftClick()
time.sleep(1)
# Press Tab to select the first input box
dm.KeyPress(9)
time.sleep(1)
# Type the user name
dm.KeyPress(16)
dm.KeyPressChar('s')
dm.KeyPressChar('u')
dm.KeyPressChar('p')
dm.KeyPressChar('e')
dm.KeyPressChar('r')
time.sleep(1)
# Press Tab to select the second input box
dm.KeyPress(9)
time.sleep(1)
# Type the password
dm.KeyPressChar('1')
time.sleep(1)
# Press Tab to select the confirm button
dm.KeyPress(9)
time.sleep(1)
# Press Enter to log in
dm.KeyPress(13)
time.sleep(1)
# Get the window handle
dm.KeyPress(9)
hwnd = dm.GetMousePointWindow()
print('hwnd',hwnd)
# Bind the window
ret = dm.BindWindow(hwnd, 'normal', 'normal', 'normal', 0)
if ret != 1:
    print('bindWindow error')
    # NOTE(review): bare 'exit' is a no-op expression; exit() was intended.
    exit
time.sleep(1)
# Locate the wallet-service icon on screen
x, y = 0, 0
dm_ret = dm.FindPic(0,0,2000,2000,"E:\golang\gocode\src\mcjDemo\wallet.bmp","000000",0.9,0, x, y)
print('dm_ret', dm_ret[1], dm_ret[2])
# NOTE(review): FindPic appears to return a sequence (dm_ret[1] is indexed
# above), so 'dm_ret != -1' is presumably always true -- verify.
if dm_ret != -1:
    dm.MoveTo(dm_ret[1], dm_ret[2])
    time.sleep(1)
    dm.LeftClick()
    time.sleep(1)
else:
    print("does not found ", x, y)
# Press the wallet-create button
dm.KeyPress(9)
time.sleep(1)
dm.KeyPress(13)
time.sleep(3)
# Open the wallet-selection dropdown
dm.KeyPress(9)
time.sleep(1)
dm.KeyPress(13)
time.sleep(1)
# Select the hsm-hd wallet
dm_ret = dm.FindPic(0,0,2000,2000,"E:\golang\gocode\src\mcjDemo\hsm.bmp","000000",0.9,0, x, y)
print('dm_ret', dm_ret[1], dm_ret[2])
if dm_ret != -1:
    dm.MoveTo(dm_ret[1], dm_ret[2])
    time.sleep(1)
    dm.LeftClick()
    time.sleep(1)
else:
    print("does not found ", x, y)
# Type the wallet name
dm.KeyPress(9)
time.sleep(1)
dm.KeyPressChar('t')
dm.KeyPressChar('e')
dm.KeyPressChar('s')
dm.KeyPressChar('x')
time.sleep(1)
# Press Next
dm.KeyPress(9)
time.sleep(1)
dm.KeyPress(13)
time.sleep(1)
# Type the password
dm.KeyPress(9)
time.sleep(1)
dm.KeyPressChar('1')
# Next
dm.KeyPress(9)
time.sleep(1)
dm.KeyPress(9)
time.sleep(1)
dm.KeyPress(13)
time.sleep(1) | [
"dingchun_sky@163.com"
] | dingchun_sky@163.com |
cc7e0e0355ebe8a4e751cf70dfecb49e3cec4013 | 339b587415639bb3824663eb7e3456cb14cc618d | /tests/unit/thompson/mappeds_test.py | 4719c179e81a322aeb3b6ff054b0e6859a5840fc | [
"MIT"
] | permissive | ageldama/thompson-py-poc | 676855efcd7221d24cc8900136827052384f6e75 | d90a32948f2abfa497f84a209b7e591f637c7662 | refs/heads/master | 2020-03-05T18:28:02.194911 | 2017-01-25T15:15:02 | 2017-01-25T15:15:02 | 85,395,509 | 1 | 0 | null | 2017-03-18T11:13:58 | 2017-03-18T11:13:58 | null | UTF-8 | Python | false | false | 3,306 | py | # -*- coding: utf-8; -*-
import thompson.evaluators.registry # noqa: F401
from thompson.nodes.literals import StringVal, NumberVal, BoolVal, NilConst
from thompson.nodes.literals import FunctionParamVal
from thompson.nodes.literals import MappedVal, MappedFunctionVal
from thompson.nodes.literals import NoWrappingMappedFunctionVal
from thompson.nodes.ops import Assign, BindingRef, Funcall
from operator import add
def test_mapped_var(empty_context_eval):
    """A MappedVal assigned to a binding evaluates back to a MappedVal."""
    E = empty_context_eval
    E(Assign('x', MappedVal(42)))
    assert isinstance(E(BindingRef('x')), MappedVal)
def test_mapped_fun(empty_context_eval):
    """A Python callable wrapped in MappedFunctionVal is callable via Funcall."""
    E = empty_context_eval
    N = NumberVal
    E(Assign('add', MappedFunctionVal(add,
                                      [FunctionParamVal('a'),
                                       FunctionParamVal('b')])))
    result = E(Funcall(BindingRef('add'), [N(1), N(2)]))
    assert result == N(3)
def simple_str_fun(s):
    """Repeat the given value three times (sequence repetition)."""
    return 3 * s
def simple_bool_fun(b):
    """Return the logical negation of *b* as a bool."""
    return not bool(b)
def always_none(v):
    """Check that *v* is None and hand it back unchanged."""
    assert v is None
    return v
def test_mapped_fun_unwrap_and_wrap(empty_context_eval):
    """Arguments are unwrapped to plain Python values and results
    rewrapped: str, bool and None all round-trip through mapped calls."""
    E = empty_context_eval
    S, B = StringVal, BoolVal
    E(Assign('simple_str_fun',
             MappedFunctionVal(simple_str_fun,
                               [FunctionParamVal('s')])))
    E(Assign('simple_bool_fun',
             MappedFunctionVal(simple_bool_fun,
                               [FunctionParamVal('b')])))
    E(Assign('always_none',
             MappedFunctionVal(always_none,
                               [FunctionParamVal('n')])))
    # str.
    result = E(Funcall(BindingRef('simple_str_fun'), [S('foo')]))
    assert result == S('foofoofoo')
    # bool.
    result = E(Funcall(BindingRef('simple_bool_fun'), [B(True)]))
    assert result == B(False)
    # none.
    result = E(Funcall(BindingRef('always_none'), [NilConst]))
    assert result == NilConst
def dict_create():
    """Return a fresh, empty dictionary."""
    return dict()
def dict_put(d, k, v):
    """Store *v* under key *k* in dict *d* (returns None)."""
    d.update({k: v})
def dict_get(d, k):
    """Return d[k]; raises KeyError when the key is missing."""
    return d.__getitem__(k)
def test_mapped_dict_funs(empty_context_eval):
    """A plain dict created and mutated through mapped functions stays
    usable as an opaque MappedVal across calls."""
    E = empty_context_eval
    S = StringVal
    E(Assign('dict_create', MappedFunctionVal(dict_create, [])))
    E(Assign('dict_put',
             MappedFunctionVal(dict_put,
                               [FunctionParamVal('d'),
                                FunctionParamVal('k'),
                                FunctionParamVal('v')])))
    E(Assign('dict_get',
             MappedFunctionVal(dict_get,
                               [FunctionParamVal('d'),
                                FunctionParamVal('k')])))
    #
    d = E(Funcall(BindingRef('dict_create'), []))
    assert isinstance(d, MappedVal)
    E(Funcall(BindingRef('dict_put'), [d, S('foo'), S('bar')]))
    result = E(Funcall(BindingRef('dict_get'), [d, S('foo')]))
    assert result == S('bar')
def str_mult(s, times):
    """Unwrap a StringVal and a NumberVal, multiply, and rewrap as StringVal."""
    repeated = s.get() * times.get()
    return StringVal(repeated)
def test_no_wrapping_mapped_fun(empty_context_eval):
    """NoWrappingMappedFunctionVal passes wrapped values straight through
    to the Python callable (str_mult does its own unwrapping)."""
    E = empty_context_eval
    S, N = StringVal, NumberVal
    params = [FunctionParamVal('s'), FunctionParamVal('times')]
    E(Assign('str_mult', NoWrappingMappedFunctionVal(str_mult, params)))
    #
    result = E(Funcall(BindingRef('str_mult'), [S('foo'), N(3)]))
    assert result == S('foo' * 3)
| [
"jong-hyouk.yun@zalando.de"
] | jong-hyouk.yun@zalando.de |
af1992f34cf24c1e8fada9620f28064fd69b6054 | 396c6ee6cc5a3b24b6659eb867570577933b262f | /lib/page_objects/grading/grading_page.py | 70b79d7112f14f1b89f0ca3e6e6de157756c5e5c | [
"MIT"
] | permissive | tcmRyan/DojoMiner | 51b4201fcc10cd165b7b45d55cd15f419de35161 | 19ce44e737dce13e9ee4975347ec9e9fdddce1bb | refs/heads/master | 2023-01-05T14:38:05.526670 | 2020-11-02T01:22:16 | 2020-11-02T01:22:16 | 170,051,375 | 0 | 0 | MIT | 2020-11-02T01:22:17 | 2019-02-11T01:56:03 | Python | UTF-8 | Python | false | false | 2,540 | py | STARS_EARNED_COL = 5
DETAILS_COL = 6
class GradingPage:
    """
    The Grading Page is a page object representation of the grading page in the dojo.
    This means that you can access the elements of the GDP as if you were accessing the
    properties of a class.

    `page` looks like a requests_html-style response: elements are located
    via `page.html.find(css, ...)` / `page.html.xpath(...)` -- confirm
    against the caller.
    """
    def __init__(self, page):
        self.page = page
        # Fail fast if the browser session is not authenticated.
        self.validate_login()
    def validate_login(self):
        """Raise EnvironmentError when the fetched page is the login screen."""
        login_selector = "div.container-fluid:nth-child(2) > h2:nth-child(1)"
        if (
            self.page.html.find(login_selector, first=True)
            or self.page.url == "https://gdp.code.ninja/Account/Login"
        ):
            raise EnvironmentError("Browser Not Authenticated")
    @property
    def page_count(self):
        """Number of pages in the pagination bar (one <a> per page)."""
        pagination_selector = (
            "body > div.body-content > div:nth-child(2) > div:nth-child(2) > ul"
        )
        pagination = self.page.html.find(pagination_selector, first=True)
        page_count = len(pagination.find("a"))
        return page_count
    @property
    def table(self):
        """The grading table element, located by absolute XPath."""
        table_selector = "/html/body/div[2]/div[2]/div[3]/table"
        table = self.page.html.xpath(table_selector, first=True)
        return table
    @property
    def table_headers(self):
        """Column header texts; an empty header becomes "Play and Grade"."""
        headers_selector = "thead > tr > th"
        headers_html = self.table.find(headers_selector)
        return [
            header.text if header.text else "Play and Grade" for header in headers_html
        ]
    def _row_data(self, row_html):
        """Extract one table row, special-casing the stars and details columns."""
        td_tags = row_html.find("td")
        row_data = []
        for i, td in enumerate(td_tags):
            if i == STARS_EARNED_COL:
                row_data.append(self._handle_star_td(td))
            elif i == DETAILS_COL:
                row_data.append(self._get_details_link(td))
            else:
                row_data.append(td.text)
        return row_data
    def _handle_star_td(self, td):
        """Return "Not Graded" verbatim, or the earned-star count as a string."""
        if td.text == "Not Graded":
            return td.text
        else:
            star_selector = "div > span > i.fa-star"
            earned_stars = td.find(star_selector)
            return str(len(earned_stars))
    def _get_details_link(self, td):
        """Return the href of the details anchor in this cell."""
        anchor = td.find("a", first=True)
        return anchor.attrs["href"]
    @property
    def grading_rows(self):
        """All body rows of the grading table, each as a list of cell values."""
        row_selectors = "tbody > tr"
        rows = self.table.find(row_selectors)
        return [self._row_data(row_html) for row_html in rows]
    @property
    def grades(self):
        """Rows zipped with the header names, one dict per submission."""
        return [dict(zip(self.table_headers, row)) for row in self.grading_rows]
| [
"ryan@tacticalcodemonkeys.com"
] | ryan@tacticalcodemonkeys.com |
9f00a06680c92b223b7ee26d58a52a1c5c40d28e | 26051533c02fb162854acb62c20e0819b9a1d0a1 | /meiduo_mall/meiduo_mall/apps/orders/views.py | a27afe35509519b21a1b2b4400505defd2ba8e6b | [
"MIT"
] | permissive | HOXI818/meiduomail | 5916259d2adff73788a03a6fcf6812a3ee7774dc | 8f398ac8ac0414fd23c60314db4f27dd9e68b2cd | refs/heads/master | 2020-04-23T15:35:35.107782 | 2019-04-24T10:37:20 | 2019-04-24T10:37:20 | 171,270,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | from decimal import Decimal
from django.shortcuts import render
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from goods.models import SKU
from orders.serializers import OrderSKUSerializer, OrderSerializer
# Create your views here.
# POST /orders/
class OrdersView(GenericAPIView):
permission_classes = [IsAuthenticated]
serializer_class = OrderSerializer
def post(self, request):
"""
订单数据保存:
1. 获取参数并进行校验(参数完整性,address是否存在,pay_method是否合法)
2. 保存订单的数据
3. 返回应答,订单创建成功
"""
# 1. 获取参数并进行校验(参数完整性,address是否存在,pay_method是否合法)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
# 2. 保存订单的数据(create)
serializer.save()
# 3. 返回应答,订单创建成功
return Response(serializer.data, status=status.HTTP_201_CREATED)
# GET /orders/settlement/
class OrderSettlementView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request):
"""
获取登录用户结算商品的数据:
1. 从登录用户的redis购物车记录中获取用户购物车中被勾选的商品id和对应数量count
2. 根据商品id获取对应的商品数据并组织运费
3. 将数据序列化并返回
"""
# 获取登录用户
user = request.user
# 1. 从登录用户的redis购物车记录中获取用户购物车中被勾选的商品id和对应数量count
# 获取redis链接
redis_conn = get_redis_connection('cart')
# 从redis set中获取用户购物车中被勾选的商品的id
cart_selected_key = 'cart_selected_%s' % user.id
# (b'<sku_id>', b'<sku_id>', ...)
sku_ids = redis_conn.smembers(cart_selected_key)
# 从redis hash中获取用户购物车中添加的所有商品的id和对应数量count
cart_key = 'cart_%s' % user.id
# {
# b'<sku_id>': b'<count>',
# ...
# }
cart_redis = redis_conn.hgetall(cart_key)
# 组织数量
# {
# '<sku_id>': '<count>',
# ...
# }
cart_dict = {}
for sku_id, count in cart_redis.items():
cart_dict[int(sku_id)] = int(count)
# 2. 根据商品id获取对应的商品数据并组织运费
skus = SKU.objects.filter(id__in=sku_ids)
for sku in skus:
# 给sku对象增加属性count,保存该商品所要结算的数量
sku.count = cart_dict[sku.id]
serializer = OrderSKUSerializer(skus, many=True)
# 组织运费:10
freight = Decimal(10.0)
# 3. 将数据序列化并返回
res_data = {
'freight': freight,
'skus': serializer.data
}
return Response(res_data)
| [
"HOXI_818@outlook.com"
] | HOXI_818@outlook.com |
d6068063c2423e47264845a135b5280ce088daf1 | d5a3d88e0787349d08f519ce3f7abef2797443ba | /Utils.py | 4a67dcb7ff4738bd63fb4cfddf561830a5cd99b0 | [] | no_license | 572192274/BERT-MultiLabel-Multilingual-Classification | 30f3319588704076f8f8369a3f27e11c5cd4030e | 61dbb9f6576b2aa5b5bfe166a463e299e9f615c5 | refs/heads/main | 2023-06-08T04:08:38.325194 | 2021-06-30T13:12:28 | 2021-06-30T13:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | import pandas as pd
import sklearn
import numpy as np
def optimizer_params(model):
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
return optimizer_parameters
# True_label :- true label
# Predicted_label :- predicted label
def accuracy_score(True_label,Predicted_label):
Accuracy_Score = sklearn.metrics.accuracy_score(True_label, Predicted_label)
return Accuracy_Score
# True_label :- true label
# Predicted_label :- predicted label
def f1_score(True_label,Predicted_label):
F1_Score = sklearn.metrics.f1_score(True_label,Predicted_label,average = "micro")
return F1_Score
# True_label :- true label
# Predicted_label :- predicted label
# labels = :- a list of column label name
def multilabel_confusiom_matrix(True_label,Predicted_label,labels):
MCM = sklearn.metrics.multilabel_confusion_matrix(y_true=True_label,
y_pred = Predicted_label,
labels =labels)
return MCM
#Creating the Accuracy Measurement Function
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(predicted, true):
pred_flat = np.argmax(predicted, axis=1).flatten()
labels_flat = true.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
| [
"noreply@github.com"
] | 572192274.noreply@github.com |
d5cf2cc4602a29dd3977f0ad4b917e84da9e24e4 | 68c2a4148651b5259323fdc52002c357e3e001c3 | /net.py | 371298a57d2d455bd5d7da37777a07e98ac6a0e2 | [] | no_license | homura10086/Network_optimization | 4fb07ff0ec3e9b5924fb159054d860709f75f825 | e8d6ea5d018540a6cbcbdfbde872a6a6a000cbcd | refs/heads/master | 2023-08-18T07:36:38.459263 | 2021-10-18T11:18:56 | 2021-10-18T11:18:56 | 410,818,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,973 | py | import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib as mat
import json
# fr = open("Performance.csv", "r")
# ls = []
# for line in fr:
# line = line.replace("\n", "")
# ls.append(line.split(','))
# fr.close()
# fw = open("Performance.json", "w")
# for i in range(1, len(ls)):
# ls[i] = dict(zip(ls[0], ls[i]))
# json.dump(ls[1:], fw, sort_keys=True, indent=4, ensure_ascii=False)
# fw.close()
node = np.loadtxt('node.csv', delimiter=',')
edge = np.loadtxt('edge.csv', delimiter=',')
label = node[:, 0].astype(np.int)
dis = node[:, 1:3]
label1, label2, label3 = label[:3], label[3:9], label[9:]
dis1, dis2, dis3 = dis[:3], dis[3:9], dis[9:]
G, G1, G2, G3 = nx.Graph(), nx.Graph(), nx.Graph(), nx.Graph()
# G.add_nodes_from(node)
G1.add_nodes_from(label1)
G2.add_nodes_from(label2)
G3.add_nodes_from(label3)
for col in edge:
G.add_edges_from([(col[0], col[1])])
pos, pos1, pos2, pos3 = {}, {}, {}, {}
for i, (row1, row2) in enumerate(dis):
pos.update({i + 1: [row1, row2]})
for i, (row1, row2) in enumerate(dis1):
pos1.update({i + 1: [row1, row2]})
for i, (row1, row2) in enumerate(dis2):
pos2.update({i + 4: [row1, row2]})
for i, (row1, row2) in enumerate(dis3):
pos3.update({i + 10: [row1, row2]})
nx.draw_networkx(G, pos=pos, with_labels=False, node_color='r', node_size=1, width=0.2)
nx.draw_networkx(G1, pos=pos1, with_labels=True, nodelist=label1, node_color='c', node_size=200, font_size=10)
nx.draw_networkx(G2, pos=pos2, with_labels=True, nodelist=label2, node_color='b', node_size=200, font_color='w',
font_size=10)
nx.draw_networkx(G3, pos=pos3, with_labels=False, nodelist=label3, node_color='r', node_size=1)
plt.savefig("net.png")
plt.show()
# labels = '4G', '5G', '6G'
# sizes = [60, 30, 10]
# plt.pie(sizes, labels=labels, shadow=True, startangle=90, autopct='%d%%')
# plt.title('SupportedType')
# plt.savefig('SupportedType.png')
# plt.show()
#
# labels = "手机", "Pad", "电脑", "传感器"
# sizes = [50, 20, 25, 5]
# matplotlib.rcParams['font.family'] = 'SimHei'
# plt.pie(sizes, labels=labels, shadow=True, startangle=90, autopct='%d%%',)
# plt.title('TerminalType(cpe)')
# plt.savefig('TerminalType(cpe).png')
# plt.show()
# sizes = [80, 10, 5, 5]
# plt.pie(sizes, labels=labels, shadow=True, startangle=90, autopct='%d%%',)
# plt.title('TerminalType(cpn)')
# plt.savefig('TerminalType(cpn).png')
# plt.show()
# labels = "华为", "小米"
# sizes = [50, 50]
# mat.rcParams['font.family'] = 'SimHei'
# plt.pie(sizes, labels=labels, shadow=True, startangle=90, autopct='%1.1f%%')
# plt.title('TerminalBrand(传感器)')
# plt.savefig('TerminalBrand(传感器).png')
# plt.show()
# data = np.loadtxt('data.csv', dtype=int, delimiter=',')
# plt.hist(data[:, 1], bins=30, edgecolor='black', histtype='bar', facecolor='b')
# plt.title('UpOctDL')
# plt.xlabel('UpOctDL(KB)')
# plt.ylabel('Frequency')
# plt.savefig('UpOctDL.png')
# plt.show()
| [
"1771931743@qq.com"
] | 1771931743@qq.com |
9a03609836f456e14f248d95784d91d30d8912b7 | f3d9ea69ff4ebd74ed5d9ece10b4c029b4281437 | /c4c_employee_publisher.py | a99b20864220d9f26329ebc15ba2a8987f2ee021 | [] | no_license | mydev-work/cloudformation | 3b0de2f85c4865accbe2b7edcba596d02418010a | b4972acfa058628544488dd0f645fbae1627b06a | refs/heads/master | 2020-11-26T11:37:17.109685 | 2020-07-03T11:09:41 | 2020-07-03T11:09:41 | 229,060,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,611 | py | # imports
from datetime import datetime
import requests
import uuid
import json
import logging
import boto3
from os import environ
from Common.LambdaResponse import LambdaResponse
from Common.DataLogger import DataLogger
from Common.Log import Log
from Common.ParameterStoreHelper import ParameterStoreHelper
# initial common properties
version = environ.get('version') or 'v1'
environment = environ.get('environment') or 'dev'
eventSource = 'C4C'
invocation_timestamp = datetime.utcnow()
odataHeaders = {'Content-Type': 'application/json'}
# sqs settings
sqsClient = boto3.client('sqs')
queueUrl = environ.get('queueUrl') or ''
# initialze logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
try:
# get odata settings
Debugging=!!!!
odataDoman = ParameterStoreHelper.get_value_from_ssm_parameter_store("/{env}/aws_c4c/employee_publisher/{ver}/odata_doman".format(env=environment, ver=version))
odataUserName = ParameterStoreHelper.get_value_from_ssm_parameter_store("/{env}/aws_c4c/employee_publisher/{ver}/odata_userName".format(env=environment, ver=version))
odataPassword = ParameterStoreHelper.get_value_from_ssm_parameter_store("/{env}/aws_c4c/employee_publisher/{ver}/odata_password".format(env=environment, ver=version), True)
odataUrlFormat = ParameterStoreHelper.get_value_from_ssm_parameter_store("/{env}/aws_c4c/employee_publisher/{ver}/odata_urlFormat".format(env=environment, ver=version))
for record in event['Records']:
try:
payload = json.loads(record['body'])
# request c4c for the details
result = requests.get(url=odataUrlFormat.format(domain=odataDoman, objectId=payload["data"]["root-entity-id"]), headers=odataHeaders, auth=(odataUserName, odataPassword))
if(result.status_code != 200):
raise Exception(result)
jsonResult = result.json()
record_to_publish = {
"ObjectID: jsonResult['d']['results']['ObjectID'],
"EmployeeID": jsonResult['d']['results']['EmployeeID'],
"BusinessPartnerID": jsonResult['d']['results']['BusinessPartnerID'],
"FirstName": jsonResult['d']['results']['FirstName'],
"MiddleName": jsonResult['d']['results']['MiddleName'],
"LastName": jsonResult['d']['results']['LastName'],
"Email": jsonResult['d']['results']['Email'],
"CountryCode": jsonResult['d']['results']['CountryCode'],
"CountryCodeText": jsonResult['d']['results']['CountryCodeText'],
"UserLockedIndicator": str(jsonResult['d']['results']['UserLockedIndicator']),
"TeamID": getattr(jsonResult['d']['results'], 'TeamID', ''),
"TeamName": getattr(jsonResult['d']['results'], 'TeamName', ''),
"UserAvailableIndicator": str(getattr(jsonResult['d']['results'], 'UserAvailableIndicator', '')),
"SupportedCountries": getattr(jsonResult['d']['results'], 'SupportedCountries', ''),
"SupportedLanguages": getattr(jsonResult['d']['results'], 'SupportedLanguages', ''),
}
# publish to Employee Queue
result = sqsClient.send_message(
QueueUrl=queueUrl,
MessageBody=json.dumps(record_to_publish, ensure_ascii=False).encode('utf-8').decode(),
MessageAttributes={
'countryCode': {
'DataType': 'String',
'StringValue': jsonResult['d']['results']['CountryCode']
},
'eventSource': {
'DataType': 'String',
'StringValue': eventSource
},
'correlationId': {
'DataType': 'String',
'StringValue': payload['event-id'] or str(uuid.uuid4())
}
})
DataLogger.info(Log(
message="Published employee record",
correlation_id=payload['event-id'] or str(uuid.uuid4()),
invoked_component="{env}-c4c-employee-event-publisher-{ver}".format(env=environment, ver=version),
invoker_agent="{env}-c4c-employee-event-queue-{ver}".format(env=environment, ver=version),
target_idp_application="{env}-c4c-employee-queue-{ver}".format(env=environment, ver=version),
processing_stage="employee-publisher-created",
request_payload=json.dumps(payload),
response_details=json.dumps(result),
invocation_timestamp=str(invocation_timestamp),
original_source_app=eventSource))
except Exception as error:
DataLogger.info(Log(
message="Error publishing employee record",
error=str(error),
correlation_id=payload['event-id'] or str(uuid.uuid4()),
invoked_component="{env}-c4c-employee-event-publisher-{ver}".format(env=environment, ver=version),
invoker_agent="{env}-c4c-employee-event-queue-{ver}".format(env=environment, ver=version),
target_idp_application="{env}-c4c-employee-queue-{ver}".format(env=environment, ver=version),
processing_stage="employee-publisher-created",
request_payload=json.dumps(payload),
invocation_timestamp=str(invocation_timestamp),
original_source_app=eventSource))
raise Exception(error)
return LambdaResponse.ok_response()
except Exception as error:
DataLogger.info(Log(
message="Error publishing employee record",
error=str(error),
invoked_component="{env}-c4c-employee-event-publisher-{ver}".format(env=environment, ver=version),
invoker_agent="{env}-c4c-employee-event-queue-{ver}".format(env=environment, ver=version),
target_idp_application="{env}-c4c-employee-queue-{ver}".format(env=environment, ver=version),
processing_stage="employee-publisher-created",
invocation_timestamp=str(invocation_timestamp),
original_source_app=eventSource))
raise Exception(error)
| [
"arunkumar.bala@idp.com"
] | arunkumar.bala@idp.com |
00db2c8c3ed972b7163d98736f55e12ede747a2c | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577538_Poor_Man_unit_tests/recipe-577538.py | a085b12780b5428ff89dbdb43e8d9d754e602175 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 3,834 | py | #! /usr/bin/env python
######################################################################
# Written by Kevin L. Sitze on 2010-12-03
# This code may be used pursuant to the MIT License.
######################################################################
import sys
import traceback
from types import FloatType, ComplexType
__all__ = (
'assertEquals',
'assertNotEquals',
'assertException',
'assertFalse',
'assertNone',
'assertNotNone',
'assertSame',
'assertNotSame',
'assertTrue'
)
def colon( msg ):
if msg:
return ": " + str( msg )
else:
return ""
def assertEquals( exp, got, msg = None ):
"""assertEquals( exp, got[, message] )
Two objects test as "equal" if:
* they are the same object as tested by the 'is' operator.
* either object is a float or complex number and the absolute
value of the difference between the two is less than 1e-8.
* applying the equals operator ('==') returns True.
"""
if exp is got:
r = True
elif ( type( exp ) in ( FloatType, ComplexType ) or
type( got ) in ( FloatType, ComplexType ) ):
r = abs( exp - got ) < 1e-8
else:
r = ( exp == got )
if not r:
print >>sys.stderr, "Error: expected <%s> but got <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertNotEquals( exp, got, msg = None ):
"""assertNotEquals( exp, got[, message] )
Two objects test as "equal" if:
* they are the same object as tested by the 'is' operator.
* either object is a float or complex number and the absolute
value of the difference between the two is less than 1e-8.
* applying the equals operator ('==') returns True.
"""
if exp is got:
r = False
elif ( type( exp ) in ( FloatType, ComplexType ) or
type( got ) in ( FloatType, ComplexType ) ):
r = abs( exp - got ) >= 1e-8
else:
r = ( exp != got )
if not r:
print >>sys.stderr, "Error: expected different values but both are equal to <%s>%s" % ( repr( exp ), colon( msg ) )
traceback.print_stack()
def assertException( exceptionType, f, msg = None ):
"""Assert that an exception of type \var{exceptionType}
is thrown when the function \var{f} is evaluated.
"""
try:
f()
except exceptionType:
assert True
else:
print >>sys.stderr, "Error: expected <%s> to be thrown by function%s" % ( exceptionType.__name__, colon( msg ) )
traceback.print_stack()
def assertFalse( b, msg = None ):
"""assertFalse( b[, message] )
"""
if b:
print >>sys.stderr, "Error: expected value to be False%s" % colon( msg )
traceback.print_stack()
def assertNone( x, msg = None ):
assertSame( None, x, msg )
def assertNotNone( x, msg = None ):
assertNotSame( None, x, msg )
def assertSame( exp, got, msg = None ):
if got is not exp:
print >>sys.stderr, "Error: expected <%s> to be the same object as <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertNotSame( exp, got, msg = None ):
if got is exp:
print >>sys.stderr, "Error: expected two distinct objects but both are the same object <%s>%s" % ( repr( exp ), colon( msg ) )
traceback.print_stack()
def assertTrue( b, msg = None ):
if not b:
print >>sys.stderr, "Error: expected value to be True%s" % colon( msg )
traceback.print_stack()
if __name__ == "__main__":
assertNone( None )
assertEquals( 5, 5 )
assertException( KeyError, lambda: {}['test'] )
assertNone( 5, 'this assertion is expected' )
assertEquals( 5, 6, 'this assertion is expected' )
assertException( KeyError, lambda: {}, 'this assertion is expected' )
| [
"betty@qburst.com"
] | betty@qburst.com |
294780ff7ab60dc91677fc1d89295b77c146b850 | 4a53aba78d55247e185d8cef5e2a1f8892ae68be | /learn_python/08.jpype.py | 13917ac8e31a1518b4553ebf02a516bd1b6ee5af | [] | no_license | axu4github/Learn | 665bb8ddd2eb420a0e7bc3d1ff68f66958936645 | 2eb33b5a97f1730e3f774b80e3b206c49faa2228 | refs/heads/master | 2023-01-22T15:49:53.260777 | 2018-10-25T15:21:56 | 2018-10-25T15:21:56 | 61,703,577 | 1 | 0 | null | 2023-01-12T08:23:28 | 2016-06-22T08:46:46 | JavaScript | UTF-8 | Python | false | false | 664 | py | # encoding=utf-8
'''
Mac安装JPype
1. 下载 https://sourceforge.net/projects/jpype/ 最新版本
2. 解压,进入目录
3. 执行 sudo python setup.py install
若存在 `error: command 'clang' failed with exit status 1` 的问题
则需要在 setup.py 文件的 `self.includeDirs` 中添加 `"/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers/"` 以便可以找到 `jni.h` 等头文件。
具体可详见:http://blog.csdn.net/jerrychenly/article/details/20545995 说明
'''
from jpype import *
startJVM('/Library/Java/JavaVirtualMachines/jdk1.7.0_09.jdk/Contents/MacOS/libjli.dylib')
java.lang.System.out.println("hello world")
shutdownJVM()
| [
"axu.home@gmail.com"
] | axu.home@gmail.com |
a5c5843f631bfde5c8971652b304badb13620f23 | 9dc76d5bed9190bd6b44a363c2a6324b5cb7b525 | /touchingperimeter/__init__.py | 5005f562540bd9bc327ac15fa327d3cd01442ecf | [
"MIT"
] | permissive | tasptz/py-touchingperimeter | d8f651ecb0d7faec51b4af4446851fc3a32e51da | b581a1ad477629c5ee5fcbe6a9d5747af6f9183b | refs/heads/master | 2021-07-21T19:28:11.511244 | 2020-08-13T08:00:09 | 2020-08-13T08:00:09 | 205,883,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from . import box, rect, packer
Box = box.Box
Rect = rect.Rect
Packer = packer.Packer | [
"tasptz@gmail.com"
] | tasptz@gmail.com |
77f31f717ae740760de5100e7adf45751edbe729 | 8575c8e29ac5bdac7736e39535fdbadbd3ce2b3d | /account/migrations/0060_auto_20200818_0836.py | 78cc7199d2dab2faa94dacdc0fa845a92ebf8c9e | [] | no_license | tanvir43/daily-eCommerce | 795bf4abb4e3021dfc05b7a42b194f66fd591ab1 | 418158b3e3559c3a644c2b34ca63170b1b1cb8d2 | refs/heads/master | 2022-12-17T23:30:08.348855 | 2020-09-22T05:06:13 | 2020-09-22T05:06:13 | 255,512,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 3.0.7 on 2020-08-18 08:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0059_auto_20200817_1625'),
]
operations = [
migrations.AlterField(
model_name='address',
name='created_at',
field=models.DateTimeField(),
),
]
| [
"raihncse43@gmail.com"
] | raihncse43@gmail.com |
0bef9143b83163f975c7182a3a50534504e12b47 | 5f4465a32c9db2489405b070b52463c44b1cbc9a | /02-Application-And-Routes/django_project/django_project/urls.py | 8a93579106e9c2e29e455b7a651224e5fb6c426c | [] | no_license | stormindia/learn_django | 7d98e6b971a746f57d8657b580c517340bf57662 | a87dd536c30954ee89e2cdbd1d7c6a1609869b34 | refs/heads/master | 2023-07-02T10:11:23.177876 | 2021-08-01T09:12:09 | 2021-08-01T09:12:09 | 284,666,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('blog/', include('blog.urls'))
]
| [
"bajpaiharshit14@gmail.com"
] | bajpaiharshit14@gmail.com |
8c817d375a6e4429ab2667c176887dbb4d65c7da | a499fbdd93f85a286505433a08afc25d84c8ff04 | /python/tvm/runtime/vm.py | 2be3f3ec1a78cdc5de989fd61d84de1855339132 | [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | elphinkuo/tvm | a81e0ccc5950a1473efdcdbb8263de9adbe36787 | 9df2ae8eaa8b394013182a7ad09ac57fe401f80e | refs/heads/main | 2023-08-05T07:41:18.652097 | 2021-09-28T00:38:26 | 2021-09-28T00:38:26 | 411,311,927 | 2 | 0 | Apache-2.0 | 2021-09-28T14:51:56 | 2021-09-28T14:17:46 | null | UTF-8 | Python | false | false | 21,091 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine runtime.
Implements a Python interface to executing the compiled VM object.
"""
import numpy as np
import tvm
from tvm.runtime import Module
from tvm._ffi.runtime_ctypes import TVMByteArray
from tvm._ffi import base as _base
from .object import Object
from . import _ffi_api, container
from ..rpc.base import RPC_SESS_MASK
def _convert(arg, cargs):
if isinstance(arg, Object):
cargs.append(arg)
elif isinstance(arg, np.ndarray):
nd_arr = tvm.nd.array(arg, device=tvm.cpu(0))
cargs.append(nd_arr)
elif isinstance(arg, tvm.runtime.NDArray):
cargs.append(arg)
elif isinstance(arg, (tuple, list)):
field_args = []
for field in arg:
_convert(field, field_args)
cargs.append(container.tuple_object(field_args))
elif isinstance(arg, (_base.numeric_types, bool)):
dtype = "int32" if isinstance(arg, (_base.integer_types, bool)) else "float32"
value = tvm.nd.array(np.array(arg, dtype=dtype), device=tvm.cpu(0))
cargs.append(value)
elif isinstance(arg, str):
cargs.append(arg)
else:
raise TypeError("Unsupported type: %s" % (type(arg)))
def convert(args):
cargs = []
for arg in args:
_convert(arg, cargs)
return cargs
class Executable(object):
"""Relay VM executable"""
def __init__(self, mod):
self.mod = mod
self._function_params = {}
self._save = self.mod["save"]
self._get_lib = self.mod["get_lib"]
self._get_bytecode = self.mod["get_bytecode"]
self._get_stats = self.mod["get_stats"]
self._get_function_arity = self.mod["get_function_arity"]
self._get_function_param_name = self.mod["get_function_param_name"]
def save(self):
"""Save the Relay VM Executable.
Returns
-------
code : bytearray
The binary blob representing a serialized Relay VM executable. It
can then be saved to disk and later deserialized into a new
Executable.
lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code. It is
basically a library that is composed of hardware dependent code.
Notes
-----
The returned code is organized with the following sections in order.
- Global section. This section contains the globals used by the
virtual machine.
- Constant section. This section is used to store the constant pool of
a virtual machine.
- Primitive name section. This section is introduced to accommodate
the list of primitive operator names that will be invoked by the
virtual machine.
- Code section. The VM functions, including bytecode, are sitting in
this section.
Examples
--------
.. code-block:: python
import numpy as np
import tvm
from tvm import te
from tvm import relay
# define a simple network.
x = relay.var('x', shape=(10, 10))
f = relay.Function([x], x + x)
mod = tvm.IRModule({"main": f})
# create a Relay VM.
dev = tvm.cpu()
target = "llvm"
executable = relay.vm.compile(mod, target)
code, lib = executable.save()
# save and load the code and lib file.
tmp = tvm.contrib.utils.tempdir()
path_lib = tmp.relpath("lib.so")
lib.export_library(path_lib)
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)
# execute the deserialized executable.
x_data = np.random.rand(10, 10).astype('float32')
des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev)
res = des_vm.run(x_data)
print(res.numpy())
"""
return self._save(), self._get_lib()
@staticmethod
def load_exec(bytecode, lib):
"""Construct an executable from saved artifacts.
Parameters
----------
bytecode : bytearray
The binary blob representing a the Relay VM bytecode.
lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code.
Returns
-------
exec: Executable
An executable constructed using the provided artifacts.
"""
if isinstance(bytecode, (bytes, str)):
code = bytearray(bytecode)
elif not isinstance(bytecode, (bytearray, TVMByteArray)):
raise TypeError(
"bytecode is expected to be the type of bytearray "
+ "or TVMByteArray, but received {}".format(type(code))
)
if lib is not None and not isinstance(lib, tvm.runtime.Module):
raise TypeError(
"lib is expected to be the type of tvm.runtime.Module"
+ ", but received {}".format(type(lib))
)
return Executable(_ffi_api.Load_Executable(bytecode, lib))
@property
def lib(self):
"""Get the library that contains hardware dependent code.
Returns
-------
ret : :py:class:`~tvm.runtime.Module`
The runtime module that contains hardware dependent code.
"""
return self._get_lib()
@property
def stats(self):
"""Get the statistics of the Relay VM executable.
Returns
-------
ret : String
The statistic information of the VM executable.
"""
return self._get_stats()
@property
def primitive_ops(self):
"""Get the name of the primitive ops contained in the executable.
Returns
-------
ret : List[String]
The list of primitive ops.
"""
ret = []
num_primitives = _ffi_api.GetNumOfPrimitives(self.module)
for i in range(num_primitives):
ret.append(_ffi_api.GetPrimitiveFields(self.module, i))
return ret
@property
def bytecode(self):
"""Get the bytecode of the Relay VM executable.
Returns
-------
ret : String
The bytecode of the executable.
Notes
-----
The bytecode is in the following format:
func_name reg_file_size num_instructions
param1 param2 ... paramM
instruction1
instruction2
...
instructionN
Each instruction is printed in the following format:
hash opcode field1 ... fieldX # The text format.
The part starting from # is only used for visualization and debugging.
The real serialized code doesn't contain it, therefore the deserializer
doesn't need to deal with it as well.
"""
return self._get_bytecode()
@property
def globals(self):
"""Get the globals used by the Relay VM executable.
Returns
-------
ret : List[String]
The globals contained in the executable.
"""
ret = []
num_globals = _ffi_api.GetNumOfGlobals(self.module)
for i in range(num_globals):
ret.append(_ffi_api.GetGlobalFields(self.module, i))
return ret
@property
def module(self):
"""Return the runtime module contained in a virtual machine executable."""
return self.mod
def get_function_params(self, func_name):
"""Get VM Function parameters"""
if func_name in self._function_params:
return self._function_params[func_name]
arity = self._get_function_arity(func_name)
assert arity >= 0
params = []
for i in range(arity):
p = self._get_function_param_name(func_name, i)
assert p
params.append(p)
self._function_params[func_name] = params
return params
class VirtualMachine(object):
"""Relay VM runtime.
Parameters
----------
exe : Executable
The VM executable.
device : tvm.runtime.Device or List[tvm.runtime.Device]
The device to deploy the module
memory_cfg : str or Dict[tvm.runtime.Device, str], optional
Config the type of memory allocator. The allocator type can be ["naive",
"pooled"]. If memory_cfg is None, all devices will use pooled allocator
by default. If memory_cfg is string, all devices will use the specified
allocator type. If memory_cfg is a dict, each device uses the allocator
type specified in the dict, or pooled allocator if not specified in the
dict.
"""
NAIVE_ALLOCATOR = 1
POOLED_ALLOCATOR = 2
def __init__(self, exe, device, memory_cfg=None):
"""
Construct a VirtualMachine wrapper class which provides a simple
interface over the raw C++ Module based API.
Parameters
----------
exe: Union[Executable, Module]
The executable either with the wrapper Python type or the raw runtime.Module.
In most cases this will be the Python wrapper class tvm.runtime.vm.Executable but
if you instead get the underlying runtime.Module subclass (i.e `exe.mod`) you
can directly pass it to this method.
This case can occur when doing things such as RPC where TVM's module APIs
return the raw modules, not the wrapped modules. This constructor will
handle this internally.
device: Union[Device, List[Device]]
The device, or devices on which to execute the VM code.
memory_cfg: Optional[str]
The allocator behavior to use for the VM.
Returns
-------
vm: VirtualMachine
A VM wrapper object.
"""
if not isinstance(exe, Executable) and not isinstance(exe, Module):
raise TypeError(
"exe is expected to be the type of Executable, "
+ "but received {}".format(type(exe))
)
if not isinstance(exe, Executable):
exe = Executable(exe)
self.module = exe.mod["vm_load_executable"]()
self._exec = exe
self._init = self.module["init"]
self._invoke = self.module["invoke"]
self._invoke_stateful = self.module["invoke_stateful"]
self._get_output = self.module["get_output"]
self._get_num_outputs = self.module["get_num_outputs"]
self._get_input_index = self.module["get_input_index"]
self._set_input = self.module["set_input"]
self._setup_device(device, memory_cfg)
def _setup_device(self, dev, memory_cfg):
"""Init devices and allocators."""
devs = dev
if not isinstance(dev, (list, tuple)):
if not isinstance(dev, tvm.runtime.Device):
raise TypeError(
"dev is expected to be Device or \
List[Device]"
)
devs = [dev]
# CPU is required for executing shape functions
if not any(c.device_type % RPC_SESS_MASK == tvm.cpu().device_type for c in devs):
devs.append(tvm.cpu())
default_alloc_type = VirtualMachine.POOLED_ALLOCATOR
if memory_cfg is None:
memory_cfg = {}
elif isinstance(memory_cfg, str):
assert memory_cfg in ["naive", "pooled"]
if memory_cfg == "naive":
default_alloc_type = VirtualMachine.NAIVE_ALLOCATOR
memory_cfg = {}
elif not isinstance(memory_cfg, dict):
raise TypeError(
"memory_cfg is expected be string or dictionary, "
+ "but received {}".format(type(memory_cfg))
)
init_args = []
for device in devs:
init_args.append(device.device_type % RPC_SESS_MASK)
init_args.append(device.device_id)
alloc_type = memory_cfg[device] if device in memory_cfg else default_alloc_type
init_args.append(alloc_type)
self._init(*init_args)
    def set_input(self, func_name, *args, **kwargs):
        """Set the input to a function.

        Parameters
        ----------
        func_name : str
            The name of the function.
        args : list[tvm.runtime.NDArray] or list[np.ndarray]
            The arguments to the function.
        kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
            Named arguments to the function.
        """
        if kwargs:
            # kwargs is a super set of the required function parameters. We
            # only find the ones that are needed.
            func_params = self._exec.get_function_params(func_name)
            new_args = [None] * len(func_params)
            # First pass: drop each recognized keyword argument into the slot
            # of the parameter it names; unknown keywords are ignored.
            cnt = 0
            for k in kwargs:
                if k in func_params:
                    idx = func_params.index(k)
                    new_args[idx] = kwargs[k]
                    cnt += 1
            # The positional args must exactly fill the remaining slots.
            assert len(args) + cnt == len(func_params)
            # Second pass: fill the still-empty slots with the positional
            # args, preserving their original order.
            idx = 0
            for i, arg in enumerate(new_args):
                if arg is None:
                    new_args[i] = args[idx]
                    idx += 1
            args = new_args
        # `convert` comes from module scope; presumably it wraps the Python
        # values as TVM runtime objects before crossing the FFI -- TODO confirm.
        cargs = convert(args)
        self._set_input(func_name, *cargs)
    def invoke(self, func_name, *args, **kwargs):
        """Invoke a function.

        Parameters
        ----------
        func_name : str
            The name of the function.
        args : list[tvm.runtime.NDArray] or list[np.ndarray]
            The arguments to the function.
        kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
            Named arguments to the function.

        Returns
        -------
        result : Object
            The output.
        """
        if args or kwargs:
            # Stage the arguments first; the packed "invoke" function reads
            # whatever inputs were previously bound via set_input.
            self.set_input(func_name, *args, **kwargs)
        return self._invoke(func_name)
    def run(self, *args, **kwargs):
        """Run the main function.

        Parameters
        ----------
        args : list[tvm.runtime.NDArray] or list[np.ndarray]
            The arguments to the function.
        kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
            Named arguments to the function.

        Returns
        -------
        result : Object
            The output.
        """
        # Convenience shortcut for invoking the module's "main" entry point.
        return self.invoke("main", *args, **kwargs)
    def invoke_stateful(self, func_name, *args, **kwargs):
        """Invoke a function and ignore the returned result.

        Use this function when running over rpc because it is currently
        impossible to return a ADT object over rpc. To get the outputs, use
        :py:func`get_outputs`.

        Parameters
        ----------
        func_name : str
            The name of the function.
        args : list[tvm.runtime.NDArray] or list[np.ndarray]
            The arguments to the function.
        kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
            Named arguments to the function.
        """
        if args or kwargs:
            self.set_input(func_name, *args, **kwargs)
        # Result stays on the VM side; retrieve it later with get_outputs().
        self._invoke_stateful(func_name)
def get_outputs(self):
"""Get the outputs from a call to :py:func`invoke_stateful`.
Returns
-------
outputs : List[NDArray]
"""
return [self._get_output(i) for i in range(self._get_num_outputs())]
    def get_input_index(self, input_name, func_name="main"):
        """Get inputs index via input name.

        Parameters
        ----------
        input_name : str
            The input key name
        func_name : str
            The function name

        Returns
        -------
        index: int
            The input index. -1 will be returned if the given input name is not found.
        """
        return self._get_input_index(input_name, func_name)
    def benchmark(
        self,
        device,
        *args,
        func_name="main",
        repeat=5,
        number=5,
        min_repeat_ms=None,
        end_to_end=False,
        **kwargs,
    ):
        """Calculate runtime of a function by repeatedly calling it.
        Use this function to get an accurate measurement of the runtime of a function. The function
        is run multiple times in order to account for variability in measurements, processor speed
        or other external factors. Mean, median, standard deviation, min and max runtime are all
        reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that
        synchonization and data transfer operations are not counted towards the runtime. This allows
        for fair comparison of runtimes across different functions and models. The `end_to_end` flag
        switches this behavior to include data transfer operations in the runtime.
        The benchmarking loop looks approximately like so:
        .. code-block:: python
            for r in range(repeat):
                time_start = now()
                for n in range(number):
                    func_name()
                time_end = now()
                total_times.append((time_end - time_start)/number)
        Parameters
        ----------
        func_name : str
            The function to benchmark
        repeat : int
            Number of times to run the outer loop of the timing code (see above). The output will
            contain `repeat` number of datapoints.
        number : int
            Number of times to run the inner loop of the timing code. This inner loop is run in
            between the timer starting and stopping. In order to amortize any timing overhead,
            `number` should be increased when the runtime of the function is small (less than a 1/10
            of a millisecond).
        min_repeat_ms : Optional[float]
            If set, the inner loop will be run until it takes longer than `min_repeat_ms`
            milliseconds. This can be used to ensure that the function is run enough to get an
            accurate measurement.
        end_to_end : bool
            If set, include time to transfer input tensors to the device and time to transfer
            returned tensors in the total runtime. This will give accurate timings for end to end
            workloads.
        args : Sequence[Object]
            Arguments to the function. These are cached before running timing code, so that data
            transfer costs are not counted in the runtime.
        kwargs : Dict[str, Object]
            Named arguments to the function. These are cached like `args`.
        Returns
        -------
        timing_results : BenchmarkResult
            Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to
            access the individual runtimes (in seconds).
        """
        min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms
        if end_to_end:
            # We need to unpack keyword arguments into positional arguments
            packed_args = list(args)
            for k, v in kwargs.items():
                i = self.get_input_index(k, func_name)
                if i < 0:
                    raise TypeError(f"{func_name}() got an unexpected keyword argument '{k}'")
                # Grow the positional list until slot i exists, then fill it.
                while i >= len(packed_args):
                    packed_args.append(None)
                packed_args[i] = v
            # "invoke_return_to_device" times argument upload + run + result
            # download, so transfer costs are included in the measurement.
            return self.module.time_evaluator(
                "invoke_return_to_device",
                device,
                repeat=repeat,
                number=number,
                min_repeat_ms=min_repeat_ms,
            )(func_name, device.device_type, device.device_id, *packed_args)
        # Non end-to-end path: bind inputs once, outside the timed region.
        if args or kwargs:
            self.set_input(func_name, *args, **kwargs)
        return self.module.time_evaluator(
            "invoke", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms
        )(func_name)
| [
"noreply@github.com"
] | elphinkuo.noreply@github.com |
b00ab8ba872307d2333753f04e741547c90f2cd0 | 2793721e5cbfccfedac75556e34dba22999530d7 | /Data_Structures/Queue/venv/bin/easy_install-3.7 | 8f39f903e7eb3bb8428554ff85aff955f127983f | [] | no_license | iafjayoza/Python | 135e613d1d23c493b05a009843b40cbca6a1d318 | aaa05b0d655c8f0a47ced0100a844d99f852b2a4 | refs/heads/master | 2022-12-07T07:21:46.494885 | 2020-09-06T09:03:27 | 2020-09-06T09:03:27 | 282,707,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | 7 | #!/Users/jo049566/Desktop/Jay/Jay_Data/Study_Repo/Python/Projects/Queue/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools-generated console script: strip the "-script.py"/".exe"
    # wrapper suffix from argv[0], then delegate to the registered
    # console_scripts entry point and exit with its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"jaykumar.oza@cerner.com"
] | jaykumar.oza@cerner.com |
b861bfad28fb2afcfebe70efae435a87ea165933 | 2fda622cfecb338307ee5cb52b99068a14a72b2e | /pro level/program58.py | 3b4b0e1b8f272fde412a5e590c525de80f2c7613 | [] | no_license | harishk-97/python | aa63dac34f90bbd4360d9253c16176f2668e0b96 | c49698ad1c3d186f8a6bbd545c6da4d7c83e3d40 | refs/heads/master | 2021-07-19T04:47:27.455146 | 2018-12-19T14:10:20 | 2018-12-19T14:10:20 | 125,207,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | def looper(n):
i=0
while(i<(n+int(n/2))):
q=2
while(q<=n):
if(i+1==(n+int(n/2))):
print(q)
return
else:
print(q,end=' ')
q+=2
i+=1
q=1
while(q<=n):
if(i+1==(n+int(n/2))):
print(q)
return
else:
print(q,end=' ')
q+=2
i+=1
n=int(input())
print(n+int(n/2))
looper(n)
| [
"noreply@github.com"
] | harishk-97.noreply@github.com |
446e14d43aeeaa7445a7de740d1b7934d9e99dab | 046ce7203058c285dfb9f2acf5dfdf794b9f5856 | /main.py | 6a423ebf06afb11bd0be8420d743d5d69a91abc1 | [] | no_license | xuchengustb/spadeLabyrinthMazeMultiAgents | feb348018d806a64d48fbfbc3574cc55cec736e6 | 56a6da4ebb4258f40280600c8ecf2c0b916a8895 | refs/heads/master | 2021-04-17T20:07:15.663876 | 2019-03-26T09:00:16 | 2019-03-26T09:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | # Declaration of all the sender and receiver classes from scouts, workers and engineers
import time
import _thread
from scout.scout import Scout
from engineer.engineer import Engineer
from worker.worker import Worker
from maze.PyramidalMaze import PyramidalMaze
import eel
eel.init('web')
@eel.expose # Expose this function to Javascript
def say_hello_py(x):
    """Print a greeting naming the caller; callable from the JS frontend via Eel."""
    print('Hello from %s' % x)
# say_hello_py('Python World!')
# we create our maze
width = 13
eel.initJs(width) # Call a Javascript function
def webServer():
    """Start the Eel web UI non-blocking, then keep this thread alive with Eel's cooperative sleep."""
    eel.start('main.html', block=False) # Start (this blocks and enters loop)
    while True:
        eel.sleep(1.0) # Use eel.sleep(), not time.sleep()
try:
    # Serve the UI from a background thread so the agent setup below can run.
    _thread.start_new_thread( webServer,() ) # launch the server in parallel
    print("webServer started !")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    print ("Error: unable to start thread")
if __name__ == "__main__":
# We create our senders
maze = PyramidalMaze(width)
scout1 = Scout("lmscout1@conversejs.org", "woweygiowa96")
engineer1 = Engineer("lmengineer1@conversejs.org","woweygiowa96")
worker1 = Worker("lmworker1@conversejs.org","woweygiowa96")
scout1.constructor(maze,eel)
scout1.start()
engineer1.constructor(maze,eel)
engineer1.start()
worker1.constructor(maze,eel)
worker1.start()
| [
"leo.sup@hotmail.fr"
] | leo.sup@hotmail.fr |
afc8f0a49a20a307d0e81fa479d20f83167f3817 | 585c9b86b95774188465c31b976fac02cd41ea8c | /one_pass/basic_app/views.py | 572ca41ed5ac15b2c21c6938758f933be780c17e | [] | no_license | Vadlap8/django_deployment_example | 12afe91a1b28686747ca424c8eac0202bdb7a70f | 59203a065e694721292660cdf0601b8e5155f166 | refs/heads/master | 2022-12-22T03:11:37.596624 | 2020-09-30T12:43:58 | 2020-09-30T12:43:58 | 299,909,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | from django.shortcuts import render
from basic_app.forms import UserForm, UserProfileInfoForm
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request, 'basic_app/index.html')
@login_required
def user_logout(request):
    """Log the current user out and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
@login_required
def special(request):
    """Minimal login-protected page used to verify authentication works."""
    return HttpResponse('You are logged in! Nice!')
def register(request):
    """Handle user sign-up with the combined User + UserProfileInfo forms.

    GET renders empty forms; POST validates both forms, saves the user with a
    hashed password, attaches the optional profile picture, and re-renders the
    page with `registered` signalling success.
    """
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting (save() stored it as-is).
            user.set_password(user.password)
            user.save()
            # Delay saving the profile until the user FK can be attached.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    return render(request, 'basic_app/registrations.html', {'user_form':user_form, 'profile_form': profile_form, 'registered': registered})
def user_login(request):
    """Authenticate POSTed credentials and log the user in.

    GET renders the login form; POST authenticates and redirects to the index
    on success. Always returns an HttpResponse (a Django view must never
    return None).
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request,user)
                return HttpResponseRedirect(reverse('index'))
            else:
                return HttpResponse('Account is not active!')
        else:
            # SECURITY: logging the plaintext password should be removed
            # before production use.
            print('Someone tried to login and failed!')
            print('Username: {} and password: {}'.format(username, password))
            # BUG FIX: this branch previously fell through and returned None,
            # which makes Django raise "view didn't return an HttpResponse".
            return HttpResponse('Invalid login details supplied!')
    else:
        return render(request, 'basic_app/login.html', {})
| [
"vadim.lapshin.msk@gmail.com"
] | vadim.lapshin.msk@gmail.com |
c19eb636721936b2958cf5d6e70a002e4972088d | a4b94b06b603b809c2dd7ab07a013951019d5540 | /old-pipeline/predict/predict.py | cf001d8e742686a45ffc97d92f77469c6a5d079a | [] | no_license | meng2468/MOA-Kaggle-Competition | 56626a67e7d89dbae592186dd743ebea21bf7ec7 | b48f7afccebc3b9c7bdf94c9edd29c3dfebf40ca | refs/heads/main | 2023-02-15T18:30:57.728673 | 2021-01-10T14:33:12 | 2021-01-10T14:33:12 | 305,134,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
| [
"mandm116@gmail.com"
] | mandm116@gmail.com |
746b0c743ffb8d49b2ff71cf870102d7d7279481 | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /py4ops/getip.py | e2bbe141b3b44d2c61828b6bf996715b6e854f17 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | UTF-8 | Python | false | false | 599 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface `ifname` (e.g. 'eth0').

    Works on Linux only: issues the SIOCGIFADDR ioctl on a throwaway UDP
    socket. Note this module uses Python 2 syntax (print statement below).
    """
    s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915, # SIOCGIFADDR
        # ifreq starts with the interface name, padded to a 256-byte buffer;
        # kernel interface names are at most 15 chars + NUL.
        struct.pack('256s', ifname[:15])
    )[20:24]) # bytes 20:24 of the returned ifreq hold the in_addr -- TODO confirm offset
print get_ip_address('eth0')
'''
send email 要用到这个ip 的东西
>>> get_ip_address('eth0')
'38.113.228.130'
http://blog.csdn.net/heizistudio/article/details/38413739
使用Python获得本机IP地址
http://www.pythonclub.org/python-network-application/get-ip-address
''' | [
"evan886@gmail.com"
] | evan886@gmail.com |
490c5679007f3567ce796e0327003d6433192893 | 3794037859e437bbe9f040e749ecb8712c55f905 | /back-end/gunicorn.conf.py | 294482661cb565d4e65212634c437e1fb625c4a3 | [] | no_license | chrishsu2/ROWDYHACK-2020 | 0640c5e7e6ad3ff3e3918c0bbc44d88f9d96f1bc | ec3128240eccc7161b456f14c4dff5e752beb2f0 | refs/heads/master | 2023-06-06T02:44:59.543389 | 2020-03-29T16:48:39 | 2020-03-29T16:48:39 | 250,845,754 | 0 | 0 | null | 2023-05-22T23:22:42 | 2020-03-28T16:46:11 | HTML | UTF-8 | Python | false | false | 234 | py | # -*- coding: utf-8 -*-
# @Author: Christopher Hsu
# @Date: 2020-03-28 12:39:59
# @Last Modified by: Christopher Hsu
# @Last Modified time: 2020-03-28 12:40:08
import multiprocessing
# Gunicorn's standard sizing heuristic: (2 x CPU cores) + 1 workers.
workers = multiprocessing.cpu_count() * 2 + 1
| [
"chrishsu2@gmail.com"
] | chrishsu2@gmail.com |
1532b3c1ce9a7d248948b273145174c1a58e4ada | b8593b1c9495369ce0df32c6fd5d4f7e23360d81 | /main.py | db9c7e833dfadfaf350698950bc3cd3be8a14cbc | [] | no_license | Lejia-Zhang17/StreamlitDemo | 210a1bf78719bcff09ef6c0e5e8d500c303b7aac | 0505c71fb98f950c9939979f5ed2d5eeeedf60b4 | refs/heads/master | 2023-04-15T09:51:59.670335 | 2021-04-24T03:31:13 | 2021-04-24T03:31:13 | 361,061,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | import streamlit as st
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
header = st.beta_container()
dataset = st.beta_container()
feature = st.beta_container()
model_training = st.beta_container()
st.markdown(
"""
<style>
.main{
background-color:#F5F5F5;
}
</style>
""",
unsafe_allow_html=True
)
@st.cache
def get_data(filename):
    """Load the CSV at *filename*; Streamlit caches the resulting DataFrame."""
    return pd.read_csv(filename)
with header:
st.title("Welcome to my awesome data science project!")
# st.text('In this project I look into transactions of taxis in NYC')
with dataset:
st.header('NYC text dataset')
st.text("I found this dataset on blablablab.com")
test_data=get_data("test_data.csv")
st.write(test_data.head())
st.subheader("Pick-up location ID distribution on the NYC dataset")
station_dist = pd.DataFrame(test_data['start station id'].value_counts().head(50))
st.bar_chart(station_dist)
with feature:
st.header('The featrues I created')
st.markdown('* **first feature: ** I created this feature becasue of this .. I calculated it using this logic ..')
st.markdown('* **second feature: ** I created this feature becasue of this .. I calculated it using this logic ..')
with model_training:
st.header("Time to train the model")
st.text("Here you can choose the hyperparameters of the model and see how the performance changes!")
sel_col, disp_col = st.beta_columns(2)
max_depth=sel_col.slider("What should be the max_depth of the model?", min_value=10, max_value=100,value=20,step=10)
n_estimators = sel_col.selectbox("How many trees should there be?",options=[100,200,300,"No limit"],index=0)
sel_col.text("Here is a list of features in my data")
sel_col.write(test_data.columns)
input_feature = sel_col.text_input("Which feature should be used as the input feature?","start station id")
if n_estimators == "No limit":
regr=RandomForestRegressor(max_depth)
else:
regr=RandomForestRegressor(max_depth,n_estimators=n_estimators)
X=test_data[[input_feature]]
y=test_data[['tripduration']]
regr.fit(X,y)
prediction = regr.predict(y)
disp_col.subheader("Mean absolute error of the model is: ")
disp_col.write(mean_absolute_error(y,prediction))
disp_col.subheader("Mean squared error of the model is: ")
disp_col.write(mean_squared_error(y, prediction))
disp_col.subheader("R squared error of the model is: ")
disp_col.write(r2_score(y, prediction))
| [
"lexie@juzitadideMacBook-Pro.local"
] | lexie@juzitadideMacBook-Pro.local |
aa080bc15ee6542bd46c558c8d680730bde6a6e0 | 4dd014286285b776269305099f8725617bc97fea | /bs_demo.py | 02ab6be2ec8d07ab4f28d053c0f276e56523188b | [
"MIT"
] | permissive | zgo23/flipkart-laptop-scraping | f0c66d13e6bac87650b5c2d3fe5c941e7e40ce3d | 1c8ab437f81b874fa3b76de22fad2d61bf33e642 | refs/heads/master | 2022-12-08T02:00:49.553627 | 2020-09-09T11:25:34 | 2020-09-09T11:25:34 | 293,691,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'html.parser')
print(soup.prettify())
print(soup.title)
print(soup.title.name)
print(soup.title.string)
print(type(soup.title)) | [
"zgo23@uclive.ac.nz"
] | zgo23@uclive.ac.nz |
bc922c2561f60a7eb5872bdc4b5d64b559f6c020 | e1b2c3beb997d5cc70bbf7d508dfa3a039b7b055 | /backend/main.py | bc569db0967a2ba6b1c70e34c737b7b9d3e4f9d0 | [
"Apache-2.0"
] | permissive | patrickbrandt/bovine | 2f51d59eb60058924d7cf79a7c61b706032ee6c0 | 4949ed7c03700cd9ff4d59c4c4e2c9ec3157b8e7 | refs/heads/master | 2021-05-01T03:23:13.715720 | 2018-02-08T17:53:57 | 2018-02-08T17:53:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,256 | py | from __future__ import print_function
import json
import urllib
import os
import base64
import boto3
print('Loading function')
S3_BUCKET = os.environ['S3_BUCKET']
STAGE = os.environ['STAGE']
AUTH_URL = os.environ['AUTH_URL']
URL = os.environ['URL']
def build_config():
    """Assemble the client-side configuration payload served at /config.json."""
    return {"auth_url": AUTH_URL, "url": URL}
def lambda_handler(event, context):
    """API Gateway proxy handler: gate on a session cookie, then serve the
    SPA's static assets from S3 (with a few content-type special cases).

    Returns an API Gateway proxy response dict (statusCode/body/headers and,
    for binary assets, isBase64Encoded=True).
    """
    print(event)
    try:
        print(urllib.unquote(event['headers']['Cookie']))
        # Cookie header is assumed to look like "name=value"; take the value.
        session_cookie = urllib.unquote(event['headers']['Cookie']).split('=')[1]
        print(session_cookie)
        if session_cookie == 'undefined':
            # WARNING: this is not secure. Cookie should be validated as well...TODO
            body = {"msg": "No session cookie present"}
            location = AUTH_URL
            headers = {"Location": location}
            response = {
                "statusCode": 302,
                "body": json.dumps(body),
                "headers": headers
            }
            return response
    except Exception as e:
        # Missing/malformed Cookie header: redirect to the auth endpoint.
        body = {"msg": "No session cookie present"}
        location = AUTH_URL
        headers = {"Location": location}
        response = {
            "statusCode": 302,
            "body": json.dumps(body),
            "headers": headers
        }
        return response
    headers = {
        "Content-Type": "text/html"
    }
    path = event['requestContext']['path']
    s3 = boto3.client('s3')
    # Default mapping: the S3 key mirrors the request path minus the slash.
    s3_key = path[1:]
    print("S3 key: %s" % s3_key)
    encoded = False
    if path == '/' or path == '':
        s3_key = "index.html"
        ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()
    elif path == '/config.json':
        # Built on the fly from environment config, not fetched from S3.
        config = build_config()
        ct = json.dumps(config)
    elif path == '/favicon.ico':
        #s3_key = 'favicon.ico'
        headers = {
            "Content-Type": "image/x-icon"
        }
        encoded = True
        ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())
    elif '.png' in path:
        #s3_key = 'favicon.png'
        headers = {
            "Content-Type": "image/png"
        }
        encoded = True
        ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())
    elif '.css' in path:
        #s3_key = event['requestContext']['path'][1:]
        headers = {
            "Content-Type": "text/css"
        }
        ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()
    elif path.endswith(".js"):
        headers = {
            "Content-Type": "text/javascript"
        }
        print(s3_key)
        ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()
    elif path.endswith(".woff2") or path.endswith(".woff") or path.endswith(".ttf"):
        # Font files are binary; API Gateway requires base64-encoded bodies.
        headers = {
            "Content-Type": "binary/octet-stream",
        }
        encoded = True
        ct = base64.b64encode(s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read())
    else:
        print("ELSE: %s" % s3_key)
        ct = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)['Body'].read()
    print(s3_key)
    response = {
        "statusCode": 200,
        "body": ct,
        "headers": headers,
        "isBase64Encoded": encoded
    }
    return response
| [
"robert.davis@cfacorp.com"
] | robert.davis@cfacorp.com |
9a16a9f55349336b0fa57b5ee1bb05f0f9b2d80a | 1bf33aaaa787d05037771643bfd15e346fbf9414 | /pages/migrations/0002_auto_20210303_1357.py | 6a9c98d97b05df3e7edbb7a3d949188af95a5b70 | [] | no_license | krishanwishwa/Efarm-Harvest-Management-System | 4b23df89544956413f2973548670c792f7801875 | c514bd5e18f048e4c682a4f6c72aa8d70b695ab1 | refs/heads/main | 2023-05-05T00:31:38.143241 | 2021-06-03T12:03:13 | 2021-06-03T12:03:13 | 373,163,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 3.1.6 on 2021-03-03 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Product.fcontact_number a blank-able CharField(max_length=17)."""

    dependencies = [
        ('pages', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='fcontact_number',
            field=models.CharField(blank=True, max_length=17),
        ),
    ]
| [
"wishwa32@gmail.com"
] | wishwa32@gmail.com |
67cde3416c4281b6a0c5b9c83b7d57abc55b6ba7 | 9346a8509822d1f40ffdddd079081bb9df7833dc | /convert_seg_to_bbox_txt_file.py | cd4cf4553b74c59b151855ffd99e1a1550628aa5 | [
"MIT"
] | permissive | crmauceri/dataset_loaders | 57e1d88d66d0bf0760797887d88fd2e7332b3363 | 542401b1a9a1ca98a15b4e529a3e78f9d0f91f52 | refs/heads/master | 2023-03-13T22:12:12.452223 | 2021-02-26T17:44:46 | 2021-02-26T17:44:46 | 341,955,417 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | from dataloaders import make_data_loader
import argparse, os.path, os
import numpy as np
from tqdm import tqdm
from dataloaders.config.defaults import get_cfg_defaults
## Use to convert dataset to YOLO format txt files
def main(cfg):
    """Write per-image YOLO-format bbox .txt files, plus one image-list file,
    for each of the first three dataloaders (train/val/test) built from cfg."""
    datasets = make_data_loader(cfg)
    for dataset in datasets[:3]:
        img_list = []
        if dataset is not None:
            for ii, sample in enumerate(tqdm(dataset)):
                for jj in range(len(sample["id"])):
                    if cfg.DATASET.NAME == 'cityscapes':
                        # Cityscapes: derive the label path straight from the image id.
                        filepath = sample['id'][jj].replace('leftImg8bit', 'bbox').replace('png', 'txt')
                        img_list.append(sample['id'][jj])
                    elif cfg.DATASET.NAME in ['sunrgbd', 'coco']:
                        # COCO-style sets: resolve the image path via the COCO
                        # index, then swap the last 'image' path segment for 'bbox'.
                        id = dataset.dataset.coco_id_index[sample['id'][jj].item()]
                        img_path, depth_path, img_id = dataset.dataset.get_path(id)
                        assert img_id == sample['id'][jj].item()
                        filepath = 'bbox'.join(img_path.rsplit('image', 1))
                        filepath = os.path.splitext(filepath)[0] + '.txt'
                        img_list.append(dataset.dataset.coco.loadImgs(img_id)[0]['file_name'])
                    # NOTE(review): `filepath` is unbound here if DATASET.NAME is
                    # neither cityscapes nor sunrgbd/coco -- confirm supported names.
                    dir = os.path.dirname(filepath)
                    if not os.path.exists(dir):
                        os.makedirs(dir)
                    #if not os.path.exists(filepath):
                    # One row per box: integer class id + four normalized coords.
                    np.savetxt(filepath, sample['label'][jj], delimiter=",", fmt=['%d', '%10.8f', '%10.8f', '%10.8f', '%10.8f'])
            f = '{}/image_list_{}.txt'.format(cfg.DATASET.ROOT, dataset.dataset.split)
            with open(f, 'w') as fp:
                fp.write('\n'.join(img_list))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert instance segmentation annotation to yolo txt files")
parser.add_argument('config_file', help='config file path')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.merge_from_list(['DATASET.ANNOTATION_TYPE', 'bbox', \
'DATASET.NO_TRANSFORMS', True, \
'TRAIN.BATCH_SIZE', 1, \
'TEST.BATCH_SIZE', 1])
print(cfg)
main(cfg) | [
"cema3961@colorado.edu"
] | cema3961@colorado.edu |
7633908cfeff1a7a7f1a5f8a38599e5054b16595 | e80c7c62cbf193688df374b99802915134db4122 | /CsvReader/CSVFiles.py | 335589cac29be5c40f57bc4a344d18bd6e78037e | [] | no_license | yk273/IS601-Mini-Project-2 | 5f459a041ae60451321ba3bd50d77736fb1d85cd | 8dd6967ff14dbe11f938477be0f63c4201d44ba0 | refs/heads/master | 2020-09-02T11:57:48.930488 | 2019-11-20T00:42:49 | 2019-11-20T00:42:49 | 219,215,427 | 2 | 0 | null | 2019-11-16T03:45:47 | 2019-11-02T21:20:57 | Python | UTF-8 | Python | false | false | 486 | py | from CsvReader.CsvReader import CSVReader
from CsvReader.TestData import test_data_values
from CsvReader.TestDataResults import test_data_results
class CSVFiles(CSVReader):
    """CSVReader subclass that runs the bundled test-data helper functions."""
    def __init__(self):
        super().__init__()
    def test_data_value(self, test_data):
        # NOTE(review): the `test_data` parameter is ignored; this reads
        # self.test_data instead -- presumably set by CSVReader; confirm.
        self.result = test_data_values(self.test_data)
        return self.result
    def test_data_results(self, results_data):
        # NOTE(review): self.results_data is never assigned in this class;
        # the `results_data` parameter was likely intended -- verify.
        # (The call resolves to the imported module-level function, not
        # this method, since methods are not in the local scope.)
        self.result = test_data_results(self.results_data)
        return self.result
| [
"yk273@njit.edu"
] | yk273@njit.edu |
be3cab903221403283dcb433087d1d2770b819c1 | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /SipopPolicyImg/SipopPolicyImg/settings.py | dc0ee7131688caeaf0e766dc8fe3b44d9a87787e | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 2,169 | py | # Scrapy settings for SipopPolicyImg project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
SPIDER_MODULES = ['SipopPolicyImg.spiders']
NEWSPIDER_MODULE = 'SipopPolicyImg.spiders'
ROBOTSTXT_OBEY = False
# USER_AGENT = 'scrapy-redis (+https://github.com/rolando/scrapy-redis)'
# scrapy-redis: distributed dedup filter + scheduler backed by Redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"
ITEM_PIPELINES = {
    'SipopPolicyImg.pipelines.SipoppolicyPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}
LOG_LEVEL = 'DEBUG'
DOWNLOADER_MIDDLEWARES = {
    'SipopPolicyImg.middlewares.RandMiddleware': 543,
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
}
REDIS_HOST = '127.0.0.1' # can also be changed to localhost as appropriate
REDIS_PORT = 6379
REDIS_PARAMS = {
    'db': 15,
}
# Introduce an artifical delay to make use of parallelism. to speed up the
# crawl.
DOWNLOAD_DELAY = 1
CONCURRENT_REQUESTS = 8
RETRY_ENABLED = True # retry middleware; can be disabled explicitly, default is True (enabled)
RETRY_HTTP_CODES = [302] # HTTP status codes to retry; other errors are discarded
RETRY_TIMES = 10 # number of retry attempts
AUTOTHROTTLE_ENABLED = True # AutoThrottle (automatic rate-limiting) extension
AUTOTHROTTLE_START_DELAY = 5.0
# initial download delay (in seconds)
AUTOTHROTTLE_MAX_DELAY = 60.0
# maximum download delay to use under high latency (in seconds)
AUTOTHROTTLE_DEBUG = True
# enable AutoThrottle debug mode, which shows stats for every response received so parameters can be tuned in real time
AUTOTHROTTLE_TARGET_CONCURRENCY = 10
# number of requests Scrapy should send in parallel to a remote site; raise for more throughput/load on the remote server, lower to be more conservative and polite
HTTPERROR_ALLOWED_CODES = [302, 500, 502, 404, 403, 503]
| [
"34021500@qq.com"
] | 34021500@qq.com |
3aafacd2c3e9f63889114636ae310d1c6ca37f74 | f7d343efc7b48818cac4cf9b98423b77345a0067 | /training/rotateArray.py | e9806b40cb7999c3ceb19bcba0f938c7c66662d9 | [] | no_license | vijaymaddukuri/python_repo | 70e0e24d0554c9fac50c5bdd85da3e15c6f64e65 | 93dd6d14ae4b0856aa7c6f059904cc1f13800e5f | refs/heads/master | 2023-06-06T02:55:10.393125 | 2021-06-25T16:41:52 | 2021-06-25T16:41:52 | 151,547,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | n=[1,2,3,4,5,6,7]
k=3
def rotateArray(n, k):
    """Return a copy of sequence `n` rotated left by `k` positions."""
    head, tail = n[:k], n[k:]
    return tail + head
newArray=rotateArray(n,k)
print(newArray) | [
"Vijay.Maddukuri@virtustream.com"
] | Vijay.Maddukuri@virtustream.com |
350eba5e9afe62233d36b52a1ace4dc2f838cdbb | b60f6e1275592d0461727ed93bdd6402a1dbd17c | /tests/citation_test.py | 14d97302dc71162a12d1961d9e8836f2617150b8 | [] | no_license | fidelitousone/researchpal-sprint2 | fe42fe3bf009c0bca92281d30a9bb41c94a2f9e5 | bafd5cc020153935e0cd6994877bc46a6b9a94cf | refs/heads/main | 2023-01-28T02:56:20.835685 | 2020-12-09T04:35:31 | 2020-12-09T04:35:31 | 313,133,707 | 0 | 0 | null | 2020-12-09T04:35:32 | 2020-11-15T22:14:29 | JavaScript | UTF-8 | Python | false | false | 4,636 | py | import pytest
from app import Citations
# pylint: disable = redefined-outer-name,too-few-public-methods,too-many-arguments
@pytest.fixture
def mocked_citation_list(mocked_citation_model):
    """Expected payload of the "all_citations" socket event for the mocked citation."""
    return {
        "citation_list": [
            {
                "source_id": mocked_citation_model.source_id,
                "is_cited": mocked_citation_model.is_cited,
                "mla": mocked_citation_model.mla_citation,
                "apa": mocked_citation_model.apa_citation,
            },
        ],
    }
# pylint: disable = invalid-name,no-self-use,unused-argument
class TestCitationFlow:
    """Socket.IO tests for the "get_all_citations" event."""
    def test_get_all_citatons(
        self,
        client,
        db_session,
        socketio_client,
        mocked_citation_model,
        mocked_source_model,
        mocked_project_with_sources,
        mocked_user_model,
        mocked_project_request,
        mocked_citation_list,
    ):
        """Logged-in user with a populated project receives the full citation list."""
        # Emitting without a payload must raise (handler requires an argument).
        with pytest.raises(TypeError):
            socketio_client.emit("get_all_citations")
        # Simulate login
        with client.session_transaction() as sess:
            sess["user"] = mocked_user_model.email
        db_session.add_all(
            [
                mocked_citation_model,
                mocked_source_model,
                mocked_user_model,
                mocked_project_with_sources,
            ]
        )
        db_session.commit()
        # Test original flow
        socketio_client.emit("get_all_citations", mocked_project_request)
        recieved = socketio_client.get_received()
        assert recieved[0]["name"] == "all_citations"
        [all_citations] = recieved[0]["args"]
        assert all_citations == mocked_citation_list
    def test_get_all_citatons_no_project(
        self,
        client,
        db_session,
        socketio_client,
        mocked_user_model,
        mocked_project_request,
    ):
        """Requesting citations for a nonexistent project emits nothing back."""
        with pytest.raises(TypeError):
            socketio_client.emit("get_all_citations")
        # Simulate login
        with client.session_transaction() as sess:
            sess["user"] = mocked_user_model.email
        db_session.add(mocked_user_model)
        db_session.commit()
        # Test original flow
        socketio_client.emit("get_all_citations", mocked_project_request)
        recieved = socketio_client.get_received()
        assert recieved == []
class TestBibliographyFlow:
def test_add_to_bibliography(
self,
client,
db_session,
socketio_client,
mocked_citation_model,
mocked_source_model,
mocked_project_with_sources,
mocked_user_model,
):
with pytest.raises(TypeError):
socketio_client.emit("add_to_bibliography")
# Simulate login
with client.session_transaction() as sess:
sess["user"] = mocked_user_model.email
mocked_citation_model.is_cited = False
db_session.add_all(
[
mocked_citation_model,
mocked_source_model,
mocked_user_model,
mocked_project_with_sources,
]
)
db_session.commit()
# Test original flow
socketio_client.emit(
"add_to_bibliography", {"source_id": mocked_citation_model.source_id}
)
citation_state = (
db_session.query(Citations)
.filter(Citations.source_id == mocked_citation_model.source_id)
.one()
)
assert citation_state.is_cited
def test_remove_from_bibliography(
self,
client,
db_session,
socketio_client,
mocked_citation_model,
mocked_source_model,
mocked_project_with_sources,
mocked_user_model,
):
with pytest.raises(TypeError):
socketio_client.emit("remove_from_bibliography")
# Simulate login
with client.session_transaction() as sess:
sess["user"] = mocked_user_model.email
mocked_citation_model.is_cited = True
db_session.add_all(
[
mocked_citation_model,
mocked_source_model,
mocked_user_model,
mocked_project_with_sources,
]
)
db_session.commit()
# Test original flow
socketio_client.emit(
"remove_from_bibliography", {"source_id": mocked_citation_model.source_id}
)
citation_state = (
db_session.query(Citations)
.filter(Citations.source_id == mocked_citation_model.source_id)
.one()
)
assert not citation_state.is_cited
| [
"48371116+nootify@users.noreply.github.com"
] | 48371116+nootify@users.noreply.github.com |
09b541231ca7b26b86b963b1d56d20ded60d96a8 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Anscombe/trend_ConstantTrend/cycle_12/ar_/test_artificial_128_Anscombe_ConstantTrend_12__100.py | 90590116d599656197e31e656d41ca810bd1f95f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 269 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
4e9ee05904e5eb5a995d42cf800ec0f6744fe564 | 712991d89625cdc7891975b2fa3a210e655d270f | /main.py | 55f06366f0f235e65a7a66bd4cf64433dc5ab05f | [
"MIT"
] | permissive | WiraDKP/cow_detection_opencv | 639080dd04f23bdc8de857fc675d6440bc7da5b5 | 8a136989e3a40af44269ff31e2980913d3b7b7be | refs/heads/master | 2023-07-20T10:06:22.652338 | 2023-07-14T14:33:40 | 2023-07-14T14:33:40 | 283,517,597 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | import cv2
from detect import CowDetection
from jcopvision.io import MediaReader, key_pressed, create_sized_window
import config as cfg
if __name__ == "__main__":
media = MediaReader(cfg.FILE_PATH)
model = CowDetection(cfg.MODEL_PATH, cfg.CONFIG_PATH, cfg.LABEL_PATH)
create_sized_window(500, media.aspect_ratio, cfg.WINDOW_NAME)
for frame in media.read():
# Perform Detection
bbox, labels, scores = model.predict(frame, min_confidence=cfg.MIN_CONF, max_iou=cfg.MAX_IOU)
# Draw Bounding Box
frame = model.draw(frame, bbox, labels, scores)
# Visualize
cv2.imshow(cfg.WINDOW_NAME, frame[..., ::-1])
if key_pressed("q"):
break
media.close()
| [
""
] | |
b45775a35e3f67c07cfb7008555c6609457a1dd9 | cb81129ca8171c79535d51f6a8ff2cf4c0bf1b16 | /Ejemplo I.py | de30cad3d2495845398f51480300fc93c50f433e | [] | no_license | jgambalo/SOLID-GRASP---ACTIVIDAD2 | dd77157b03115441ee8ac8c4cd66677d86cca8d8 | d0397167ec9ccedbc982e5717114b6e0712b8abf | refs/heads/master | 2022-11-17T01:55:57.168176 | 2020-07-19T23:42:40 | 2020-07-19T23:42:40 | 280,970,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | import abc
from abc import ABC, abstractmethod
# Specific Interfaces
class iAnimal(ABC):
@abc.abstractmethod
def eat(self):
pass
class iFlyAnimal(ABC):
@abc.abstractmethod
def fly(self):
pass
class iWalkAnimal(ABC):
@abc.abstractmethod
def walk(self):
pass
# Animals Classes
class bird(iAnimal, iFlyAnimal):
@classmethod
def eat(self):
print("Eat")
@classmethod
def fly(self):
print("Fly")
class feline(iAnimal,iWalkAnimal):
@classmethod
def eat(self):
print("Eat")
@classmethod
def walk(self):
print("Walk")
#Test
print("---Dove---")
dove = bird()
dove.eat()
dove.fly()
print("---Tiger---")
tiger = feline()
tiger.eat()
tiger.walk()
| [
"noreply@github.com"
] | jgambalo.noreply@github.com |
68d3917a975d50656abdc774bc265c0f3b692829 | 110dd2ffcbb260e63189e05aabcb1381ed365c3d | /matrixbot/ldap.py | 7e3808a12bb3d818f13ac88486e9c3f4aad1cba9 | [
"MIT"
] | permissive | ziegelstein/matrix-bot | 486de59cd890a6fd326a0a123e369a2ffeed2a04 | b6db44d39fc23f2bd0c890081fcbb3020f70a614 | refs/heads/master | 2020-12-30T15:53:51.168664 | 2017-05-10T14:16:07 | 2017-05-10T14:16:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,686 | py | # -*- coding:utf-8 -*-
#
# Author: Pablo Saavedra
# Maintainer: Pablo Saavedra
# Contact: saavedra.pablo at gmail.com
from __future__ import absolute_import
import ldap as LDAP
from . import utils
def get_custom_ldap_group_members(ldap_settings, group_name):
logger = utils.get_logger()
ldap_server = ldap_settings["server"]
ldap_base = ldap_settings["base"]
get_uid = lambda x: x[1]["uid"][0]
members = []
try:
conn = LDAP.initialize(ldap_server)
g_ldap_filter = ldap_settings[group_name]
logger.debug("Searching members for %s: %s" % (group_name,
g_ldap_filter))
items = conn.search_s(ldap_base, LDAP.SCOPE_SUBTREE,
attrlist=['uid'],
filterstr=g_ldap_filter)
members = map(get_uid, items)
except Exception, e:
logger.error("Error getting custom group %s from LDAP: %s" % (group_name, e))
return members
def get_ldap_group_members(ldap_settings, group_name):
# base:dc=example,dc=com
# filter:(&(objectClass=posixGroup)(cn={group_name}))
logger = utils.get_logger()
ldap_server = ldap_settings["server"]
ldap_base = ldap_settings["groups_base"]
ldap_filter = "(&%s(%s={group_name}))" % (ldap_settings["groups_filter"], ldap_settings["groups_id"])
get_uid = lambda x: x.split(",")[0].split("=")[1]
try:
ad_filter = ldap_filter.replace('{group_name}', group_name)
conn = LDAP.initialize(ldap_server)
logger.debug("Searching members for %s: %s - %s - %s" % (group_name,
ldap_server,
ldap_base,
ad_filter))
res = conn.search_s(ldap_base, LDAP.SCOPE_SUBTREE, ad_filter)
except Exception, e:
logger.error("Error getting group from LDAP: %s" % e)
return map(get_uid, res[0][1]['uniqueMember'])
def get_ldap_groups(ldap_settings):
'''Returns the a list of found LDAP groups filtered with the groups list in
the settings
'''
# filter:(objectClass=posixGroup)
# base:ou=Group,dc=example,dc=com
logger = utils.get_logger()
ldap_server = ldap_settings["server"]
ldap_base = ldap_settings["groups_base"]
ldap_filter = ldap_settings["groups_filter"]
ldap_groups = ldap_settings["groups"]
get_uid = lambda x: x[1]["cn"][0]
try:
conn = LDAP.initialize(ldap_server)
logger.debug("Searching groups: %s - %s - %s" % (ldap_server,
ldap_base,
ldap_filter))
res = conn.search_s(ldap_base, LDAP.SCOPE_SUBTREE, ldap_filter)
return filter((lambda x: x in ldap_groups), map(get_uid, res))
except Exception, e:
logger.error("Error getting groups from LDAP: %s" % e)
def get_ldap_groups_members(ldap_settings):
logger = utils.get_logger()
ldap_groups = ldap_settings["groups"]
groups = get_ldap_groups(ldap_settings)
res = {}
for g in groups:
res[g] = get_ldap_group_members(ldap_settings, g)
# pending groups to get members. filters for those groups are explicitelly
# defined in the settings
custom_groups = filter((lambda x: x not in groups), ldap_groups)
logger.error("custom_groups: %s", custom_groups)
for g in custom_groups:
res[g] = get_custom_ldap_group_members(ldap_settings, g)
return res
def get_groups(ldap_settings):
return ldap_settings["groups"]
| [
"psaavedra@igalia.com"
] | psaavedra@igalia.com |
095c4a3eedbb733256a12214bc9812bc46989004 | e4ebe0f85e458ab76631ebdbf7ce691a72975e36 | /Documents/GitHub/kvizo_core-kvizo_web/web/quizkly/quizkly_app/models.py | 42196d2e22c0bbcbee4f27ef11e1182cc2432f31 | [] | no_license | shiningsunnyday/quizkly_landing | 45e07672a9ff39aeb1d22bd63493714032da931c | 10345aa6f323009ad74b8b7ceaec46cf251de579 | refs/heads/master | 2020-04-14T07:29:40.551350 | 2019-03-25T13:33:19 | 2019-03-25T13:33:19 | 163,714,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | from django.db import models
from django.contrib.auth.models import User
# for all people who signed up mailing list
class AppUser(models.Model):
registered = models.DateTimeField(auto_now_add = True)
user = models.OneToOneField(User, on_delete = models.CASCADE, primary_key = True)
class Meta:
ordering = ("registered",)
class Corpus(models.Model):
user = models.ForeignKey(AppUser, on_delete = models.CASCADE)
submitted = models.DateTimeField(auto_now_add=True)
content = models.TextField()
class Meta:
ordering = ("submitted",)
class Quiz(models.Model):
corpus = models.OneToOneField(Corpus, on_delete = models.CASCADE)
generated = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=30)
class Meta:
ordering = ("generated",)
class Question(models.Model):
quiz = models.ForeignKey(Quiz, on_delete = models.CASCADE)
generated = models.DateTimeField(auto_now_add=True)
question = models.TextField()
correct = models.IntegerField()
class Meta:
ordering = ("generated",)
class Distractor(models.Model):
index = models.IntegerField()
generated = models.DateTimeField(auto_now_add=True)
question = models.ForeignKey(Question, on_delete = models.CASCADE)
text = models.CharField(max_length=30)
class Meta:
ordering = ("generated",)
| [
"michaelsun18@yahoo.com"
] | michaelsun18@yahoo.com |
40559f6e802d7427106f476a0845fb974b53ea71 | 8a0f37c6847bc8ef2309bdf8c352d3f0373c9264 | /androidemu/internal/module.py | d0d10fbab05a3dda299f032a194b065aea49d68f | [] | no_license | xiaoxiao2050/AndroidNativeEmu | 444799622e36e37859d3faecb891b94a3730e98f | 6af92872f4ec011a3fdd7e54e953894ed772db55 | refs/heads/master | 2020-06-21T02:22:30.224296 | 2019-06-20T21:33:30 | 2019-06-20T21:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | class Module:
"""
:type filename str
:type base int
:type size int
"""
def __init__(self, filename, address, size, symbols_resolved):
self.filename = filename
self.base = address
self.size = size
self.symbols = symbols_resolved
self.symbol_lookup = dict()
# Create fast lookup.
for symbol_name, symbol in self.symbols.items():
if symbol.address != 0:
self.symbol_lookup[symbol.address] = (symbol_name, symbol)
def find_symbol(self, name):
if name in self.symbols:
return self.symbols[name]
return None
| [
"aeonlucid@outlook.com"
] | aeonlucid@outlook.com |
ca03f0594aeb6111fbac293d4b25d98d5442ca0a | e67d38be1fda0d9c09fb169d73d9f3153f3c4610 | /book_me/asgi.py | fd0b8eadb78102d7191d07a23a53d09331705b54 | [] | no_license | s-kobets/book_me | 965b95e92886baaeb389e4212a7c88e08626a200 | d5999ac1cec08adc126b46615ecdd6ba753d2c35 | refs/heads/master | 2023-06-10T03:12:08.264666 | 2021-07-05T18:57:22 | 2021-07-05T18:57:22 | 382,767,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for book_me project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'book_me.settings')
application = get_asgi_application()
| [
"s.kobets@semrush.com"
] | s.kobets@semrush.com |
e6f0111eeb863107dddf51b208d6fc2247d2f9f9 | 402b566f68624725ba498bcc29dbbfc2f4f4e713 | /learning_log/learning_logs/urls.py | 35c1cbd54d2b5d50bc1dc220ecf55517619a6fb5 | [] | no_license | KevinG1thub/kevin.learning_log | 8f3d2c8d48c525170c00271d14f6be5d8fbe34c4 | 01b25697d2402b99a12241c96e7374d47d5db8e1 | refs/heads/main | 2023-02-01T10:25:53.181331 | 2020-12-17T14:12:06 | 2020-12-17T14:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | """Defines URL patterns for learning_logs."""
from django.urls import path
from . import views
app_name = 'learning_logs'
urlpatterns = [
# Home page
path('', views.index, name='index'),
#page that shows all topics
path('topics/', views.topics, name='topics'),
#detail page for single topic
path('topics/<int:topic_id>/', views.topic, name='topic'),
#page for adding new topic
path('new_topic/', views.new_topic, name='new_topic'),
# Page for adding a new entry
path('new_entry/<int:topic_id>/', views.new_entry, name='new_entry'),
# Page for editing an entry.
path('edit_entry/<int:entry_id>/', views.edit_entry, name='edit_entry'),
] | [
"kevin.loetscher@seantis.ch"
] | kevin.loetscher@seantis.ch |
62476df0b7f7d2e336afdd3147f644b538346cf3 | 6e9c127bd6705a8b92f240ca663163504b86cd81 | /elecsim/reinforcement_learning/__init__.py | c81092b6f862e68895065cc1c34baacd81f097c9 | [
"MIT"
] | permissive | alexanderkell/elecsim | 239ffd539d1b04f24186ddaae20ac4ce6b258c03 | df9ea14cbc8dd3fd4302be9274cb6ea61c0cdb10 | refs/heads/master | 2023-04-06T10:03:35.367411 | 2023-04-05T16:52:16 | 2023-04-05T16:52:16 | 124,561,430 | 36 | 10 | MIT | 2022-12-08T01:57:45 | 2018-03-09T15:55:53 | Jupyter Notebook | UTF-8 | Python | false | false | 229 | py | """
File name: __init__.py
Date created: 01/03/2019
Feature: #Enter feature description here
"""
__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "alexander@kell.es"
| [
"alexander@kell.es"
] | alexander@kell.es |
53de0dc2fc3ca23f7489ba815b0d47e166c0a41d | 54a0b86d4c3f731487ad4470fb365907970472e6 | /P1/studentparameters/Project1_Parameters_gg.py | 84f2505c0e7969acde0d86286867f8a7f41dcddc | [] | no_license | samiurrahman98/ece458-computer-security | 26aa46e174b0bf77f748e6451dd2e0e4183feebd | cf79430b98e3679ffcd687a0c96b5e979187e1e3 | refs/heads/master | 2022-11-25T01:26:36.874094 | 2020-07-31T21:24:53 | 2020-07-31T21:24:53 | 280,979,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # Select the file name that matches your first two letters of your last name on Learn
# Read those parameters as your ECE458 project 1 parameters
# p,q,g are DSA domain parameters, sk_i (secret keys) are used in each signature and verification
p=16158504202402426253991131950366800551482053399193655122805051657629706040252641329369229425927219006956473742476903978788728372679662561267749592756478584653187379668070077471640233053267867940899762269855538496229272646267260199331950754561826958115323964167572312112683234368745583189888499363692808195228055638616335542328241242316003188491076953028978519064222347878724668323621195651283341378845128401263313070932229612943555693076384094095923209888318983438374236756194589851339672873194326246553955090805398391550192769994438594243178242766618883803256121122147083299821412091095166213991439958926015606973543
q=13479974306915323548855049186344013292925286365246579443817723220231
g=9891663101749060596110525648800442312262047621700008710332290803354419734415239400374092972505760368555033978883727090878798786527869106102125568674515087767296064898813563305491697474743999164538645162593480340614583420272697669459439956057957775664653137969485217890077966731174553543597150973233536157598924038645446910353512441488171918287556367865699357854285249284142568915079933750257270947667792192723621634761458070065748588907955333315440434095504696037685941392628366404344728480845324408489345349308782555446303365930909965625721154544418491662738796491732039598162639642305389549083822675597763407558360
sk1=4238046188863111196134523391567180673009409153722853144581286972290
sk2=5251810960028969703460342961462449674500434928936370393443116963182
sk3=1290595980181313010068948362172310388515606811911036234145294356283
| [
"S72RAHMA@uwaterloo.ca"
] | S72RAHMA@uwaterloo.ca |
2d8d7aa5d4dc881b46a68978d65a521a65f2e9fa | da69c31ec6eafd545f62a979014d6bf06b187512 | /testing/aa_todo/test_workflowqueue.py | feb03e904b6bfba6e2eea912a97e05f0b405e798 | [
"CC0-1.0"
] | permissive | dirigit/WALKOFF | ffba8e9134588ea22b1ef51c9d00949211e06f2e | ec70db590060b0c2ab24a19677d4445621c29d01 | refs/heads/master | 2020-12-13T22:23:16.705568 | 2020-05-03T06:53:52 | 2020-05-03T06:53:52 | 234,548,177 | 0 | 0 | NOASSERTION | 2020-05-03T06:53:53 | 2020-01-17T12:53:18 | null | UTF-8 | Python | false | false | 4,661 | py | import json
import logging
import yaml
from starlette.testclient import TestClient
from testing.api.helpers import assert_crud_resource
logger = logging.getLogger(__name__)
base_workflowqueue_url = "/walkoff/api/workflowqueue/"
base_workflows_url = "/walkoff/api/workflows/"
def test_sanity_check(api: TestClient, auth_header: dict):
p = api.get(base_workflowqueue_url, headers=auth_header)
assert p.status_code == 200
assert p.json() == []
def test_execute_workflow(api: TestClient, auth_header: dict):
with open('testing/util/workflow.json') as fp:
wf_json = json.load(fp)
p = api.post(base_workflows_url, headers=auth_header, data=json.dumps(wf_json))
assert p.status_code == 201
with open('testing/util/workflowqueue.json') as fp:
wf_queue_json = json.load(fp)
p = api.post(base_workflowqueue_url, headers=auth_header, data=json.dumps(wf_queue_json))
assert p.status_code == 202
p = api.get(base_workflowqueue_url, headers=auth_header)
assert p.status_code == 200
assert len(p.json()) == 1
p = api.get(base_workflowqueue_url + wf_queue_json["execution_id"], headers=auth_header)
assert p.status_code == 200
assert p.json()["status"] == "PENDING"
def test_execute_workflow_with_trigger(api: TestClient, auth_header: dict):
with open('testing/util/workflow_with_trigger.json') as fp:
wf_json = json.load(fp)
p = api.post(base_workflows_url, headers=auth_header, data=json.dumps(wf_json))
assert p.status_code == 201
with open('testing/util/workflowqueue_with_trigger.json') as fp:
wf_queue_json = json.load(fp)
p = api.post(base_workflowqueue_url, headers=auth_header, data=json.dumps(wf_queue_json))
assert p.status_code == 202
p = api.get(base_workflowqueue_url, headers=auth_header)
assert p.status_code == 200
assert len(p.json()) == 1
p = api.get(base_workflowqueue_url + wf_queue_json["execution_id"], headers=auth_header)
assert p.status_code == 200
assert p.json()["status"] == "PENDING"
def test_invalid_get(api: TestClient, auth_header: dict):
with open('testing/util/workflowqueue.json') as fp:
wf_queue_json = json.load(fp)
p = api.post(base_workflowqueue_url, headers=auth_header, data=json.dumps(wf_queue_json))
assert p.status_code == 404
def test_cleardb(api: TestClient, auth_header: dict):
assert False
def test_control_workflow_trigger(api: TestClient, auth_header: dict):
with open('testing/util/workflow_with_trigger.json') as fp:
wf_json = json.load(fp)
p = api.post(base_workflows_url, headers=auth_header, data=json.dumps(wf_json))
assert p.status_code == 201
trigger_id = wf_json["triggers"][0]["id_"]
with open('testing/util/workflowqueue_with_trigger.json') as fp:
wf_queue_json = json.load(fp)
p = api.post(base_workflowqueue_url, headers=auth_header, data=json.dumps(wf_queue_json))
assert p.status_code == 202
data = {
"status": "trigger",
"trigger_id": trigger_id
}
p = api.patch(base_workflowqueue_url + wf_queue_json["execution_id"],
headers=auth_header, data=json.dumps(data))
assert p.status_code == 204
p = api.get(base_workflowqueue_url + wf_queue_json["execution_id"], headers=auth_header)
assert p.status_code == 200
assert p.json()["status"] == "EXECUTING"
def test_control_workflow_abort(api: TestClient, auth_header: dict):
with open('testing/util/workflow.json') as fp:
wf_json = json.load(fp)
p = api.post(base_workflows_url, headers=auth_header, data=json.dumps(wf_json))
assert p.status_code == 201
with open('testing/util/workflowqueue.json') as fp:
wf_queue_json = json.load(fp)
p = api.post(base_workflowqueue_url, headers=auth_header, data=json.dumps(wf_queue_json))
assert p.status_code == 202
data = {
"status": "abort"
}
p = api.patch(base_workflowqueue_url + wf_queue_json["execution_id"],
headers=auth_header, data=json.dumps(data))
assert p.status_code == 204
p = api.get(base_workflowqueue_url + wf_queue_json["execution_id"], headers=auth_header)
assert p.status_code == 200
assert p.json()["status"] == "ABORTED"
def test_rud_workflow_dne(api: TestClient, auth_header: dict):
data = {
"status": "trigger"
}
dne_id = "360b4e27-3bc3-4499-abae-2364bd99ade7"
p = api.get(base_workflowqueue_url + dne_id, headers=auth_header)
assert p.status_code == 404
p = api.patch(base_workflowqueue_url + dne_id, headers=auth_header, data=json.dumps(data))
assert p.status_code == 404 | [
"burke_hannah@bah.com"
] | burke_hannah@bah.com |
6b06e5c09272e54d1cad1ac49c215743268c2543 | e5ddf4ad2f78e8e519a65e148611dfea7fc3f45e | /hw2_multi_SVM.py | 2a5499ec9c114a98f10e6dd11d882179706a5754 | [] | no_license | Charlie-Ren/ML5525 | 85165af67901ec3d67a8604dde782f2b2c1d732f | 345202a514252740b04234e027d787d62c418837 | refs/heads/master | 2021-05-17T17:57:56.011352 | 2020-03-28T22:34:22 | 2020-03-28T22:34:22 | 250,906,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
global k
import numpy as np
import pandas as pd
import cvxopt
from matplotlib import pyplot as plt
data_1=pd.read_csv("mfeat_train.csv",index_col=0).to_numpy()
data_2=pd.read_csv("mfeat_test.csv",index_col=0).to_numpy()
[m,n]=data_1.shape
#we have 10 classes, 1 - 10, and we want to make it 0-9, which is easier to operate
data_1[:,n-1]-=1
data_2[:,n-1]-=1
train_x=data_1[:,:n-1]
train_y=data_1[:,n-1].reshape(-1,1)
test_y=data_2[:,n-1].reshape(-1,1)
test_x=data_2[:,:n-1]
k=len(np.unique(train_y)) #k=10
print (np.unique(train_y))
print (np.unique(test_y))
#we need 10 datasets here to do 10 SVM, so we need to devide them based on their classes(labels)-->divide data function
# In[2]:
def rbf(X1,X2,sigma):
return np.exp(-np.linalg.norm(X1-X2)**2/(2*sigma**2))
# In[3]:
def rbf_svm_train(X,y,c=1,sigma=1):#return Alpha
[m,n]=X.shape
y=y.reshape(-1,1)*1. #make it float
Gram=np.zeros((m,m))
count=0
for i in range(m):
for j in range(m):
Gram[i,j]=rbf(X[i],X[j],sigma) #kernel
count+=1
cvxopt.solvers.options['show_progress'] = False
P=cvxopt.matrix(np.outer(y,y)*Gram)
q=cvxopt.matrix(-np.ones((m,1)))
G=cvxopt.matrix(np.vstack((np.eye(m)*-1,np.eye(m))))
h=cvxopt.matrix(np.hstack((np.zeros(m), np.ones(m) * c)))
ans=cvxopt.solvers.qp(P,q,G,h)
alpha=np.array(ans['x'])
return alpha
# In[4]:
def predict(test_X,train_X,train_y,alpha,sigma=1):
len1=test_X.shape[0]
len2=train_X.shape[0]
Gram=np.zeros((len1,len2))
for i in range(len1):
for j in range(len2):
Gram[i,j]=rbf(test_X[i],train_X[j],sigma)
label= Gram @ (alpha*train_y)
return label #predict based on 1 model
# In[5]:
def dividedata(data_1):
temp=data_1.copy()
xdict=dict()
ydict=dict()
for i in range(k):
ydict[i]=np.where(temp[:,64]==i,1,0).reshape(-1,1) #1-10
xdict[i]=temp[:,:64]
return xdict,ydict #successful after test
# In[6]:
#print (xdict[0].shape)
#x_train=xdict[0]
#y_train=ydict[0]
#print (rbf_svm_train(x_train,y_train).shape)
# In[7]:
def getlabel(data_1,test_X): #we keep all trained weights in this step.
xdict,ydict=dividedata(data_1)
label=np.zeros((test_X.shape[0],1))
final_weight=np.zeros((data_1.shape[0],1))
for i in range(k):
print (i)
x_train=xdict[i]
y_train=ydict[i].reshape(-1,1)
alpha=rbf_svm_train(x_train,y_train) #predict on all models and combine
#final_weight=np.concatenate((final_weight,alpha),axis=1) #combine all the alpha
if(i==0):
label=predict(test_X,x_train,y_train,alpha).reshape(-1,1)
final_weight=alpha
else:
label=np.concatenate((label,predict(test_X,x_train,y_train,alpha).reshape(-1,1)),axis=1)
final_weight=np.concatenate((final_weight,alpha.reshape(-1,1)),axis=1)
y_pred=np.argmax(label, axis=1)
return y_pred,final_weight #we do not need column #1 since it is used for intialization
# In[8]:
def err(label,test_y):
return np.count_nonzero(label-test_y)/len(test_y)
# In[9]:
def confusion(label,test_y):
conf=np.zeros((k,k))
for i in range(len(label)):
conf[int(test_y[i]),int(label[i])]+=1 #Actual and predicted class
return conf
# In[ ]:
label,weight=getlabel(data_1,test_x)
print (weight.shape)
# In[ ]:
label=label.reshape(-1,1)
print (1-err(label,test_y))
print (confusion(label,test_y))
np.savetxt("problem7_weight",weight)
# In[ ]:
| [
"noreply@github.com"
] | Charlie-Ren.noreply@github.com |
0b1ea9e87abaeb7dfa27789eae2c5d497a7910a1 | f7553eda5d250834006f72b962a3ba5421a7f3e7 | /pinoStock/product/forms.py | bc5f70dc0b56310c5ff8bad468eb327ffc7e4e5c | [] | no_license | losMomazos/PinoStock_App | efabf677de234801379fe6d4378f322129a816d4 | ee37b8f277547cb58893d88ea1838196a043e9f0 | refs/heads/master | 2022-02-18T07:12:24.692178 | 2018-05-14T23:43:55 | 2018-05-14T23:43:55 | 127,243,504 | 0 | 0 | null | 2022-01-21T19:10:58 | 2018-03-29T05:56:46 | Python | UTF-8 | Python | false | false | 138 | py | from django import forms
class ContactForm(forms.Form):
def __init__(self, arg):
super(ContactForm, self).__init__()
self.arg = arg
| [
"angelocristobalep@gmail.com"
] | angelocristobalep@gmail.com |
c318ceec4cf685ccbeef9e9aecb356187990144e | d400b66030d7ae7157e50230362d6d6a5fd0c797 | /make_score_distribution.py | 9bac556ac3aff89c50901be52c62853f444f438b | [
"MIT"
] | permissive | EleutherAI/pile-cc-filtering | a981fd670d7780fac0c0aad550fbab5d5a25090c | 99f99c56f35f78550805f5c8486db5ba1c9fd358 | refs/heads/master | 2023-01-03T10:13:36.511864 | 2020-10-22T16:26:42 | 2020-10-22T16:26:42 | 305,167,684 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | import lm_dataformat as lmd
import os
from tqdm import tqdm
import sys
import fasttext
datadir = sys.argv[1]
files = [datadir + '/' + f for f in os.listdir(datadir) if f.startswith('data_')]
num_docs = 16882635
docs_per_chunk = num_docs // len(files)
model = fasttext.load_model('cc_filter_model.bin')
def preprocess_for_fasttext(x):
return x.replace('\n', ' ').replace('\r', ' ')[:4000][-1500:]
def get_doc_score(doc):
pred = model.predict(preprocess_for_fasttext(doc))
document_score = pred[1][0]
if pred[0][0] == '__label__cc':
document_score = 1 - document_score
return document_score
def scored(x):
return ((doc, get_doc_score(doc)) for doc in x)
def limit(x, ct):
i = 0
for v in x:
yield v
i += 1
if i >= ct: break
with open('scores.txt', 'w') as score_record:
for f in tqdm(files):
rdr = lmd.Reader(f)
stream = rdr.stream_data()
for _ in range(docs_per_chunk): next(stream)
for doc, score in scored(limit(stream, docs_per_chunk)):
score_record.write(str(score) + '\n')
| [
"leogao31@gmail.com"
] | leogao31@gmail.com |
07c5fb10d5e0e904a2bf198a01e216e33817d279 | 0065ce1c1b03977001d35187d719e0e7699f6569 | /exercice5_test_condition_convertir.py | d389e631e5204c2be505f60235977d50600d7f5e | [] | no_license | gb-0001/scripts | 646f7bc84c5bc92bee6d7a2d31bb32f7bf26967f | d26bc56703ecfb68065594942dbae2df7b4e8eb3 | refs/heads/master | 2023-04-13T08:02:38.601816 | 2021-04-22T20:09:36 | 2021-04-22T20:09:36 | 357,812,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #!/usr/bin/python
devise = str(input("Saisir la devise 'E' ou '$' à convertir:"))
TAUX_DOL_TO_EUR=0.83
TAUX_EUR_TO_DOL=1.20
if devise == 'E':
montant = str(input("Saisir le montant en Euro à convertir en Dollars:"))
calcul= int(montant) * float(TAUX_DOL_TO_EUR)
print("Le montant en Dollars est:", calcul)
elif devise == '$':
montant = str(input("Saisir le montant en Dollars à convertir en Euro:"))
calcul= int(montant) * float(TAUX_EUR_TO_DOL)
print("Le montant en Dollars est:", calcul)
else:
print("Saisir E ou $ ") | [
"gb-0001@outlook.fr"
] | gb-0001@outlook.fr |
917a61701892ed654350558e644e0a435cc2f512 | 67a49408d8fbc1846c5e78e2383416670831e8b8 | /code/plot_attitude.py | c13eead526a912b7ffafa3cc840366a3f23ff4dd | [
"BSD-3-Clause"
] | permissive | justakeit/InertialNav | 515612ce4ac3d54d5e259aafb91d31291dcee88b | 3c1aa90f5bb9f69d8bb3c74932caa475d208d755 | refs/heads/master | 2020-12-25T00:19:39.643079 | 2014-05-13T06:49:46 | 2014-05-13T06:49:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | #!/bin/python
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
data = np.genfromtxt('EulDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'roll', 'roll_onb', 'pitch', 'pitch_onb', 'yaw', 'yaw_onb', 'empty1', 'empty2'])
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax1.set_title("Attitude estimate")
ax1.set_xlabel('time (s)')
ax1.set_ylabel('angle (degrees)')
ax2 = fig.add_subplot(312)
ax2.set_title("Attitude estimate")
ax2.set_xlabel('time (s)')
ax2.set_ylabel('angle (degrees)')
ax3 = fig.add_subplot(313)
ax3.set_title("Attitude estimate")
ax3.set_xlabel('time (s)')
ax3.set_ylabel('angle (degrees)')
data['roll'] = np.multiply(data['roll'], 180 / math.pi)
data['pitch'] = np.multiply(data['pitch'], 180 / math.pi)
data['yaw'] = np.multiply(data['yaw'], 180 / math.pi)
data['roll_onb'] = np.multiply(data['roll_onb'], 180 / math.pi)
data['pitch_onb'] = np.multiply(data['pitch_onb'], 180 / math.pi)
data['yaw_onb'] = np.multiply(data['yaw_onb'], 180 / math.pi)
ax1.plot(data['time'], data['roll'], color='r', label='roll')
ax2.plot(data['time'], data['pitch'], color='g', label='pitch')
ax3.plot(data['time'], data['yaw'], color='b', label='yaw')
ax1.plot(data['time'], data['roll_onb'], color='m', label='roll onboard')
ax2.plot(data['time'], data['pitch_onb'], color='c', label='pitch onboard')
ax3.plot(data['time'], data['yaw_onb'], color='k', label='yaw onboard')
leg = ax1.legend()
plt.show() | [
"lm@inf.ethz.ch"
] | lm@inf.ethz.ch |
f3ddddca66e54f4d6de8933f6b81ae1d7082e0df | 06c38f667bbad83a40ded8ea772e4c9c41663403 | /models/todos.py | eb0064c2c8130dcdda5a4018857e86d5a31fddfd | [] | no_license | naval2408/pytest_api_framework | 0006480085ebc8ddfa58256eb9bf7d3c845f8fef | a0298850f101090b60ee7e51c241075aa70a1385 | refs/heads/master | 2023-05-30T12:52:24.122960 | 2020-06-27T13:09:22 | 2020-06-27T13:09:22 | 266,531,013 | 1 | 0 | null | 2023-05-22T23:28:08 | 2020-05-24T11:55:58 | Python | UTF-8 | Python | false | false | 403 | py | from dataclasses import dataclass
"""
This class defines the structure of
the response for the get todo API.
"""
@dataclass
class ToDos:
user_id: int
id: int
title: str
completed: bool
def __init__(self, data: dict):
self.user_id = data.get('userId')
self.id = data.get('id')
self.title = data.get('title')
self.completed = data.get('completed')
| [
"navalkishorupadhyay@Navals-MacBook-Air.local"
] | navalkishorupadhyay@Navals-MacBook-Air.local |
2b8a2244e4e8b8f8a97d5cbe7d0d579cd8508d15 | bc1525a4e85e49829ccbf7cfc9db6881790fa3a7 | /pyUbiForge/ACU/type_readers/788BAA0D.py | cf65a21e511965a7960ee2750c94202ea6eb269d | [] | no_license | christianbethel1993/ACExplorer | 5183228c1c1beb5d7a3c768f5c8345e00e38f82f | f343de8925e0ca08aff7d2719c5e885dc3e503ac | refs/heads/master | 2023-03-26T13:57:15.390140 | 2019-07-02T12:05:23 | 2019-07-02T12:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from pyUbiForge.misc.file_object import FileObjectDataWrapper
from pyUbiForge.misc.file_readers import BaseReader
class Reader(BaseReader):
file_type = '788BAA0D'
def __init__(self, file_object_data_wrapper: FileObjectDataWrapper):
for _ in range(4):
for _ in range(4):
file_object_data_wrapper.read_float_32()
file_object_data_wrapper.out_file_write('\n')
| [
"james_clare1@yahoo.co.uk"
] | james_clare1@yahoo.co.uk |
6a8ae257ed5d8b212aee3268f49fd10254ece7ce | 611992aafa31bf95702d234203b9c5b9c72fd953 | /api_model/asynch/test/app.py | dffd25ce4dddde20e39efa4536a1a1c93f6b4403 | [
"MIT"
] | permissive | bukosabino/scoring-handler | fd3161a735e06b62a346eccfa6c80178aa1b8965 | 2e642cd90fbac913185c1c334516f4653eada1b3 | refs/heads/main | 2023-03-23T17:05:31.345737 | 2021-02-17T16:01:30 | 2021-02-17T16:01:30 | 339,707,159 | 1 | 0 | MIT | 2021-02-25T21:04:52 | 2021-02-17T11:50:07 | HTML | UTF-8 | Python | false | false | 1,777 | py | import ast
import os
import shutil
import sys
import unittest
import fastapi.testclient as tc
from app.main import app
sys.path.append(os.path.join("..", ".."))
client = tc.TestClient(app)
class AsyncModelApiTest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
dir_path = os.path.dirname(os.path.realpath(__file__))
cls.folder_output = os.path.join(dir_path, "output")
if not os.path.exists(cls.folder_output):
os.makedirs(cls.folder_output)
@classmethod
def tearDownClass(cls) -> None:
if os.path.exists(cls.folder_output):
shutil.rmtree(cls.folder_output)
def test_healthcheck_api(self):
expected_resp = "OK"
resp = client.get("/api/v1/healthcheck")
self.assertEqual(resp.status_code, 200)
resp_text = ast.literal_eval(resp.text)
resp_text = resp_text.get("status")
self.assertEqual(resp_text, expected_resp)
def test_predict_api(self):
parameters = [
{},
{"profile": self.folder_output, "profile-type": "pyinstrument"},
{"profile": self.folder_output, "profile-type": "yappi"},
]
for params in parameters:
with self.subTest(i=params):
resp = client.post(
"/api/v1/ml/async/predict", json=[5.1, 3.5, 1.4, 0.2], params=params
)
self.assertEqual(resp.status_code, 200)
resp_text = ast.literal_eval(resp.text)
resp_text = resp_text.get("detail")
output = float(resp_text)
condition_between = 0.05 > output > 0.04 # TODO: settings values
self.assertTrue(condition_between)
print(output)
| [
"dlpadial@opensistemas.com"
] | dlpadial@opensistemas.com |
3286465137b14a191af6344d9f59cdd47d20c266 | aedb994a7f1d2fee9a4ac39a39b657a863e86dd5 | /exercicio_secao_07_p2.py | cc941a57d1909c11063cd36778187524df9cb829 | [] | no_license | Carlos2y/exercicios_python_secao_GeekUniversity | 6a14355577f6366acd222a267dc3ceaa944f6039 | 9107ee717eb08dc00899fbe02120a9efa2c8ecbc | refs/heads/main | 2023-06-18T17:32:00.328465 | 2021-07-08T22:11:31 | 2021-07-08T22:11:31 | 315,795,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,217 | py | from exercicio_secao_07 import question
import random
import numpy as np
import pandas as pd
def PM(m):
c = []
for i in range(len(m[0])):
c.append(" ")
x = pd.DataFrame(m, columns=c)
print(x.to_string(index=False))
question(1)
matrix = [[],[],[],[]]
for a in range(len(matrix)):
for b in range(10):
matrix[a].append(random.randrange(100))
print("Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
question(2)
matrix = [[],[],[],[],[]]
for a in range(len(matrix)):
matrix[a].append(1)
for b in range(9):
matrix[a].append(0)
print("Matrix")
for i in range(len(matrix)):
print(matrix[i])
question(3)
matrix = [[],[],[],[]]
for a in range(len(matrix)):
for b in range(10):
matrix[a].append(a * b)
print("Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
question(4)
matrix = [[],[],[],[]]
for a in range(len(matrix)):
for b in range(10):
matrix[a].append(a * b)
print("Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print(f"Maior: {max(max(matrix))}")
question(5)
matrix = [[],[],[],[],[]]
for a in range(1, len(matrix) + 1):
for b in range(10):
matrix[a - 1].append(a * b)
print("Informe um numero: ",end=" ")
n = int(input())
x = ""
y = ""
c = 0
for i in range(len(matrix)):
if n in matrix[i]:
x += str(i)
y += str(matrix[i].index(n))
if x != "" and y != "":
c = int(len(str(x)) + len(str(y)) / 2) - 1
for i in range(c):
print(f"{i + 1}. {n} encontrado na matrix {x[i]} linha {y[i]}.")
else:
print(f"{n} não encontrado.")
question(6)
matrix =[[],[],[],[]]
matrix2 = [[],[],[],[]]
for a in range(len(matrix)):
for b in range(10):
matrix[a].append(a * b)
for a in range(len(matrix2)):
matrix2[a].append(max(matrix[a]))
for i in range(len(matrix2)):
print(matrix2[i])
question(7)
matrix = [[],[],[],[],[],[],[],[],[],[]]
for i in range(len(matrix)):
for j in range(10):
matrix[i].append(i * j)
print(f"Matrix v1: ")
for i in range(len(matrix)):
print(matrix[i])
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if matrix[i][i] < matrix[i][j]:
matrix[i][j] = (2 * i) + (7 * j) - 2
elif matrix[i][i] == matrix[i][j]:
matrix[i][j] = 2 * (i ** 2) - 1
elif matrix[i][i] > matrix[i][j]:
matrix[i][j] = ( 4 * (i ** 3)) - (5 * (j ** 2)) + 1
print("\n\n Matrix v2: ")
for i in range(len(matrix)):
print(matrix[i])
question(8)
matrix = [[],[],[]]
for a in range(len(matrix)):
for b in range(3):
matrix[a].append(b)
print(f"Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print("\n\n Soma Acima: ")
for i in range(1 , len(matrix)):
print(matrix[i][i] + matrix[i - 1][i])
question(9)
matrix = [[],[],[]]
for a in range(len(matrix)):
for b in range(3):
matrix[a].append(b)
print(f"Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print("\n\n Soma Abaixo: ")
for i in range(0 , len(matrix) - 1):
print(matrix[i][i] + matrix[i + 1][i])
question(10)
matrix = [[],[],[]]
for a in range(len(matrix)):
for b in range(3):
matrix[a].append(b)
print(f"Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print("\n\n Soma Diagonal: ")
for i in range(0 , len(matrix) - 1):
print(matrix[i][i] + matrix[i + 1][i + 1])
question(11)
matrix = [[],[],[]]
for a in range(len(matrix)):
for b in range(3):
matrix[a].append(b)
print(f"Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print("\n\n Soma Secundaria: ")
for i in range(0 , len(matrix) - 1):
print(matrix[i][len(matrix[i]) - 1 - i] + matrix[i + 1][len(matrix[i]) - 2 - i])
question(12)
matrix = [[],[]]
# matrix2 = [] // use this matrix to save the transpost matrix
foo = []
x = 0
y = 0
for a in range(len(matrix)):
for b in range(4):
matrix[a].append(b)
print(f"Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
if len(matrix) == len(matrix[0]):
x = len(matrix[0])
y = len(matrix)
else:
y = len(matrix)
x = len(matrix[0])
print("\n\n Matrix Transposta: ")
for a in range(x):
foo.clear()
for b in range(y):
foo.append(matrix[b][a])
print(foo)
# matrix2.append(foo)
# print(matrix2)
question(13)
matrix = [[],[],[],[]]
for a in range(len(matrix)):
for b in range(1, 21):
matrix[a].append(b)
print("Matrix: ")
for i in range(len(matrix)):
print(matrix[i])
print("\n\nMatriz Triangular inferior: ")
for i in range(1, len(matrix)):
matrix[i-1][i] = 0
matrix[i][i + 1] = 0
for i in range(0, len(matrix)):
print(matrix[i])
question(14)
numeros = []
cartela = [[],[],[],[],[]]
for i in range(100):
numeros.append(i)
random.shuffle(numeros)
for a in range(len(cartela)):
for b in range(5):
foo = int(numeros.pop())
cartela[a].append(foo)
print("Cartela:")
x = pd.DataFrame(cartela, columns=[" "," "," "," "," "])
print(x.to_string(index=False))
question(15)
escolha = ["a","b","c","d"]
respostas = [[],[],[],[],[]]
resposta = []
acerto = 0
erro = 0
for a in range(len(respostas)):
random.shuffle(escolha)
for b in range(10):
respostas[a].append(escolha[random.randrange(5)])
for a in range(10):
random.shuffle(escolha)
resposta.append(escolha[random.randrange(5)])
for a in range(len(respostas)):
for b in range(10):
if respostas[a][b] != resposta[b]:
erro += 1
else:
acerto += 1
print(f"{a+1}° Aluno: acertos = {acerto} / erros = {erro} ")
acerto = 0
erro = 0
question(16)
escolha = ["a","b","c","d", "e"]
respostas = {}
resposta = []
acerto = 0
ap = ""
t = 0
for i in range(1, 4):
print(f"Informe a matriculo do {i}° ALuno: ",end=" ")
n = int(input())
respostas[n] = []
for a,x in respostas.items():
random.shuffle(escolha)
for b in range(10):
respostas[a].append(escolha[random.randrange(5)])
for a in range(10):
random.shuffle(escolha)
resposta.append(escolha[random.randrange(5)])
print("\n\n")
for a,x in respostas.items():
for b in range(10):
if respostas[a][b] == resposta[b]:
acerto += 1
if acerto < 7:
ap = "reprovado"
else:
ap = "aprovado"
print(f"Aluno de Matricula {a+1}: respostas: {respostas[a]} / Acerto: {acerto*10}% - {ap} / Nota: {acerto}")
t += acerto*10
acerto = 0
print("\n")
print(f"\n\nTaxa de acerto: {t /len(respostas)} %")
question(17)
escolha = ["a","b","c","d", "e"]
respostas = {}
# aluno = []
resposta = []
acerto = 0
acertos = [[],[],[]]
for i in range(10):
print(f"Informe a matriculo do {i+1}° ALuno: ",end=" ")
n = int(input())
respostas[n] = [[],[],[]]
# aluno += [[]]
for a,x in respostas.items():
random.shuffle(escolha)
for b in range(3):
for c in range(10):
respostas[a][b].append(escolha[random.randrange(5)])
for a in range(10):
random.shuffle(escolha)
resposta.append(escolha[random.randrange(5)])
print("\n\n")
for a,x in respostas.items():
for b in range(len(x)):
for c in range(10):
if respostas[a][b][c] == resposta[b]:
acerto += 1
acertos[b].append(acerto)
acerto = 0
#descompacta as notas em uma matriz de 10x3
#for a in range(len(acertos)):
# for b in range(len(resposta)):
# aluno[b].append(acertos[a][b])
#
#for x in range(len(aluno)):
# print(aluno[x], "\n\n")
foo = 0
for i in range(len(acertos)):
for j in range(len(acertos[i])):
if acertos[i][j] < 7:
foo += 1
print(f"\nPior Aluno da {i+1}° Prova: {min(acertos[i])} / Reprovados: {foo}")
foo = 0
question(18)
matriz = [[],[],[]]
soma = []
r = 0
for a in range(len(matriz)):
for b in range(3):
print(f"Insira o numero da posição ({a}, {b}): ", end=" ")
n = float(input())
matriz[a].append(n)
for a in range(len(matriz)):
for b in range(3):
r += matriz[b][a]
soma.append(r)
r = 0
print("\n Matriz: ")
for i in range(len(matriz)):
print(matriz[i])
print(f"\n Array: {soma}")
question(19)
matriz = {}
lista = ["Matrícula", "média das provas", "media dos Trabalhos", "nota final"]
nf = []
soma = 0
for a in range(5):
print(f"Informe a {lista[0]}: ", end=" ")
mat = int(input())
matriz[mat] = []
print("\n")
for a,x in matriz.items():
for b in range(1, len(lista)):
print(f"Informe a {lista[b]} da matricula {a}: ", end=" ")
foo = float(input())
matriz[a].append(foo)
print("\n")
for a,x in matriz.items():
foo += matriz[a][0] + matriz[a][1]
print(f"Matricula {a}: nota final = {foo}\n")
nf.append(foo)
foo = 0
for a,x in matriz.items():
if matriz[a][0] + matriz[a][1] == max(nf):
print(f"Matricula {a} com a maior nota de {max(nf)}")
break
soma += matriz[a][2]
print(f"Media das notas finais: {soma / len(matriz)}")
question(20)
matriz = [[],[],[]]
soma = 0
m1 = 0
m2 = 0
for a in range(len(matriz)):
for b in range(6):
matriz[a].append(random.randrange(10))
for a in range(len(matriz)):
for b in range(len(matriz[a])):
if b % 2 != 0:
soma += matriz[a][b]
print(f"\nSoma das columas impares : {soma}")
for a in range(len(matriz)):
for b in range(len(matriz[a])):
if b == 1:
m1 += matriz[a][b]
elif b == 3:
m2 += matriz[a][b]
print(f"\nMedia Aritimetica da segunda columna: {m1}")
print(f"Media Aritimetica da quarta columna: {m2}")
print("\n Matriz Original: ")
for i in range(len(matriz)):
print(matriz[i])
print("\n Matriz Modificada: ")
for i in range(len(matriz)):
matriz[i][len(matriz[a]) - 1] = matriz[i][0] + matriz[i][1]
for i in range(len(matriz)):
print(matriz[i])
question(21)
a = [[],[]]
b = [[],[]]
c = [[],[]]
const = False
for x in range(2):
for y in range(2):
a[x].append(random.randrange(10))
b[x].append(random.randrange(10))
while True:
print("""\n
1. Soma as duas matrizes
2. Subtrair a primeira matriz da segunda
3. Adicionar uma constante as duas matrizes
4. Imprimir as matrizes
5. Sair
Escolha: """, end=" ")
op = input()
if const:
a[lin][col] = constante
b[lin][col] = constante
c = [[],[]]
if op == "1":
for x in range(2):
for y in range(2):
c[x].append(a[x][y] + b[x][y])
print("\nSoma das Matrizes A e B: ")
PM(c)
elif op == "2":
for x in range(2):
for y in range(2):
c[x].append(a[x][y] - b[x][y])
print("\nSubtração das Matrizes A e B: ")
PM(c)
elif op == "3":
const = True
print("\n Matriz 2x2\n")
print("Informe a linha: ", end=" ")
lin = int(input())
print("Informe a coluna: ", end=" ")
col = int(input())
print("Informe a constante: ", end=" ")
constante = int(input())
elif op == "4":
print("\n Matriz A:")
PM(a)
print("\n\n")
print(" Matriz B:")
PM(b)
else:
print("CYA.")
break
question(22)
a = [[],[],[]]
b = [[],[],[]]
c = [[],[],[]]
for x in range(len(a)):
for y in range(3):
a[x].append(random.randrange(10))
b[x].append(random.randrange(10))
for x in range(len(a)):
for y in range(len(a[x])):
c[x].append(a[x][y] + b[x][y])
print("Matriz A: ")
PM(a)
print("\nMatriz B: ")
PM(b)
print("\nMatriz C: ")
PM(c)
question(23)
a = [[],[],[]]
b = [[],[],[]]
for x in range(len(a)):
for y in range(3):
a[x].append(random.randrange(10))
for x in range(len(a)):
for y in range(len(a[x])):
b[x].append(a[x][y] ** 2)
print("Matriz A: ")
PM(a)
print("\nMatriz B: ")
PM(b)
question(24)
x = ""
c = 0
m = []
md = 0 # Maior na diagonal
mc = 0 # Maior em cima
mb = 0 # Maior em baixo
me = 0 # Maior na esquerda
mr = 0 # Maior na direita (right)
# formatação do arquivo matriz.txt
with open("matriz.txt", "r") as matrix:
for line in matrix:
m += [[]]
for item in line:
x += item
if item == " ":
x = ""
pass
elif item == "\n":
x = ""
pass
elif len(x) == 2:
m[c].append(x)
c += 1
for i in range(m.count([])):
m.remove([])
for a in range(len(m)):
for b in range(len(m[a])):
m[a][b] = int(m[a][b])
# formatação do arquivo matriz.txt
for a in range(len(m) - 3):
for b in range(len(m[a]) - 3):
foo = m[a][b] * m[a+1][b+1] * m[a+2][b+2] * m[a+3][b+3]
if foo > md:
md = foo
for a in range(len(m) - 3):
for b in range(len(m[a])):
foo = m[a+3][b] * m[a+2][b] * m[a+1][b] * m[a][b]
if foo > mc:
mc = foo
for a in range(len(m) - 3):
for b in range(len(m[a])):
foo = m[a][b] * m[a+1][b] * m[a+2][b] * m[a+3][b]
if foo > mb:
mb = foo
for a in range(len(m)):
for b in range(0, len(m[a]), 4):
foo = m[a][len(m[a]) - 1 - b] * m[a][len(m[a]) - 2 - b] * m[a][len(m[a]) - 3 - b] * m[a][len(m[a]) - 4 - b]
if foo > me:
me = foo
for a in range(len(m)):
for b in range(len(m[a]) - 3):
foo = m[a][b] * m[a][b+1] * m[a][b+2] * m[a][b+3]
if foo > mr:
mr = foo
valores = [md, mb, mc, me, mr]
nomes = ["Diagonal", "Baixo", "Cima", "Esquerda", "Direita"]
print(f"O maior produto de 4 numeros e na direção {nomes[valores.index(max(valores))]} com o valor {max(valores)}")
question(25)
velha = [[0,0,0],[0,0,0],[0,0,0]]
c = 0
jogadores = [-1, 1]
x = pd.DataFrame(velha)
print(x)
while True:
while True:
print(f"\n\n Escolha uma posição para jogar, jogador {jogadores[c%2]}")
print("\n Linha: ",end=" ")
lin = int(input())
print("\n Coluna: ",end=" ")
col = int(input())
if lin < 0 or col < 0 or lin > 2 or col > 2:
print("\nPosição Invalida.\n")
else:
if c % 2 == 0:
if velha[lin][col] != 0:
print("\nJogada Invalida.\n")
else:
velha[lin][col] = -1
break
else:
if velha[lin][col] != 0:
print("\nJogada Invalida.\n")
else:
velha[lin][col] = 1
break
x = pd.DataFrame(velha)
print("\n\n")
print(x)
if velha[0][0] != 0 or velha[1][1] != 0 or velha[2][2] != 0:
if c>3 and velha[0][0] == velha[0][1] == velha[0][2] != 0 or velha[1][0] == velha[1][1] == velha[1][2] != 0 or velha[2][0] == velha[2][1] == velha[2][2] != 0:
print(f"\nJogador {jogadores[c%2]} venceu.")
break
elif c>3 and velha[0][0] == velha[1][1] == velha[2][2] != 0 or velha[0][2] == velha[1][1] == velha[2][0] != 0:
print(f"\nJogador {jogadores[c%2]} venceu.")
break
elif c>3 and velha[0][0] == velha[1][0] == velha[2][0] != 0 or velha[0][1] == velha[1][1] == velha[2][1] != 0 or velha[0][2] == velha[1][2] == velha[2][2] != 0:
print(f"\nJogador {jogadores[c%2]} venceu.")
break
c += 1
| [
"noreply@github.com"
] | Carlos2y.noreply@github.com |
681ed955c661097ddf8cef8a894e4e244f8bfa4e | 22c74dd08d9eba048c5870fa0793f79c761de953 | /Assignment1/venv/Scripts/pip3-script.py | 9c6aa13d9d8999d495aea307e371eafb51d885f1 | [] | no_license | Minahil1/IRAssignments | ea0f497fd54829a24a112a2410c6c3d8f3414d9f | 036970e05278e9fef965dbc912934bfd488aa2ef | refs/heads/main | 2023-01-08T07:41:20.045523 | 2020-11-11T12:13:02 | 2020-11-11T12:13:02 | 311,959,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!"C:\Users\Samina Fida\PycharmProjects\untitled\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"haad99@gmail.com"
] | haad99@gmail.com |
d3515f68cd253aa9b013ca6e9126dca40e3b35ec | 8bf653659db4298f759867053c405e4edbe8a7f6 | /workflow/hetero_secureboosting_tree_workflow/hetero_secureboosting_tree_host_workflow.py | 1fbd0564b8dc7b425c32212c767194dd5825ab1a | [
"Apache-2.0"
] | permissive | CharlotteSean/FATE | 4dad5832db1139b41b37b4b05c1fefd90c1dfa35 | e925d4d3a161efd2c0778fc8fb1137bad0b299fb | refs/heads/master | 2020-05-19T11:51:07.837311 | 2019-04-25T06:40:02 | 2019-04-25T06:40:02 | 185,001,647 | 1 | 0 | Apache-2.0 | 2019-05-05T08:21:18 | 2019-05-05T08:21:17 | null | UTF-8 | Python | false | false | 1,978 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Base WorkFlow Define
# =============================================================================
from federatedml.param import BoostingTreeParam
from federatedml.util import ParamExtract
from federatedml.util import consts
from federatedml.tree import HeteroSecureBoostingTreeHost
from workflow.workflow import WorkFlow
import sys
class HeteroSecureBoostingTreeHostWorkFlow(WorkFlow):
def _initialize_model(self, config):
secureboosting_param = BoostingTreeParam()
self.secureboosting_tree_param = ParamExtract.parse_param_from_config(secureboosting_param, config)
self.model = HeteroSecureBoostingTreeHost(self.secureboosting_tree_param)
def _initialize_role_and_mode(self):
self.role = consts.HOST
self.mode = consts.HETERO
def save_predict_result(self, predict_result):
pass
def evaluate(self, eval_data):
pass
def save_eval_result(self, eval_data):
pass
if __name__ == "__main__":
workflow = HeteroSecureBoostingTreeHostWorkFlow()
workflow.run()
| [
"dylanfan@webank.com"
] | dylanfan@webank.com |
1ccbc85a8f20324d99f2b2eb30db1d21ed445f07 | 1f51c4e89a71ea3fcc2cc921613aacc19e078b69 | /16_Cleaning Data in Python [Part - 1]/02_Text and categorical data problems/06_Removing titles and taking names.py | e3da04a5987946e045f113e8723ed5864fb6283b | [
"MIT"
] | permissive | CodeHemP/CAREER-TRACK-Data-Scientist-with-Python | 871bafbd21c4e754beba31505965572dd8457adc | 13ebb10cf9083343056d5b782957241de1d595f9 | refs/heads/main | 2023-03-26T08:43:37.054410 | 2021-03-22T15:08:12 | 2021-03-22T15:08:12 | 471,015,287 | 1 | 0 | MIT | 2022-03-17T13:52:32 | 2022-03-17T13:52:31 | null | UTF-8 | Python | false | false | 1,407 | py | '''
06 - Removing titles and taking names
While collecting survey respondent metadata in the airlines DataFrame,
the full name of respondents was saved in the full_name column. However
upon closer inspection, you found that a lot of the different names are
prefixed by honorifics such as "Dr.", "Mr.", "Ms." and "Miss".
Your ultimate objective is to create two new columns named first_name and
last_name, containing the first and last names of respondents respectively.
Before doing so however, you need to remove honorifics.
The airlines DataFrame is in your environment, alongside pandas as pd.
Instructions:
- Remove "Dr.", "Mr.", "Miss" and "Ms." from full_name by replacing them with
an empty string "" in that order.
- Run the assert statement using .str.contains() that tests whether full_name
still contains any of the honorifics.
'''
# Replace "Dr." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Dr.", "")
# Replace "Mr." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Mr.", "")
# Replace "Miss" with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Miss", "")
# Replace "Ms." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Ms.", "")
# Assert that full_name has no honorifics
assert airlines['full_name'].str.contains('Ms.|Mr.|Miss|Dr.').any() == False
| [
"ifaizymohd@gmail.com"
] | ifaizymohd@gmail.com |
5d96c76972b89756d943837afac8ab18ff82c7bc | e8b21b149f1ce8a9be0b5e52049aa48e30fc0e27 | /webtier/html/cgi-bin/test.py | b7beede47613c490e430b01440355214fc6cbe5e | [] | no_license | EricB2745/BlueFlame | b52fb3fc292da01401a4e8dac863a89e223fd262 | 1d5c0efafade5711d49d97fe063b04e455081767 | refs/heads/master | 2021-08-10T20:26:20.092171 | 2017-11-12T23:42:39 | 2017-11-12T23:42:39 | 110,448,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/env python
# enable debugging
import cgitb
cgitb.enable()
print "Content-Type: text/plain\r\n\r\n"
print
print "Hello World!"
| [
"ebuhring@yahoo.com"
] | ebuhring@yahoo.com |
3cd902ce5209b6c7863f07b1602b49859de1031e | 4d2475135f5fc9cea73572b16f59bfdc7232e407 | /prob224_basic_calculator.py | e775df1d9d3e1655e6652d7439cd899e9757ac9c | [] | no_license | Hu-Wenchao/leetcode | 5fa0ae474aadaba372756d234bc5ec397c8dba50 | 31b2b4dc1e5c3b1c53b333fe30b98ed04b0bdacc | refs/heads/master | 2021-06-24T04:57:45.340001 | 2017-06-17T02:33:09 | 2017-06-17T02:33:09 | 45,328,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | """
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open ( and closing parentheses ),
the plus + or minus sign -, non-negative integers and empty spaces .
You may assume that the given expression is always valid.
Some examples:
"1 + 1" = 2
" 2-1 + 2 " = 3
"(1+(4+5+2)-3)+(6+8)" = 23
"""
class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
total = 0
i, signs = 0, [1, 1]
while i < len(s):
c = s[i]
if c.isdigit():
start = i
while i < len(s) and s[i].isdigit():
i += 1
total += signs.pop() * int(s[start:i])
continue
if c in '+-(':
signs.append(signs[-1] * (1, -1)[c == '-'])
elif c == ')':
signs.pop()
i += 1
return total
| [
"huwcbill@gmail.com"
] | huwcbill@gmail.com |
78cc2c0e6ce7233a114f720346695cd17917852a | f6c051b15e29fbf1501499d5551c0d9237da0852 | /order/migrations/0008_auto_20210108_0304.py | 8fa64fe7e7a2df8ee6ded7dac0bf581c23033732 | [] | no_license | Deepjyoti13/eCommerce | 8e672d2c4b6f708ef4ac1b66521ce72d2fe2cc39 | b0745b8c3a410f7ee8182496c556229748fd3265 | refs/heads/master | 2023-02-25T04:00:47.068320 | 2021-01-24T19:21:13 | 2021-01-24T19:21:13 | 321,396,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # Generated by Django 3.1.4 on 2021-01-07 21:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0007_order_tracking'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('New', 'New'), ('Accepted', 'Accepted'), ('Preparing', 'Preparing'), ('On Shipping', 'On Shipping'), ('Completed', 'Completed'), ('Canceled', 'Canceled')], default='New', max_length=50),
),
]
| [
"deepjyotide13@gmail.com"
] | deepjyotide13@gmail.com |
bd2b16963229ad46bd6f5bc735de3c69475b833f | bcf5f487cf0574af6b6df95d204f5e6278c8d609 | /hypernets_processor/main/scheduler_main.py | 30957590ff07197b6fb35e16361a41e335d96151 | [] | no_license | javierconcha/hypernets_processor | 3628d513a58bd36b438a8023aae91eff2f66050a | 617751fcc5f974a074ac034e06950bc5b04ed2a5 | refs/heads/master | 2023-02-24T09:50:00.567573 | 2021-01-28T14:34:04 | 2021-01-28T14:34:04 | 257,593,081 | 1 | 0 | null | 2021-01-28T14:34:05 | 2020-04-21T12:46:13 | Python | UTF-8 | Python | false | false | 5,100 | py | """
Main function for running scheduler
"""
from hypernets_processor.version import __version__
from hypernets_processor.utils.config import read_config_file, read_jobs_list
from hypernets_processor.utils.logging import configure_logging
from hypernets_processor.utils.config import get_config_value, PROCESSOR_CONFIG_PATH, JOBS_FILE_PATH
from hypernets_processor import Scheduler
from hypernets_processor.main.sequence_processor_main import main as processor_main
'''___Authorship___'''
__author__ = "Sam Hunt"
__created__ = "26/3/2020"
__version__ = __version__
__maintainer__ = "Sam Hunt"
__email__ = "sam.hunt@npl.co.uk"
__status__ = "Development"
def unpack_scheduler_config(scheduler_config):
"""
Returns information from scheduler configuration information
:type scheduler_config: configparser.RawConfigParser
:param scheduler_config: scheduler configuration
:rtype: dict
:return: scheduler configuration information, with entries (defaults occur if entry omitted from config file):
* seconds (int) - Scheduled job repeat interval in seconds, default None (if not None minutes and hours are None)
* minutes (int) - Scheduled job repeat interval in minutes, default None (if not None seconds and hours are None)
* hours (int) - Scheduled job repeat interval in hour, default None (if not None seconds and minutes are None)
* start_time (datetime.datetime) - Scheduled time to start running tasks, default None (means start now)
* parallel (bool) - Switch to run scheduled jobs on different threads, default False
* jobs_list (str) - Path of jobs list file, to run on schedule
"""
scheduler_config_dict = dict()
sections = [sch for sch in scheduler_config.sections()]
sections.remove("Log")
for sch in sections:
scheduler_config_dict[sch] = {}
# Schedule Configuration
scheduler_config_dict[sch]["seconds"] = get_config_value(scheduler_config, sch, "seconds", dtype=int)
scheduler_config_dict[sch]["minutes"] = get_config_value(scheduler_config, sch, "minutes", dtype=int)
scheduler_config_dict[sch]["hours"] = get_config_value(scheduler_config, sch, "hours", dtype=int)
scheduler_config_dict[sch]["start_time"] = get_config_value(scheduler_config, sch, "start_time", dtype=str)
scheduler_config_dict[sch]["parallel"] = get_config_value(scheduler_config, sch, "seconds", dtype=bool)
# Use custom jobs list provided, else use default
scheduler_config_dict[sch]["jobs_list"] = get_config_value(scheduler_config, sch, "jobs_list", dtype=str)
if scheduler_config_dict[sch]["jobs_list"] is None:
scheduler_config_dict[sch]["jobs_list"] = JOBS_FILE_PATH
# todo - sort out start time format
# Checks
# Check only have hours, minutes or seconds
intervals = [scheduler_config_dict[sch]["seconds"], scheduler_config_dict[sch]["minutes"],
scheduler_config_dict[sch]["hours"]]
if intervals.count(None) != 2:
raise ValueError("job repeat interval must be defined as 1 of seconds, minutes or hours for " + sch)
return scheduler_config_dict
def main(scheduler_config_path):
"""
Main function to schedule automated hypernets_processor jobs
:type scheduler_config_path: str
:param scheduler_config_path: path of scheduler config file
"""
scheduler_config = read_config_file(scheduler_config_path)
logger = configure_logging(config=scheduler_config, name=__name__)
scheduler_config = unpack_scheduler_config(scheduler_config)
jobs_list = read_jobs_list(scheduler_config["Processor Schedule"]["jobs_list"])
# schedule jobs
processor_sch = Scheduler(logger=logger)
for job_config_path in jobs_list:
# define scheduler job config
scheduler_job_config = dict()
# read job config file to set job name
job_config = read_config_file(job_config_path)
if ("Job" in job_config.keys()) and ("job_name" in job_config["Job"].keys()):
scheduler_job_config["name"] = job_config["Job"]["job_name"]
else:
scheduler_job_config["name"] = job_config_path
del job_config
scheduler_job_config["seconds"] = scheduler_config["Processor Schedule"]["seconds"]
scheduler_job_config["minutes"] = scheduler_config["Processor Schedule"]["minutes"]
scheduler_job_config["hours"] = scheduler_config["Processor Schedule"]["hours"]
scheduler_job_config["parallel"] = scheduler_config["Processor Schedule"]["parallel"]
# schedule job
processor_sch.schedule(processor_main,
scheduler_job_config=scheduler_job_config,
job_config_path=job_config_path,
processor_config_path=PROCESSOR_CONFIG_PATH,
to_archive=True)
# run scheduled jobs
processor_sch.run(start_time=scheduler_config["Processor Schedule"]["start_time"])
return None
if __name__ == "__main__":
pass
| [
"sam.hunt@npl.co.uk"
] | sam.hunt@npl.co.uk |
f3b14c0152c11176bce82a7b75f78d2b4434c904 | 5119d705c5c916185a0a669bdc2d1eac77649879 | /src/fake_stars.py | 94fc273133da8f1cbdc6972c1e434af786203089 | [] | no_license | Daenjanglee/Balco | 3b5ef848f943ee14b8f745856277a8b11286b9de | dd5e257ecbbadfbfa023d9c58ecc928c664efe4f | refs/heads/master | 2023-03-15T13:22:23.283429 | 2021-03-08T21:20:50 | 2021-03-08T21:20:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,120 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import fitsio
from astropy.io import fits
from astropy.utils.data import download_file
import os
import random
class FakeStars():
"""
A class object that generates fake sources or fake images.
    NOTE - The generated sources are entirely synthetic and do not represent
    a real SuperBIT image in any manner.

    To use this class, you need the necessary Python modules installed.
    It is recommended that you run it on Linux; Windows compatibility has
    not been fully verified.
"""
def __init__(self, image_name, x_len, y_len):
"""
Initializes a new FakeStar object which represents a fake star image.
@type self: FakeStars
@type image_name: String (The name of the image)
@type x_len: Int (Width of Image)
@type y_len: Int (Height of Image)
@rtype: None
"""
self.name = image_name
self.width = x_len
self.height = y_len
self.image_array = np.zeros((self.width, self.height))
self.noise = np.zeros((self.width, self.height))
def generate_white_noise(self, std=29, mean=0):
"""
Generates a 1D array of random gaussian white noise. The default
setting for the average noise is 29. (This is estimated to be SuperBIT's
estimated noise)
The length of the array is width * height of the image.
@type self: FakeStars
@rtype: Numpy Array
"""
num_samples = self.width * self.height
white_noise = np.round(np.random.normal(mean, std, size=num_samples))
white_noise = white_noise.reshape(self.width, self.height)
return white_noise
def generate_background(self, mean=1250, std=0):
"""
Generates a fake background representing the dark sky of the image.
The background has a median value of 1250.
All this does is simply add the value 1250 to each individual pixel in
the image. The median pixel count of a SuperBIT image is approximately
1250; hence, the motivation for the value.
@type self: FakeStars
@rtype: Numpy Array
"""
num_samples = self.width * self.height
background = np.round(np.random.normal(mean, std, size = num_samples))
return background
def create_point_stars(self, num_stars):
"""
Creates a fake single pixel to represent a hot pixel on a SuperBIT
image.
The star will simply be a dot on the image with a high intensity
brightness.
@type self: FakeStars
@type num_stars: Integer
@rtype: Numpy Array
"""
for i in range(num_stars):
point_star = random.randint(6000, 60000)
x_pos = random.randint(0, self.width-1)
y_pos = random.randint(0, self.height-1)
self.image_array[x_pos][y_pos] += point_star
def create_2d_gaussian(self, size, xfwhm=2, yfwhm=2, center=None, btness=400):
""" Make a square gaussian kernel
Size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
@type self: FakeStars
@type size: Int
@type xfwhm: Int
@type yfwhm: Int
@type center: Tuple (Coordinates of center of star)
@type btness: Int (Brightest point of star)
"""
x = np.arange(0, size, 1, float)
y = x[:,np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
return btness * np.exp(-4*np.log(2) *\
((x-x0)**2/ xfwhm**2 + (y-y0)**2 / yfwhm**2))
def create_stars(self, generator, sz=20, xf=10, yf=10, bt= 200):
"""
Creates a certain number of fake star based on different parameters
based on the generator value.
@type self: FakeStars
@type generator: Int (Number of Iterations)
@type sz: Size of Image in dimensions (Used to locate center)
@type xf: Int (FWHM in x direction)
@type yf: Int (FWHM in y direction)
@type bt: Int (Brightest point in the star)
@rtype: None
"""
while generator != 0:
x = random.randint(100, self.width-100)
y = random.randint(100, self.height-100)
source = self.create_2d_gaussian(size=sz, xfwhm=xf, yfwhm=yf, btness=bt)
self.image_array[x:x+sz, y:y+sz] += source
generator -= 1
def create_cosmic_rays(self, amount):
"""
Creates a streak of cosmic ray streak that will be randomly placed
in the image.
@type self: FakeStars
@type amount: Int (Number of sources)
@rtype: None
"""
while amount != 0:
size = 40
chance = random.randint(0,1)
rotation = random.randint(0, 2)
x_pos = random.randint(0, self.width - 1)
y_pos = random.randint(0, self.height - 1)
brightness = 8000
while size != 0:
if rotation == 0:
if x_pos >= 4400 or y_pos >= 6650:
break
self.image_array[x_pos][y_pos] += brightness
x_pos += 1
y_pos -= 1
elif rotation == 1:
if x_pos >= 4400 or y_pos >= 6650:
break
self.image_array[x_pos][y_pos] += brightness
x_pos += 1
y_pos += 1
else:
if x_pos >= 4400 or y_pos >= 6650:
break
self.image_array[x_pos][y_pos] += brightness
x_pos += 1
size -=1
brightness -= 8000/500
amount -= 1
def create_single_source(self, bt=250, xf=10, yf=10, rand_pos=False, std=29, sz=100):
"""
Creates a single fake source with different noise realizations.
Brightness increases the signal of the source value.
@type self: FakeStars
@type bt: Int (Brightest point in the star)
@type xf: Int (FWHM in x direction)
@type yf: Int (FWHM in y direction)
@type rand_pos: Boolean (Randomly places star)
@type std: Int (Standard Deviation)
@type sz: Size of Image (height by width)
@rtype: None
"""
x = y = sz // 2
if rand_pos:
x += random.uniform(-0.5, 0.5)
y += random.uniform(-0.5, 0.5)
star = self.create_2d_gaussian(size=sz, xfwhm=xf, yfwhm=yf, center=(x, y), btness=bt)
num_samples = sz*sz
white_noise = np.round(np.random.normal(29, std, size=num_samples))
white_noise = np.reshape(white_noise, (sz, sz))
self.image_array = star + white_noise
def create_image(self, signal=1, btness= 400):
"""
Creates a fake map with different sources and galaxies.
Signal is an integer value. Which determines how many fake sources
of different forms (Point stars, large sources, galaxies, cosmic rays)
will be created.
@type self: Fake_Star
@type signal: Int
@type btness: Int (Brightest point of a source)
@rtype: None
"""
self.noise = self.generate_white_noise()
self.create_point_stars(signal*100)
if type(btness) == list:
for sat in btness:
if random.randint(0, 25) == 1:
self.create_stars(generator=signal * 5, sz=50*2, xf=5, yf=2, bt=sat)
else:
self.create_stars(generator=signal * 5, sz=50*2, xf=sat/40, yf=sat/40, bt=sat)
else:
self.create_stars(generator=signal*5, sz=30, xf=sat/40, yf=sat/40, bt=btness)
self.create_cosmic_rays(signal*3)
self.image_array += self.noise
def new_noise(self):
"""
Creates a new generated white noise background with the same sources
in the same position.
@type self: Fake_stars
@rytpe: None
"""
self.image_array -= self.noise
self.noise = self.generate_white_noise()
self.image_array += self.noise
def cap_pixel_value(self):
"""
Limits the maximal pixal value to be below a 16 bit number
@type self: Fake_Stars
@rtype: None
"""
x, y = np.where(self.image_array > 65535)
if type(x) == np.ndarray and len(x) > 0:
for i in range(len(x)):
self.image_array[x[i]][y[i]] = 65535
def show_image(self, together=True):
"""
Displays the scaled and original version of the source.
The together parameter puts both sources into the one figure.
@type self: FakeStars
@type together: Boolean
@rtype: None
"""
if together:
fig=plt.figure(figsize=(10, 10))
columns = 2
rows = 1
max = np.mean(self.image_array) + np.std(self.image_array) *3
min = np.mean(self.image_array) - np.std(self.image_array) *3
fig.add_subplot(rows, columns, 1)
plt.title("Normalized")
plt.imshow(self.image_array, vmax=max, vmin=min)
plt.colorbar()
fig.add_subplot(rows, columns, 2)
plt.title("Original")
plt.imshow(self.image_array)
plt.colorbar()
plt.show()
else:
plt.figure("Scaled")
plt.title("Scaled")
max = np.mean(self.image_array) + np.std(self.image_array) *1
min = np.mean(self.image_array) - np.std(self.image_array) *1
plt.imshow(self.image_array, vmax=max, vmin=min)
plt.colorbar()
plt.figure("Original")
plt.title("Original")
plt.imshow(self.image_array)
plt.colorbar()
plt.show()
def create_fits_image(self):
"""
Creates a fake fits image and saves it onto the current working directory.
Use os.chdir() to change to the desired save location
clobber: bool, optional- if True, overwrite any existing file.
@type self: FakeStars
@rtype: None
"""
fitsio.write(self.name + ".fits", self.image_array, \
header={'a': 'FILLER','b': 'FILLER','c': "FILLER"}, clobber=True)
def show_statistics(self):
"""
Runs basic statistics on the fake image and prints the values.
@type self: FakeStars
@rtype: None
"""
# Basic Statistics
self.median = np.median(self.image_array)
self.mean = np.mean(self.image_array)
self.min = np.min(self.image_array)
self.std = np.std(self.image_array)
print("Mean: ", self.mean, "\n")
print("Min: ", self.min, "\n")
print("Median: ", self.median, "\n")
print("Standard Deviation: ", self.std, "\n")
def return_image(self):
"""
Returns the image array data.
@type self: Fakestars
@rtype: Numpy Array
"""
return self.image_array
## (Example)
# os.chdir("/home/james/Desktop/sbit_compress_py/experiment/original")
# fakestar1 = FakeStars("fakestar1", 4400, 6650)
# fakestar1.create_single_source(bt=250, xf=2, yf=2, sz=100)
# # fakestar1.create_image(signal = 150, btness=[1200, 1000, 800, 200])
# fakestar1.cap_pixel_value()
# fakestar1.create_fits_image()
# fakestar1.show_image(together=False)
# ##
# fakestar1.new_noise()
# fakestar1.show_image(together=False) | [
"noreply@github.com"
] | Daenjanglee.noreply@github.com |
2e88f7f6410bfe4c28746b3ebec64e0bc61194be | c4e280f9fca027f894710a896c6e6dcaaa2ed6b8 | /tvb_hpc/base.py | f563e66d22ff9ededc1d4393d3f4bb2ceff1ce66 | [
"Apache-2.0",
"Swift-exception"
] | permissive | sipv/tvb-hpc | aa8bae1643124ac8268b287c2cce08b1dabb7a44 | a7253e31225d8a4e353545ace5ebd423d7570ee2 | refs/heads/master | 2021-06-29T22:02:22.621384 | 2017-09-13T14:33:46 | 2017-09-13T14:33:46 | 103,411,034 | 0 | 0 | null | 2017-09-13T14:36:56 | 2017-09-13T14:36:56 | null | UTF-8 | Python | false | false | 1,386 | py |
"""
Base classes.
"""
from typing import List, Dict
from numpy import dtype
from loopy import (
TargetBase, LoopKernel, make_kernel, add_and_infer_dtypes,
make_reduction_inames_unique, )
# list of Loopy instructions
Isns = List[str]
class BaseKernel:
    """Base class for objects that know how to assemble a Loopy kernel.

    Subclasses override the ``kernel_*`` hooks; :meth:`kernel` glues their
    results together into a named, optionally type-inferred LoopKernel.
    """

    def kernel(self, target: TargetBase, typed: bool=True) -> LoopKernel:
        "Build and return loop kernel."
        knl = make_kernel(
            self.kernel_domains(),
            '\n'.join(self.kernel_isns()),
            self.kernel_data(),
            target=target,
        )
        knl = make_reduction_inames_unique(knl)
        # Kernel is named after the concrete subclass building it.
        knl.name = 'tvb_kernel_%s' % (self.__class__.__name__,)
        if typed:
            knl = add_and_infer_dtypes(knl, self.kernel_dtypes())
        return knl

    def kernel_domains(self) -> str:
        "Return loop domains of kernel (none by default)."
        return ''

    def kernel_data(self) -> List[str]:
        "Return arguments / data to kernel."
        # Normalize wrt. key sets like ['n,out', 'foo,bar'] by joining and
        # re-splitting on commas.
        joined = ','.join(self.kernel_dtypes().keys())
        return joined.split(',')

    def kernel_dtypes(self) -> Dict[str, dtype]:
        "Return map of identifiers to Numpy dtypes (empty by default)."
        return {}

    def kernel_isns(self) -> Isns:
        "Return list of loopy instructions (empty by default)."
        return []
| [
"mmwoodman@gmail.com"
] | mmwoodman@gmail.com |
a7c65d4c71c7ca67ebe051dd6ec21a33d83d4b6c | d737ed7a343ceabad50ce1a753d5d3ae0b146ce0 | /app/controllers.py | d58382efcb82ae669fcb337bde18aa039e26541e | [] | no_license | BrandonOdiwuor/insurance-agency-management | 4f9870594012c50a18d001120f74b22323c64b25 | 6e2056de09bf576e42eff0c5ae9fa432d4670e69 | refs/heads/master | 2023-04-26T18:05:42.882188 | 2021-05-13T09:18:32 | 2021-05-13T09:18:32 | 348,772,349 | 0 | 1 | null | 2021-05-13T09:18:33 | 2021-03-17T16:10:07 | CSS | UTF-8 | Python | false | false | 11,433 | py | from app import db
from app.models.user import User
from app.models.customer import Customer, CustomerStatus
from app.models.invoice import Invoice
from app.models.payment import Payment
from app.models.quotation import MotorPrivateQuotation, MotorCommercialQuotation, \
Quotation, MedicalInpatientQuotation, MedicalOutpatientQuotation
from app.models.sale_item import SaleItem
from app.models.policy import Policy, PrivateMotorPolicy, CommercialMotorPolicy, \
MedicalInpatientPolicy, MedicalOutpatientPolicy
from app.utils.utils import private_motor_premium_claculator, \
commercial_motor_premium_claculator, medical_inpatient_premium_claculator, \
medical_outpatient_premium_claculator
from app.utils.enums import ProductTypes
def create_user(user_payload):
    """Create and persist a User from a payload dict.

    Expects keys: email, password, phone, first_name, last_name.
    Failures are printed and swallowed (the user is not created).
    """
    new_user = User(
        email=user_payload['email'],
        password=user_payload['password'],
        phone=user_payload['phone'],
        f_name=user_payload['first_name'],
        l_name=user_payload['last_name']
    )
    try:
        db.session.add(new_user)
        db.session.commit()
    except Exception as exception:
        # NOTE(review): no db.session.rollback() here; the session may be
        # left in a failed state after a commit error — verify intent.
        print("error : ", exception)
def verify_user(email, password):
    """Return the User matching email/password, or False if invalid."""
    user = User.query.filter_by(email=email).first()
    if user and user.verify_password(password):
        return user
    return False
def create_item_of_sale(item_payload):
    """Create and persist a SaleItem from a payload dict.

    Expects keys: category, name, price.  Failures are printed and swallowed.
    """
    new_item = SaleItem(
        category=item_payload['category'],
        name=item_payload['name'],
        price=item_payload['price']
    )
    try:
        db.session.add(new_item)
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def get_items_of_sale():
    """Return all SaleItem rows."""
    return SaleItem.query.all()
def create_customer(customer_payload):
    """Create and persist a Customer from a payload dict.

    New accounts always start INACTIVE (see update_customer_status for
    activation).  Failures roll back the session and are printed.
    """
    customer = Customer(
        first_name=customer_payload['first_name'],
        last_name=customer_payload['last_name'],
        national_id_number=customer_payload['national_id_number'],
        primary_phone_number=customer_payload['primary_phone_number'],
        primary_email=customer_payload['primary_email'],
        password=customer_payload['password'],
        account_status=CustomerStatus.INACTIVE,
        physical_address=customer_payload['physical_address'],
        city=customer_payload['city'],
        county=customer_payload['county'],
        postal_address=customer_payload['postal_address'],
        postal_code=customer_payload['postal_code'],
        gender=customer_payload['gender'],
        birth_date=customer_payload['birth_date'],
        kra_pin=customer_payload['kra_pin'],
        attachment_id_front=customer_payload['attachment_id_front'],
        # Bug fix: the back-of-ID attachment previously stored the *front*
        # attachment again (copy-paste of 'attachment_id_front').
        attachment_id_back=customer_payload['attachment_id_back']
    )
    try:
        db.session.add(customer)
        db.session.commit()
    except Exception as exception:
        # Roll back so the session remains usable after a failed commit.
        db.session.rollback()
        print("error : ", exception)
def verify_customer(email, password):
    """Return the Customer matching email/password, or False if invalid."""
    customer = Customer.query.filter_by(primary_email=email).first()
    if customer and customer.verify_password(password):
        return customer
    return False
def get_customers():
    """Return all Customer rows."""
    return Customer.query.all()
def get_customer(customer_id):
    """Return the Customer with the given id, or None."""
    return Customer.query.filter_by(id=customer_id).first()
def get_customer_quotations(customer_id):
    """Return all quotations belonging to the given customer."""
    return Quotation.query.filter_by(
        customer_id=customer_id
    ).all()
def get_customer_policies(customer_id):
    """Return all policies belonging to the given customer."""
    return Policy.query.filter_by(
        customer_id=customer_id
    ).all()
def get_customer_info(customer_id):
    """Aggregate a customer's record, invoices, payments, policies and
    quotations into one dict for display."""
    customer = get_customer(customer_id)
    invoices = get_customer_invoices(customer_id)
    payments = get_customer_payments(customer_id)
    policies = get_customer_policies(customer_id)
    quotations = get_customer_quotations(customer_id)
    return dict(
        customer=customer,
        invoices=invoices,
        payments=payments,
        policies=policies,
        quotations=quotations
    )
def validate_customer_email(email):
    """Return the Customer already using this email, or None (email free)."""
    return Customer.query.filter_by(primary_email=email).first()
def validate_customer_telephone(telephone):
    """Return the Customer already using this phone number, or None."""
    return Customer.query.filter_by(primary_phone_number=telephone).first()
def update_customer_status(customer_id):
    """Toggle a customer's account status between ACTIVE and INACTIVE."""
    customer = Customer.query.filter_by(id=customer_id).first()
    if customer.account_status == CustomerStatus.ACTIVE:
        customer.account_status = CustomerStatus.INACTIVE
    else:
        customer.account_status = CustomerStatus.ACTIVE
    try:
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def edit_customer(customer_id, customer_payload):
    """Update arbitrary Customer attributes from a payload dict.

    NOTE(review): payload keys are applied via setattr without validation —
    verify callers only pass whitelisted fields.
    """
    customer = Customer.query.filter_by(id=customer_id).first()
    for key in customer_payload:
        setattr(customer, key, customer_payload[key])
    try:
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def create_invoice(invoice_payload):
    """Create and persist an Invoice; payload keys map to Invoice columns."""
    invoice = Invoice(**invoice_payload)
    try:
        db.session.add(invoice)
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def update_invoice_status(invoice_id, status):
    """Set the status field of the invoice with the given id."""
    invoice = Invoice.query.filter_by(id=invoice_id).first()
    invoice.status = status
    try:
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def get_invoice(invoice_id):
    """Return the Invoice with the given id, or None."""
    return Invoice.query.filter_by(id=invoice_id).first()
def get_customer_invoices(customer_id):
    """Return all invoices belonging to the given customer."""
    return Invoice.query.filter_by(customer_id=customer_id).all()
def get_invoices():
    """Return all Invoice rows."""
    return Invoice.query.all()
def create_payment(payment_payload):
    """Record a Payment against an invoice.

    Expects keys: invoice (invoice id), amount, payment_mode.
    """
    new_payment = Payment(
        invoice_id=payment_payload['invoice'],
        amount=payment_payload['amount'],
        payment_mode=payment_payload['payment_mode']
    )
    try:
        db.session.add(new_payment)
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def get_payment(payment_id):
    """Return the Payment with the given id, or None."""
    return Payment.query.filter_by(id=payment_id).first()
def get_payments():
    """Return all Payment rows."""
    return Payment.query.all()
def get_customer_payments(customer_id):
    """Return all payments made against the given customer's invoices."""
    # NOTE(review): filter_by applies to the most recently joined entity
    # (Customer), so id=customer_id filters on Customer.id — confirm this
    # matches the intended query semantics.
    return Payment.query.join(Invoice).join(
        Customer, Invoice.customer_id == Customer.id
    ).filter_by(id=customer_id).all()
def create_policy(policy_payload):
    """Create and persist a product-specific Policy, computing its premium
    from the sum insured via the matching premium calculator.

    NOTE(review): an unrecognised product_type leaves `policy` as None and
    raises AttributeError on `policy.premium` below — verify all callers
    pass a known ProductTypes value.
    """
    policy = None
    if policy_payload['product_type'] == ProductTypes.MOTOR_PRIVATE:
        policy = PrivateMotorPolicy(**policy_payload)
        policy.premium = private_motor_premium_claculator(
            float(policy.sum_insured)
        )
    elif policy_payload['product_type'] == ProductTypes.MOTOR_COMMERCIAL:
        policy = CommercialMotorPolicy(**policy_payload)
        policy.premium = commercial_motor_premium_claculator(
            float(policy.sum_insured)
        )
    elif policy_payload['product_type'] == ProductTypes.MEDICAL_INPATIENT:
        policy = MedicalInpatientPolicy(**policy_payload)
        policy.premium = medical_inpatient_premium_claculator(
            float(policy.sum_insured)
        )
    elif policy_payload['product_type'] == ProductTypes.MEDICAL_OUTPATIENT:
        policy = MedicalOutpatientPolicy(**policy_payload)
        policy.premium = medical_outpatient_premium_claculator(
            float(policy.sum_insured)
        )
    # Premiums are stored rounded to 2 decimal places (currency).
    policy.premium = round(policy.premium, 2)
    try:
        db.session.add(policy)
        db.session.commit()
    except Exception as exception:
        print("error : ", exception)
def get_policies():
    """Return all Policy rows."""
    return Policy.query.all()
def get_policy(policy_id):
    """Return the Policy with the given id, or None."""
    return Policy.query.filter_by(
        id=policy_id
    ).first()
def update_policy(policy_id, policy_payload):
    """Update a Policy's attributes from a payload dict and recompute its
    premium from the (possibly changed) sum insured.

    Does nothing if the policy id is unknown.
    """
    policy = Policy.query.filter_by(
        id=policy_id
    ).first()
    if policy:
        for key in policy_payload:
            setattr(policy, key, policy_payload[key])
        if policy.product_type == ProductTypes.MOTOR_PRIVATE:
            policy.premium = private_motor_premium_claculator(
                float(policy.sum_insured)
            )
        elif policy.product_type == ProductTypes.MOTOR_COMMERCIAL:
            policy.premium = commercial_motor_premium_claculator(
                float(policy.sum_insured)
            )
        elif policy.product_type == ProductTypes.MEDICAL_INPATIENT:
            policy.premium = medical_inpatient_premium_claculator(
                float(policy.sum_insured)
            )
        elif policy.product_type == ProductTypes.MEDICAL_OUTPATIENT:
            policy.premium = medical_outpatient_premium_claculator(
                float(policy.sum_insured)
            )
        # Premiums are stored rounded to 2 decimal places (currency).
        policy.premium = round(policy.premium, 2)
        try:
            db.session.commit()
        except Exception as exception:
            print("error : ", exception)
def create_quote(quotation_payload):
    """Create and persist a product-specific Quotation, computing its
    premium from the sum insured via the matching premium calculator.

    Raises ValueError for an unrecognised product type.  Database failures
    roll back the session and are printed.
    """
    quotation = None
    product_type = quotation_payload['product_type']
    if product_type == ProductTypes.MOTOR_PRIVATE:
        quotation = MotorPrivateQuotation(**quotation_payload)
        quotation.premium = private_motor_premium_claculator(
            float(quotation.sum_insured)
        )
    elif product_type == ProductTypes.MOTOR_COMMERCIAL:
        quotation = MotorCommercialQuotation(**quotation_payload)
        quotation.premium = commercial_motor_premium_claculator(
            float(quotation.sum_insured)
        )
    elif product_type == ProductTypes.MEDICAL_INPATIENT:
        quotation = MedicalInpatientQuotation(**quotation_payload)
        quotation.premium = medical_inpatient_premium_claculator(
            float(quotation.sum_insured)
        )
    elif product_type == ProductTypes.MEDICAL_OUTPATIENT:
        quotation = MedicalOutpatientQuotation(**quotation_payload)
        quotation.premium = medical_outpatient_premium_claculator(
            float(quotation.sum_insured)
        )
    else:
        # Previously an unknown product type fell through and crashed with
        # AttributeError on `quotation.premium`; fail with a clear message.
        raise ValueError('Unknown product type: %s' % product_type)
    # Premiums are stored rounded to 2 decimal places (currency).
    # (A stray debug print of product_type was removed from here.)
    quotation.premium = round(quotation.premium, 2)
    try:
        db.session.add(quotation)
        db.session.commit()
    except Exception as exception:
        # Roll back so the session remains usable after a failed commit.
        db.session.rollback()
        print("error : ", exception)
def get_quote(quotation_id):
    """Return the Quotation with the given id, or None."""
    return Quotation.query.filter_by(
        id=quotation_id
    ).first()
def get_quotes():
    """Return all Quotation rows."""
    return Quotation.query.all()
def update_quote(quotation_id, quotation_payload):
    """Update a Quotation's attributes from a payload dict and recompute
    its premium from the (possibly changed) sum insured.

    Does nothing if the quotation id is unknown.
    """
    quotation = Quotation.query.filter_by(
        id=quotation_id
    ).first()
    if quotation:
        for key in quotation_payload:
            setattr(quotation, key, quotation_payload[key])
        if quotation.product_type == ProductTypes.MOTOR_PRIVATE:
            quotation.premium = private_motor_premium_claculator(
                float(quotation.sum_insured)
            )
        elif quotation.product_type == ProductTypes.MOTOR_COMMERCIAL:
            quotation.premium = commercial_motor_premium_claculator(
                float(quotation.sum_insured)
            )
        elif quotation.product_type == ProductTypes.MEDICAL_INPATIENT:
            quotation.premium = medical_inpatient_premium_claculator(
                float(quotation.sum_insured)
            )
        elif quotation.product_type == ProductTypes.MEDICAL_OUTPATIENT:
            quotation.premium = medical_outpatient_premium_claculator(
                float(quotation.sum_insured)
            )
        # Premiums are stored rounded to 2 decimal places (currency).
        quotation.premium = round(quotation.premium, 2)
        try:
            db.session.commit()
        except Exception as exception:
            print("error : ", exception)
| [
"brandon.odiwuor@gmail.com"
] | brandon.odiwuor@gmail.com |
97e5fb3a556818b85edddb8b4c2f2dbef205fd66 | 97584e83f94bd5c05ccd6796bd9405dbe8351cba | /analysis/textclassification/SklearnClassifierFactory.py | 5ca45f8d801a1676e714d598ee26ce4a31c5c22c | [
"BSD-2-Clause"
] | permissive | dhermyt/WONS | abc64faab6ec631ca6c4b73e6fd501f1beba87f4 | 795b29fea6fbaf941d28aebe13029f5e8a1714d3 | refs/heads/master | 2021-04-25T13:29:15.294509 | 2018-03-06T20:38:32 | 2018-03-06T20:38:32 | 52,667,198 | 6 | 1 | null | 2017-10-06T11:27:50 | 2016-02-27T13:05:47 | Python | UTF-8 | Python | false | false | 2,285 | py | from nltk import NaiveBayesClassifier, SklearnClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.svm import LinearSVC
from sklearn.svm import NuSVC
from sklearn.svm import SVC
from analysis.textclassification.NltkClassifierWrapper import NltkClassifierWrapper
from analysis.textclassification.SklearnClassifierWrapper import SklearnClassifierWrapper
class SklearnClassifierFactory(object):
    """Static factory of SklearnClassifierWrapper instances, one method per
    scikit-learn classifier family used by the project.

    Each method returns a fresh wrapper around the (possibly partially
    configured) sklearn estimator class.
    """
    @staticmethod
    def SklearnMultinomialNB():
        return SklearnClassifierWrapper(MultinomialNB)
    @staticmethod
    def SklearnBernoulliNB():
        return SklearnClassifierWrapper(BernoulliNB)
    @staticmethod
    def SklearnLogisticRegression():
        return SklearnClassifierWrapper(LogisticRegression)
    @staticmethod
    def SklearnSGDClassifier():
        # loss='log' gives a probabilistic (logistic-regression-like) SGD model.
        return SklearnClassifierWrapper(lambda: SGDClassifier(loss='log'))
    @staticmethod
    def SklearnSVC():
        # probability=True enables probability estimates on SVC/NuSVC.
        return SklearnClassifierWrapper(lambda : SVC(probability=True))
    @staticmethod
    def SklearnLinearSVC():
        return SklearnClassifierWrapper(LinearSVC)
    @staticmethod
    def SklearnNuSVC():
        return SklearnClassifierWrapper(lambda : NuSVC(probability=True))
    @staticmethod
    def SklearnRidgeClassifier():
        return SklearnClassifierWrapper(RidgeClassifier)
    @staticmethod
    def SklearnPerceptron():
        return SklearnClassifierWrapper(Perceptron)
    @staticmethod
    def SklearnPassiveAggressive():
        return SklearnClassifierWrapper(PassiveAggressiveClassifier)
    @staticmethod
    def SklearnKNeighbours():
        return SklearnClassifierWrapper(KNeighborsClassifier)
    @staticmethod
    def SklearnNearestCentroid():
        return SklearnClassifierWrapper(NearestCentroid)
    @staticmethod
    def SklearnRandomForest():
        return SklearnClassifierWrapper(RandomForestClassifier)
| [
"daniel.hermyt@gmail.com"
] | daniel.hermyt@gmail.com |
f1c57d57d4175a117e64f2370c9b2da75032aefd | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /project/chapter29squish/config.py | c0a9c78f74009e17e69d5662bf33698d71ab2bc1 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | UTF-8 | Python | false | false | 713 | py | # Configuration file for Squish
# -----------------------------
# Feel free to modify the configuration variables below to taste.
# If the game is too fast or too slow, try to modify the speed
# variables.
# Change these to use other images in the game:
banana_image = 'banana.png'
weight_image = 'weight.png'
splash_image = 'weight.png'
#splash_image = 'banana.png'
# Change these to affect the general appearance:
screen_size = 800, 600
background_color = 255, 255, 255
margin = 30
full_screen = 0
#full_screen = 1
#font_size = 68
font_size = 48
# These affect the behavior of the game:
drop_speed = 1
banana_speed = 1
speed_increase = 1
weights_per_level = 10
banana_pad_top = 40
banana_pad_side = 20
| [
"evan886@gmail.com"
] | evan886@gmail.com |
9fcb80ebf6ba49d19469342df5512714fae0445e | c7cbbd4b1c1e281cef5f4a0c4e3d4a97cee2241e | /froide/accesstoken/views.py | f7bfb75707d18ad131e30d2a36f10fd8cc21fc26 | [
"MIT"
] | permissive | manonthemat/froide | 078cf78a6eb35226512c0bdfa2ac9043bcc81ad9 | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | refs/heads/master | 2020-08-14T08:19:36.215473 | 2019-10-14T19:43:16 | 2019-10-14T19:43:16 | 215,129,869 | 0 | 0 | MIT | 2019-10-14T19:35:49 | 2019-10-14T19:35:49 | null | UTF-8 | Python | false | false | 669 | py | from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from froide.helper.utils import render_403, get_redirect
from .forms import ResetTokenForm
@require_POST
def reset_token(request):
    """Reset the current user's access token (POST only).

    Anonymous users get a 403 page.  On success the form's message is
    flashed; otherwise an error message is flashed.  Either way the user
    is redirected back.
    """
    if not request.user.is_authenticated:
        return render_403(request)
    form = ResetTokenForm(data=request.POST, user=request.user)
    if form.is_valid():
        message = form.save()
        messages.add_message(request, messages.SUCCESS, message)
    else:
        messages.add_message(request, messages.ERROR, _('Failed to reset token.'))
    return get_redirect(request)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
990b85fea581c3710a827f71f87d0f2bc9447d5f | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/4.2/lib/python2.7/site-packages/robot/libraries/dialogs_py.py | 252b6948049aa0b01ab2f54bdf4e68c57aabb39a | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,703 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from threading import currentThread
import time
try:
# from Tkinter import (Button, Entry, Frame, Label, Listbox, TclError,
Toplevel, Tk, BOTH, END, LEFT, W)
except ImportError:
# from tkinter import (Button, Entry, Frame, Label, Listbox, TclError,
Toplevel, Tk, BOTH, END, LEFT, W)
class _TkDialog(Toplevel):
    # Base class for Robot Framework's Tk dialogs.  Subclasses customise the
    # button labels and the optional "selector" widget (entry / listbox).
    _left_button = 'OK'
    _right_button = 'Cancel'
    def __init__(self, message, value=None, **extra):
        """Build the dialog; call show() to block until the user answers."""
        self._prevent_execution_with_timeouts()
        self._parent = self._get_parent()
        Toplevel.__init__(self, self._parent)
        self._initialize_dialog()
        self._create_body(message, value, **extra)
        self._create_buttons()
        self._result = None
    def _prevent_execution_with_timeouts(self):
        # Tk dialogs only work from the main thread on non-Linux platforms;
        # Robot runs keywords in a worker thread when timeouts are active.
        if 'linux' not in sys.platform \
                and currentThread().getName() != 'MainThread':
            raise RuntimeError('Dialogs library is not supported with '
                               'timeouts on Python on this platform.')
    def _get_parent(self):
        # Hidden root window; the dialog itself is a Toplevel child of it.
        parent = Tk()
        parent.withdraw()
        return parent
    def _initialize_dialog(self):
        self.title('Robot Framework')
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close)
        self.minsize(250, 80)
        self.geometry("+%d+%d" % self._get_center_location())
        self._bring_to_front()
    def grab_set(self, timeout=30):
        """Override Toplevel.grab_set with a retry loop (see comment)."""
        maxtime = time.time() + timeout
        while time.time() < maxtime:
            try:
                # Fails at least on Linux if mouse is hold down.
                return Toplevel.grab_set(self)
            except TclError:
                pass
        raise RuntimeError('Failed to open dialog in %s seconds. One possible '
                           'reason is holding down mouse button.' % timeout)
    def _get_center_location(self):
        # Screen-centered top-left corner for the dialog.
        x = (self.winfo_screenwidth() - self.winfo_reqwidth()) // 2
        y = (self.winfo_screenheight() - self.winfo_reqheight()) // 2
        return x, y
    def _bring_to_front(self):
        # Toggling -topmost raises the window without pinning it on top.
        self.attributes('-topmost', True)
        self.attributes('-topmost', False)
    def _create_body(self, message, value, **extra):
        frame = Frame(self)
        Label(frame, text=message, anchor=W, justify=LEFT, wraplength=800).pack(fill=BOTH)
        selector = self._create_selector(frame, value, **extra)
        if selector:
            selector.pack(fill=BOTH)
            selector.focus_set()
        frame.pack(padx=5, pady=5, expand=1, fill=BOTH)
    def _create_selector(self, frame, value):
        # Hook: subclasses return an input widget (entry/listbox) or None.
        return None
    def _create_buttons(self):
        frame = Frame(self)
        self._create_button(frame, self._left_button,
                            self._left_button_clicked)
        self._create_button(frame, self._right_button,
                            self._right_button_clicked)
        frame.pack()
    def _create_button(self, parent, label, callback):
        # A None/empty label means the subclass suppresses that button.
        if label:
            button = Button(parent, text=label, width=10, command=callback)
            button.pack(side=LEFT, padx=5, pady=5)
    def _left_button_clicked(self, event=None):
        if self._validate_value():
            self._result = self._get_value()
            self._close()
    def _validate_value(self):
        # Hook: subclasses may veto closing (e.g. no listbox selection).
        return True
    def _get_value(self):
        # Hook: subclasses return the value the dialog produces.
        return None
    def _close(self, event=None):
        # self.destroy() is not enough on Linux
        self._parent.destroy()
    def _right_button_clicked(self, event=None):
        self._result = self._get_right_button_value()
        self._close()
    def _get_right_button_value(self):
        # Hook: value returned when the right (cancel-ish) button is used.
        return None
    def show(self):
        """Block until the dialog is closed and return its result."""
        self.wait_window(self)
        return self._result
class MessageDialog(_TkDialog):
    # Plain message dialog: only an OK button, no cancel.
    _right_button = None
class InputDialog(_TkDialog):
    """Dialog with a single-line text entry; returns the entered string."""
    def __init__(self, message, default='', hidden=False):
        _TkDialog.__init__(self, message, default, hidden=hidden)
    def _create_selector(self, parent, default, hidden):
        # hidden=True masks input with '*' (password-style entry).
        self._entry = Entry(parent, show='*' if hidden else '')
        self._entry.insert(0, default)
        self._entry.select_range(0, END)
        return self._entry
    def _get_value(self):
        return self._entry.get()
class SelectionDialog(_TkDialog):
    """Dialog with a listbox; returns the selected value.

    OK is rejected (dialog stays open) until something is selected.
    """
    def __init__(self, message, values):
        _TkDialog.__init__(self, message, values)
    def _create_selector(self, parent, values):
        self._listbox = Listbox(parent)
        for item in values:
            self._listbox.insert(END, item)
        # width=0 makes the listbox size itself to the longest item.
        self._listbox.config(width=0)
        return self._listbox
    def _validate_value(self):
        return bool(self._listbox.curselection())
    def _get_value(self):
        return self._listbox.get(self._listbox.curselection())
class PassFailDialog(_TkDialog):
    # Binary choice dialog: PASS -> True, FAIL -> False.
    _left_button = 'PASS'
    _right_button = 'FAIL'
    def _get_value(self):
        return True
    def _get_right_button_value(self):
        return False
| [
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] | akul@SAC0MKUVCQ.asiapacific.hpqcorp.net |
71981e273fe57c7361cbf2082c031fc628341729 | 910b09b106abd18a9bdbcb02f8564eddcd21d63a | /medical/loc.py | dd03ec7bcb0cd1c31144acb8f6a35940118ceb2d | [] | no_license | rajshah9914/HealthCare | 7dbdc3554a5edfe3661361130f6ad1260f5a647a | b1b21dc7a27edeb8033917d5303fa4eaa11517af | refs/heads/master | 2020-04-18T17:28:15.609851 | 2019-01-26T06:00:43 | 2019-01-26T06:00:43 | 167,655,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | import geocoder
# Quick geocoding demo: look up coordinates for a place name via Google.
g = geocoder.google('Mountain View, CA')
# g.latlng is the [latitude, longitude] result of the lookup.
print(g.latlng)
| [
"rajshah9914@gmail.com"
] | rajshah9914@gmail.com |
35dfe86db227e3ebcc4020419a9b458da4804d07 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/quota_setting_response.py | 911d109050831d4426d6f40b7851420fcdcd0f2a | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,181 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class QuotaSettingResponse(object):
    """
    Swagger-generated response model wrapping a list of quota settings.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[QuotaSetting]'
    }
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.QuotaSetting]
    ):
        """
        Keyword args:
            items (list[QuotaSetting]): A list of quota settings objects.
        """
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Only attributes declared in attribute_map may be set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `QuotaSettingResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unresolved lazy Property placeholders read as None.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(QuotaSettingResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QuotaSettingResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
fc8099f577227ef62f0cb10d9e33cf5a046236c8 | d42b43bda2ee4d4640a53624433e1f6894a60ce5 | /workspace/src/pytorch/nodes/blurnet.py | 43f9fa0c6962f001ec076486e180d295a7c65b71 | [] | no_license | brikeats/ROS | 9c18c09a8b70e48f2cf7275d4b3d194ae90e5208 | 7fb454dc04a1b6d17a6a2d4cf03b617112778403 | refs/heads/master | 2023-06-25T13:04:29.769576 | 2018-07-06T04:53:45 | 2018-07-06T04:53:45 | 58,983,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import scipy.stats as st
def gaussian(kernlen=21, nsig=3):
    """Return a normalized (kernlen x kernlen) blur kernel.

    The 1-D weights are the probability mass of a standard normal over
    ``kernlen`` equal-width bins spanning roughly [-nsig, nsig]; the 2-D
    kernel is the elementwise square root of their outer product,
    renormalized to sum to 1.
    """
    step = (2 * nsig + 1.) / kernlen
    bin_edges = np.linspace(-nsig - step / 2., nsig + step / 2., kernlen + 1)
    weights_1d = np.diff(st.norm.cdf(bin_edges))
    unnormalized = np.sqrt(np.outer(weights_1d, weights_1d))
    return unnormalized / unnormalized.sum()
class BlurNet(nn.Module):
    """Per-channel Gaussian blur implemented as a fixed Conv2d.

    Each input channel is convolved with the same ``kern_sz`` x ``kern_sz``
    Gaussian kernel (no cross-channel mixing); "same" padding keeps the
    spatial size unchanged for odd kernel sizes.
    """

    def __init__(self, input_chans=3, kern_sz=11):
        super(BlurNet, self).__init__()
        # // keeps padding an int: Conv2d rejects the float that
        # kern_sz / 2 produces under Python 3.
        self.conv = nn.Conv2d(input_chans, input_chans, kernel_size=kern_sz,
                              padding=kern_sz // 2, bias=False)
        self.conv.weight.data.zero_()
        blur_kernel = torch.from_numpy(gaussian(kern_sz))
        # Place the kernel on the diagonal (out_chan == in_chan) only, so each
        # channel is blurred independently. Was hard-coded range(3), which
        # broke for input_chans != 3; indexed assignment casts the float64
        # kernel to the weight dtype.
        for chan in range(input_chans):
            self.conv.weight.data[chan, chan, :, :] = blur_kernel

    def forward(self, x):
        """Apply the fixed Gaussian blur to a (N, C, H, W) batch."""
        return self.conv(x)
| [
"brikeats@gmail.com"
] | brikeats@gmail.com |
623f6422a5ae6024cf8396a91d4e17034ad223ef | 0a848b5b2ea31a7e2e997f27b13f363530df78d1 | /backend/register/urls.py | 0f41e3c719325e428be212796d4a784a5b0b1870 | [] | no_license | PhanVanThanh-hub/React-Django-Ecommerce | a88e4c0747a9a3d6179d45c60641595221fe701c | 00946075486495676595fe0a17dcdd0799756d4b | refs/heads/main | 2023-08-13T19:24:40.492000 | 2021-09-30T13:05:03 | 2021-09-30T13:05:03 | 412,066,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.urls import path, include
from . import views  # NOTE(review): imported but not referenced in this urlconf — confirm before removing

# Delegate every route under this prefix to the app's API urlconf.
urlpatterns = [
    path('', include('register.api.urls')),
]
"thanhphan1230@gmail.com"
] | thanhphan1230@gmail.com |
3343c7b2165ded3fa957485d7dec326cfdadeaf5 | ee3e0ff891295dcd44c2d1404d8ad4b837ac3526 | /old/profile_all_attributes.py | 16e945018b171fa7827e2fb43af88458f8a5113c | [] | no_license | ScottHull/examine_fdps_sph_outputs | 97868dcec98a1f5d39bd938eeda5c628b93ccbbb | 43345c770bb54973baca9e23e7379e0e5dcb33a8 | refs/heads/master | 2023-05-29T11:59:12.493737 | 2021-06-17T13:17:51 | 2021-06-17T13:17:51 | 262,062,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,551 | py | from math import sqrt
import pandas as pd
import matplotlib.pyplot as plt

target_path = "/Users/scotthull/Desktop/tar.dat"
impactor_path = "/Users/scotthull/Desktop/imp.dat"

# Column indices in the headerless SPH output files (read with header=None).
_X, _Y, _Z = 3, 4, 5
_TAG = 1  # even tag -> silicate particle, odd tag -> iron particle
_DENSITY = 9
_INTERNAL_ENERGY = 10
_PRESSURE = 11
_ENTROPY = 13
_TEMPERATURE = 14


def _load(path):
    """Read one particle file; return (radial distances in km, tags, frame)."""
    df = pd.read_csv(path, header=None, skiprows=2, delimiter="\t")
    distances = [
        sqrt(x ** 2 + y ** 2 + z ** 2) / 1000.0
        for x, y, z in zip(df[_X], df[_Y], df[_Z])
    ]
    return distances, df[_TAG], df


def _split_scatter(ax, distances, tags, values):
    """Scatter silicate (even tag, red '+') and iron (odd tag, blue '+')."""
    for parity, color, label in ((0, "red", "Silicate"), (1, "blue", "Iron")):
        ax.scatter(
            [d for i, d in enumerate(distances) if tags[i] % 2 == parity],
            [v for i, v in enumerate(values) if tags[i] % 2 == parity],
            color=color,
            marker="+",
            label=label,
        )


def _profile_figure(name, distances, tags, df):
    """Build the 2x2 density/pressure/internal-energy/entropy profile figure.

    Replaces the four near-identical copy-pasted subplot sections that the
    original repeated for each body.
    """
    fig = plt.figure()
    fig.set_size_inches(18.5, 10.5)
    quantities = (
        ("Density", _DENSITY),
        ("Pressure", _PRESSURE),
        ("Internal Energy", _INTERNAL_ENERGY),
        ("Entropy", _ENTROPY),
    )
    axes = []
    for position, (label, column) in zip((221, 222, 223, 224), quantities):
        ax = fig.add_subplot(position)
        _split_scatter(ax, distances, tags, df[column])
        ax.set_title("{} {}".format(name, label))
        ax.set_ylabel(label)
        ax.grid()
        axes.append(ax)
    # Only the bottom row gets an x label; only the last panel gets the legend.
    axes[2].set_xlabel("Distance from Center (km)")
    axes[3].set_xlabel("Distance from Center (km)")
    axes[3].legend()
    plt.tight_layout()


target_distances, target_tag, target_df = _load(target_path)
impactor_distances, impactor_tag, impactor_df = _load(impactor_path)

_profile_figure("Target", target_distances, target_tag, target_df)
_profile_figure("Impactor", impactor_distances, impactor_tag, impactor_df)

# Side-by-side temperature profiles for target and impactor.
temp_fig = plt.figure()
temp_ax_target = temp_fig.add_subplot(121)
temp_ax_impactor = temp_fig.add_subplot(122)
_split_scatter(temp_ax_target, target_distances, target_tag,
               target_df[_TEMPERATURE])
_split_scatter(temp_ax_impactor, impactor_distances, impactor_tag,
               impactor_df[_TEMPERATURE])
for ax, title in ((temp_ax_target, "Target"), (temp_ax_impactor, "Impactor")):
    ax.set_title(title)
    ax.set_xlabel("Distance from Center (km)")
    ax.set_ylabel("Temperature")
    ax.grid()
temp_ax_impactor.legend()

plt.show()
| [
"scottdhull21@gmail.com"
] | scottdhull21@gmail.com |
56650a7bd8cc9cfe12af8659a72c93dc5ef17f4d | 76362840f04ed27f0391b2806933a697249e4b36 | /basics/excercise/01douban_all/common/test02.py | b87ab9b18e596f03daca5ac57eb89869d9a2e021 | [] | no_license | loriyuhv/PythonSpider | db9aa67f47a01d2f00cf878319ee2273ba79a92f | 9882de213d0475ab473732ce3c9c34935f185317 | refs/heads/master | 2023-08-26T06:51:27.635345 | 2021-11-10T07:13:07 | 2021-11-10T07:13:07 | 415,745,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | import requests
import random

# Douban book listing for the "fiction" tag (URL-encoded Chinese tag name).
url = "https://book.douban.com/tag/%E5%B0%8F%E8%AF%B4"

# Pagination query string: result offset and list type.
query = {
    "start": 20,
    "type": "T"
}

# Pool of desktop-browser User-Agent strings; one is chosen at random below
# so repeated requests look less uniform.
user_agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

headers = {
    "User-Agent": random.choice(user_agent)
}

# Explicitly bypass any system/environment proxy settings.
proxies = {"http": None, "https": None}

# Commented-out fetch left from manual testing of the request above.
# response = requests.get(url, headers=headers, params=query, proxies=proxies).text
# print(response)
# i = 0
# while True:
#     if i > 10:
#         break
#     print(i)
#     i = i + 1
| [
"2385923602@qq.com"
] | 2385923602@qq.com |
7beeaf9f5002b8b2818072f358d67a855bf0bbee | 77eadc4ba7a64523e49c47f5f6a6fdfde314dff5 | /com/infervision/code_0920/change_dicom_time_06.py | 214f1ae2bfd8bdda344797a30cb3b66d5098eefc | [] | no_license | hashmapybx/deal-dicom-02 | a6ea45f0222ee6f2d9d4109ec6d79d5a65e557d2 | 01070360af9a139c619c074bab05e4978891ca9c | refs/heads/master | 2021-07-02T00:45:40.006312 | 2021-06-26T14:54:52 | 2021-06-26T14:54:52 | 205,334,706 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | # -*- coding: utf-8 -*-
"""
Create Time: 2019/12/9 下午4:29
Author: ybx
"""
import os
import time
import random
root_path = "/media/tx-eva-data/NAS/基础数据库/肺部计算机辅助诊断软件/CN024002"

for ssfile in os.listdir(root_path):
    if ssfile.endswith('.log'):
        continue
    # Folder names begin with the study date formatted as "YYYY_MM_DD".
    str_date = ssfile[:10]
    str_year = str_date.split('_')[0]
    str_month = str_date.split('_')[1]
    str_day = str_date.split('_')[2]
    # Random working-hours timestamp between 09:00 and 18:00 on that date.
    a1 = (int(str_year), int(str_month), int(str_day), 9, 0, 0, 0, 0, 0)
    a2 = (int(str_year), int(str_month), int(str_day), 18, 0, 0, 0, 0, 0)
    # time.mktime() returns a float; random.randint() requires ints in
    # Python 3, so truncate the window bounds before sampling.
    start = int(time.mktime(a1))
    end = int(time.mktime(a2))
    t = random.randint(start, end)
    date_touple = time.localtime(t)
    change_date_h = time.strftime("%Y%m%d%H", date_touple)
    print(ssfile)
    h_folder_path = os.path.join(root_path, ssfile)
    for afile in os.listdir(h_folder_path):
        a_folder = os.path.join(h_folder_path, afile)
        for dcm_file in os.listdir(a_folder):
            dcm_file_path = os.path.join(a_folder, dcm_file)
            # touch -mt CCYYMMDDhhmm.SS: same hour for the study, but a fresh
            # random minute and second for every file.
            os.system('touch -mt %s %s' % (
                change_date_h + str(random.randint(0, 59)).zfill(2) + '.' + str(random.randint(0, 59)).zfill(2), dcm_file_path))
        # Stamp the series folder itself with another random minute/second.
        os.system('touch -mt %s %s' % (
            change_date_h + str(random.randint(0, 59)).zfill(2) + '.' + str(random.randint(0, 59)).zfill(2), a_folder))
| [
"yboxing@infervision.com"
] | yboxing@infervision.com |
cd99f75be9785932ded5f27ee27c535ed1bf67e4 | ffbc6975238e862b689de3340ea1820d96594fe4 | /hand2text/rules.py | df441aa70a6c85313e080784ccb439ac15206a55 | [] | no_license | reusee/ILTE | bdc2cf0d9b0f31fa04b397c5c6b25168b74ea00e | 69de03cc1d8d1aab798ed8e83fa1c16581d6294b | refs/heads/master | 2021-01-22T23:53:09.467562 | 2013-02-18T07:16:04 | 2013-02-18T07:16:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,210 | py | # coding: utf8
# Number of pen strokes -> kana characters drawn with that many strokes.
STROKE_COUNTS = {
    1: set(['く', 'し', 'そ', 'つ', 'て', 'の', 'ひ', 'へ', 'る', 'ろ', 'ん',
            'ノ', 'フ', 'ヘ', 'レ', ]),
    2: set(['い', 'う', 'え', 'こ', 'す', 'ち', 'と', 'ぬ', 'ね', 'ぴ', 'ぺ', 'み', 'め', 'ゆ', 'よ', 'ら', 'り', 'れ', 'わ',
            'ア', 'イ', 'カ', 'ク', 'コ', 'ス', 'セ', 'ソ', 'ト', 'ナ', 'ニ', 'ヌ', 'ハ', 'ヒ', 'プ', 'ペ', 'マ', 'ム', 'メ', 'ヤ', 'ユ', 'ラ', 'リ', 'ル', 'ワ', 'ン', ]),
    3: set(['あ', 'お', 'か', 'け', 'ぐ', 'さ', 'せ', 'じ', 'ぞ', 'づ', 'で', 'に', 'は', 'び', 'べ', 'ま', 'む', 'も', 'や', 'を',
            'ウ', 'エ', 'オ', 'キ', 'ケ', 'サ', 'シ', 'タ', 'チ', 'ツ', 'テ', 'ブ', 'ベ', 'パ', 'ピ', 'ミ', 'モ', 'ヨ', 'ロ', 'ヲ', ]),
    4: set(['き', 'ご', 'ず', 'た', 'ぢ', 'ど', 'な', 'ふ', 'ほ', 'ぱ',
            'ガ', 'グ', 'ゴ', 'ズ', 'ゼ', 'ゾ', 'ド', 'ネ', 'ホ', 'バ', 'ビ', ]),
    5: set(['が', 'げ', 'ざ', 'ぜ', 'ば', 'ぷ', 'ぽ',
            'ギ', 'ゲ', 'ザ', 'ジ', 'ダ', 'ヂ', 'ヅ', 'デ', 'ポ', ]),
    6: set(['ぎ', 'だ', 'ぶ', 'ぼ',
            'ボ', ]),
}

# Per-character disambiguation predicates over per-stroke direction tracks.
# Each lambda receives (htracks, vtracks); presumably htracks encodes each
# stroke's horizontal movement ('R'/'L'/'M') and vtracks its vertical
# movement ('U'/'D'/'M') — TODO confirm against the recognizer that calls
# these. All predicates in a list must hold for the character to match.
TRACK_RULES = {
    # A
    # K
    # S
    'そ': [
        lambda htracks, vtracks: htracks[0][:4] == 'RLRL',
    ],
    # T
    'て': [
        lambda htracks, vtracks: htracks[0][:2] == 'RL',
    ],
    # N
    # H
    'へ': [
        lambda htracks, vtracks: vtracks[0] == 'UD',
    ],
    # M
    'め': [
        lambda htracks, vtracks: htracks[-1][-1] == 'L',
    ],
    'も': [
        lambda htracks, vtracks: vtracks[2] == 'DMU',
    ],
    # Y
    'や': [
        lambda htracks, vtracks: htracks[0] == 'RML',
    ],
    # R
    'り': [
        lambda htracks, vtracks: vtracks[1][0] == 'U',
    ],
    # W N
    'わ': [
        lambda htracks, vtracks: htracks[1][-1] == 'L',
    ],
    # G
    'ぐ': [
        lambda htracks, vtracks: htracks[0] == 'LR',
    ],
    # Z
    # D
    'で': [
        lambda htracks, vtracks: htracks[0][:2] == 'RL',
    ],
    # B
    'べ': [
        lambda htracks, vtracks: vtracks[0] == 'UD',
    ],
    # P
    'キ': [
        lambda htracks, vtracks: htracks[2] != 'L',
    ],
    'ケ': [
        lambda htracks, vtracks: htracks[0] == 'L',
        lambda htracks, vtracks: htracks[1] == 'R',
        lambda htracks, vtracks: htracks[2] == 'L',
    ],
    'ソ': [
        lambda htracks, vtracks: htracks[0] == 'R',
    ],
    'ト': [
        lambda htracks, vtracks: htracks[0] == 'M',
    ],
    'ヅ': [
        lambda htracks, vtracks: htracks[0] == 'R',
        lambda htracks, vtracks: htracks[1] == 'R',
        lambda htracks, vtracks: htracks[2] == 'L',
    ],
    'テ': [
        lambda htracks, vtracks: htracks[0] == 'R',
        lambda htracks, vtracks: htracks[1] == 'R',
    ],
    'ヌ': [
        lambda htracks, vtracks: htracks[0] == 'R',
        lambda htracks, vtracks: htracks[1] == 'L',
    ],
    'リ': [
        lambda htracks, vtracks: vtracks[0] == 'M',
    ],
    'ラ': [
        lambda htracks, vtracks: vtracks[0] == 'M',
    ],
    'レ': [
        lambda htracks, vtracks: vtracks[0] == 'DU',
    ],
    'モ': [
        lambda htracks, vtracks: vtracks[-1][-1] == 'M',
    ],
}

# Predicates on the index of a stroke intersection; presumably *i* is the
# stroke index where an intersection occurs — TODO confirm with the caller.
INTERSECT_RULES = {
    'ぶ': lambda i: i == 0,
    'ぷ': lambda i: i == 0,
}

# Predicates over inter-stroke pen movements (hmoves/vmoves), mirroring the
# TRACK_RULES direction encoding.
MOVE_RULES = {
    'ヌ': [
        lambda hmoves, vmoves: hmoves[0] == 'R',
    ],
}
| [
"reusee@ymail.com"
] | reusee@ymail.com |
8b8b983e45e52f1de882aa965ff8eb69003c0e72 | 8e03e02464385d14ac611c5a2135c7b2ba963cce | /vscode/.vscode/extensions/magicstack.MagicPython-0.6.1/test/statements/import3.py | 79f06128e0d584c1a586989990631f8d94440e01 | [
"MIT"
] | permissive | tony-rasskazov/configs | 4d5f9ceb1cf22a7d4986fbcc7bf2c4175daf9259 | 01dd03f2df11ff0bbdb2d91d455ed48e4f630d14 | refs/heads/master | 2021-01-18T22:38:11.612622 | 2017-04-27T19:45:37 | 2017-04-27T19:45:37 | 1,803,203 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | from....import a
from...import b
from..import c
from.import d
from : keyword.control.import.python, source.python
.... : source.python
import : keyword.control.import.python, source.python
: source.python
a : source.python
from : keyword.control.import.python, source.python
... : source.python
import : keyword.control.import.python, source.python
: source.python
b : source.python
from : keyword.control.import.python, source.python
.. : source.python
import : keyword.control.import.python, source.python
: source.python
c : source.python
from : keyword.control.import.python, source.python
. : source.python
import : keyword.control.import.python, source.python
: source.python
d : source.python
| [
"tony.rasskazov@gmail.com"
] | tony.rasskazov@gmail.com |
85bd94c34bf80d76824c0acb8c7b0e3827db614a | ab01443d0c1ec6796ec20b5d576a2d2e8b7ddd10 | /smsd4/test/testHandler.py | a4d3e424337f9422a6f3e1a3fc77c6b569de2722 | [] | no_license | xbfool/smscd | ed5e056be20b7f6090204602a638ac2d878a11b4 | 6cac858cfa0634ea3b668330fdb3601f43414a58 | refs/heads/master | 2020-06-14T03:08:14.545823 | 2018-08-23T15:04:21 | 2018-08-23T15:04:21 | 75,513,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | __author__ = 'xbfool'
# The original compared the string literal '__name__' to '__main__' — always
# False — and had an empty suite, which is a SyntaxError. Fix the guard and
# give it a no-op body until real entry-point logic is added.
if __name__ == '__main__':
    pass
| [
"xbfool@gmail.com"
] | xbfool@gmail.com |
0834a5b08c86116c058a156d90a1ff41c16fbe22 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/servicebus/azure-servicebus/azure/servicebus/aio/_async_auto_lock_renewer.py | 0bc36d7b473330ffd0a344b4b93c2976c7fb5cbd | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 10,532 | py | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import asyncio
import logging
import datetime
from typing import Optional, Iterable, Any, Union, Callable, Awaitable, List
from .._common.message import ServiceBusReceivedMessage
from ._servicebus_session_async import ServiceBusSession
from ._servicebus_receiver_async import ServiceBusReceiver
from .._common.utils import (
get_renewable_start_time,
utc_now,
get_renewable_lock_duration,
)
from .._common.auto_lock_renewer import SHORT_RENEW_OFFSET, SHORT_RENEW_SCALING_FACTOR
from ._async_utils import get_dict_with_loop_if_needed
from ..exceptions import AutoLockRenewTimeout, AutoLockRenewFailed, ServiceBusError
# A lock-renewable entity: either a receiver session or a received message.
Renewable = Union[ServiceBusSession, ServiceBusReceivedMessage]
# Async callback invoked as (renewable, error) when lock renewal fails.
AsyncLockRenewFailureCallback = Callable[
    [Renewable, Optional[Exception]], Awaitable[None]
]

_log = logging.getLogger(__name__)
class AutoLockRenewer:
    """Auto lock renew.

    An asynchronous AutoLockRenewer handler for renewing the lock
    tokens of messages and/or sessions in the background.

    :param max_lock_renewal_duration: A time in seconds that locks registered to this renewer
     should be maintained for. Default value is 300 (5 minutes).
    :type max_lock_renewal_duration: float
    :param on_lock_renew_failure: A callback may be specified to be called when the lock is lost on the renewable
     that is being registered. Default value is None (no callback).
    :type on_lock_renew_failure: Optional[LockRenewFailureCallback]

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_message_async]
            :end-before: [END auto_lock_renew_message_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a message lock

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_session_async]
            :end-before: [END auto_lock_renew_session_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a session lock
    """

    def __init__(
        self,
        max_lock_renewal_duration: float = 300,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        self._internal_kwargs = get_dict_with_loop_if_needed(loop)
        # Set once in close(); a shut-down renewer cannot be reused.
        self._shutdown = asyncio.Event()
        # One background renewal task per registered renewable.
        self._futures: List[asyncio.Future] = []
        # Seconds slept between lock-expiry checks in the renewal loop.
        self._sleep_time = 1
        # Renew when a lock is within this many seconds of expiring.
        self._renew_period = 10
        self._on_lock_renew_failure = on_lock_renew_failure
        self._max_lock_renewal_duration = max_lock_renewal_duration

    async def __aenter__(self) -> "AutoLockRenewer":
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        return self

    async def __aexit__(self, *args: Iterable[Any]) -> None:
        await self.close()

    def _renewable(
        self, renewable: Union[ServiceBusReceivedMessage, ServiceBusSession]
    ) -> bool:
        """Return True while *renewable* still needs background renewal.

        False once the renewer is shut down, the message is settled, the lock
        has already expired, or the owning receiver has stopped running.
        """
        # pylint: disable=protected-access
        if self._shutdown.is_set():
            return False
        if hasattr(renewable, "_settled") and renewable._settled:  # type: ignore
            return False
        if renewable._lock_expired:
            return False
        try:
            if not renewable._receiver._running:  # type: ignore
                return False
        except AttributeError:  # If for whatever reason the renewable isn't hooked up to a receiver
            raise ServiceBusError(
                "Cannot renew an entity without an associated receiver.  "
                "ServiceBusReceivedMessage and active ServiceBusReceiver.Session objects are expected."
            )
        return True

    async def _auto_lock_renew(
        self,
        receiver: ServiceBusReceiver,
        renewable: Renewable,
        starttime: datetime.datetime,
        max_lock_renewal_duration: float,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        renew_period_override: Optional[float] = None,
    ) -> None:
        """Background task: keep *renewable*'s lock alive until it settles,
        expires, or *max_lock_renewal_duration* elapses.
        """
        # pylint: disable=protected-access
        _log.debug(
            "Running async lock auto-renew for %r seconds", max_lock_renewal_duration
        )
        error: Optional[Exception] = None
        clean_shutdown = False  # Only trigger the on_lock_renew_failure if halting was not expected (shutdown, etc)
        renew_period = renew_period_override or self._renew_period
        try:
            while self._renewable(renewable):
                # Give up (and let the lock lapse) once the registered
                # renewal window has been exhausted.
                if (utc_now() - starttime) >= datetime.timedelta(
                    seconds=max_lock_renewal_duration
                ):
                    _log.debug(
                        "Reached max auto lock renew duration - letting lock expire."
                    )
                    raise AutoLockRenewTimeout(
                        "Auto-renew period ({} seconds) elapsed.".format(
                            max_lock_renewal_duration
                        )
                    )
                if (renewable.locked_until_utc - utc_now()) <= datetime.timedelta(
                    seconds=renew_period
                ):
                    _log.debug(
                        "%r seconds or less until lock expires - auto renewing.",
                        renew_period,
                    )
                    try:
                        # Renewable is a session
                        await renewable.renew_lock()  # type: ignore
                    except AttributeError:
                        # Renewable is a message
                        await receiver.renew_message_lock(renewable)  # type: ignore
                await asyncio.sleep(self._sleep_time)
            clean_shutdown = not renewable._lock_expired
        except AutoLockRenewTimeout as e:
            error = e
            renewable.auto_renew_error = e
            clean_shutdown = not renewable._lock_expired
        except Exception as e:  # pylint: disable=broad-except
            _log.debug("Failed to auto-renew lock: %r. Closing thread.", e)
            error = AutoLockRenewFailed("Failed to auto-renew lock", error=e)
            renewable.auto_renew_error = error
        finally:
            if on_lock_renew_failure and not clean_shutdown:
                await on_lock_renew_failure(renewable, error)

    def register(
        self,
        receiver: ServiceBusReceiver,
        renewable: Union[ServiceBusReceivedMessage, ServiceBusSession],
        max_lock_renewal_duration: Optional[float] = None,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
    ) -> None:
        """Register a renewable entity for automatic lock renewal.

        :param receiver: The ServiceBusReceiver instance that is associated with the message or the session to
         be auto-lock-renewed.
        :type receiver: ~azure.servicebus.aio.ServiceBusReceiver
        :param renewable: A locked entity that needs to be renewed.
        :type renewable: Union[~azure.servicebus.aio.ServiceBusReceivedMessage,~azure.servicebus.aio.ServiceBusSession]
        :param max_lock_renewal_duration: A time in seconds that the lock should be maintained for.
         Default value is None. If specified, this value will override the default value specified at the constructor.
        :type max_lock_renewal_duration: Optional[float]
        :param Optional[AsyncLockRenewFailureCallback] on_lock_renew_failure:
         An async callback may be specified to be called when the lock is lost on the renewable being registered.
         Default value is None (no callback).

        :rtype: None
        """
        if not isinstance(renewable, (ServiceBusReceivedMessage, ServiceBusSession)):
            raise TypeError(
                "AutoLockRenewer only supports registration of types "
                "azure.servicebus.ServiceBusReceivedMessage (via a receiver's receive methods) and "
                "azure.servicebus.aio.ServiceBusSession "
                "(via a session receiver's property receiver.session)."
            )
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        if renewable.locked_until_utc is None:
            raise ValueError(
                "Only azure.servicebus.ServiceBusReceivedMessage objects in PEEK_LOCK receive mode may"
                "be lock-renewed. (E.g. only messages received via receive() or the receiver iterator,"
                "not using RECEIVE_AND_DELETE receive mode, and not returned from Peek)"
            )

        starttime = get_renewable_start_time(renewable)

        # This is a heuristic to compensate if it appears the user has a lock duration less than our base renew period
        time_until_expiry = get_renewable_lock_duration(renewable)
        renew_period_override = None
        # Default is 10 seconds, but let's leave ourselves a small margin of error because clock skew is a real problem
        if time_until_expiry <= datetime.timedelta(
            seconds=self._renew_period + SHORT_RENEW_OFFSET
        ):
            renew_period_override = (
                time_until_expiry.seconds * SHORT_RENEW_SCALING_FACTOR
            )

        renew_future = asyncio.ensure_future(
            self._auto_lock_renew(
                receiver,
                renewable,
                starttime,
                max_lock_renewal_duration or self._max_lock_renewal_duration,
                on_lock_renew_failure or self._on_lock_renew_failure,
                renew_period_override,
            ),
            **self._internal_kwargs
        )
        self._futures.append(renew_future)

    async def close(self) -> None:
        """Cease autorenewal by cancelling any remaining open lock renewal futures."""
        self._shutdown.set()
        if self._futures:
            await asyncio.wait(self._futures)
"noreply@github.com"
] | fangchen0601.noreply@github.com |
af8bdce8b06b56c4b156043cf40cb6163573214a | 813212a9a3f211c25c4765795ed252d74b0273dd | /main.py | 1a8a253f008ccfeffd0ee5c0f2d0d54952d151e8 | [] | no_license | Suke0/DSFD_pytorch | 0228963441e75f62f1588e37a22e86c90340a0a9 | a8ae0f4930acdd81e05f73941a8b397daa35c489 | refs/heads/master | 2022-03-19T06:45:32.663406 | 2019-11-19T14:47:38 | 2019-11-19T14:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,535 | py | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import time
import shutil
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
from torch.autograd import Variable
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from data.config import cfg
from data.widerface import WIDERDetection, detection_collate
from layers.modules import MultiBoxLoss
from layers.functions import PriorBox
from models.factory import build_net, basenet_factory
# ---- command-line configuration ----------------------------------------------
parser = argparse.ArgumentParser(
    description='DSFD face Detector Training With Pytorch')
parser.add_argument('--model', default='vgg', type=str,
                    choices=['vgg', 'resnet50', 'resnet101', 'resnet152'],
                    help='model for training')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth',
                    help='Pretrained base model')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', default=2, type=int,
                    help='Batch size for training')
# NOTE(review): declared with type=str but default=True; any value supplied on
# the command line becomes a non-empty (truthy) string -- confirm intended.
parser.add_argument('--pretrained', default=True, type=str,
                    help='use pre-trained model')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
# NOTE(review): same str/bool mix as --pretrained; the value is overwritten
# below from the WORLD_SIZE environment variable anyway.
parser.add_argument('--distributed', default=True, type=str,
                    help='use distribute training')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
                    help='Weight decay for SGD')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models')
parser.add_argument('--prefix', default='dsfd_',
                    help='the prefix for saving checkpoint models')

args = parser.parse_args()

cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms for fixed input shapes
args = parser.parse_args()  # NOTE(review): second parse is redundant -- result is identical

# Best (lowest) validation loss seen so far; spelling kept because the same
# key name ('minmum_loss') is stored in and restored from checkpoints.
minmum_loss = np.inf

# Derive the real distributed flag from the launcher-provided WORLD_SIZE.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
    args.distributed = int(os.environ['WORLD_SIZE']) > 1

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)
def main():
    """Build the DSFD detector, restore or initialise its weights, then run the
    full train/validation loop, checkpointing after every epoch."""
    global args
    global minmum_loss
    args.gpu = 0
    args.world_size = 1
    if args.distributed:
        # One process per GPU: select the local device and join the NCCL group.
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    args.total_batch_size = args.world_size * args.batch_size

    # build dsfd network
    print("Building net...")
    basenet = basenet_factory(args.model)
    dsfd_net = build_net('train', cfg.NUM_CLASSES, args.model)
    model = dsfd_net

    if args.pretrained:
        # Load the pretrained backbone weights into the base network.
        base_weights = torch.load(args.save_folder + args.basenet)
        print('Load base network {}'.format(args.save_folder + basenet))
        if args.model == 'vgg':
            model.vgg.load_state_dict(base_weights)
        else:
            model.resnet.load_state_dict(base_weights)

    # for multi gpu
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model)

    model = model.cuda()
    # optimizer and loss function
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(cfg, True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # Map storages onto this process's GPU when restoring.
            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
            args.start_epoch = checkpoint['epoch']
            minmum_loss = checkpoint['minmum_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        # Fresh run: initialise every newly added DSFD neck/head module.
        print('Initializing weights...')
        dsfd_net.extras.apply(dsfd_net.weights_init)
        dsfd_net.fpn_topdown.apply(dsfd_net.weights_init)
        dsfd_net.fpn_latlayer.apply(dsfd_net.weights_init)
        dsfd_net.fpn_fem.apply(dsfd_net.weights_init)
        dsfd_net.loc_pal1.apply(dsfd_net.weights_init)
        dsfd_net.conf_pal1.apply(dsfd_net.weights_init)
        dsfd_net.loc_pal2.apply(dsfd_net.weights_init)
        dsfd_net.conf_pal2.apply(dsfd_net.weights_init)

    print('Loading wider dataset...')
    train_dataset = WIDERDetection(cfg.FACE.TRAIN_FILE, mode='train')
    val_dataset = WIDERDetection(cfg.FACE.VAL_FILE, mode='val')
    train_loader = data.DataLoader(train_dataset, args.batch_size,
                                   num_workers=args.num_workers,
                                   shuffle=True,
                                   collate_fn=detection_collate,
                                   pin_memory=True)
    # Validation uses half the training batch size.
    val_batchsize = args.batch_size // 2
    val_loader = data.DataLoader(val_dataset, val_batchsize,
                                 num_workers=args.num_workers,
                                 shuffle=False,
                                 collate_fn=detection_collate,
                                 pin_memory=True)

    print('Using the specified args:')
    print(args)

    # load PriorBox: anchors for the two progressive-anchor stages (pal=1, pal=2).
    with torch.no_grad():
        priorbox1 = PriorBox(input_size=[640, 640], cfg=cfg, pal=1)
        priors_pal1 = priorbox1.forward()
        priors_pal1 = priors_pal1.cuda()

        priorbox2 = PriorBox(input_size=[640, 640], cfg=cfg, pal=2)
        priors_pal2 = priorbox2.forward()
        priors_pal2 = priors_pal2.cuda()

    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch, then validate
        end = time.time()
        train_loss = train(train_loader, model, priors_pal1, priors_pal2, criterion, optimizer, epoch)
        val_loss = val(val_loader, model, priors_pal1, priors_pal2, criterion)
        if args.local_rank == 0:
            # Only rank 0 writes checkpoints; "best" tracks the lowest val loss.
            is_best = val_loss < minmum_loss
            minmum_loss = min(val_loss, minmum_loss)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': minmum_loss,
                'optimizer': optimizer.state_dict(),
            }, is_best, epoch)
        epoch_time = time.time() - end
        print('Epoch %s time cost %f' % (epoch, epoch_time))
def train(train_loader, model, priors_pal1, priors_pal2, criterion, optimizer, epoch):
    """Run one training epoch and return the epoch-average total loss.

    The DSFD loss is the sum of localisation + classification losses from both
    progressive-anchor stages (PAL1 and PAL2).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    loc_loss = AverageMeter()
    cls_loss = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()
    for i, data in enumerate(train_loader, 1):
        input, targets = data
        train_loader_len = len(train_loader)
        adjust_learning_rate(optimizer, epoch, i, train_loader_len)

        # measure data loading time
        data_time.update(time.time() - end)

        input_var = Variable(input.cuda())
        target_var = [Variable(ann.cuda(), requires_grad=False) for ann in targets]

        # compute output: output[0:2] are PAL1 predictions, output[2:4] PAL2.
        output = model(input_var)
        loss_l_pa1l, loss_c_pal1 = criterion(output[0:2], priors_pal1, target_var)
        loss_l_pa12, loss_c_pal2 = criterion(output[2:4], priors_pal2, target_var)
        loss = loss_l_pa1l + loss_c_pal1 + loss_l_pa12 + loss_c_pal2
        loss_l = loss_l_pa1l + loss_l_pa12
        loss_c = loss_c_pal1 + loss_c_pal2
        if args.distributed:
            # Average the scalar losses across workers (for logging only).
            reduced_loss = reduce_tensor(loss.data)
            reduced_loss_l = reduce_tensor(loss_l.data)
            reduced_loss_c = reduce_tensor(loss_c.data)
        else:
            reduced_loss = loss.data
            reduced_loss_l = loss_l.data
            reduced_loss_c = loss_c.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        loc_loss.update(to_python_float(reduced_loss_l), input.size(0))
        cls_loss.update(to_python_float(reduced_loss_c), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        torch.cuda.synchronize()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if args.local_rank == 0 and i % args.print_freq == 0 and i >= 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Speed {3:.3f} ({4:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'loc_loss {loc_loss.val:.3f} ({loc_loss.avg:.3f})\t'
                  'cls_loss {cls_loss.val:.3f} ({cls_loss.avg:.3f})'.format(
                      epoch, i, train_loader_len,
                      args.total_batch_size / batch_time.val,
                      args.total_batch_size / batch_time.avg,
                      batch_time=batch_time,
                      data_time=data_time, loss=losses, loc_loss=loc_loss, cls_loss=cls_loss))
    return losses.avg
def val(val_loader, model, priors_pal1, priors_pal2, criterion):
    """Run one validation pass and return the average total loss."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    loc_loss = AverageMeter()
    cls_loss = AverageMeter()

    # switch to evaluate mode
    model.eval()
    # NOTE(review): the forward passes below are not wrapped in torch.no_grad(),
    # so autograd bookkeeping still happens during validation -- confirm intended.
    end = time.time()
    for i, data in enumerate(val_loader, 1):
        input, targets = data
        val_loader_len = len(val_loader)

        # measure data loading time
        data_time.update(time.time() - end)

        input_var = Variable(input.cuda())
        target_var = [Variable(ann.cuda(), requires_grad=False) for ann in targets]

        # compute output: output[0:2] are PAL1 predictions, output[2:4] PAL2.
        output = model(input_var)
        loss_l_pa1l, loss_c_pal1 = criterion(output[0:2], priors_pal1, target_var)
        loss_l_pa12, loss_c_pal2 = criterion(output[2:4], priors_pal2, target_var)
        loss = loss_l_pa1l + loss_c_pal1 + loss_l_pa12 + loss_c_pal2
        loss_l = loss_l_pa1l + loss_l_pa12
        loss_c = loss_c_pal1 + loss_c_pal2

        if args.distributed:
            # Average the scalar losses across workers (for logging only).
            reduced_loss = reduce_tensor(loss.data)
            reduced_loss_l = reduce_tensor(loss_l.data)
            reduced_loss_c = reduce_tensor(loss_c.data)
        else:
            reduced_loss = loss.data
            reduced_loss_l = loss_l.data
            reduced_loss_c = loss_c.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        loc_loss.update(to_python_float(reduced_loss_l), input.size(0))
        cls_loss.update(to_python_float(reduced_loss_c), input.size(0))

        torch.cuda.synchronize()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.local_rank == 0 and i % args.print_freq == 0 and i >= 0:
            print('[{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Speed {2:.3f} ({3:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'loc_loss {loc_loss.val:.3f} ({loc_loss.avg:.3f})\t'
                  'cls_loss {cls_loss.val:.3f} ({cls_loss.avg:.3f})'.format(
                      i, val_loader_len,
                      args.total_batch_size / batch_time.val,
                      args.total_batch_size / batch_time.avg,
                      batch_time=batch_time,
                      data_time=data_time, loss=losses, loc_loss=loc_loss, cls_loss=cls_loss))
    return losses.avg
def to_python_float(t):
    """Return *t* as a plain Python number.

    Zero-dim tensors are unwrapped via ``.item()``; anything else is assumed
    to be a one-element sequence and unwrapped by indexing.
    """
    return t.item() if hasattr(t, 'item') else t[0]
class AverageMeter(object):
    """Tracks the most recent value together with a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def reduce_tensor(tensor):
    """All-reduce a copy of *tensor* across distributed workers and return the mean."""
    rt = tensor.clone()
    # Sum across all processes, then normalise by the world size.
    # NOTE(review): `dist.reduce_op` is the legacy alias of `dist.ReduceOp` in
    # newer torch releases -- confirm the pinned torch version still exposes it.
    dist.all_reduce(rt, op=dist.reduce_op.SUM)
    rt /= args.world_size
    return rt
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
    """Step-decay learning-rate schedule with first-epoch warmup.

    The base rate decays by 10x every 10 epochs, with one extra decay step
    from epoch 30 onwards. During epoch 0 the rate is additionally ramped
    linearly (relative to 5 epochs' worth of iterations). The final rate is
    written into every parameter group of *optimizer*.
    """
    decay_steps = epoch // 10
    if epoch >= 30:
        decay_steps += 1
    lr = args.lr * 0.1 ** decay_steps

    # Linear warmup over the first epoch.
    if epoch < 1:
        lr *= float(1 + step + epoch * len_epoch) / (5. * len_epoch)

    if args.local_rank == 0 and step % args.print_freq == 0 and step > 1:
        print("Epoch = {}, step = {}, lr = {}".format(epoch, step, lr))

    for group in optimizer.param_groups:
        group['lr'] = lr
def save_checkpoint(state, is_best, epoch):
    """Serialise *state* to ``<save_folder>/<prefix><epoch>.pth`` and, when
    *is_best* is set, mirror it to ``model_best.pth``."""
    filename = os.path.join(args.save_folder, args.prefix + str(epoch) + ".pth")
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(args.save_folder, 'model_best.pth'))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"jianzhnie@126.com"
] | jianzhnie@126.com |
c9c2a19fa0bade0625640dd0a74927dc3df474c3 | a4735eec2dac952990ae7a2e1d07c72e4d58e6b4 | /libDiscovery/libWSD_Onvif/onvif2/cli.py | b9c7d935d89f662056e319818ae4c5ed2c178e25 | [] | no_license | SaulDiez/discover_IoT_vuln | ded041a1eb1c3b88989651cac91e9da9d1202863 | 2285b0223e1479bf47d586ad3bafa2b2b47d626c | refs/heads/main | 2023-06-11T20:36:01.514043 | 2021-07-07T18:50:36 | 2021-07-07T18:50:36 | 377,612,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,086 | py | #!/usr/bin/python
'''ONVIF Client Command Line Interface'''
from __future__ import print_function, division
import re
from cmd import Cmd
from ast import literal_eval
from argparse import ArgumentParser, REMAINDER
from zeep.exceptions import LookupError as MethodNotFound
from zeep.xsd import String as Text
from . import ONVIFCamera, ONVIFService, ONVIFError
from .definition import SERVICES
import os.path
SUPPORTED_SERVICES = SERVICES.keys()
class ThrowingArgumentParser(ArgumentParser):
    """ArgumentParser variant that raises ValueError instead of exiting on error."""

    def error(self, message):
        # Surface parse failures (with the usage text) to the caller rather
        # than letting argparse call sys.exit().
        raise ValueError("%s\n%s" % (message, self.format_usage()))
def success(message):
    """Print *message* flagged as a successful ('True') result."""
    print('True: %s' % (message,))
def error(message):
    """Print *message* flagged as a failed ('False') result."""
    print('False: %s' % (message,))
class ONVIFCLI(Cmd):
    """Interactive interpreter that forwards ``cmd service operation [params]``
    lines to an ONVIF camera."""

    prompt = 'ONVIF >>> '
    client = None        # ONVIFCamera instance, created in setup()
    cmd_parser = None    # argparse parser for the CMD sub-command

    def setup(self, args):
        ''' `args`: parsed namespace from `create_parser()` '''
        # Create onvif camera client
        self.client = ONVIFCamera(args.host, args.port,
                                  args.user, args.password,
                                  args.wsdl, encrypt=args.encrypt)
        # Create cmd argument parser
        self.create_cmd_parser()

    def create_cmd_parser(self):
        # Create parser to parse CMD lines; `params` is optional.
        cmd_parser = ThrowingArgumentParser(prog='ONVIF CMD',
                                            usage='CMD service operation [params]')
        cmd_parser.add_argument('service')
        cmd_parser.add_argument('operation')
        cmd_parser.add_argument('params', default='{}', nargs=REMAINDER)
        self.cmd_parser = cmd_parser

    def do_cmd(self, line):
        '''Usage: CMD service operation [parameters]'''
        try:
            args = self.cmd_parser.parse_args(line.split())
        except ValueError as err:
            return error(err)

        # Check if args.service is valid
        if args.service not in SUPPORTED_SERVICES:
            return error('No Service: ' + args.service)

        args.params = ''.join(args.params)
        # params is optional
        if not args.params.strip():
            args.params = '{}'

        # params must be a dictionary-format string: extract the {...} part.
        match = re.match(r"^.*?(\{.*\}).*$", args.params)
        if not match:
            return error('Invalid params')

        try:
            # literal_eval raises SyntaxError (not just ValueError) on
            # malformed input, so catch both to keep the REPL alive.
            args.params = dict(literal_eval(match.group(1)))
        except (ValueError, SyntaxError):
            return error('Invalid params')

        try:
            # Get ONVIF service
            service = self.client.get_service(args.service)
            # Actually execute the command and get the response
            response = getattr(service, args.operation)(args.params)
        except MethodNotFound:
            return error('No Operation: %s' % args.operation)
        except Exception as err:
            return error(err)

        if isinstance(response, (Text, bool)):
            return success(response)
        # Try to convert instance to dictionary
        try:
            success(ONVIFService.to_dict(response))
        except ONVIFError:
            error({})

    def complete_cmd(self, text, line, begidx, endidx):
        # Tab-completion over the known service names.
        # SUPPORTED_SERVICES is a dict view on Python 3, which does not support
        # slicing -- materialise it into a list instead of `[:]`.
        if not text:
            completions = list(SUPPORTED_SERVICES)
        else:
            completions = [key for key in SUPPORTED_SERVICES
                           if key.startswith(text)]
        return completions

    def emptyline(self):
        # Do nothing on an empty line (default Cmd behaviour repeats the last command).
        return ''

    def do_EOF(self, line):
        # Ctrl-D terminates the command loop.
        return True
def create_parser():
    """Build the top-level CLI parser: camera connection options plus an
    optional one-shot service/operation/params triple (all positionals are
    optional so the tool can fall back to the interactive loop)."""
    parser = ThrowingArgumentParser(description=__doc__)
    parser.add_argument('service', nargs='?',
                        help='Service defined by ONVIF WSDL document')
    parser.add_argument('operation', nargs='?', default='',
                        help='Operation to be execute defined'
                             ' by ONVIF WSDL document')
    parser.add_argument('params', default='', nargs='?',
                        help='JSON format params passed to the operation.'
                             'E.g., "{"Name": "NewHostName"}"')
    parser.add_argument('--host', required=True,
                        help='ONVIF camera host, e.g. 192.168.2.123, '
                             'www.example.com')
    parser.add_argument('--port', default=80, type=int, help='Port number for camera, default: 80')
    parser.add_argument('-u', '--user', required=True,
                        help='Username for authentication')
    parser.add_argument('-a', '--password', required=True,
                        help='Password for authentication')
    # Default WSDL directory is the "wsdl" folder next to this package.
    parser.add_argument('-w', '--wsdl', default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "wsdl"),
                        help='directory to store ONVIF WSDL documents')
    parser.add_argument('-e', '--encrypt', default='False',
                        help='Encrypt password or not')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='increase output verbosity')
    parser.add_argument('--cache-location', dest='cache_location', default='/tmp/onvif/',
                        help='location to cache suds objects, default to /tmp/onvif/')
    parser.add_argument('--cache-duration', dest='cache_duration',
                        help='how long will the cache be exist')
    return parser
def main():
    """Parse the command line, connect to the camera, and either execute a
    single one-shot CMD or drop into the interactive loop."""
    INTRO = __doc__
    # Create argument parser
    parser = create_parser()
    try:
        args = parser.parse_args()
    except ValueError as err:
        print(str(err))
        return

    # Also need parse configuration file.
    # Interactive command loop
    # NOTE(review): Cmd expects a file-like stdin; passing the builtin `input`
    # here looks suspect -- confirm against the cmd module contract.
    cli = ONVIFCLI(stdin=input)
    cli.setup(args)
    if args.service:
        # One-shot mode: execute the given command and exit.
        cmd = ' '.join(['cmd', args.service, args.operation, args.params])
        cli.onecmd(cmd)
    else:
        cli.cmdloop()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sdiezf02@estudiantes.unileon.es"
] | sdiezf02@estudiantes.unileon.es |
15fa92910124d8733fc23acbbe25b7199cb90de9 | 9b1aeb2a871bac56b21cea76017b70c7b264a2b0 | /smallScripts/parse_hgnc_file.py | 14caa6bc1350b739fe2510bf191338a14b891364 | [] | no_license | gudeqing/biodev | bdd8aae514c3502a116ad25b388961acd7c86e48 | b9e7c221f6d2c1ecdeefe13cf5aa481a5e5a64f7 | refs/heads/master | 2023-03-14T06:12:28.971104 | 2022-11-16T06:40:18 | 2022-11-16T06:40:18 | 118,697,443 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,987 | py | import pandas as pd
class ParseHGNC(object):
    """Parse an HGNC "custom download" table and build Ensembl-id <-> symbol maps."""

    def __init__(self, data):
        """
        :param data: path to the tab-separated HGNC custom download, e.g. from
            https://www.genenames.org/cgi-bin/download/custom?col=gd_hgnc_id&col=gd_app_sym&col=gd_app_name&col=gd_prev_sym&col=gd_aliases&col=gd_pub_ensembl_id&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit
        """
        target_cols = [
            'HGNC ID',
            'Approved symbol',
            'Approved name',
            'Previous symbols',
            'Alias symbols',
            'Ensembl gene ID'
        ]
        # 'HGNC ID' becomes the index, so in itertuples(): _1=Approved symbol,
        # _2=Approved name, _3=Previous symbols, [4]=Alias symbols,
        # _5=Ensembl gene ID.
        self.data = pd.read_csv(data, index_col=0, header=0, sep='\t', usecols=target_cols)

    def withdraw_dict(self):
        """Map each withdrawn symbol to the set of replacement symbols parsed
        from its 'symbol withdrawn, see X' name field."""
        result = dict()
        for row in self.data.itertuples():
            if '~withdrawn' in row._1 and 'symbol withdrawn' in row._2:
                key = row._1.split('~withdrawn')[0]
                result.setdefault(key, set())
                result[key].add(row._2.split('see')[1].strip())
        return result

    def ensembl2symbols(self):
        """ensembl_id: [approved_name, approved_symbol, alias, alias2, alias3 ...]"""
        result = dict()
        for row in self.data.itertuples():
            # Skip withdrawn entries and rows without an Ensembl id.
            if '~withdrawn' in row._1:
                continue
            if pd.isnull(row._5):
                continue
            result.setdefault(row._5, list())
            # First element: approved name (or a 'not_found' placeholder).
            if not pd.isnull(row._2):
                result[row._5].append(row._2)
            else:
                result[row._5].append('not_found')
            # Then the approved symbol, previous symbols and aliases.
            result[row._5].append(row._1)
            if not pd.isnull(row._3):
                result[row._5] += [x.strip() for x in row._3.split(',')]
            if not pd.isnull(row[4]):
                result[row._5] += [x.strip() for x in row[4].split(',')]
        return result

    def symbol2ensembl(self):
        """Invert ensembl2symbols(): every symbol/alias -> set of Ensembl ids
        (the approved name at index 0 is deliberately excluded)."""
        result = dict()
        for k, v in self.ensembl2symbols().items():
            for sym in v[1:]:
                result.setdefault(sym, set())
                result[sym].add(k)
        return result

    def converting(self, query: list or str, out='symbol2ensembl.txt', known_pair=None, symbol2id=True):
        """Convert the queried identifiers, writing '<target>\\t<query>' lines
        to *out* and returning the list of matches.

        :param query: list of identifiers, or path to a file with one per line
        :param out: output file name
        :param known_pair: optional two-column file of known ensembl/symbol pairs,
            consulted before the HGNC table
        :param symbol2id: True converts symbols to Ensembl ids, False the reverse
        """
        queries = query if type(query) == list else [x.strip().split()[0] for x in open(query)]
        result = list()
        # Known pairs are stored as target->query; reverse the columns when
        # converting symbol->id so the lookup key is the symbol.
        if symbol2id:
            known_dict = dict(x.strip().split()[:2][::-1] for x in open(known_pair) if x.strip()) if known_pair else dict()
        else:
            known_dict = dict(x.strip().split()[:2] for x in open(known_pair) if x.strip()) if known_pair else dict()
        known_dict = {x.lower(): y for x, y in known_dict.items()}
        # Case-insensitive lookup table built from the HGNC data.
        s2e = self.symbol2ensembl() if symbol2id else self.ensembl2symbols()
        s2e = {x.lower(): y for x, y in s2e.items()}
        with open(out, 'w') as f:
            not_found = []
            # Pass 1: resolve via the prior known pairs.
            for each_ori in queries:
                each = each_ori.lower()
                if each in known_dict:
                    f.write('{}\t{}\n'.format(known_dict[each], each_ori))
                    result.append(known_dict[each])
                else:
                    not_found.append(each_ori)
            if known_pair:
                print('Success to convert {} genes by querying prior known pair'.format(len(result)))
            # find the remained ones via the HGNC table (and withdrawn symbols)
            withdraw_dict = self.withdraw_dict() if symbol2id else dict()
            withdraw_dict = {x.lower(): y for x, y in withdraw_dict.items()}
            failed_ones = []
            for each_ori in not_found:
                each = each_ori.lower()
                if each in s2e:
                    # NOTE(review): here a *set* of ids is appended to `result`,
                    # while pass 1 appends single strings -- mixed element types.
                    result.append(s2e[each])
                    if len(s2e[each]) > 1:
                        print("{} was found associated with {} genes".format(each_ori, len(s2e[each])))
                    for g in s2e[each]:
                        f.write('{}\t{}\n'.format(g, each_ori))
                elif each in withdraw_dict:
                    print('{} was found in withdraw'.format(each_ori))
                    for new_sym in withdraw_dict[each]:
                        # NOTE(review): `new_sym` keeps its original case but the
                        # keys of `s2e` were lowercased above -- this membership
                        # test likely needs `new_sym.lower()`; confirm.
                        if new_sym in s2e:
                            result.append(s2e[new_sym])
                            for g in s2e[new_sym]:
                                f.write('{}\t{}\n'.format(g, each_ori))
                else:
                    failed_ones.append(each_ori)
                    # print('{} is not found'.format(each))
                    # f.write('{}\t{}\n'.format(each, 'not_found'))
            if not_found:
                print('Success to convert {} genes by query hgnc_custom'.format(len(not_found)-len(failed_ones)))
            if failed_ones:
                print("Failed to query: ")
                print(failed_ones)
        return result
def converting(query, hgnc_custom=None, out='query_result.txt',
               prior_known_pair=None, symbol2id=False):
    """
    converting ensembl id to symbol or reverse
    :param hgnc_custom: https://www.genenames.org/download/custom/, "/nfs2/database/HGNC/custom.txt"
    :param query: file listing the identifiers to query, one per line
    :param out: output file name
    :param prior_known_pair: existing two-column file of ensembl-id/symbol pairs;
        if provided, it is consulted before the HGNC table
    :param symbol2id: bool; set True to convert symbols to Ensembl ids
    """
    hgnc_custom = hgnc_custom if hgnc_custom is not None else 'hgnc.info.txt'
    from urllib.request import urlretrieve
    # Always (re-)download the current HGNC custom table before querying.
    urlretrieve('https://www.genenames.org/cgi-bin/download/custom?col=gd_hgnc_id&col=gd_app_sym&col=gd_app_name&col=gd_prev_sym&col=gd_aliases&col=gd_pub_ensembl_id&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit', hgnc_custom)
    # NOTE(review): local name shadows the builtin `object`.
    object = ParseHGNC(hgnc_custom)
    return object.converting(query=query, symbol2id=symbol2id, out=out, known_pair=prior_known_pair)
# Command-line entry point: exposes only `converting` through xcmds.
if __name__ == '__main__':
    from xcmds.xcmds import xcmds
    xcmds(locals(), include=['converting'])
| [
"822466659@qq.com"
] | 822466659@qq.com |
72d0a91efae53216da61610416aa816a93f0d33a | a6ef13387c24c719a0dcfeb173521cd70beac282 | /python1/mod_call_fibs.py | 9415180ce0991ba45b0e4e2af1218228cf63e8c3 | [] | no_license | youjiahe/python | f60472d61daf58b7f5bb6aa557949de4babf8c9c | 74eb4c5ba211ae5ffed2040576e5eead75d16e7d | refs/heads/master | 2020-03-31T02:35:55.787809 | 2019-12-02T16:32:54 | 2019-12-02T16:32:54 | 151,831,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | #!/usr/bin/env python3
import mod_fibs
import mod_fibs as f
from mod_fibs import fib
mod_fibs.fib()
f.fib(5)
fib(10)
| [
"youjiahe@163.com"
] | youjiahe@163.com |
8d4ed2bfc181059a7886b05021ec328a4fd89b11 | fa736ce1cbfd90bd6a686db0d65fba3674997fc9 | /addons/source-python/plugins/es_emulator/eventscripts/_libs/python/installlib.py | e05bd05ab225687aba9f0d4d7a319c9534a89975 | [
"MIT"
] | permissive | Ayuto/EventScripts-Emulator | 7f6fc17fe16dc2dec7c47de0457631afa018ff47 | 3ea8ddb2c855dc1986244a650fa23475fc69ca7f | refs/heads/master | 2021-07-04T11:27:13.369194 | 2021-06-06T18:48:19 | 2021-06-06T18:48:19 | 61,387,900 | 17 | 12 | null | 2021-06-06T18:48:19 | 2016-06-17T16:37:31 | Python | UTF-8 | Python | false | false | 11,170 | py | # ./addons/eventscripts/_libs/python/installlib.py
import pickle
import es
import os
import path
import zipfile
###
class InstallInfo(object):
    """
    Records what was installed for one addon: its basename, the installed
    version string, and the sets of files and directories it owns.
    """
    version = ''

    def __init__(self, basename):
        self.basename = basename
        self.files = set()
        self.dirs = set()

    def getVersion(self):
        """ Returns the stored version of the addon """
        return self.version

    def setVersion(self, version):
        """ Sets the stored version of the addon """
        self.version = version

    def addFile(self, file):
        """ Marks a file as belonging to the addon """
        self.files.add(file)

    def getFiles(self):
        """ Returns a set of files belonging to the addon """
        # Hand back a fresh set so callers cannot mutate our state.
        return set(self.files)

    def addDir(self, dir):
        """ Marks a directory as belonging to the addon """
        self.dirs.add(dir)

    def getDirs(self):
        """ Returns a set of directories belonging to the addon """
        # Hand back a fresh set so callers cannot mutate our state.
        return set(self.dirs)
class InstallInfoManager(object):
    """ Class that manages InstallInfo instances on disk """
    # Directory where the pickled InstallInfo records live.
    infopath = es.getAddonPath('_libs') + '/python/installinfo/'

    def __init__(self):
        """ Creates the installinfo directory if it doesn't exist """
        if not os.path.isdir(self.infopath):
            os.mkdir(self.infopath)

    def getInfoPath(self, basename):
        """ Returns the absolute path to a basename's install info """
        return self.infopath + basename + '.db'

    def hasInstallInfo(self, basename):
        """ Returns True if the basename has registered InstallInfo otherwise returns False """
        return os.path.isfile(self.getInfoPath(basename))

    def getInstallInfo(self, basename):
        """ Returns the InstallInfo instance for the basename """
        # Unpickle the stored record, or hand back a fresh empty one.
        # NOTE(review): files are opened in text mode; pickle on Python 3
        # requires binary mode. This code targets the Python 2 EventScripts
        # runtime -- confirm before porting.
        if self.hasInstallInfo(basename):
            infofile = open(self.getInfoPath(basename))
            info = pickle.load(infofile)
            infofile.close()
        else:
            info = InstallInfo(basename)
        return info

    def saveInstallInfo(self, basename, info):
        """ Saves the InstallInfo instance provided to the basename """
        infofile = open(self.getInfoPath(basename), 'w')
        pickle.dump(info, infofile)
        infofile.close()

    def getInstalled(self):
        """ Returns a list of basenames that have InstallInfo instances """
        # Strip the directory part and the trailing '.db' (slice end ~2 == -3).
        return [addon[addon.replace('\\', '/').rfind('/') + 1:~2] for addon in [x for x in os.listdir(self.infopath) if x.endswith('.db')]]

# Module-level singleton used by Addon and the helper functions below.
infomanager = InstallInfoManager()
###
class Addon(object):
    """ Base Addon object that holds required attributes and functions for use with AddonInstaller """
    # Shared install-info manager (module-level singleton defined above).
    infomanager = infomanager
    addon_file = None

    def __init__(self, basename, addon_file):
        """
        Along with basename the Addon object also requires the addon file.
        That addon file will be returned when the download function is called.
        """
        self.basename = basename
        self.addon_file = addon_file
        self.approved = '1'       # locally supplied addons are always treated as approved
        self.currentversion = ''  # no version tracking for local files
        self.es_load = basename   # name passed to es_xload / es.unload

    def download(self):
        """ Returns the addon file given in the initialization function """
        return self.addon_file
###
class AddonInstaller(object):
    """ Class that handles the installing, updating, and uninstalling of addons """
    # Status codes returned by install, update, and uninstall.
    STATUS_SUCCESSFUL = 0
    STATUS_NO_DOWNLOAD = 1
    STATUS_NO_INSTALL_INFO = 2
    STATUS_ALREADY_DONE = 4
    STATUS_NOT_APPROVED = 8

    # Useful attributes
    gamepath = str(es.ServerVar('eventscripts_gamedir')) + '/'
    autoloadpath = es.getAddonPath('_autoload') + '/'

    def __init__(self, addon):
        """ Installation of an addon requires an Addon-based class instance """
        self.addon = addon
        self.addondir = es.getAddonPath(self.addon.es_load) + '/'

    """ Query functions """
    def isInstalled(self):
        """ Returns True if the addon is installed otherwise returns False """
        # Installed means either a script file exists on disk (ESP <name>.py or
        # ESS es_<name>.txt) or install info is registered for the basename.
        filename = self.addon.es_load.split('/')[~0]
        return bool(list(filter(os.path.isfile, (self.addondir + filename + '.py', self.addondir + 'es_' + filename + '.txt')))) or self.addon.infomanager.hasInstallInfo(self.addon.basename)

    def isAutoLoaded(self):
        """ Returns True if the addon is autoloaded otherwise returns False """
        return os.path.isfile(self.autoloadpath + self.addon.basename + '.cfg')

    def setAutoload(self, state):
        """ Sets or removes autoload for the addon based on "state" """
        autoloadpath = self.autoloadpath + self.addon.basename + '.cfg'
        if state:
            # The cfg simply loads the addon; _executeAutoLoad() executes it.
            autoloadfile = open(autoloadpath, 'w')
            autoloadfile.write('es_xload ' + self.addon.es_load + '\n')
            autoloadfile.close()
        elif os.path.isfile(autoloadpath):
            os.remove(autoloadpath)

    """ File manipulation functions """
    def install(self, autoload=False, addon_file=None):
        """
        Installs the addon, setting autoload if necessary. If the optional
        "addon_file" keyword is provided that file will be used in lieu of
        downloading the addon from the ESAM.
        """
        if self.isInstalled(): return self.STATUS_ALREADY_DONE
        if self.addon.approved != '1': return self.STATUS_NOT_APPROVED
        addon_file = self.addon.download() if addon_file is None else addon_file
        if not addon_file: return self.STATUS_NO_DOWNLOAD
        self._extract(addon_file, InstallInfo(self.addon.basename))
        self.setAutoload(autoload)
        return self.STATUS_SUCCESSFUL

    def update(self, force=False):
        """
        Updates the addon if "force" is True or the ESAM version number is
        different from the installed version number.
        """
        if not self.addon.infomanager.hasInstallInfo(self.addon.basename): return self.STATUS_NO_INSTALL_INFO
        if self.addon.approved != '1': return self.STATUS_NOT_APPROVED
        installinfo = self.addon.infomanager.getInstallInfo(self.addon.basename)
        # Skip when already at the current version (unless forced or version unknown).
        if not force and installinfo.getVersion() == self.addon.currentversion and installinfo.getVersion(): return self.STATUS_ALREADY_DONE
        autoload = self.isAutoLoaded()  # remembered so reinstall preserves it
        addon_file = self.addon.download()
        if not addon_file: return self.STATUS_NO_DOWNLOAD
        # Update = clean uninstall followed by a fresh install of the new zip.
        status = self.uninstall(installinfo)
        if status != self.STATUS_SUCCESSFUL: return status
        return self.install(autoload, addon_file)

    def uninstall(self, installinfo=None):
        """
        Unloads the addon if it is loaded
        Uninstalls the addon using the InstallInfo of the addon
        """
        if installinfo is None:
            if not self.addon.infomanager.hasInstallInfo(self.addon.basename): return self.STATUS_NO_INSTALL_INFO
            installinfo = self.addon.infomanager.getInstallInfo(self.addon.basename)
        # Unload first so no files are in use while we delete them.
        if self.addon.es_load in self._getAddonSet(): es.unload(self.addon.es_load)
        # Remove every recorded file, then prune directories left empty
        # (deepest paths first), then drop the autoload cfg and install record.
        for filename in filter(os.path.isfile, installinfo.getFiles()):
            os.remove(filename)
        for dir in filter(os.path.isdir, sorted(installinfo.getDirs(), reverse=True)):
            if not os.listdir(dir):
                os.rmdir(dir)
        for path in filter(os.path.isfile, (self.autoloadpath + self.addon.basename + '.cfg', self.addon.infomanager.getInfoPath(self.addon.basename))):
            os.remove(path)
        return self.STATUS_SUCCESSFUL

    """ Internal functions """
    def _extract(self, addon_file, installinfo):
        """ Extracts the zip file "addon_file" while updating InstallInfo instance "installinfo" """
        addon_zip = zipfile.ZipFile(addon_file)
        ziplist = sorted(addon_zip.namelist())
        # Adds each directory to the addon's install info and creates the directory if necessary
        for dir in [x for x in ziplist if x.endswith('/')]:
            installinfo.addDir(self.gamepath + dir)
            if not os.path.isdir(self.gamepath + dir):
                os.mkdir(self.gamepath + dir)
        # Adds the __init__.py and __init__.pyc files that will be generated to the list of files associated with the addon
        init_path = es.getAddonPath(self.addon.es_load) + '/__init__.py'
        installinfo.addFile(init_path)
        installinfo.addFile(init_path + 'c')
        # Adds each file's path to the addon's install info and extracts the file
        for filename in [x for x in ziplist if not x.endswith('/')]:
            installinfo.addFile(self.gamepath + filename)
            if filename.endswith('.py'): # Assume every .py file will generate a .pyc
                installinfo.addFile(self.gamepath + filename + 'c')
            newfile = path.Path(self.gamepath + filename)
            newfile.write_bytes(addon_zip.read(filename))
        # Stores the version number for update purposes and saves the install info for the addon
        installinfo.setVersion(self.addon.currentversion)
        self.addon.infomanager.saveInstallInfo(self.addon.basename, installinfo)
        addon_zip.close()

    def _getAddonSet(self):
        """ Returns a set of all ESS and ESP addons """
        # ESS scripts plus Python addons, the latter normalised from their
        # dotted module names to path-style names.
        addon_list = set(es.createscriptlist())
        for addon in es.addons.getAddonList():
            addon_list.add(addon.__name__[:addon.__name__.rfind('.')].replace('.', '/'))
        return addon_list
return addon_list
import esamlib # Don't move this! Some stuff in esamlib depends on the above installlib classes.
def getInstaller(basename, addon_file=None, log=esamlib.dbglog):
    """
    Returns an AddonInstaller instance for the given basename. When "addon_file"
    is provided it will be the zip file installed, otherwise esamlib will provide
    the zip file from the ESAM.
    """
    addon = Addon(basename, addon_file) if addon_file else esamlib.getAddon(basename, log)
    # esamlib.getAddon may fail and return None; propagate that to the caller.
    return None if addon is None else AddonInstaller(addon)
def getInstallerFromPath(basename, path, log=esamlib.dbglog):
    """ Returns an AddonInstaller instance for a zip on disk """
    # NOTE(review): the file handle is handed to the Addon and never explicitly
    # closed (it must stay open until install() reads it) -- confirm acceptable.
    return getInstaller(basename, open(path, 'rb'), log)
###
def _executeAutoLoad():
    """ Loads all installed addons marked for automatic loading """
    autoloadpath = es.getAddonPath('_autoload') + '/'
    if not os.path.isdir(autoloadpath):
        os.mkdir(autoloadpath)
    # Queue an es_xmexec for every .cfg in the autoload directory; each cfg
    # contains the es_xload line written by AddonInstaller.setAutoload().
    for filename in [x for x in os.listdir(autoloadpath) if x.endswith('.cfg') and os.path.isfile(autoloadpath + x)]:
        es.server.queuecmd('es_xmexec ../addons/eventscripts/_autoload/' + filename)
"""
>>> import installlib
>>> randmaps = installlib.getInstaller('randmaps')
>>> randmaps.install(True) # Install RandMaps with it autoloaded
0 # Install successful
>>> randmaps.update()
4 # Already up to date
>>> randmaps.update(True) # Force update
0 # Update successful
>>> randmaps.uninstall()
0 # Uninstall successful
"""
"""
import es
import installlib
basename = 'iptocountry' # Change to the name of your addon
# Designate a file as part of your addon, to be cleaned up when updated or uninstalled
installinfo = installlib.infomanager.getInstallInfo(basename)
installinfo.addFile(es.getAddonPath('_libs') + '/python/iptocountry.db') # This file is generated by IPToCountry
installlib.infomanager.saveInstallInfo(basename, installinfo)
"""
| [
"ayuto@web.de"
] | ayuto@web.de |
2fbf2ae8e89eae703b9212d1fb08bf1936d70d6a | 589e6909d51b65accb84b47045416ba721ebf0fe | /packets/tcp.py | 4682b2c80480d1a9acf4f4c579ee8a23f2d23f1e | [] | no_license | Denis-Source/sniffer_ru | 69a5f2c30f671e0167823e11f998d3461a91426a | a78ab5e2bc01c78fc0575db922afd891970a1a78 | refs/heads/master | 2023-01-24T05:17:00.504922 | 2020-12-16T19:42:34 | 2020-12-16T19:42:34 | 322,084,584 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | import struct
from packets.ipv4 import IPv4Packet
class TCPPacket(IPv4Packet):
"""
Клас пакета TCP
Аргументы:
raw_data - в формате потока битов.
Наследует класс IPv4Packet.
Атрибуты, которые унаследованы:
id_counter : счетчик, используеммый для подсчета количества пакетов;
proto : версия протокола в формате int;
proto_str : версия протокола в формате str;
ttl : время жизни пакета;
src : адрес источника пакета в формате IPv4;
target : афдрес цели пакета в формате IPv4;
data : пакет в формате bytes;
time : время захвата пакета в формате asctime.
Атрибуты:
payload : данные, что являются полезной нагрузкой пакета в формате потока битов;
src_port : порт источника;
dest_port : порт назначения;
sequence;
acknowledgement;
Флаги:
flag_urg;
flag_ack;
flag_psh;
flag_rst;
flag_syn;
flag_fin.
"""
def __init__(self, raw_data):
IPv4Packet.__init__(self, raw_data)
IPv4Packet.id_counter -= 1
(self.src_port, self.dest_port, self.sequence, self.acknowledgement, offset_reserved_flags) = struct.unpack(
"! H H L L H",
raw_data[:14])
self.offset = (offset_reserved_flags >> 12) * 4
self.flag_urg = (offset_reserved_flags & 32) >> 5
self.flag_ack = (offset_reserved_flags & 16) >> 4
self.flag_psh = (offset_reserved_flags & 8) >> 3
self.flag_rst = (offset_reserved_flags & 4) >> 2
self.flag_syn = (offset_reserved_flags & 2) >> 1
self.flag_fin = offset_reserved_flags & 1
self.payload = raw_data[self.offset:]
def __str__(self):
"""
Превращение пакета в форму, удобную для чтения
Возрвращает строку, имеющюю следующий формат:
Ethernet Frame: #1 Time: Tue Dec 8 00:01:28 2019
TTL: 57 Protocol: TCP
Source: 255.255.255.255:80, Destination: 192.168.0.0:65535
Flags: urg: 0, ack: 1, fsh: 1, rst 1, syn: 1, fin: 1
Data:
17 03 03 00 27 73 02 12 E6 F3 6F 3E 1E 43 F9 7B 1B C7 9C D6 35
:return: str
"""
return f"\nEthernet Frame: #{self.id_counter} \tTime: {self.time}\n" \
f"TTL: {self.ttl} Protocol: {self.proto_str}\n" \
f"Source: {self.src}:{self.src_port}, Destination: {self.target}:{self.dest_port}\n" \
f"Flags: urg: {self.flag_urg}, ack: {self.flag_ack}, fsh: {self.flag_psh}, " \
f"rst {self.flag_rst}, syn: {self.flag_rst}, fin: {self.flag_fin}\n" \
f"Data: \n{IPv4Packet.bytes_to_hex(self.payload)}"
| [
"zolotoverkhdenis@gmail.com"
] | zolotoverkhdenis@gmail.com |
605cceafb7745578da0e5040c7db03dccc5e5ffc | c5b69745b12ad36241fa792af44480eb70918cb0 | /ibis/tests/expr/test_literal.py | d43226d69d94263d4b54d263c8d01711f06d49e2 | [
"Apache-2.0"
] | permissive | vishalbelsare/ibis | bdbfde79086b268f4592cde009e0ffa52ece97e6 | 3fd6afb223fa442ccd0d9db69a74a431d5e7bcca | refs/heads/master | 2023-08-16T16:23:00.535467 | 2021-11-23T15:33:35 | 2021-11-23T15:33:35 | 141,067,229 | 0 | 0 | Apache-2.0 | 2021-11-29T20:12:43 | 2018-07-16T00:06:27 | Python | UTF-8 | Python | false | false | 1,388 | py | import ibis
from ibis.expr import datatypes
from ibis.expr.operations import Literal
from ibis.tests.util import assert_pickle_roundtrip
def test_literal_equality_basic():
a = ibis.literal(1).op()
b = ibis.literal(1).op()
assert a == b
assert hash(a) == hash(b)
def test_literal_equality_int_float():
# Note: This is different from the Python behavior for int/float comparison
a = ibis.literal(1).op()
b = ibis.literal(1.0).op()
assert a != b
def test_literal_equality_int16_int32():
# Note: This is different from the Python behavior for int/float comparison
a = Literal(1, datatypes.int16)
b = Literal(1, datatypes.int32)
assert a != b
def test_literal_equality_int_interval():
a = ibis.literal(1).op()
b = ibis.interval(seconds=1).op()
assert a != b
def test_literal_equality_interval():
a = ibis.interval(seconds=1).op()
b = ibis.interval(minutes=1).op()
assert a != b
# Currently these does't equal, but perhaps should be?
c = ibis.interval(seconds=60).op()
d = ibis.interval(minutes=1).op()
assert c != d
def test_pickle_literal():
a = Literal(1, datatypes.int16)
b = Literal(1, datatypes.int32)
assert_pickle_roundtrip(a)
assert_pickle_roundtrip(b)
def test_pickle_literal_interval():
a = ibis.interval(seconds=1).op()
assert_pickle_roundtrip(a)
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
fa48a0d9157450e21a1f4c3f56fe74c3562a244c | 8632e27257ad2cc27251a56693282b3c4ea635d0 | /src/my_project/my_project/urls.py | b9d1bd4a88fcb65a0c95e80bd15f4ae4e76ec6ee | [] | no_license | 160545unsaac/Docker_Django_Postgree_Simple_Crud | 1fa384eaadc42aa9d50aa244a3614c9b3ed305a8 | ba300b50f884ff3c3710f7bb83c46a5d4bfa8b1e | refs/heads/main | 2023-04-01T21:13:32.057982 | 2021-04-10T14:52:35 | 2021-04-10T14:52:35 | 356,608,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | """my_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import include, path
from .router import router
urlpatterns = [
url('admin/', admin.site.urls),
path('api/', include(router.urls))
]
| [
"noreply@github.com"
] | 160545unsaac.noreply@github.com |
f68cbe88992b8c2fbede1b8fb70ef94811b7ae46 | 6ff3d92eda3564d014f5a482b8752b5682720a60 | /openfile.py | d0734109b1cc31466111b965f5e18932a9856529 | [] | no_license | NikolayTheBoss/LP | 3b7e7d34f6445499f54f129ac52d0c232b9bbf95 | 5cd88ece5ac9a3373ba6acedd01a04ed6d1f806f | refs/heads/master | 2023-02-03T09:53:47.555340 | 2020-12-29T23:21:12 | 2020-12-29T23:21:12 | 322,188,253 | 0 | 0 | null | 2020-12-17T11:24:11 | 2020-12-17T05:14:18 | Jupyter Notebook | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
""" Это тестовый файл по открытию файлов"""
def main(*arg):
with open(*arg, 'w') as fout:
fout.write("Hello, World!")
return None
def read_hello(*arg):
path = arg[0]
for fline in open(path, 'r'):
print(fline)
if __name__ == '__main__':
finput = input("Enter the path to the File: ")
print(finput)
main(finput)
read_hello("c:\\users\\user\\text\\test.txt")
while True:
reply = input('Enter text:')
if reply == 'stop':
break
print(reply.upper())
print(reply.upper())
| [
"tnn80@yandex.ru"
] | tnn80@yandex.ru |
c4edf23c3518f6e0b30abb391f2fc6aeac70c547 | 4b6861690f5ef67d4232e133fb35e95e0eeb96a7 | /es_distributed/test.py | 1ba7e6acd2047cf5ab4101dd7cdadde0d271aad9 | [] | no_license | youshaox/deep-neuroevolution | 9bc57fa59cda6ac3b756bdd5f255aaea68123e08 | 66587c80ef21e662a2c81fea42b603a99b90d61c | refs/heads/master | 2020-04-14T07:25:49.646973 | 2019-03-20T23:33:55 | 2019-03-20T23:33:55 | 163,711,908 | 0 | 0 | null | 2019-01-01T03:57:06 | 2019-01-01T03:57:06 | null | UTF-8 | Python | false | false | 1,801 | py | from email.mime.multipart import MIMEMultipart
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import time
import datetime
import requests
SENDER_EMAIL = "Sometimesnaive@126.com"
SENDER_PASSWORD = 'ckRYMMHF6twhCMsm'
def send_email(sender_email, sender_password, starttime, endtime):
reciever_email = 'shawnxys2016@gmail.com'
cc_email = list().append(sender_email)
message = MIMEMultipart()
message['From'] = sender_email
message['cc'] = cc_email
message['To'] = reciever_email
ts = time.time()
time_suffix = str(datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H'))
subject = 'Research实验结果: NSR-ES ' + time_suffix
message['Subject'] = Header(subject, 'utf-8')
# 邮件正文内容
difference = endtime-starttime
time_spending = str(divmod(difference.days * 86400 + difference.seconds, 60))
try:
hostname = requests.request('GET', 'http://myip.dnsomatic.com').text
except Exception as e:
from requests import get
hostname = get('https://api.ipify.org').text
email_context = """
服务器{}实验已经完成。
开始时间: {}
结束时间: {}
耗费时间: {}
""".format(hostname, starttime, endtime, time_spending)
message.attach(MIMEText(email_context, 'plain', 'utf-8'))
server = smtplib.SMTP_SSL("smtp.126.com", 465) # 发件人邮箱中的SMTP服务器,端口是25
server.login(sender_email, sender_password)
server.sendmail(sender_email, [reciever_email, ], message.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
starttime = datetime.datetime.now()
# time.sleep(2)
endtime = datetime.datetime.now()
send_email(SENDER_EMAIL, SENDER_PASSWORD, starttime, endtime) | [
"youshao.xiao@shuyun.com"
] | youshao.xiao@shuyun.com |
2e71f6ffd062bf3479e6b7de7cd4ab0865859365 | 553d60bcd74ddafdd86cb1de7a1434c3a09cce47 | /blog/urls.py | 802e2f12359703a0384f60b77944d8a1a759d4b9 | [] | no_license | asheed/djangoBlog | d1a7baef217a32a255c346b9d42b9a760afbba75 | 5ab23d610880cfb1db56f7ac0b23bac63a1ebb86 | refs/heads/master | 2020-04-19T09:52:47.925418 | 2017-01-16T13:37:41 | 2017-01-16T13:37:41 | 67,637,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | from django.conf.urls import url
from blog.views import *
urlpatterns = [
# Example: /
url(r'^$', PostLV.as_view(), name='index'),
# Example: /post/ (same as /)
url(r'^post/$', PostLV.as_view(), name='post_list'),
# Example: /post/django-example/
url(r'^post/(?P<slug>[-\w]+)/$', PostDV.as_view(), name='post_detail'),
# Example: /archive/
url(r'^archive/$', PostAV.as_view(), name='post_archive'),
# Example: /2012/
url(r'^(?P<year>\d{4})/$', PostYAV.as_view(), name='post_year_archive'),
# Example: /2012/nov/
url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$', PostMAV.as_view(), name='post_month_archive'),
# Example: /2012/nov/10/
url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$', PostDAV.as_view(), name='post_day_archive'),
# Example: /today/
url(r'^today/$', PostTAV.as_view(), name='post_today_archive'),
] | [
"woojin7.ahn@samsung.com"
] | woojin7.ahn@samsung.com |
577c50a2c7844508dfb431b3bdeba5da905c6b69 | 22160ef43b19721ac456c33826ae8c1321907146 | /b.py | 82e1de5c3f639bf98c45c8a6f86351eee5a907e8 | [] | no_license | YATHARTHVARDANdtu/Multi-Thread-Client-Server | 99e7f38826ff854fb89ac87ffa241a18b7b389e6 | 6b4035bdfe8d60a62af948b77c5af8910dde6773 | refs/heads/main | 2023-07-21T21:14:22.621145 | 2021-09-01T10:04:14 | 2021-09-01T10:04:14 | 402,012,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,941 | py | import socket
import threading
from queue import Queue
IP = socket.gethostbyname(socket.gethostname())
PORT = 5566
ADDR = (IP, PORT)
SIZE = 1024
FORMAT = "utf-8"
DISCONNECT_MSG = "!DISCONNECT"
opponentTurn = "Opponent's Turn"
yourTurn = "Your Turn"
lost = "You Lose"
Win = "You Win"
#available boards for match (currently = 5)
boards = []
#defining a board for the match
class memory:
board = [['|','|','|'],['|','|','|'],['|','|','|']]
turn = -1
moves = 0
flag = [0,0]
result = "unallocated"
def reset(self):
self.turn = 0
self.moves = 0
self.reset = "unallocated"
self.board = [['|','|','|'],['|','|','|'],['|','|','|']]
def changeTurn(self):
if(self.turn == 1):
self.turn = 2
else:
self.turn = 1
def start(self):
self.turn = 2
def move(self,x,y,value):
self.board[x][y] = value
self.moves = self.moves+1
def win(self,player):
if(self.moves < 5):
return False
else:
if(self.board[0][0] == self.board[0][1]==self.board[0][2] and self.board[0][2]!='|'):
result = "finished"
return True
elif(self.board[1][0] == self.board[1][1]==self.board[0][2] and self.board[1][2]!='|'):
result = "finished"
return True
elif(self.board[2][0] == self.board[2][1]==self.board[2][2] and self.board[2][2]!='|'):
result = "finished"
return True
elif(self.board[0][0] == self.board[1][0]==self.board[2][0] and self.board[0][0]!='|'):
result = "finished"
return True
elif(self.board[0][1] == self.board[1][1]==self.board[2][1] and self.board[0][1]!='|'):
result = "finished"
return True
elif(self.board[0][2] == self.board[1][2]==self.board[2][2] and self.board[0][2]!='|'):
result = "finished"
return True
elif(self.board[0][0] == self.board[1][1]==self.board[2][2] and self.board[0][0]!='|'):
result = "finished"
return True
elif(self.board[0][2] == self.board[1][1]==self.board[2][0] and self.board[0][2]!='|'):
result = "finished"
return True
else:
return False
#list of players requesting an opponent
players = Queue(0)
#dictionary to keep the corresponding board
users = {}
foundMatchBoard = {}
def handle_client(conn, addr):
msg = conn.recv(SIZE).decode(FORMAT)
print(f"[NEW SOLDIER] {addr} connected. with username {msg} 🐱👤")
if(players.qsize() == 0):
players.put(msg)
cont = True
while(cont):
if msg in users.keys():
opponent = "Your opponent is " + users[msg] +"\n Waiting for Match to Start .. \n"
del users[msg]
conn.send(opponent.encode(FORMAT))
cont = False
memLoc = -1
cont = True
while(cont):
if msg in users.keys():
memLoc = users[msg]
del users[msg]
cont = False
#TurnMessage = "2"
#conn.send(TurnMessage.encode(FORMAT))
while True:
boards[memLoc].flag[0] = 1
boards[memLoc].turn = 1
i = 0
while(boards[memLoc].flag[1]==1 and boards[memLoc].turn==1):
if i==0:
conn.send(opponentTurn.encode(FORMAT))
i = i+1
if(boards[memLoc].result == "finished"):
conn.send(lost.encode(FORMAT))
break
else:
print(boards[memLoc].turn)
conn.send(yourTurn.encode(FORMAT))
x = conn.recv(1024).decode(FORMAT)
y = conn.recv(1024).decode(FORMAT)
boards[memLoc].flag[0] = 0
#boards[memLoc].move(int(x),int(y),'O')
#if(boards[memLoc].win(1) == True):
# conn.send(Win.encode(FORMAT))
# break
#else:
# boards[memLoc].changeTurn()
conn.close()
else:
while(players.qsize() == 0):
if(players.qsize()>0):
break
opponent = players.get()
cont = True
memLoc = -1
while(cont):
for i in range(len(boards)):
if(boards[i].result == "unallocated"):
cont = False
boards[i].result = "allocated"
memLoc = i
users[opponent] = msg
oppo = "Your opponent is " + opponent
conn.send(oppo.encode(FORMAT))
if opponent in users.keys():
print("Not deleted yet")
else:
print("Key deleted")
users[opponent] = memLoc
#TurnMessage = "1"
#conn.send(TurnMessage.encode(FORMAT))
while True:
boards[memLoc].flag[1] = 1
boards[memLoc].turn = 0
i = 0
while(boards[memLoc].flag[0]==1 and boards[memLoc].turn==0):
if i==0:
conn.send(opponentTurn.encode(FORMAT))
i = i+1
if(boards[memLoc].result == "finished"):
conn.send(lost.encode(FORMAT))
break
else:
print(boards[memLoc].turn)
conn.send(yourTurn.encode(FORMAT))
x = conn.recv(1024).decode(FORMAT)
y = conn.recv(1024).decode(FORMAT)
# boards[memLoc].move(int(x),int(y),'O')
boards[memLoc].flag[1] = 0
#if(boards[memLoc].win(1) == True):
# conn.send(Win.encode(FORMAT))
# break
#else:
# boards[memLoc].changeTurn()
boards[memLoc].reset()
conn.close()
def main():
print("[STARTING] Server is starting...")
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
server.listen()
print(f"[LISTENING] Server is listening on {IP}:{PORT}")
boards.append(memory())
boards.append(memory())
boards.append(memory())
boards.append(memory())
boards.append(memory())
for obj in boards:
print(obj.moves, obj.result,sep = ' ')
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | YATHARTHVARDANdtu.noreply@github.com |
751a731b11d4d64790c1baf3c734d2ecdd5bbf11 | db393b8d48aadbb1216ddff37f86579041d6dbaf | /code.py | 44d083f376c5b7846bd47a3d08c15babc8de9e7f | [] | no_license | corei5/converts-all-pdfs-in-directory-pdfDir-saves-all-resulting-txt-files-to-txtdir | e2ab1fa2d99f1ccee34b917066f1b66dfdd2e3c6 | 27eb39dae3e7de44f9a9fb25bf5b5e38a1447fa2 | refs/heads/master | 2021-08-19T12:34:53.139929 | 2017-11-26T09:10:44 | 2017-11-26T09:10:44 | 112,069,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import os
import sys, getopt
#converts pdf, returns its text content as a string
def convert(fname):
pages=None
if not pages:
pagenums = set()
else:
pagenums = set(pages)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
infile = open(fname, 'rb')
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close
#print(text)
return text
#converts all pdfs in directory pdfDir, saves all resulting txt files to txtdir
def convertMultiple(pdfDir, txtDir):
if pdfDir == "": pdfDir = os.getcwd() + "\\" #if no pdfDir passed in
for pdf in os.listdir(pdfDir): #iterate through pdfs in pdf directory
fileExtension = pdf.decode("utf-8").split(".")[-1]
#print(type(fileExtension))
if fileExtension == "pdf":
pdfFilename = pdfDir + pdf
text = convert(pdfFilename) #get string of text content of pdf
#print(type(txtDir))
#print(type(pdf))
textFilename = txtDir.decode("utf-8") + pdf.decode("utf-8") + ".txt"
textFile = open(textFilename, "w", encoding="utf-8") #make text file
textFile.write(text) #write text to text file
pdfDir = "C:/Users/tourist800/python/tf_idf/Dataset/"
txtDir = "C:/Users/tourist800/python/tf_idf/text_dataset/"
convertMultiple(pdfDir.encode("utf-8"),txtDir.encode("utf-8") )
| [
"noreply@github.com"
] | corei5.noreply@github.com |
ae33e5e64e5edcb987ff8edd262f7a45e2a61f7b | 48c4dda8fbecb5bc9506eb0a318508c9a9f37aca | /deep learning from scratch.py | e10c1b2aa591751efd915dc08f92debac8407696 | [] | no_license | bigeyesung/DLkaggle | f59e8e2fdac430fd5e97cfc67e63c837a8b12cee | f57b10740b206ecff1bcbfdc7d4436ac8dcac28d | refs/heads/master | 2023-07-05T22:16:03.042595 | 2021-08-07T15:48:54 | 2021-08-07T15:48:54 | 262,594,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
img_rows, img_cols = 28, 28
num_classes = 10
def prep_data(raw):
y = raw[:, 0]
out_y = keras.utils.to_categorical(y, num_classes)
x = raw[:,1:]
num_images = raw.shape[0]
out_x = x.reshape(num_images, img_rows, img_cols, 1)
out_x = out_x / 255
return out_x, out_y
fashion_file = "../input/fashionmnist/fashion-mnist_train.csv"
fashion_data = np.loadtxt(fashion_file, skiprows=1, delimiter=',')
x, y = prep_data(fashion_data)
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_7 import *
print("Setup Complete")
# 1) Start the model
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D
fashion_model= Sequential()
# Add the first layer
fashion_model.add(Conv2D(12,kernel_size=3,activation='relu',input_shape=(img_rows,img_cols,1)))
#3) Add the remaining layers
fashion_model.add(Conv2D(20,kernel_size=3,activation='relu'))
fashion_model.add(Conv2D(20,kernel_size=3,activation='relu'))
fashion_model.add(Flatten())
fashion_model.add(Dense(100,activation='relu'))
fashion_model.add(Dense(num_classes, activation='softmax'))
# 4) Compile Your Model
fashion_model.compile(loss=keras.losses.categorical_crossentropy,optimizer='adam',metrics=['accuracy'])
# 5) Fit The Model
fashion_model.fit(x,y,batch_size=100,epochs=4,validation_split=0.2)
# 6) Create A New Model
second_fashion_model = Sequential()
second_fashion_model.add(Conv2D(12,
activation='relu',
kernel_size=3,
input_shape = (img_rows, img_cols, 1)))
# Changed kernel sizes to be 2
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
# added an addition Conv2D layer
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
second_fashion_model.add(Flatten())
second_fashion_model.add(Dense(100, activation='relu'))
# It is important not to change the last layer. First argument matches number of classes. Softmax guarantees we get reasonable probabilities
second_fashion_model.add(Dense(10, activation='softmax'))
second_fashion_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
second_fashion_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2)
#second_fashion_model.add(Conv2D(30,kernel_size=3,activation='relu',input_shape=(img_rows,img_cols,1)))
#second_fashion_model.fit(x,y,batch_size=100,epochs=4,validation_split=0.2)
| [
"sungchenhsi@gmail.com"
] | sungchenhsi@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.