blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71b22fc91381f282dc98817aa113784b5ba94563 | 714fadd433c05b126b7909e1de14ee00bd2b0096 | /tests/test_http_client/test_http_service.py | 1dcba7edf292aa326727edd97db710b4b948af70 | [
"MIT"
] | permissive | Zheaoli/huskar-python | 4b1a30fc6939d387e09aaa484e7afe3ca190d293 | f62a2d3636b2804a552bf59f76903cf2841d75c9 | refs/heads/master | 2020-08-03T03:59:19.279390 | 2019-09-27T06:34:09 | 2019-09-27T06:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,669 | py | # -*- coding: utf-8 -*-
from mock import Mock
import pytest
import gevent
from huskar_sdk_v2.http.components.service import Service
# Service-registry snapshot expected after the first fetch: one node keyed
# by "<ip>_<port>", mirroring the legacy huskar payload layout.
initial_service_data = {u'192.168.1.1_17400': {
    u'ip': u'192.168.1.1',
    u'meta': {
        u'control_daemon_port': 5544,
        u'protocol': u'thrift',
        u'pushSequence': 4974,
        u'soaVersion': u'0.14.5.3',
        u'weight': 1},
    u'name': u'arch.test',
    u'port': {u'main': 17400},
    u'state': u'up'},
}
# The extra node pushed by the 'test_data_changed.txt' mock payload; tests
# merge it into initial_service_data to form the expected post-update state.
added_service_data = {"192.168.1.1_23471": {
    "ip": "192.168.1.1",
    "state": "up",
    "meta": {
        "control_daemon_port": 5544,
        "soaVersion": "0.14.5.3",
        "protocol": "thrift", "weight": 1,
        "pushSequence": 4975},
    "name": "arch.test",
    "port": {"main": 23471}}
}
@pytest.fixture
def service_component(request, requests_mock, started_client):
    """Service component backed by the live (mock-transport) client."""
    # Wait for the client's initial handshake before handing it to tests.
    assert started_client.connected.wait(1)
    return Service('arch.test', 'alpha-stable')
@pytest.fixture
def fake_service_component(started_file_cache_client,
                           fake_service_with_file_cache_client):
    """Service component backed by the file-cache client.

    Pre-registers config/switch/service watches so the cache is populated
    before the component is returned.
    """
    started_file_cache_client.watched_configs.add_watch(
        "arch.test", 'overall')
    started_file_cache_client.watched_switches.add_watch(
        "arch.test", 'another-cluster')
    started_file_cache_client.watched_services.add_watch(
        "arch.test", 'alpha-stable')
    return fake_service_with_file_cache_client('arch.test', 'alpha-stable')
def test_service_should_yield_the_same_format_as_old_huskar(
        service_component, started_client,
        fake_service_component):
    """Both live and file-cache components emit the legacy huskar format."""
    assert started_client.connected.wait(1)
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    # Give the file-cache client time to sync before asserting on it.
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
def test_service_changed_should_change_service_nodes(
        requests_mock, service_component, started_client,
        fake_service_component):
    """A pushed change merges the new node into both components' views."""
    assert started_client.connected.wait(1)
    # Replay a long-poll payload that adds one node.
    requests_mock.set_result_file('test_data_changed.txt')
    assert requests_mock.wait_processed()
    new_service_data = dict(initial_service_data)
    new_service_data.update(added_service_data)
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == new_service_data
    # File-cache client syncs asynchronously.
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == new_service_data
def test_service_deleted_should_change_service_nodes(
        requests_mock, service_component, started_client,
        fake_service_component):
    """Deleting every node empties both components and fires the hook."""
    listener = Mock()
    assert started_client.connected.wait(1)
    service_component.register_hook_function(
        'arch.test', 'alpha-stable', listener)
    requests_mock.set_result_file('test_data_deleted.txt')
    assert requests_mock.wait_processed()
    # One call on registration (initial snapshot) + one for the deletion.
    assert listener.call_count == 2
    listener.assert_any_call({})
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == {}
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == {}
def test_service_node_changed_should_notify_listeners(
        requests_mock, service_component, started_client,
        fake_service_component):
    """Hook functions fire on registration and again on every update."""
    assert started_client.connected.wait(1)
    listener = Mock()
    fake_listener = Mock()
    service_component.register_hook_function(
        'arch.test', 'alpha-stable', listener)
    fake_service_component.register_hook_function(
        'arch.test', 'alpha-stable', fake_listener)
    # Registration replays the current snapshot immediately.
    listener.assert_called_once_with(initial_service_data)
    gevent.sleep(0.5)
    fake_listener.assert_called_with(initial_service_data)
    requests_mock.set_result_file('test_data_changed.txt')
    assert requests_mock.wait_processed()
    new_service_data = dict(initial_service_data)
    new_service_data.update(added_service_data)
    listener.assert_any_call(new_service_data)
    gevent.sleep(0.5)
    fake_listener.assert_any_call(new_service_data)
def test_file_client_add_watch_after_data_already_processed(
        requests_mock, service_component, started_client,
        fake_service_component):
    """Registering a watch after data arrived still replays the snapshot."""
    # Forget the cluster mapping so the watch is genuinely re-added below.
    fake_service_component.client.app_id_cluster_map.pop('arch.test', None)
    assert started_client.connected.wait(1)
    listener = Mock()
    fake_listener = Mock()
    service_component.register_hook_function(
        'arch.test', 'alpha-stable', listener)
    listener.assert_called_once_with(initial_service_data)
    gevent.sleep(0.5)
    assert ('alpha-stable' not in
            fake_service_component.client.app_id_cluster_map['arch.test'])
    fake_service_component.register_hook_function(
        'arch.test', 'alpha-stable', fake_listener)
    # The late registration must both replay data and record the watch.
    fake_listener.assert_called_with(initial_service_data)
    assert ('alpha-stable' in
            fake_service_component.client.app_id_cluster_map['arch.test'])
def test_service_batch_add_watch(requests_mock, service_component,
                                 started_client, started_file_cache_client,
                                 fake_service_component):
    """preprocess_service_mappings batch-adds watches on both client kinds."""
    # Empty mappings must be accepted as a no-op.
    service_component.preprocess_service_mappings({})
    fake_service_component.preprocess_service_mappings({})
    assert service_component.preprocess_service_mappings({
        'arch.test1': {'that-cluster'},
        'arch.test2': {'this-cluster'},
    }) is True
    assert fake_service_component.preprocess_service_mappings({
        'arch.test1': {'that-cluster'},
        'arch.test2': {'this-cluster'},
    }) is True
    # Both maps contain the fixture's original watch plus the new ones.
    assert dict(started_client.watched_services.app_id_cluster_map) == {
        'arch.test': {'alpha-stable'},
        'arch.test1': {'that-cluster'},
        'arch.test2': {'this-cluster'},
    }
    fake_services = started_file_cache_client.watched_services
    assert dict(fake_services.app_id_cluster_map) == {
        'arch.test': {'alpha-stable'},
        'arch.test1': {'that-cluster'},
        'arch.test2': {'this-cluster'},
    }
def test_legacy_interface(requests_mock, service_component):
    """The deprecated set_min_server_num API stays callable (smoke test)."""
    service_component.set_min_server_num(1)
def test_add_service_in_the_middle_of_runtime(
        requests_mock, service_component,
        started_client, fake_service_component):
    """Updates for an unwatched cluster are dropped until add_service()."""
    assert started_client.connected.wait(1)
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    # Push data for 'beta-stable', which no one is watching yet.
    requests_mock.add_response(
        r'{"body": {"service": {"arch.test": {"beta-stable": '
        r'{"192.168.1.1_9999": {"value": "{\"ip\": \"192.168.1.1\"'
        r', \"state\": \"up\", \"meta\": {\"control_daemon_port\": 5544,'
        r' \"soaVersion\": \"0.14.5.3\", \"protocol\": \"thrift\",'
        r' \"weight\": 1, \"pushSequence\": 4975}, \"name\":'
        r' \"arch.test\", \"port\": {\"main\": 9999}}"}}}}},'
        r' "message": "update"}')
    assert requests_mock.wait_processed()
    # The unwatched cluster stays empty.
    assert service_component.get_service_node_list(
        'arch.test', 'beta-stable') == {}
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'beta-stable') == {}
    # Start watching the new cluster at runtime...
    assert service_component.add_service('arch.test', 'beta-stable',
                                         timeout=10)
    assert fake_service_component.add_service('arch.test', 'beta-stable',
                                              timeout=10)
    # ...and re-push the same data: it must now be retained.
    requests_mock.add_response(
        r'{"body": {"service": {"arch.test": {"beta-stable":'
        r' {"192.168.1.1_9999": {"value": "{\"ip\":'
        r' \"192.168.1.1\", \"state\": \"up\", \"meta\":'
        r' {\"control_daemon_port\": 5544, \"soaVersion\": \"0.14.5.3\",'
        r' \"protocol\": \"thrift\", \"weight\": 1, \"pushSequence\":'
        r' 4975}, \"name\": \"arch.test\", \"port\": {\"main\": 9999'
        r'}}"}}}}}, "message": "update"}')
    assert requests_mock.wait_processed()
    assert service_component.get_service_node_list(
        'arch.test', 'beta-stable')
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'beta-stable')
def test_service_should_not_update_if_watch_is_removed(
        requests_mock, service_component,
        started_client, fake_service_component):
    """After unwatch_service, pushed updates no longer mutate state."""
    assert started_client.connected.wait(1)
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    gevent.sleep(0.5)
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    # Remove the watch on both components.
    assert service_component.unwatch_service(
        'arch.test', 'alpha-stable', timeout=2.0)
    assert fake_service_component.unwatch_service(
        'arch.test', 'alpha-stable', timeout=2.0)
    # Push a change for the now-unwatched cluster.
    requests_mock.add_response(
        r'{"body": {"service": {"arch.test": {"alpha-stable": '
        r'{"192.168.1.1_9999": {"value": "{\"ip\": \"192.168.1.1\",'
        r' \"state\": \"up\", \"meta\": {\"control_daemon_port\": 5544,'
        r' \"soaVersion\": \"0.14.5.3\", \"protocol\": \"thrift\", \"weight\":'
        r' 1, \"pushSequence\": 4975}, \"name\": \"arch.test\", \"port\": '
        r'{\"main\": 9999}}"}}}}}, "message": "update"}')
    assert requests_mock.wait_processed()
    # Both components still report the pre-unwatch snapshot.
    assert service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
    assert fake_service_component.get_service_node_list(
        'arch.test', 'alpha-stable') == initial_service_data
| [
"mozillazg101@gmail.com"
] | mozillazg101@gmail.com |
6e69623a745e215d65a1524e8506cd9057e79e1a | ac1dc63c3316671b04f5826523b64b0e5f7a8154 | /__init__.py | 12d72caf8579d5620658b97bfdc50fab7e824117 | [] | no_license | leonardocfor/multi-robot-vicsek | 0f2b6188fd41415d8130aa0d69fec1f309f64da0 | 88d5c72f671dd4108bbf65d8bff54157371cf018 | refs/heads/master | 2021-07-06T22:55:58.287060 | 2020-10-03T00:30:27 | 2020-10-03T00:30:27 | 192,839,463 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | __all__ = ['bin','etc','lib']
| [
"lecf.77@gmail.com"
] | lecf.77@gmail.com |
d1f409cf102e8f3f8ab14c629e24c68701bf7f54 | 9620f0d4564be92deb2c09da6895cca920e51435 | /app.py | 65516d178d7bbb58e9def80f5954299db70f0f05 | [] | no_license | talha-ghaffar/articler | 14e4825099f55559b4882f852e30b6bf687dea8a | 6037045775f779722077af809ae4a5474a74a28b | refs/heads/master | 2020-04-04T11:04:17.887176 | 2018-11-02T14:33:27 | 2018-11-02T14:33:27 | 155,878,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,481 | py | from flask import Flask, render_template, flash, redirect, url_for, session, request, logging
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
app = Flask(__name__)

# Config MySQL
# NOTE(review): database credentials are hard-coded in source; consider
# loading them from environment variables or a config file.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = '12345'
app.config['MYSQL_DB'] = 'myflaskapp'
# DictCursor makes fetch* return dicts keyed by column name.
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init MYSQL
mysql = MySQL(app)
# Index
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('home.html')
# About
@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html')
# Articles
@app.route('/articles')
def articles():
    """List every article in the database.

    Renders the articles template with either the full result set or a
    'No Articles Found' message when the table is empty.
    """
    # Create cursor
    cur = mysql.connection.cursor()
    try:
        # Get articles
        result = cur.execute("SELECT * FROM articles")
        articles = cur.fetchall()
    finally:
        # Close the cursor.  In the original this call sat after the
        # `return` statements and was never executed, leaking the cursor.
        cur.close()

    if result > 0:
        return render_template('articles.html', articles=articles)
    else:
        msg = 'No Articles Found'
        return render_template('articles.html', msg=msg)
#Single Article
@app.route('/article/<string:id>/')
def article(id):
    """Display one article looked up by its primary key."""
    # Create cursor
    cur = mysql.connection.cursor()
    try:
        # Get article (parameterized query - safe from SQL injection)
        result = cur.execute("SELECT * FROM articles WHERE id = %s", [id])
        article = cur.fetchone()
    finally:
        # The original never closed this cursor; release it explicitly.
        cur.close()
    return render_template('article.html', article=article)
# Register Form Class
class RegisterForm(Form):
    """WTForms form backing the /register page (field lengths validated)."""
    name = StringField('Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=4, max=25)])
    email = StringField('Email', [validators.Length(min=6, max=50)])
    password = PasswordField('Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords do not match')
    ])
    # Second entry only used for the EqualTo check above.
    confirm = PasswordField('Confirm Password')
# User Register
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account from the registration form.

    On a valid POST the password is hashed with sha256_crypt and the user
    row inserted; the visitor is then redirected to the login page.
    """
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        name = form.name.data
        email = form.email.data
        username = form.username.data
        # passlib deprecated `encrypt` in 1.7; `hash` is the same operation.
        password = sha256_crypt.hash(str(form.password.data))

        # Create cursor
        cur = mysql.connection.cursor()
        try:
            # Execute query (parameterized - safe from SQL injection)
            cur.execute("INSERT INTO users(name, email, username, password) VALUES(%s, %s, %s, %s)", (name, email, username, password))
            # Commit to DB
            mysql.connection.commit()
        finally:
            # Close connection
            cur.close()

        flash('You are now registered and can log in', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
# User login
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user against the stored sha256_crypt password hash.

    On success the session is marked logged-in and the user is redirected
    to the dashboard; otherwise the login page is re-rendered with an error.
    """
    if request.method == 'POST':
        # Get Form Fields
        username = request.form['username']
        password_candidate = request.form['password']

        # Create cursor
        cur = mysql.connection.cursor()
        try:
            # Get user by username
            result = cur.execute("SELECT * FROM users WHERE username = %s", [username])
            if result > 0:
                # Get stored hash
                data = cur.fetchone()
                password = data['password']
            else:
                password = None
        finally:
            # In the original, cur.close() sat after `return` statements in
            # both branches and never ran; close the cursor before replying.
            cur.close()

        if password is None:
            error = 'Username not found'
            return render_template('login.html', error=error)

        # Compare Passwords
        if sha256_crypt.verify(password_candidate, password):
            # Passed
            session['logged_in'] = True
            session['username'] = username

            flash('You are now logged in', 'success')
            return redirect(url_for('dashboard'))
        else:
            error = 'Invalid login'
            return render_template('login.html', error=error)

    return render_template('login.html')
# Check if user logged in
def is_logged_in(f):
    """Decorator: allow the view only when the session is authenticated.

    Unauthenticated visitors are flashed a warning and redirected to /login.
    """
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
            return f(*args, **kwargs)
        else:
            flash('Unauthorized, Please login', 'danger')
            return redirect(url_for('login'))
    return wrap
# Logout
@app.route('/logout')
@is_logged_in
def logout():
    """Clear the whole session and send the user back to the login page."""
    session.clear()
    flash('You are now logged out', 'success')
    return redirect(url_for('login'))
# Dashboard
@app.route('/dashboard')
@is_logged_in
def dashboard():
    """Show only the articles authored by the logged-in user."""
    # Create cursor
    cur = mysql.connection.cursor()
    try:
        # Show articles only from the user logged in
        result = cur.execute("SELECT * FROM articles WHERE author = %s", [session['username']])
        articles = cur.fetchall()
    finally:
        # Close the cursor.  The original placed this after the `return`
        # statements, so it never executed and the cursor leaked.
        cur.close()

    if result > 0:
        return render_template('dashboard.html', articles=articles)
    else:
        msg = 'No Articles Found'
        return render_template('dashboard.html', msg=msg)
# Article Form Class
class ArticleForm(Form):
    """WTForms form shared by the add/edit article pages."""
    title = StringField('Title', [validators.Length(min=1, max=200)])
    body = TextAreaField('Body', [validators.Length(min=30)])
# Add Article
@app.route('/add_article', methods=['GET', 'POST'])
@is_logged_in
def add_article():
    """Create a new article authored by the logged-in user."""
    form = ArticleForm(request.form)
    if request.method == 'POST' and form.validate():
        title = form.title.data
        body = form.body.data

        # Create Cursor
        cur = mysql.connection.cursor()

        # Execute (author taken from the session, not from the form)
        cur.execute("INSERT INTO articles(title, body, author) VALUES(%s, %s, %s)",(title, body, session['username']))

        # Commit to DB
        mysql.connection.commit()

        #Close connection
        cur.close()

        flash('Article Created', 'success')

        return redirect(url_for('dashboard'))

    return render_template('add_article.html', form=form)
# Edit Article
@app.route('/edit_article/<string:id>', methods=['GET', 'POST'])
@is_logged_in
def edit_article(id):
    """Edit an existing article identified by `id`.

    NOTE(review): there is no ownership check - any logged-in user can
    edit any article by guessing its id; confirm whether that is intended.
    """
    # Create cursor
    cur = mysql.connection.cursor()

    # Get article by id
    result = cur.execute("SELECT * FROM articles WHERE id = %s", [id])

    article = cur.fetchone()
    cur.close()
    # Get form
    form = ArticleForm(request.form)

    # Populate article form fields.  This overwrites any POSTed values,
    # which is why the POST branch below reads request.form directly
    # instead of form.title.data / form.body.data.
    form.title.data = article['title']
    form.body.data = article['body']

    if request.method == 'POST' and form.validate():
        title = request.form['title']
        body = request.form['body']

        # Create Cursor
        cur = mysql.connection.cursor()
        app.logger.info(title)
        # Execute
        cur.execute ("UPDATE articles SET title=%s, body=%s WHERE id=%s",(title, body, id))
        # Commit to DB
        mysql.connection.commit()

        #Close connection
        cur.close()

        flash('Article Updated', 'success')

        return redirect(url_for('dashboard'))

    return render_template('edit_article.html', form=form)
# Delete Article
@app.route('/delete_article/<string:id>', methods=['POST'])
@is_logged_in
def delete_article(id):
    """Delete the article with the given id.

    NOTE(review): no ownership check - any logged-in user can delete any
    article; confirm whether that is intended.
    """
    # Create cursor
    cur = mysql.connection.cursor()

    # Execute (parameterized - safe from SQL injection)
    cur.execute("DELETE FROM articles WHERE id = %s", [id])

    # Commit to DB
    mysql.connection.commit()

    #Close connection
    cur.close()

    flash('Article Deleted', 'success')

    return redirect(url_for('dashboard'))
if __name__ == '__main__':
    # NOTE(review): the session secret key is hard-coded and debug mode is
    # enabled - both are development-only settings.
    app.secret_key='secret123'
    app.run(debug=True)
| [
"talha.ghaffar@hotmail.com"
] | talha.ghaffar@hotmail.com |
def deleteNode(listnode, node):
    """Remove every element equal to ``node`` from ``listnode`` in place.

    Returns the mutated list.  Prints a message and returns None when the
    list has fewer than two elements, or when ``node`` equals the list
    length (guard kept from the original implementation).
    """
    if len(listnode) < 2:
        print("链列表太短了")
    elif node == len(listnode):
        print("你不能删除最后一个")
    else:
        # Walk with an explicit index so deletions neither skip elements
        # nor run past the shrinking list.  The original iterated a
        # pre-computed range(len(listnode)-1) while deleting, which raised
        # IndexError once the list shrank.
        i = 0
        while i < len(listnode):
            if listnode[i] == node:
                del listnode[i]
            else:
                i += 1
        return listnode
# Demo: remove every 2 from the sample list and print the result.
# NOTE(review): `list` shadows the builtin of the same name.
list=[1,2,3,2,4,5]
node1=2
x=deleteNode(list,node1)
print(x)
| [
"liubo37@163.com"
] | liubo37@163.com |
7d4d9e1c9a076511f2ab8479fb13a3113ee0c1bb | b2b4cd86da23cfcafa642b01ee86bd12284e6dfa | /ffWarAdminApi/apps.py | 70a2b0313cd620646f566c3b41852550c662ea1a | [] | no_license | bikram-shaw/ffWarApi | d47e1116844654150dc278d0b723e67e7194075d | b16d482680c45423927a002a1ca2e35642beadca | refs/heads/main | 2023-02-28T07:30:51.658906 | 2021-01-31T07:33:29 | 2021-01-31T07:33:29 | 319,536,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from django.apps import AppConfig
class FfwaradminapiConfig(AppConfig):
    """Django application configuration for the ffWarAdminApi app."""
    name = 'ffWarAdminApi'
| [
"bikramshawbnk@gmail.com"
] | bikramshawbnk@gmail.com |
658ac4aa4accaa322e09456a967e92697c29df16 | 1cee01e4e31672df57d4e7841c6d0f0efda6153b | /RP_impl/hello_world.py | 2801e3a09022ae697763bd254746ebbbbe80364e | [] | no_license | andrealoevemaerke/thesis | 9c3f5ab9416b6da979fcaf16637bd35e28bead2a | 701c0ed2de3c2bae4430238b37739659b883b6d8 | refs/heads/master | 2020-05-19T17:58:36.356030 | 2019-06-12T13:19:10 | 2019-06-12T13:19:10 | 185,146,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,835 | py | import socket
import numpy as np
from threading import Thread
import FFArithmetic as field
import shamir_scheme as ss
import proc
import TcpSocket5 as sock
import time
import queue as que
from participantCodePLOT import party
import matplotlib.pyplot as plt
import os
# TCP port every party listens on.
port = 65
# Static address table: entry i is the (ip, port) endpoint of party Pi.
party_addr = [['192.168.100.1', 65], #P0
              ['192.168.100.2', 65], #P1
              ['192.168.100.3', 65], #P2
              ['192.168.100.4', 65], #P3
              ['192.168.100.5', 65], #P3
              ['192.168.100.6', 65] #P3
              ]
# Central coordination unit address and its per-party server ports.
ccu_adr = '192.168.100.246'
server_addr = [[ccu_adr, 4031], #P0
               [ccu_adr, 4040], #P1
               [ccu_adr, 4041], #P2
               [ccu_adr, 4050], #P3
               [ccu_adr, 4060], #Reciever 4
               [ccu_adr, 4061] #Reciever 5
               ]
class commsThread (Thread):
    """Background TCP server thread.

    Binds to `server_info`, accepts connections via sock.TCPserver and
    pushes each received packet onto the shared queue `q`.
    """
    # Class-level flag; set True by the owner to end the receive loop.
    stop = False
    def __init__(self, threadID, name, server_info,q):
        Thread.__init__(self)
        self.q = q
        self.threadID = threadID
        self.name = name
        self.server_info = server_info # (Tcp_ip, Tcp_port)
        self.Rx_packet = [] # tuple [[client_ip, client_port], [Rx_data[n]]]
    def run(self):
        # print("Starting " + self.name)
        #Create TCP socket
        tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        tcpsock.bind(tuple(self.server_info))
        #Communication loop - Wait->Receive->Put to queue
        while not self.stop:
            Rx_packet = sock.TCPserver(tcpsock)
            # print("Client info:",Rx_packet[0])
            # print("Data recv:",Rx_packet[1])
            # Packets are dropped silently when the queue is full.
            if not self.q.full():
                self.q.put(Rx_packet)
        print("Exiting " + self.name)
m = 7979490791   # prime modulus of the finite field
F = field.GF(m)  # field used by the Shamir secret-sharing scheme
n = 2            # number of parties
t = 1            # sharing threshold
x = 7            # this party's secret input
# Derive our own party number from eth0's IPv4 address.
ipv4 = os.popen('ip addr show eth0').read().split("inet ")[1].split("/")[0]
pnr = party_addr.index([ipv4, port])
q = que.Queue()   # inbound packets filled by the comms thread
q2 = que.Queue()
q3 = que.Queue()  # data mirrored for plotting
#Initialization..
#TCP_IP = '192.168.100.246'
#TCP_PORT = 62
#UDP_PORT2 = 3000
server_info = party_addr[pnr]#(TCP_IP, TCP_PORT)
#server2_info = (ipv4, UDP_PORT2)
# Create new threads..
t1_comms = commsThread(1, "Communication Thread", server_info,q)
#2_commsSimulink = UDPcommsThread(2, "t2_commsSimulink", server2_info)
#ploting = plotter(q3)
#ploting.start()
class party(Thread):
    """One participant in a Shamir secret-sharing MPC protocol.

    Exchanges shares with the other parties over TCP (helper module
    `sock`), consuming inbound packets from queue `q` and mirroring them
    onto `q3` for plotting.  Secure multiplication uses precomputed
    Beaver triplets.
    """
    def __init__(self, F, x, n, t, i, q, q2,q3, paddr, saddr):
        Thread.__init__(self)
        self.c = 0       # index of the next unused Beaver triplet
        self.comr = 0    # communication-round counter (makes broadcast labels unique)
        self.recv = {}   # label -> raw value received from peers
        self.F = F       # finite field GF(m)
        self.x = x       # this party's secret input
        self.n = n       # number of parties
        self.t = t       # secret-sharing threshold
        self.i = i       # this party's index
        self.q = q
        self.q2 = q2
        self.q3 = q3
        self.party_addr = paddr   # (ip, port) endpoints of all parties
        self.server_addr = saddr  # (ip, port) server endpoints
    def distribute_shares(self, sec):
        # Split `sec` into n Shamir shares; send share j to party j.
        shares = ss.share(self.F, sec, self.t, self.n)
        for i in range(self.n):
            sock.TCPclient(self.party_addr[i][0], self.party_addr[i][1], ['input' + str(self.i) , int(str(shares[i]))])
    def broadcast(self, name, s):
        # Send value `s` to every party, labelled name + own index.
        for i in range(self.n):
            sock.TCPclient(self.party_addr[i][0], self.party_addr[i][1], [name + str(self.i) , int(str(s))])
    def readQueue(self):
        # Drain the inbound queue into self.recv; forward to q3 for plotting.
        while not self.q.empty():
            b = self.q.get()[1]
            self.recv[b[0]] = b[1]
            self.q3.put([b[0][-1], b[1]])
    def get_shares(self, name):
        # Block until label name+j has arrived from every party j, then
        # return the shares as field elements (consumed from self.recv).
        res = []
        for i in range(self.n):
            while name + str(i) not in self.recv:
                self.readQueue()
            res.append(self.F(self.recv[name+str(i)]))
            del self.recv[name + str(i)]
        return res
    def reconstruct_secret(self, name):
        # Reconstruct the secret from all parties' shares.
        return ss.rec(self.F, self.get_shares(name))
    def get_share(self, name):
        # Block until a single value labelled `name` arrives; consume it.
        while name not in self.recv:
            self.readQueue()
        a = self.F(self.recv[name])
        del self.recv[name]
        return a
    def get_triplets(self):
        # Block until the precomputed Beaver triplets arrive, then store
        # them as lists of field elements in self.triplets.
        while 'triplets' not in self.recv:
            self.readQueue()
        b = self.recv['triplets']
        res = []
        for i in b:
            res.append([self.F(j) for j in i])
        self.triplets = res
    def mult_shares(self, a, b):
        # Beaver multiplication: open d = a - r0 and e = b - r1 publicly,
        # then [ab] = d*e + d*[r1] + e*[r0] + [r2].  Consumes one triplet
        # and two communication rounds.
        r = self.triplets[self.c]
        self.c += 1
        d_local = a - r[0]
        self.broadcast('d' + str(self.comr), d_local)
        d_pub = self.reconstruct_secret('d' + str(self.comr))
        self.comr +=1
        e_local = b - r[1]
        self.broadcast('e' + str(self.comr), e_local)
        e_pub = self.reconstruct_secret('e' + str(self.comr))
        self.comr+=1
        return d_pub * e_pub + d_pub*r[1] + e_pub*r[0] + r[2]
    def legendreComp(self,a,b):
        # Secure comparison of shares a and b via the Legendre symbol of
        # the masked difference g = a - b.
        # NOTE(review): relies on self.tt, which is never assigned in this
        # file - presumably provided externally before the first call;
        # confirm against the caller.
        r = self.triplets[self.c]
        self.c+=1
        t = self.tt
        g = a - b
        k = self.mult_shares(t, self.mult_shares(r[0], r[0]))
        j_loc = self.mult_shares(g, k)
        self.broadcast('j'+ str(self.comr), j_loc)
        j_pub = self.reconstruct_secret('j'+str(self.comr))
        self.comr+=1
        # Legendre symbol via Euler's criterion: j_pub ** ((p-1)/2) mod p.
        ex = (self.F.p-1)/2
        sym = pow(int(str(j_pub)),int(ex), self.F.p)
        f = sym * t
        c = self.mult_shares((f+1), self.F(2).inverse())
        return c
#def run(self):
#    self.distribute_shares(x,'x_shares')
p = party(F,int(x),n,t,pnr, q, q2, q3, party_addr, server_addr)
# Start new Threads
#t2_commsSimulink.start()
t1_comms.start()
# Retry once per second until party 5 (the collector) is reachable.
while True:
    try:
        sock.TCPclient(party_addr[5][0], party_addr[5][1], ['flag', 1])
        break
    except:
        time.sleep(1)
        continue
print('Connected to 5!')
p.start()
# Report our (plain) input value to party 5.
sock.TCPclient(party_addr[5][0], party_addr[5][1], ['output', int(p.x)])
# Busy-loop echoing everything the comms thread receives.
while True:
    if not q.empty():
        print(q.get())
| [
"andreatram@gmail.com"
] | andreatram@gmail.com |
93a85d1ecf6825449a3f7c64ee9f281b94e8bdb4 | be043a809f54e592038e44db56627d5895ea5582 | /torobot/fetcher.py | e08d7a44ceff39072d0ef327fa59e012f42f2461 | [] | no_license | Yeolar/tornado-robot | 4767b862bfec3e21c270813db49305c4571a42d5 | 9f04556da1ff3f4b2614bc605711e6ebbb5a0c1a | refs/heads/master | 2021-01-18T14:11:11.439181 | 2013-08-20T04:53:02 | 2013-08-20T04:53:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created on 2013-04-12. Yeolar <yeolar@gmail.com>
#
import logging
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from zmq.eventloop.ioloop import IOLoop
class Fetcher(object):
    """Abstract page fetcher.

    Subclasses implement build_request() and fetch(); prepare() and
    on_finish() are optional hooks run around each fetch.  Instances are
    callable: calling one runs prepare -> fetch -> log -> on_finish.
    """
    def __init__(self, *args, **kwargs):
        self.identity = ''  # label shown in the request log line
        self.initialize(*args, **kwargs)

    def initialize(self, *args, **kwargs):
        """Subclass hook invoked from __init__ with the constructor args."""
        pass

    def build_request(self, request):
        """Translate the internal request object into a client request."""
        raise NotImplementedError()

    def prepare(self):
        """Hook run before each fetch."""
        pass

    def on_finish(self):
        """Hook run after each fetch has been started."""
        pass

    def fetch(self, request, callback):
        """Issue the request and arrange for callback(response)."""
        raise NotImplementedError()

    def __call__(self, request, callback):
        self.prepare()
        self.fetch(request, callback)
        self._log(request)
        self.on_finish()

    def _log(self, request):
        logging.info('[%s] %s %s', self.identity, request.method, request.url)
class HTTPFetcher(Fetcher):
    """Fetcher backed by Tornado's AsyncHTTPClient."""
    def build_request(self, request):
        # Map the internal request onto a tornado HTTPRequest; an empty
        # body string is normalized to None.
        return HTTPRequest(
            url=request.url,
            method=request.method,
            body=request.body or None,
            connect_timeout=request.connect_timeout,
            request_timeout=request.request_timeout)

    def fetch(self, request, callback):
        client = AsyncHTTPClient(IOLoop.instance())
        client.fetch(self.build_request(request), callback)
| [
"yeolar@gmail.com"
] | yeolar@gmail.com |
f4de25a9a5ef6b37ebd857b3d91d5bd414e813d3 | 63b468d1bec0e0ba26d07b937f920655eea68fa2 | /Coding Question/Strong number.py | 58dbc61f5315982a5c382229fa377745de0d0108 | [] | no_license | Priyanshuparihar/Competitive_Programming | e3866e1ce753d102140c7cb48036c3dbeee1786d | b33fedeeb24105b24f8a36b244f6d9a04c8a3672 | refs/heads/main | 2023-06-13T22:25:44.964543 | 2021-07-13T16:51:32 | 2021-07-13T16:51:32 | 374,601,192 | 0 | 2 | null | 2021-06-07T14:41:57 | 2021-06-07T09:03:18 | Python | UTF-8 | Python | false | false | 295 | py | def factorial(n):
Sum=1
for i in range(1,n+1):
Sum=Sum*i
return Sum
# A "strong" number equals the sum of the factorials of its digits,
# e.g. 145 = 1! + 4! + 5!.
num=int(input("Enter a no. "))
Sum=0
n=num  # keep the original value for the final comparison
while(num>0):
    rem=num%10               # current last digit
    Sum=Sum+factorial(rem)   # accumulate the digit's factorial
    num=num//10              # drop the last digit
if n==Sum:
    print("Enter number",n,"Strong no.")
else:
    print("Enter number is",n,"not Strong no.")
"noreply@github.com"
] | noreply@github.com |
ff842e856c5c79b2b516f75e988b23163b745344 | 86846935921f2a082ea8111ae617b3e10f2fd73d | /usdToRedis.py | 712937e441d47cac7388238d7403b0418d999de5 | [] | no_license | gmiraval/stats | f8d373abeeb7bb3bd06eba7a551977251813fd58 | 91153b5065b4bb46b146209c602b502c7fe7ea2c | refs/heads/master | 2022-03-27T16:49:58.559582 | 2019-12-03T15:07:14 | 2019-12-03T15:07:14 | 198,303,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
import sys
import urllib3
import logging
import boto3 #acceso a aws-poner credenciales en .aws
from decimal import Decimal
from datetime import *
import redis
#seteo nivel de log
#logging.basicConfig(level=logging.DEBUG)
##logging.basicConfig(level=logging.INFO)
## doc api bcra:
## https://estadisticasbcra.com/api/documentation
url = "http://api.estadisticasbcra.com/usd_of"
## `dias`: how many days back from today to start importing
dias = 30
startdate = int((datetime.now()-timedelta(dias)).strftime("%Y%m%d"))
logging.info(startdate)
headers = {
    # NOTE(review): API bearer token hard-coded in source.
    'Authorization': "Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NzM3MzQxNDUsInR5cGUiOiJleHRlcm5hbCIsInVzZXIiOiJtaXJhdmFsbGVzZ0BnbWFpbC5jb20ifQ.4Kb5JA1RiwN9DSxqH319B1aT-DWiYflz6odQbB3cAAL3hCJMOpe8rXOBHvcQyWruoVVME2uUTx4F35ZNQa8dVg",
    'Accept': "*/*",
    'Accept-Encoding': "gzip, deflate",
    'Connection': "keep-alive",
    'cache-control': "no-cache"
    }
response = requests.request("POST", url, headers=headers).json() #.json() parses the response body into a Python object
##sample response for debugging without hitting the API
##response = json.loads('[{"d": "2003-08-20","v": 2.91},{"d": "2019-07-01","v": 14.05},{"d": "2019-07-02","v": 14.06}]')
##define connection to the table
##dynamodb = boto3.resource('dynamodb')
##table = dynamodb.Table('usd')
# create a connection to the localhost Redis server instance, by
# default it runs on port 6379
redis_db = redis.StrictRedis(host="localhost", port=6379, db=0)
#insert each item into the store
for x in response:
    logging.info(x['d'][0:4]+x['d'][5:7]+x['d'][8:10])
    date = int(x['d'][0:4]+x['d'][5:7]+x['d'][8:10])  # "YYYY-MM-DD" -> YYYYMMDD int
    if date > startdate:
    ##    table.put_item(
    ##       Item={
    ##            'd': x['d'],
    ##            'v': Decimal(str(x['v']))
    ##        }
    ##)
        # NOTE(review): positional zadd(name, score, member) is the
        # redis-py 2.x calling convention; redis-py >= 3.0 requires
        # zadd('usd', {member: score}) - confirm the installed version.
        redis_db.zadd('usd',date,x['v'])
| [
"ubuntu@ip-172-26-3-231.ec2.internal"
] | ubuntu@ip-172-26-3-231.ec2.internal |
b2be474d2f79c37759554eaa222d04c3458c8283 | 162acc71ad55c1259b534ed41e351e9bdcfd1915 | /env/bin/easy_install | 6a94d7dbc70f012ea0007ce5e88259b453340910 | [] | no_license | merissab44/Events_site | 53f79d1f5256f0af71f4c1f386bb9d5e0efecb0e | 7fc15d6b019ebfe7cb6eeeebf4f062fbf8bb6d69 | refs/heads/main | 2023-04-16T15:26:46.610739 | 2021-04-26T17:32:08 | 2021-04-26T17:32:08 | 357,775,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | #!/Users/merissabridgeman/dev/courses/BEW1.2/BEW-1.2-Events-Homework/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

# Console-script shim generated by setuptools: strip a trailing
# "-script.pyw" / ".exe" suffix from argv[0], then delegate to
# easy_install's main() and exit with its status code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"merissa.bridgeman98@gmail.com"
] | merissa.bridgeman98@gmail.com | |
b1dbf2d9d7130f444f85d058c338082c4cbb3f92 | bfaa36f7fd23adfc7d6bd3f197ce660feccf80d3 | /2nd course/week6/p10.2_w6.py | ab281cd18d244bed77c365abca1fdf660906659a | [] | no_license | omarbakeer/PY4E | c70a8520056560e97e997b1603167be3cb12f5b2 | c06ee82fef15602876748ccd082cf76546cfb250 | refs/heads/master | 2021-03-19T11:09:42.316161 | 2018-06-13T01:37:24 | 2018-06-13T01:37:24 | 107,909,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | name = input("Enter file:")
# Count mbox "From " header lines by hour of day (py4e exercise 10.2).
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
lst = list()
dic = dict()
for line in handle :
    # Only sender lines: "From addr day mon dd hh:mm:ss yyyy"
    if not line.startswith("From "):
        continue
    line = line.split()
    time = line[5]          # the hh:mm:ss field
    time = time.split(":")
    hour = time[0]
    lst.append(hour)
for hours in lst:
    # Tally occurrences per hour.
    dic[hours] = dic.get(hours,0)+1
for k,v in sorted(dic.items()):
    # Hours are zero-padded strings, so lexicographic sort matches numeric.
    print(k,v)
"omarbakeer@gmail.com"
] | omarbakeer@gmail.com |
941745f8568dcf37e1784153a6ece9ec8c846877 | dc77d93ea860c9963fa09752750261ad75672739 | /tests/test_parsers.py | ccb31266b5e055ae91929259a211b3e12f3fb3b0 | [
"BSD-3-Clause"
] | permissive | pketh/investigation-google-search-audit | 86bef89766fff09ffc539b7729ce711fdd8f5c1c | 38068c598d234b6570046a9c744deea525e5e43b | refs/heads/master | 2022-11-19T07:18:26.304659 | 2020-07-28T02:07:19 | 2020-07-28T02:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,088 | py | """
Test Parsers
============
This script is an abstract test for HTML parsers.
Use it for two reasons:
1. Checking if Google has made changes to search results.
2. Assuring parsers are still mostly working.
It is OK if some tests fail, it just means that the specific parser's
target was not in the input examples.
This code ships with a default dataset.
You can include an extra parameter of a directory with new HTML files to test.
Author: Leon Yin
"""
import os
import sys
import glob
import unittest
import inspect
import pandas as pd
from bs4 import BeautifulSoup
from parameterized import parameterized
# the software we're testing is in this directory as `utils`
sys.path.append('..')
import utils.parsers as P
# Collect every (name, function) pair from utils.parsers whose name
# contains "_parser"; each pair becomes one parameterized test case.
functions_list = [o for o in inspect.getmembers(P)
                  if inspect.isfunction(o[1]) and
                  '_parser' in o[0]]
class TestParsers(unittest.TestCase):
'''
This class contains three functions that are run in the order
(the name of the first and last matter!).
`test_abstract` is to be used by all parsers in `functions_list`.
It iterates through the files read from `data_dir`, and sends each
HTML file through the parsers.
If the parser targets exist, the parser will return a list of dictionaries.
'''
data_dir = '../data/test/input_local_searches'
metadata_dir = '../data/test/parser_output'
for d in [data_dir, metadata_dir]:
os.makedirs(d, exist_ok=True)
@classmethod
def setUpClass(cls):
'''
Initializes parameters for HTML parsers.
Note that every file is read into memory and placed within a BeautifulSoup object.
'''
# create an empty dictionary ro record metadata on tests
cls.report = dict()
# select the local directory with HTML files to test.
if not os.path.isdir(cls.data_dir):
raise Exception('The input directory does not exist.')
cls.input_filenames = glob.glob(
os.path.join(cls.data_dir, '*.html')
)
cls.n_inputs = len(cls.input_filenames)
# read each HTML file into Beautiful soup and store them as a list in `parse_trees`
soups = []
for fn in cls.input_filenames:
with open(fn) as f:
filestream = f.read()
soup = BeautifulSoup(filestream, 'lxml')
soups.append(soup)
cls.parse_trees = soups
@parameterized.expand(functions_list)
def test_abstract(self, func_name, parser_func):
'''
This is the abstract of a test, thanks for the decorator,
each test will be parameterized by each tuple in `parser_params`.
The tuple contains a function name and the parser function itself.
The test sends all the input files in `soups`
To make sure these tests are accurate, make sure the at least one
inputs contain elements you're looking for...
The results of the parsers are saved as a key-value pair in the `report` property
'''
results = []
hits = 0
for i, soup in enumerate(self.parse_trees):
elements = parser_func(soup)
if len(elements) != 0:
hits += 1
for item in elements:
item.update({'filename' : os.path.abspath(self.input_filenames[i])})
results.extend(elements)
self.assertTrue(hits != 0)
self.report[func_name] = results
@classmethod
def tearDownClass(cls):
'''
This will provide some sort of aggregate statistic...
We'll figure out what to do with this later.
'''
for test, data in cls.report.items():
df = pd.DataFrame(data)
fn_out = os.path.join(cls.metadata_dir, f"{test}_results.csv")
df.to_csv(fn_out, index=False)
if __name__ == '__main__':
if len(sys.argv) > 1:
TestParsers.data_dir = sys.argv.pop()
unittest.main()
| [
"leon.yin@themarkup.org"
] | leon.yin@themarkup.org |
5ec6d29eb18f4b5a615c47c002f54ce12402b6b1 | 611847354ec077c5bc65fdb08c9f45ff45b4bfcc | /code/docker/python/flask_2/app.py | 234a0768f1eb0f2a9ab328dd7b3a0fc9be9cf1a3 | [
"MIT"
] | permissive | afcarl/pythoh_machine_learning_excerise | 1a572e4c6db11ee28d5c245f20fc81b334d04995 | f2b6e93eb02345f9078642cff3066e3e65557e51 | refs/heads/master | 2020-03-21T06:00:08.623962 | 2017-06-08T23:03:35 | 2017-06-08T23:03:35 | 138,193,305 | 1 | 0 | null | 2018-06-21T16:05:53 | 2018-06-21T16:05:52 | null | UTF-8 | Python | false | false | 2,365 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import pickle
import sqlite3
import os
import numpy as np
from vectorizer import vect
app = Flask(__name__)
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,
'pickle_objects',
'classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'reviews.sqlite')
def classify(document):
label = {0: 'negative', 1: 'positive'}
X =vect.transform([document])
y = clf.predict(X)[0]
proba = np.max(clf.predict_proba(X))
return label[y], proba
def train(document, y):
X = vect.transform([document])
clf.partial_fit(X, [y])
def sqlite_entry(path, document, y):
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute("INSERT INTO review_db (review, sentiment, date)"\
" VALUES (?, ?, DATETIME('now'))", (document, y))
conn.commit()
conn.close()
class ReviewForm(Form):
moviereview = TextAreaField('',
[validators.DataRequired(),
validators.length(min=15)])
@app.route('/')
def index():
form = ReviewForm(request.form)
return render_template('reviewform.html', form=form)
@app.route('/requests', methods=['POST'])
def results():
form = ReviewForm(request.form)
if request.method == 'POST' and form.validate():
review = request.form['movierview']
y, proba = classify(review)
return render_template('results.html',
content=review,
prediction=y,
probability=round(proba * 100, 2))
return render_template('reviewform.html', form=form)
@app.route('/thanks', methods=['POST'])
def feedback():
feedback = request.form['feedback_button']
review = request.form['review']
prediction = request.form['prediction']
inv_label = {'negative': 0, 'positive': 1}
y = inv_label[prediction]
if feedback == 'Incorrect':
y = int(not(y))
train(review, y)
sqlite_entry(db, review, y)
return render_template('thanks.html')
if __name__ == '__main__':
app.run(debug=True) | [
"snow.akogi.pgel@gmail.com"
] | snow.akogi.pgel@gmail.com |
62f3762c1f4cc277c8f0b20c4777ee5c189eb345 | e593f5b34050eba13fbadeee3563346fa0f1c25b | /tests/plugins/test_speedrunslive.py | 81731cbdc212b4e31f3887da8a114f76df267300 | [
"BSD-2-Clause",
"CC-BY-SA-2.0",
"MIT",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license"
] | permissive | fuglede/streamlink | f9e56e434b01ae426edd83f13037384af294838a | 2661d40164986f979edc2e6867f8daeceba73a44 | refs/heads/master | 2020-03-25T08:36:43.175618 | 2018-08-05T15:10:15 | 2018-08-05T15:10:15 | 143,622,979 | 0 | 0 | BSD-2-Clause | 2018-08-05T15:07:12 | 2018-08-05T15:07:12 | null | UTF-8 | Python | false | false | 570 | py | import unittest
from streamlink.plugins.speedrunslive import SpeedRunsLive
class TestPluginSpeedRunsLive(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'http://www.speedrunslive.com/#!/twitch',
]
for url in should_match:
self.assertTrue(SpeedRunsLive.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://www.twitch.tv',
]
for url in should_not_match:
self.assertFalse(SpeedRunsLive.can_handle_url(url))
| [
"gravyboat@users.noreply.github.com"
] | gravyboat@users.noreply.github.com |
aaac7828f0ebe58e41fab34c975790676ce05ef9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch32_2020_04_08_11_44_06_529462.py | cc4a1b9f2ef0e4e497cbb913dca1ed7af116e79d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | def lista_primos(n):
a=2
lista=[]
contador = 3
while len(lista)!=n:
if a == 2:
lista.append(a)
a+=1
elif a%2 == 0:
a+=1
elif contador < a :
contador = 3
while contador < a:
if a%contador == 0:
contador+=2
else:
lista.append(a)
contador=a+2
a+=1
else:
a+=1
return lista | [
"you@example.com"
] | you@example.com |
c711158cdb65871fda79be945d0bae0d04d531a8 | 50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7 | /challenge20(T-primes)/solutions/Coder45.py | 32ba0e7800485a552e5637112a3fad818e939995 | [
"MIT"
] | permissive | banana-galaxy/challenges | 792caa05e7b8aa10aad8e04369fc06aaf05ff398 | 8655c14828607535a677e2bb18689681ee6312fa | refs/heads/master | 2022-12-26T23:58:12.660152 | 2020-10-06T13:38:04 | 2020-10-06T13:38:04 | 268,851,516 | 11 | 8 | MIT | 2020-09-22T21:21:30 | 2020-06-02T16:24:41 | Python | UTF-8 | Python | false | false | 165 | py | python
def solution(n):
for k in range(n):
y=len([k for k in range(1,n+1) if not n %k])
if y == 3:
return True
else:
return False | [
"cawasp@gmail.com"
] | cawasp@gmail.com |
a393a62bb6e530c315338f5a80fab3f1fc9f9337 | 89d7edca0360c31fe6790b5f988bbbad3621c69a | /domainnet/train.py | 598215bb7cd879331188d3f9a22f34ff6f0a7cc9 | [] | no_license | dpernes/modafm | 33084e8d03e2f456e6287821de6e6714b911f59f | 0049a307cb89fc08119a24fbc7ec7baa1556bc58 | refs/heads/master | 2023-01-30T07:28:30.653211 | 2020-12-10T11:34:45 | 2020-12-10T11:34:45 | 236,744,023 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,253 | py | import sys
sys.path.append('..')
import argparse
from configparser import ConfigParser
import ast
import random
from copy import deepcopy
import numpy as np
import torch
from torch import optim
from torch.utils.data import DataLoader, SubsetRandomSampler
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from dataset import DomainNet
from models import SimpleCNN, MDANet, MODANet
from routines import (fs_train_routine, fm_train_routine, dann_train_routine, mdan_train_routine,
mdan_train_routine, moda_train_routine, moda_fm_train_routine)
from utils import MSDA_Loader, Logger
def main():
parser = argparse.ArgumentParser(description='Domain adaptation experiments with the DomainNet dataset.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-m', '--model', default='MODAFM', type=str, metavar='', help='model type (\'FS\' / \'DANNS\' / \'DANNM\' / \'MDAN\' / \'MODA\' / \'FM\' / \'MODAFM\'')
parser.add_argument('-d', '--data_path', default='/ctm-hdd-pool01/DB/DomainNet192', type=str, metavar='', help='data directory path')
parser.add_argument('-t', '--target', default='clipart', type=str, metavar='', help='target domain (\'clipart\' / \'infograph\' / \'painting\' / \'quickdraw\' / \'real\' / \'sketch\')')
parser.add_argument('-o', '--output', default='msda.pth', type=str, metavar='', help='model file (output of train)')
parser.add_argument('--icfg', default=None, type=str, metavar='', help='config file (overrides args)')
parser.add_argument('--arch', default='resnet152', type=str, metavar='', help='network architecture (\'resnet101\' / \'resnet152\'')
parser.add_argument('--mu_d', type=float, default=1e-2, help="hyperparameter of the coefficient for the domain discriminator loss")
parser.add_argument('--mu_s', type=float, default=0.2, help="hyperparameter of the non-sparsity regularization")
parser.add_argument('--mu_c', type=float, default=1e-1, help="hyperparameter of the FixMatch loss")
parser.add_argument('--n_rand_aug', type=int, default=2, help="N parameter of RandAugment")
parser.add_argument('--m_min_rand_aug', type=int, default=3, help="minimum M parameter of RandAugment")
parser.add_argument('--m_max_rand_aug', type=int, default=10, help="maximum M parameter of RandAugment")
parser.add_argument('--weight_decay', default=0., type=float, metavar='', help='hyperparameter of weight decay regularization')
parser.add_argument('--lr', default=1e-3, type=float, metavar='', help='learning rate')
parser.add_argument('--epochs', default=50, type=int, metavar='', help='number of training epochs')
parser.add_argument('--batch_size', default=8, type=int, metavar='', help='batch size (per domain)')
parser.add_argument('--checkpoint', default=0, type=int, metavar='', help='number of epochs between saving checkpoints (0 disables checkpoints)')
parser.add_argument('--eval_target', default=False, type=int, metavar='', help='evaluate target during training')
parser.add_argument('--use_cuda', default=True, type=int, metavar='', help='use CUDA capable GPU')
parser.add_argument('--use_visdom', default=False, type=int, metavar='', help='use Visdom to visualize plots')
parser.add_argument('--visdom_env', default='domainnet_train', type=str, metavar='', help='Visdom environment name')
parser.add_argument('--visdom_port', default=8888, type=int, metavar='', help='Visdom port')
parser.add_argument('--verbosity', default=2, type=int, metavar='', help='log verbosity level (0, 1, 2)')
parser.add_argument('--seed', default=42, type=int, metavar='', help='random seed')
args = vars(parser.parse_args())
# override args with icfg (if provided)
cfg = args.copy()
if cfg['icfg'] is not None:
cv_parser = ConfigParser()
cv_parser.read(cfg['icfg'])
cv_param_names = []
for key, val in cv_parser.items('main'):
cfg[key] = ast.literal_eval(val)
cv_param_names.append(key)
# dump args to a txt file for your records
with open(cfg['output'] + '.txt', 'w') as f:
f.write(str(cfg)+'\n')
# use a fixed random seed for reproducibility purposes
if cfg['seed'] > 0:
random.seed(cfg['seed'])
np.random.seed(seed=cfg['seed'])
torch.manual_seed(cfg['seed'])
torch.cuda.manual_seed(cfg['seed'])
device = 'cuda' if (cfg['use_cuda'] and torch.cuda.is_available()) else 'cpu'
log = Logger(cfg['verbosity'])
log.print('device:', device, level=0)
# normalization transformation (required for pretrained networks)
normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if 'FM' in cfg['model']:
# weak data augmentation (small rotation + small translation)
data_aug = T.Compose([
# T.RandomCrop(224),
# T.Resize(128),
T.RandomHorizontalFlip(),
T.RandomAffine(5, translate=(0.125, 0.125)),
T.ToTensor(),
# normalize, # normalization disrupts FixMatch
])
eval_transf = T.Compose([
# T.RandomCrop(224),
# T.Resize(128),
T.ToTensor(),
])
else:
data_aug = T.Compose([
# T.RandomCrop(224),
# T.Resize(128),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
])
eval_transf = T.Compose([
# T.RandomCrop(224),
# T.Resize(128),
T.ToTensor(),
normalize,
])
domains = ['clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch']
datasets = {domain: DomainNet(cfg['data_path'], domain=domain, train=True, transform=data_aug) for domain in domains}
n_classes = len(datasets[cfg['target']].class_names)
test_set = DomainNet(cfg['data_path'], domain=cfg['target'], train=False, transform=eval_transf)
if 'FM' in cfg['model']:
target_pub = deepcopy(datasets[cfg['target']])
target_pub.transform = eval_transf # no data augmentation in test
else:
target_pub = datasets[cfg['target']]
if cfg['model'] != 'FS':
train_loader = MSDA_Loader(datasets, cfg['target'], batch_size=cfg['batch_size'], shuffle=True, num_workers=0, device=device)
if cfg['eval_target']:
valid_loaders = {'target pub': DataLoader(target_pub, batch_size=6*cfg['batch_size']),
'target priv': DataLoader(test_set, batch_size=6*cfg['batch_size'])}
else:
valid_loaders = None
log.print('target domain:', cfg['target'], '| source domains:', train_loader.sources, level=1)
else:
train_loader = DataLoader(
datasets[cfg['target']],
batch_size=cfg['batch_size'],
shuffle=True)
test_loader = DataLoader(
test_set,
batch_size=cfg['batch_size'])
log.print('target domain:', cfg['target'], level=1)
if cfg['model'] == 'FS':
model = SimpleCNN(n_classes=n_classes, arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
valid_loaders = {'target pub': test_loader} if cfg['eval_target'] else None
fs_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
elif cfg['model'] == 'FM':
model = SimpleCNN(n_classes=n_classes, arch=cfg['arch']).to(device)
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
cfg['excl_transf'] = None
fm_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
elif cfg['model'] == 'DANNS':
for src in train_loader.sources:
model = MODANet(n_classes=n_classes, arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
dataset_ss = {src: datasets[src], cfg['target']: datasets[cfg['target']]}
train_loader = MSDA_Loader(dataset_ss, cfg['target'], batch_size=cfg['batch_size'], shuffle=True, device=device)
dann_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
torch.save(model.state_dict(), cfg['output']+'_'+src)
elif cfg['model'] == 'DANNM':
model = MODANet(n_classes=n_classes, arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
dann_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
elif args['model'] == 'MDAN':
model = MDANet(n_classes=n_classes, n_domains=len(train_loader.sources), arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
mdan_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
elif cfg['model'] == 'MODA':
model = MODANet(n_classes=n_classes, arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
moda_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
elif cfg['model'] == 'MODAFM':
model = MODANet(n_classes=n_classes, arch=cfg['arch']).to(device)
conv_params, fc_params = [], []
for name, param in model.named_parameters():
if 'fc' in name.lower():
fc_params.append(param)
else:
conv_params.append(param)
optimizer = optim.Adadelta([
{'params':conv_params, 'lr':0.1*cfg['lr'], 'weight_decay':cfg['weight_decay']},
{'params':fc_params, 'lr':cfg['lr'], 'weight_decay':cfg['weight_decay']}
])
cfg['excl_transf'] = None
moda_fm_train_routine(model, optimizer, train_loader, valid_loaders, cfg)
else:
raise ValueError('Unknown model {}'.format(cfg['model']))
torch.save(model.state_dict(), cfg['output'])
if __name__ == '__main__':
main()
| [
"diogo.pernes.cunha@gmail.com"
] | diogo.pernes.cunha@gmail.com |
9adae338d936fc32472ea65cbf215d5c748dbe4b | 6a1a66b83faed0ab803b9fc7c5da44523197d8cc | /GoogleNews/GoogleNews.py | b0151b8d87ff8d2da8c01e7d068dfd01a58a66c4 | [] | no_license | jayrambhia/Extract-News-Summary | 500d20d03e748ccec1340de116c1cb490cbf13e3 | 7aeb53c19649526ce02cbb44584fc275631aaad1 | refs/heads/master | 2021-01-23T23:56:12.219164 | 2012-10-16T19:00:04 | 2012-10-16T19:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | import urllib2
from xml.dom.minidom import parseString #Standard Python parser for XML files (Non standard HTML)
import sys
def search( term, count ): #Term = query string, count = number of links desired
results = [] #List for storing the URLs
obj = parseString( urllib2.urlopen('http://news.google.com/news?q=%s&output=rss&num=%s' % (term, str(count))).read() ) #Open Google news for the desired query and number of links, get RSS output and parse it as a string with XML DOM
links = obj.getElementsByTagName('link')[2: count+2] #From the parsed string get the Elements with the tag name <link>, skip the first two tags
for link in links:
results.append( link.childNodes[0].data.split('=')[-1] ) #From the data inside the <link> tag, seperate them at "=" and append the URL in the result list
return results
| [
"aag999in@gmail.com"
] | aag999in@gmail.com |
f9d1f1abd78cf0eb8c1ed2be9b6187e2a9f186b2 | 14f9a2beebaba68cfd5ddcf3daac6f4fa74d9977 | /value_predictor.py | 86010e1e37b13726d80c67f262f105d540b2ed25 | [] | no_license | Rsheikh-shab/lauretta.io.test | 6d24e2e127126afb6450d71b65678224ed906f9f | a5409b0daf1435d648d40b933f8703e3f92d24ab | refs/heads/master | 2020-09-24T05:39:39.737419 | 2019-12-03T17:25:07 | 2019-12-03T17:25:07 | 225,676,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | def value_predictor(value):
# check length
n = len(value)
# check at least 6 elements and sort the list
if n >= 6 and sorted(value):
# check even number of value
if n % 2 != 0:
x = float(value[n / 2])
return x
return float(value[int(n / 2)] + value[int((n - 1) / 2)]) / 2.0
else:
return "List must have at least 6 elements"
print(value_predictor([1, 2, 3, 4, 5, 6]))
print(value_predictor([1, 1, 1, 6, 6, 6]))
| [
"rajuiium121@gmail.com"
] | rajuiium121@gmail.com |
41d40d26c70fe221dbe9b131fa40155013a2f449 | 8492601f7325ba93fa14b2af3632b5b2d8aeb471 | /3. Trading/3.1.5 Support_Vector_Machine.py | 5b2c03290264fcc9a07ff3836dac49f748162988 | [
"MIT"
] | permissive | paracats/Financial_Engineering | 20e390b9e36b55e77d7072a896a357cb7d538d6b | 78636026c430c210fbdf4f26a19d6439e9c7236c | refs/heads/master | 2022-11-23T19:35:25.270466 | 2020-08-03T03:31:26 | 2020-08-03T03:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,029 | py | """ Python For Quantitative Finance """
""" Support_Vector_Machine """
""" Shaofei Li """
from __future__ import print_function
import pprint
import re
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC
class ReutersParser(HTMLParser):
"""
ReutersParser subclasses HTMLParser and is used to open the SGML
files associated with the Reuters-21578 categorised test collection.
The parser is a generator and will yield a single document at a time.
Since the data will be chunked on parsing, it is necessary to keep
some internal state of when tags have been "entered" and "exited".
Hence the in_body, in_topics and in_topic_d boolean members.
"""
def __init__(self, encoding=’latin-1’):
"""
Initialise the superclass (HTMLParser) and reset the parser.
Sets the encoding of the SGML files by default to latin-1.
"""
HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def _reset(self):
"""
This is called only on initialisation of the parser class
and when a new topic-body tuple has been generated. It
resets all off the state so that a new tuple can be subsequently
generated.
"""
self.in_body = False
self.in_topics = False
self.in_topic_d = False
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
"""
parse accepts a file descriptor and loads the data in chunks
in order to minimise memory usage. It then yields new documents
as they are parsed.
"""
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_starttag(self, tag, attrs):
"""
This method is used to determine what to do when the parser
comes across a particular tag of type "tag". In this instance
we simply set the internal state booleans to True if that particular
tag has been found.
"""
if tag == "reuters":
pass
elif tag == "body":
self.in_body = True
elif tag == "topics":
self.in_topics = True
elif tag == "d":
self.in_topic_d = True
def handle_endtag(self, tag):
"""
This method is used to determine what to do when the parser
finishes with a particular tag of type "tag".
If the tag is a <REUTERS> tag, then we remove all
white-space with a regular expression and then append the
topic-body tuple.
If the tag is a <BODY> or <TOPICS> tag then we simply set
the internal state to False for these booleans, respectively.
If the tag is a <D> tag (found within a <TOPICS> tag), then we
append the particular topic to the "topics" list and
finally reset it.
"""
if tag == "reuters":
self.body = re.sub(r’\s+’, r’ ’, self.body)
self.docs.append( (self.topics, self.body) )
self._reset()
elif tag == "body":
self.in_body = False
elif tag == "topics":
self.in_topics = False
elif tag == "d":
self.in_topic_d = False
self.topics.append(self.topic_d)
self.topic_d = ""
def handle_data(self, data):
"""
The data is simply appended to the appropriate member state
for that particular tag, up until the end closing tag appears.
"""
if self.in_body:
self.body += data
elif self.in_topic_d:
self.topic_d += data
def obtain_topic_tags():
"""
Open the topic list file and import all of the topic names
taking care to strip the trailing "\n" from each word.
"""
topics = open(
"data/all-topics-strings.lc.txt", "r"
).readlines()
topics = [t.strip() for t in topics]
return topics
def filter_doc_list_through_topics(topics, docs):
"""
Reads all of the documents and creates a new list of two-tuples
that contain a single feature entry and the body text, instead of
a list of topics. It removes all geographic features and only
retains those documents which have at least one non-geographic
topic.
"""
ref_docs = []
for d in docs:
if d[0] == [] or d[0] == "":
continue
for t in d[0]:
if t in topics:
d_tup = (t, d[1])
ref_docs.append(d_tup)
break
return ref_docs
def create_tfidf_training_data(docs):
"""
Creates a document corpus list (by stripping out the
class labels), then applies the TF-IDF transform to this
list.
The function returns both the class label vector (y) and
the corpus token/feature matrix (X).
"""
# Create the training data class labels
y = [d[0] for d in docs]
# Create the document corpus list
corpus = [d[1] for d in docs]
# Create the TF-IDF vectoriser and transform the corpus
vectorizer = TfidfVectorizer(min_df=1)
X = vectorizer.fit_transform(corpus)
return X, y
def train_svm(X, y):
"""
Create and train the Support Vector Machine.
"""
svm = SVC(C=1000000.0, gamma="auto", kernel=’rbf’)
svm.fit(X, y)
return svm
if __name__ == "__main__":
# Create the list of Reuters data and create the parser
files = ["data/reut2-%03d.sgm" % r for r in range(0, 22)]
parser = ReutersParser()
# Parse the document and force all generated docs into
# a list so that it can be printed out to the console
docs = []
for fn in files:
for d in parser.parse(open(fn, ’rb’)):
docs.append(d)
# Obtain the topic tags and filter docs through it
topics = obtain_topic_tags()
ref_docs = filter_doc_list_through_topics(topics, docs)
# Vectorise and TF-IDF transform the corpus
X, y = create_tfidf_training_data(ref_docs)
# Create the training-test split of the data
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Create and train the Support Vector Machine
svm = train_svm(X_train, y_train)
# Make an array of predictions on the test set
pred = svm.predict(X_test)
# Output the hit-rate and the confusion matrix for each model
print(svm.score(X_test, y_test))
print(confusion_matrix(pred, y_test))
| [
"noreply@github.com"
] | noreply@github.com |
3c2bebb59fd7b14c45054f32eb853b2af0270218 | dd62c47450cb7eda635ce9c4aba465b31ce87a47 | /problem1.py | 58db4208bf964e40032cef20edc0dd873f9a1cff | [] | no_license | Shrekinator19/Unit3_Lesson5 | 7ceff1a09e396dbaceaf68a29732706d37c08167 | a3a3a0c1bfe79b2fdeb4eb8b1fb38e9cd94511b9 | refs/heads/master | 2020-04-09T13:33:06.903528 | 2018-12-04T15:04:07 | 2018-12-04T15:04:07 | 160,374,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | name = 'Skye'
print ('name') | [
"noreply@github.com"
] | noreply@github.com |
f44984d9f439fc8006d2dced9d5e8885cd5bc7b3 | 831a0d767921bbbc5e8717b73a6ded33244243cc | /amt/models/onsets_frames/check_serialize.py | 5c7192d20c30d522aac66781293149bff7c63f57 | [] | no_license | faraazn/music-transcription | 56bc8e15f16dc674d80b1a70b9565d3639387719 | c8106c01a3a0286bcc0ddb844f1287bd70a3299e | refs/heads/master | 2021-07-01T01:58:26.728091 | 2019-05-30T00:06:29 | 2019-05-30T00:06:29 | 151,554,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import tensorflow as tf
import numpy as np
from amt.protobuf import music_pb2
from amt.music import audio_io
import wave
import six
def wav_to_num_frames(wav_audio, frames_per_second):
# TODO: make a version using samples, sample rate, hop length
"""Transforms a wav-encoded audio string into number of frames."""
w = wave.open(six.BytesIO(wav_audio))
return np.int32(w.getnframes() / w.getframerate() * frames_per_second)
path = "/home/faraaz/workspace/music-transcription/amt/models/onsets_frames/tfrecord/clean_midi_test.tfrecord"
example = tf.train.Example()
sample_rate = 16000
spec_hop_length = 512
frames_per_sec = sample_rate / spec_hop_length
#print(len([record for record in tf.python_io.tf_record_iterator(path)]))
for record in tf.python_io.tf_record_iterator(path):
example.ParseFromString(record)
#print(example)
f = example.features.feature
song_name = f['id'].bytes_list.value[0].decode('utf-8')
wav_bytes = f['wav'].bytes_list.value[0]
samples_bytes = f['audio'].float_list.value
samples_array = np.asarray(samples_bytes, dtype="float32")
firstsamples = audio_io.load_audio(song_name, 16000)
wav_data = audio_io.samples_to_wav_data(firstsamples, 16000)
og_samples_array = audio_io.wav_data_to_samples(wav_data, 16000)
og_bytes = og_samples_array.tobytes()
x = np.frombuffer(og_bytes, dtype=np.float32)
assert wav_data == wav_bytes
print(wav_to_num_frames(wav_data, frames_per_sec))
print(len(firstsamples)/512)
break
| [
"faraaz.nadeem@gmail.com"
] | faraaz.nadeem@gmail.com |
c84c53395f7b921b51abae53e9b8bec22605a294 | 7818608d4887fc3d2927c7d75a28a727151c8b78 | /greykode/seq2seq_attention.py | b15f55e88927e4cc3959f3ab2debefb9ad61f4d1 | [] | no_license | kiyeonj21/nlp-tutorial | 5a3005aaeaf4e486961dbb20aadfe3993f7fe3a8 | ac0d0ded48392abf03cb2435207c9127d191d8c2 | refs/heads/master | 2023-03-13T06:42:40.145721 | 2021-02-26T21:30:46 | 2021-02-26T21:30:46 | 342,696,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,265 | py | # code by Tae Hwan Jung(Jeff Jung) @graykode
# Reference : https://github.com/hunkim/PyTorchZeroToAll/blob/master/14_2_seq2seq_att.py
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
dtype = torch.FloatTensor
# S: Symbol that shows starting of decoding input
# E: Symbol that shows starting of decoding output
# P: Symbol that will fill in blank sequence if current batch data size is short than time steps
sentences = ['ich mochte ein bier P', 'S i want a beer', 'i want a beer E']
word_list = " ".join(sentences).split()
word_list = list(set(word_list))
word_dict = {w: i for i, w in enumerate(word_list)}
number_dict = {i: w for i, w in enumerate(word_list)}
n_class = len(word_dict) # vocab list
# Parameter
n_hidden = 128
def make_batch(sentences):
input_batch = [np.eye(n_class)[[word_dict[n] for n in sentences[0].split()]]]
output_batch = [np.eye(n_class)[[word_dict[n] for n in sentences[1].split()]]]
target_batch = [[word_dict[n] for n in sentences[2].split()]]
# make tensor
return Variable(torch.Tensor(input_batch)), Variable(torch.Tensor(output_batch)), Variable(torch.LongTensor(target_batch))
class Attention(nn.Module):
    """Seq2seq RNN encoder/decoder with dot-product attention (batch size 1).

    Relies on the module-level globals n_class (vocab size) and n_hidden.
    """
    def __init__(self):
        super(Attention, self).__init__()
        # NOTE(review): dropout on a single-layer nn.RNN has no effect (it is
        # only applied between stacked layers); kept for behavioural parity.
        self.enc_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        self.dec_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        # Linear for attention
        self.attn = nn.Linear(n_hidden, n_hidden)
        self.out = nn.Linear(n_hidden * 2, n_class)
    def forward(self, enc_inputs, hidden, dec_inputs):
        """Encode enc_inputs, then decode dec_inputs one step at a time while
        attending over the encoder outputs.

        Returns (logits of shape [1, n_step, n_class], list of per-step
        attention weight rows as numpy arrays).
        """
        enc_inputs = enc_inputs.transpose(0, 1)  # enc_inputs: [n_step, batch_size, n_class]
        dec_inputs = dec_inputs.transpose(0, 1)  # dec_inputs: [n_step, batch_size, n_class]
        # enc_outputs : [n_step, batch_size, num_directions(=1) * n_hidden], matrix F
        # enc_hidden : [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        enc_outputs, enc_hidden = self.enc_cell(enc_inputs, hidden)
        trained_attn = []
        hidden = enc_hidden
        n_step = len(dec_inputs)
        model = Variable(torch.empty([n_step, 1, n_class]))
        for i in range(n_step):  # each decoder time step
            # dec_output : [n_step(=1), batch_size(=1), num_directions(=1) * n_hidden]
            # hidden : [num_layers(=1) * num_directions(=1), batch_size(=1), n_hidden]
            dec_output, hidden = self.dec_cell(dec_inputs[i].unsqueeze(0), hidden)
            attn_weights = self.get_att_weight(dec_output, enc_outputs)  # attn_weights : [1, 1, n_step]
            trained_attn.append(attn_weights.squeeze().data.numpy())
            # matrix-matrix product of matrices [1,1,n_step] x [1,n_step,n_hidden] = [1,1,n_hidden]
            context = attn_weights.bmm(enc_outputs.transpose(0, 1))
            dec_output = dec_output.squeeze(0)  # dec_output : [batch_size(=1), num_directions(=1) * n_hidden]
            context = context.squeeze(1)  # [1, num_directions(=1) * n_hidden]
            model[i] = self.out(torch.cat((dec_output, context), 1))
        # make wn shape [n_step, n_class]
        return model.transpose(0, 1).squeeze(0), trained_attn
    def get_att_weight(self, dec_output, enc_outputs):
        """Attention weights of one decoder state over all encoder outputs."""
        n_step = len(enc_outputs)
        attn_scores = Variable(torch.zeros(n_step))  # attn_scores : [n_step]
        for i in range(n_step):
            attn_scores[i] = self.get_att_score(dec_output, enc_outputs[i])
        # BUGFIX: make the softmax dimension explicit -- the implicit-dim form
        # is deprecated. attn_scores is 1-D, so dim=0 preserves behaviour.
        return F.softmax(attn_scores, dim=0).view(1, 1, -1)
    def get_att_score(self, dec_output, enc_output):
        """Score one encoder output against the decoder state:
        dot(dec_output, W @ enc_output), returned as a scalar tensor."""
        score = self.attn(enc_output)  # score : [batch_size, n_hidden]
        return torch.dot(dec_output.view(-1), score.view(-1))  # inner product make scalar value
input_batch, output_batch, target_batch = make_batch(sentences)
# hidden : [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
hidden = Variable(torch.zeros(1, 1, n_hidden))
model = Attention()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Train
for epoch in range(2000):
optimizer.zero_grad()
output, _ = model(input_batch, hidden, output_batch)
loss = criterion(output, target_batch.squeeze(0))
if (epoch + 1) % 400 == 0:
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
optimizer.step()
# Test
test_batch = [np.eye(n_class)[[word_dict[n] for n in 'SPPPP']]]
test_batch = Variable(torch.Tensor(test_batch))
predict, trained_attn = model(input_batch, hidden, test_batch)
predict = predict.data.max(1, keepdim=True)[1]
print(sentences[0], '->', [number_dict[n.item()] for n in predict.squeeze()])
# Show Attention
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(trained_attn, cmap='viridis')
ax.set_xticklabels([''] + sentences[0].split(), fontdict={'fontsize': 14})
ax.set_yticklabels([''] + sentences[2].split(), fontdict={'fontsize': 14})
plt.show() | [
"kiyeonj21@gmail.com"
] | kiyeonj21@gmail.com |
ca366b16907da7a8db3c9c321fb3152749eba95c | dd8555ab9b10f7f6d9d1541022ff41c4a29e1589 | /manage.py | 84e57f37f83371ed44d9b6ecbd12c77462ea96b0 | [
"MIT"
] | permissive | rashidalabri/forsa-web | 3a4b38283ffcb5c1f756796eb97df7a7bcd185b4 | 5d3aac0c0a240d722d8e1a1fc103aa720471cae2 | refs/heads/master | 2022-12-13T00:29:11.112140 | 2020-07-19T12:53:23 | 2020-07-19T12:53:23 | 194,864,872 | 0 | 0 | MIT | 2022-12-08T10:58:03 | 2019-07-02T13:06:41 | JavaScript | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Map ENVIRONMENT (e.g. "development"/"production") to the
    # django-configurations class name ("Development"/"Production").
    configuration = os.getenv('ENVIRONMENT', 'development').title()
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'forsa.settings')
    os.environ.setdefault('DJANGO_CONFIGURATION', configuration)
    try:
        # django-configurations wraps Django's own management entry point
        from configurations.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            'Couldn\'t import Django. Are you sure it\'s installed and '
            'available on your PYTHONPATH environment variable? Did you '
            'forget to activate a virtual environment?') from exc
    execute_from_command_line(sys.argv)
| [
"itsrashidalabri@gmail.com"
] | itsrashidalabri@gmail.com |
5372587b0dec0bff1cc46831f4323e889d4a4f0e | 2ec28489d025171df96a59db9a1cf1e95cd22330 | /src/moo/cli/viewer.py | 525c60c51144fe03d57de3cb87f27affb158a49d | [] | no_license | krkettle57/moo | e1b0927a8a82c21e0b68bc69ffeb57922c9afa7c | ff736f34adf905bd0365d89a3f42e2af7ced7ce0 | refs/heads/main | 2023-08-17T11:18:13.543651 | 2021-10-08T03:54:14 | 2021-10-08T03:56:27 | 413,745,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | from typing import List, get_args
from moo.models.moo import MOO, CallResultSet, Target, TargetLengthOption
class MOOCLIViewer:
    """Presentation layer for the MOO CLI: renders every user-facing
    (Japanese) message as a plain string."""
    def start_verify(self, yes: str = "y", no: str = "n") -> str:
        """Prompt asking whether to abandon the current game and start anew."""
        header = "進行中のMOOが存在します。\n"
        return header + f"新しくMOOを開始しますか?[{yes}/{no}]: "
    def start_done(self) -> str:
        """Confirmation that a new MOO has started."""
        message = "MOOを開始しました"
        return message
    def start_cancel(self) -> str:
        """Notice that starting a new MOO was aborted."""
        message = "新しいMOOの開始を中止しました。"
        return message
    def giveup(self, target: Target) -> str:
        """Farewell message revealing the answer after the player gives up."""
        answer = target.target
        return f"MOOを終了します。ターゲットは{answer}でした。"
    def clear(self, moo: MOO) -> str:
        """Victory message with the answer and the number of calls used."""
        call_count = len(moo.called_results)
        return f"クリア!ターゲット: {moo.target.target}, コール数: {call_count}"
    def called_result(self, result: CallResultSet) -> str:
        """One-line summary of a single call: the guess, EATs and BITEs."""
        guess = result.called.called
        return f"Call: {guess}, {result.num_eat}-EAT, {result.num_bite}-BITE"
    def history(self, called_results: List[CallResultSet]) -> str:
        """Header line followed by one summary line per past call."""
        rows = ["[Called History]"]
        for past in called_results:
            rows.append(self.called_result(past))
        return "\n".join(rows)
    def no_moo_on_play(self) -> str:
        """Error: a play command was issued but no game is in progress."""
        return "進行中のMOOが存在しません。 startコマンドで開始して下さい。"
    def no_moo_started(self) -> str:
        """Error: no game has ever been started."""
        return "MOOのプレイ記録が存在しません。startコマンドで開始して下さい。"
    def invalid_target_length(self) -> str:
        """Error listing the allowed target lengths."""
        allowed = [str(option) for option in get_args(TargetLengthOption)]
        options = ", ".join(allowed)
        return f"ターゲットの桁数は{options}のいずれかで入力して下さい。"
    def invalid_call_length(self, target_length: int) -> str:
        """Error: the called value must have exactly target_length digits."""
        return f"コールする値は{target_length}桁で入力して下さい。"
    def invalid_call_value(self) -> str:
        """Error: every digit of the called value must be 0-9."""
        return "コールする値の各桁は0~9のいずれかで入力して下さい。"
| [
"krkettle57@gmail.com"
] | krkettle57@gmail.com |
11920ad62472a9f709a48bf0e4b513977c380ed1 | 5620ab50dffe0f8085ec562f83a17d0c0d23b2aa | /Assignment 3 - Recursion/Question1_power.py | 6a4ca56cb00378f7c13ba3006c35276094c9491b | [] | no_license | ginajoerger/Data-Structures | 4f04462c89f56c3f410c5a97e83ff73f26fe4670 | 068836964d987f8888d931ef5b24cbeff1e94207 | refs/heads/main | 2023-02-02T22:58:54.891519 | 2020-12-21T00:50:33 | 2020-12-21T00:50:33 | 323,186,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | def power(x,n):
    '''
    Recursively compute x raised to the power n.

    @x: the base, integer (may be negative)
    @n: the exponent, integer (may be negative; a negative exponent
        yields a float via 1 / x^|n|)
    @return: x^n
    '''
    if n == 0:
        return 1
    if n >= 1:
        # peel one factor of x off per recursive call
        return x * power(x, n-1)
    if n <= -1:
        # negative exponent: invert the positive-power result
        return (1/power(x, -n))
def main():
    # quick sanity checks; expected results are in the trailing comments
    print(power(-2, 4)) # 16
    print(power(4, 3)) # 64
    print(power(-2, -3)) # -0.125
#main()
| [
"noreply@github.com"
] | noreply@github.com |
9bf502aa53de5ff285b04513b8db97f45b9147ae | 64d923ab490341af97c4e7f6d91bf0e6ccefdf4b | /tensorforce/core/policies/state_value.py | 3f2776d338b73a577f31700a2da5f1127a5c3642 | [
"Apache-2.0"
] | permissive | tensorforce/tensorforce | 38d458fedeeaa481adf083397829cea434d020cd | 1bf4c3abb471062fb66f9fe52852437756fd527b | refs/heads/master | 2023-08-17T17:35:34.578444 | 2023-08-14T20:14:08 | 2023-08-14T20:14:08 | 85,491,050 | 1,312 | 246 | Apache-2.0 | 2023-08-14T20:14:10 | 2017-03-19T16:24:22 | Python | UTF-8 | Python | false | false | 2,932 | py | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce.core import SignatureDict, TensorSpec, tf_function
from tensorforce.core.policies import BasePolicy
class StateValue(BasePolicy):
    """
    Base class for state-value functions, here categorized as "degenerate" policy.

    Args:
        device (string): Device name
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        name (string): <span style="color:#0000C0"><b>internal use</b></span>.
        states_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
        auxiliaries_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
        actions_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
    """

    def __init__(
        self, *, device=None, l2_regularization=None, name=None, states_spec=None,
        auxiliaries_spec=None, actions_spec=None
    ):
        # Plain pass-through to the BasePolicy constructor.
        super().__init__(
            device=device, l2_regularization=l2_regularization, name=name,
            states_spec=states_spec, auxiliaries_spec=auxiliaries_spec,
            actions_spec=actions_spec
        )

    def input_signature(self, *, function):
        # Only 'state_value' is declared here; defer everything else upstream.
        if function != 'state_value':
            return super().input_signature(function=function)
        horizons_spec = TensorSpec(type='int', shape=(2,))
        return SignatureDict(
            states=self.states_spec.signature(batched=True),
            horizons=horizons_spec.signature(batched=True),
            internals=self.internals_spec.signature(batched=True),
            auxiliaries=self.auxiliaries_spec.signature(batched=True)
        )

    def output_signature(self, *, function):
        # A state value is a single batched float per input.
        if function != 'state_value':
            return super().output_signature(function=function)
        scalar_spec = TensorSpec(type='float', shape=())
        return SignatureDict(singleton=scalar_spec.signature(batched=True))

    @tf_function(num_args=4)
    def state_value(self, *, states, horizons, internals, auxiliaries):
        """Abstract: subclasses compute the state value V(s)."""
        raise NotImplementedError
| [
"alexkuhnle@t-online.de"
] | alexkuhnle@t-online.de |
c20bcd3efee415bcd08c02d3fdb1ea616b2b11f0 | 21a66e40c378ebb5c53a61b5c0eb77f17a835eb9 | /adaboost_ex.py | 2cc0b6e4cc3eb7e659ade8db3394ba4a15c63622 | [] | no_license | timellemit/scikit_learn_practice | beb1998ea4f96d58a5f3d77cf868eb6ca41e66e0 | 12037fb4b1f4a3433b425d05ee8213b2da8d01e3 | refs/heads/master | 2021-01-22T05:53:56.345852 | 2015-06-03T09:19:25 | 2015-06-03T09:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | import pylab as pl
import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
pl.figure(figsize=(10, 5))
# Plot the decision boundaries
pl.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=pl.cm.Paired,
label="Class %s" % n)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.legend(loc='upper right')
pl.xlabel("Decision Boundary")
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
pl.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
pl.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = pl.axis()
pl.axis((x1, x2, y1, y2 * 1.2))
pl.legend(loc='upper right')
pl.ylabel('Samples')
pl.xlabel('Decision Scores')
pl.subplots_adjust(wspace=0.25)
pl.show() | [
"yury.kashnitsky@gmail.com"
] | yury.kashnitsky@gmail.com |
804310d2b0286c2b01fc8d205c8e2eb96507907d | a7b27a328652534b29a5ae1826aa4c2f450dcbc5 | /company/migrations/0003_alter_contactusmodel_created.py | 3f662089149608f9c13bc9d7fdbf13ca0c80edbd | [] | no_license | alienone305/jewlery | 4c091d85839f0ae9427e47812bc51ae2f5ebd99e | c46bf56d11e119988e1d2f3700c4f24b3010239f | refs/heads/main | 2023-08-26T12:33:03.974500 | 2021-11-04T16:48:35 | 2021-11-04T16:48:35 | 403,057,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Generated by Django 3.2 on 2021-08-07 15:05
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration: alters the default of ContactUsModel.created.
    # NOTE(review): the default is a fixed, generation-time UTC timestamp,
    # which suggests the model used datetime.now() evaluated at
    # makemigrations time -- confirm; auto_now_add is likely the intent.
    dependencies = [
        ('company', '0002_alter_contactusmodel_created'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contactusmodel',
            name='created',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 7, 15, 5, 29, 722621, tzinfo=utc)),
        ),
    ]
| [
"alienone305@gmail.com"
] | alienone305@gmail.com |
17ec3a061f9016f31d5ebf010a19f91f14854e8e | 6d832f5c707e680f52d16328996abc4735995f28 | /client.py | 73c4101db9d7ebb658c670e74586985efd8236ff | [] | no_license | rajkumar368/socket | ef8796569932d1b8b6fc0c870c9beecee5efae39 | 28ce2c8084153fbc3ef10df46672b024a0cd106e | refs/heads/master | 2023-08-14T23:50:12.103411 | 2021-10-17T17:45:18 | 2021-10-17T17:45:18 | 418,209,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import socket
# Minimal TCP client: sends a name, prints the server's single reply.
conn = socket.socket()  # default family/type: AF_INET, SOCK_STREAM (TCP)
conn.connect(("localhost", 9998))  # server is expected on localhost:9998
name = input("name please")
conn.send((name).encode())  # send the name as UTF-8 bytes
print(conn.recv(1024).decode())  # read and display the server's response
conn.close()
| [
"rathorerajkumar368@gmail.com"
] | rathorerajkumar368@gmail.com |
208365338817a860c7bc3871e174ece1efd1ae8f | 33f83ef013bdc9b04ee122112b3c4e204fab025c | /tuples.py | d94e0c1ff6bd229392c89f62fa6764fe8f4b5296 | [] | no_license | randomguy069/PythonPractice | 830ba3ab0142dbda81d41bbf4241f45cb134aa2c | 13117319a159cf690e869eebf76d865df3403fef | refs/heads/master | 2021-08-31T14:34:14.060996 | 2017-12-21T18:08:34 | 2017-12-21T18:08:34 | 114,798,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | t = ("Hello","World","Ibrahimovic")
# List every attribute/method the tuple t (defined above) exposes.
print(dir(t))
| [
"noreply@github.com"
] | noreply@github.com |
5e39a722a04a9e21094fe9ea45f9a9829ae5e969 | 4e1093730eff65a60956330d190c0ad7bf008023 | /test_proj/IPsamp/modules/StaticFullGrowthModel/HuangFullFunc.py | c6d9318b1e02048dfb99e9513e5afefd70cffd57 | [] | no_license | csivilo/IPMP | d37bfee1681877dab629abe3c20c125e4d598c7b | b2ff3617be57fdd9905cee4b2d8c0f04cbe8fbe8 | refs/heads/master | 2021-01-09T20:21:05.661063 | 2016-08-12T16:21:01 | 2016-08-12T16:21:01 | 61,127,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 08 22:30:34 2013
@author: Lihan_Huang
"""
import numpy as np
class HuangFullModelFunc():
    """Evaluates the Huang full-growth primary model at times x (Python 2).

    The predicted values are stored in self.HuangFull, computed entirely in
    the constructor, and echoed to stdout together with x.
    """
    def __init__(self, Ymax, Y0, mumax, Lag, x):
        # Huang's lag-phase transition term as a function of time x and Lag
        b = x + 0.25*np.log(1 + np.exp(-4.0*(x-Lag))) - 0.25*np.log(1+np.exp(4.0*Lag))
        # full growth curve: bounded between Y0 and Ymax, rate mumax
        self.HuangFull = Y0 + Ymax -np.log(np.exp(Y0) + (np.exp(Ymax)-np.exp(Y0))*np.exp(-mumax*b))
        print x, self.HuangFull
def main():
    # example parameters; 2.303 (= ln 10) converts log10 counts to ln units
    Y0 = 2.0*2.303
    Ymax = 8.5*2.303
    mumax = 2.0
    Lag = 5.0
    # generate an x array or list
    x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    HuangFullModel = HuangFullModelFunc(Ymax, Y0, mumax, Lag, x)
if __name__ == '__main__':
main() | [
"carlo@sivilotti.com"
] | carlo@sivilotti.com |
3eed1b10050537ad9781069bb46ed2f3703cf569 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/intermediate-bite-14-generate-a-table-of-n-sequences.py | 55c17a68b9ae77cc101badbd72287e480fb740fa | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,471 | py | """
DATE: 05 Nov 2020
TASK DESCRIPTION:
Write a function that receives one or more sequences. The sequences are already defined for you.
The function should return a table (list of strings) where the columns are the sequences
(example below).
To keep it simple we work with equally sized sequences so you don't have to worry about
handling a missing value (you should end up with a grid of 6 rows x n columns).
There are some Pythonic idioms you can use here, hint: think of pants ;)
Example call (look at the tests for more detail):
>>> generate_table(names, aliases)
['Julian | Pythonista', 'Bob | Nerd', 'PyBites | Coder',
'Dante | Pythonista', 'Martin | Nerd', 'Rodolfo | Coder']
Bonus: use a generator to build up the table rows.
"""
import random
names = 'Julian Bob PyBites Dante Martin Rodolfo'.split()
aliases = 'Pythonista Nerd Coder'.split() * 2
points = random.sample(range(81, 101), 6)
awake = [True, False] * 3
SEPARATOR = ' | '
### ----------- My solution ---------------------------
def my_generate_table(*args):
    """Return table rows, one string per row, for the given equally sized
    sequences: row i joins the i-th element of every sequence with ' | '.

    Equivalent to the original incremental concatenation, but expressed as
    a single transpose-and-join comprehension.
    """
    # zip(*args) transposes the sequences; str() matches the original's
    # per-cell stringification.
    return [" | ".join(str(cell) for cell in row) for row in zip(*args)]
### ---------- PyBites original solution ---------------
def pyb_generate_table(*sequences):
for seq in zip(*sequences):
seq = [str(val) for val in seq]
yield SEPARATOR.join(seq) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3d50d6da604a3d4bcc099f0c83ef6b39d1bb49f3 | 13423e31f3f624e33d442315e64962f08e093144 | /django_signup/cminji_signup/cminji_signup/settings.py | dca524744196e9f67b16bd35f78c8283a3c67364 | [] | no_license | c-min-ji/LikeLion_8th | 14b1a766ef3b2d36389f6f8bcea3c7078168fefd | db86bd2eabb2c7a9dcdea9e6534d6a1def41f93f | refs/heads/master | 2023-08-13T16:42:26.546081 | 2021-09-20T03:25:36 | 2021-09-20T03:25:36 | 407,765,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,426 | py | """
Django settings for cminji_signup project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#1dn^xmakc4ehngc!lme-dh@1$4qld6)zh&a+ar0whgn8au3kg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cminji_signup.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cminji_signup.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
#Email을 발송하는 유저
EMAIL_HOST = 'smtp.gmail.com'
#Gmail 사용위한 Port number - 권장사항
EMAIL_PORT = '587'
EMAIL_HOST_USER = 'mjeewh@gmail.com'
#실제 비번 쓰기
EMAIL_HOST_PASSWORD = '*******'
#TLS
EMAIL_USE_TLS = True
#회신에 대한 기본 설정
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER | [
"mjeewh@gmail.com"
] | mjeewh@gmail.com |
32d7206664bc17538cc22498e21852e555a705a6 | 7d288264c3d14115735af70374dff644236f6608 | /musicbot/exceptions.py | 23ae4d68d495fdbb6ed1faf947f418f2ef3341c5 | [
"MIT"
] | permissive | Dodek69/ChudyBot | fa160628369b7f212448eab9b8967349ed84dba4 | 9d74f9a35397015276f754d3459e06225a2c201f | refs/heads/master | 2020-04-20T17:28:18.159390 | 2019-02-14T19:44:55 | 2019-02-14T19:44:55 | 168,985,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | import shutil
import textwrap
class MusicbotException(Exception):
    """Base exception for musicbot errors.

    Carries a message plus an expire_in value (seconds the rendered
    message should live, 0 meaning no expiry).
    """
    def __init__(self, message, *, expire_in=0):
        super().__init__(message)
        self.expire_in = expire_in
        self._message = message
    @property
    def message(self):
        """Formatted message text (subclasses may decorate it)."""
        return self._message
    @property
    def message_no_format(self):
        """Message text without any formatting applied."""
        return self._message
class CommandError(MusicbotException):
    """Error raised while handling a bot command."""
    pass
class ExtractionError(MusicbotException):
    """Error raised during media information extraction."""
    pass
class WrongEntryTypeError(ExtractionError):
    """Extraction found a different entry type than expected.

    is_playlist -- whether the entry turned out to be a playlist;
    use_url -- the URL the caller should use instead.
    """
    def __init__(self, message, is_playlist, use_url):
        super().__init__(message)
        self.is_playlist = is_playlist
        self.use_url = use_url
class FFmpegError(MusicbotException):
    """Error related to ffmpeg processing."""
    pass
class FFmpegWarning(MusicbotException):
    """Non-fatal ffmpeg-related condition."""
    pass
class SpotifyError(MusicbotException):
    """Error raised by Spotify-related functionality."""
    pass
class PermissionsError(CommandError):
    """Raised when the invoking user lacks permission for a command."""
    @property
    def message(self):
        # prefix a fixed explanation; the original reason text follows
        return "You don't have permission to use that command.\nReason: " + self._message
class HelpfulError(MusicbotException):
    """Exception whose message renders as a Problem/Solution pair.

    `message` wraps both sections to the current terminal width;
    `message_no_format` skips wrapping (width=None).
    """
    def __init__(self, issue, solution, *, preface="An error has occured:", footnote='', expire_in=0):
        self.issue = issue
        self.solution = solution
        self.preface = preface
        self.footnote = footnote
        self.expire_in = expire_in
        # NOTE: super().__init__ is not called, so Exception.args stays empty
        self._message_fmt = "\n{preface}\n{problem}\n\n{solution}\n\n{footnote}"
    @property
    def message(self):
        # terminal-width-wrapped rendering
        return self._message_fmt.format(
            preface = self.preface,
            problem = self._pretty_wrap(self.issue, "  Problem:"),
            solution = self._pretty_wrap(self.solution, "  Solution:"),
            footnote = self.footnote
        )
    @property
    def message_no_format(self):
        # unwrapped rendering: width=None keeps each section on one line
        return self._message_fmt.format(
            preface = self.preface,
            problem = self._pretty_wrap(self.issue, "  Problem:", width=None),
            solution = self._pretty_wrap(self.solution, "  Solution:", width=None),
            footnote = self.footnote
        )
    @staticmethod
    def _pretty_wrap(text, pretext, *, width=-1):
        # width=None -> no wrapping; width=-1 -> use current terminal width
        if width is None:
            return '\n'.join((pretext.strip(), text))
        elif width == -1:
            pretext = pretext.rstrip() + '\n'
            width = shutil.get_terminal_size().columns
        lines = textwrap.wrap(text, width=width - 5)
        # indent each wrapped line, pad/trim to width-1, keep trailing newline
        lines = (('    ' + line).rstrip().ljust(width-1).rstrip() + '\n' for line in lines)
        return pretext + ''.join(lines).rstrip()
class HelpfulWarning(HelpfulError):
    """A HelpfulError treated as a warning rather than a fatal error."""
    pass
class Signal(Exception):
    """Base class for control-flow signals (not error conditions)."""
    pass
class RestartSignal(Signal):
    """Signal requesting a restart."""
    pass
class TerminateSignal(Signal):
    """Signal requesting termination."""
    pass
| [
"dodek@vip.interia.pl"
] | dodek@vip.interia.pl |
7e69c13442bb7cc985e26e463ef190b600acff38 | ad2704933de4502ae9de91e6d915f9dbe010b446 | /bambi/chapter03/knock21.py | 3f84be2aee57cbeac3b09186547391cea273a569 | [] | no_license | tmu-nlp/100knock2017 | 266e68917d8d5a7f5d0c064f1bc2da5fa402a253 | 629bd1155d0fe78cd9302ae9a7cdf0922b778fe7 | refs/heads/master | 2021-01-19T17:36:53.328997 | 2017-07-24T07:09:54 | 2017-07-24T07:09:54 | 88,334,932 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import json, re
def findTextFromTitle(title):
    """Return the wiki text of the first article whose title matches, or None.

    jawiki-country.json holds one JSON object per line; the file is streamed
    line by line instead of materialising every line with readlines().
    """
    with open('jawiki-country.json') as json_data:
        for line in json_data:
            data = json.loads(line)
            if data["title"] == title:
                return data["text"]
    return None  # explicit: no article with this title
def getEnglandArticle():
    # "イギリス" is the Japanese Wikipedia title for the United Kingdom
    return findTextFromTitle("イギリス")
def getCategoryRawList(content):
    """Return every raw '[[Category:...]]' markup occurrence in content."""
    category_pattern = re.compile(r"\[\[Category:.*?\]\]")
    return category_pattern.findall(content)
# Extract and print the raw category markup lines of the England article.
result = getCategoryRawList(getEnglandArticle())
print("\n".join(result)) # one category per line is easier to read (result is a list)
| [
"mingchanbambina@gmail.com"
] | mingchanbambina@gmail.com |
98fb0dcf64f5486c42788855054e4d8f97762dd7 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-nlp/huaweicloudsdknlp/v2/model/post_sentence_embedding_req.py | 70377e135a6ed8d93d796857d996707216550b46 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,253 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PostSentenceEmbeddingReq:
    """Request body model for the sentence-embedding API (generated code).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'sentences': 'list[str]',
        'domain': 'str'
    }
    attribute_map = {
        'sentences': 'sentences',
        'domain': 'domain'
    }
    def __init__(self, sentences=None, domain=None):
        """PostSentenceEmbeddingReq

        The model defined in huaweicloud sdk

        :param sentences: List of texts to embed; each text 1-512 characters
            (UTF-8) and list size 1-1000.
        :type sentences: list[str]
        :param domain: Supported domain type; currently only the general
            domain is supported, default "general".
        :type domain: str
        """
        self._sentences = None
        self._domain = None
        self.discriminator = None
        self.sentences = sentences
        if domain is not None:
            self.domain = domain
    @property
    def sentences(self):
        """Gets the sentences of this PostSentenceEmbeddingReq.

        List of texts to embed; each text 1-512 characters (UTF-8), list size 1-1000.

        :return: The sentences of this PostSentenceEmbeddingReq.
        :rtype: list[str]
        """
        return self._sentences
    @sentences.setter
    def sentences(self, sentences):
        """Sets the sentences of this PostSentenceEmbeddingReq.

        List of texts to embed; each text 1-512 characters (UTF-8), list size 1-1000.

        :param sentences: The sentences of this PostSentenceEmbeddingReq.
        :type sentences: list[str]
        """
        self._sentences = sentences
    @property
    def domain(self):
        """Gets the domain of this PostSentenceEmbeddingReq.

        Supported domain type; currently only the general domain, default "general".

        :return: The domain of this PostSentenceEmbeddingReq.
        :rtype: str
        """
        return self._domain
    @domain.setter
    def domain(self, domain):
        """Sets the domain of this PostSentenceEmbeddingReq.

        Supported domain type; currently only the general domain, default "general".

        :param domain: The domain of this PostSentenceEmbeddingReq.
        :type domain: str
        """
        self._domain = domain
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # sensitive attributes are masked rather than serialised
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PostSentenceEmbeddingReq):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d0f6f3767403a0426f929d980d9fc2eff41bd052 | 8eabf994204c8dcd4fa2e1903ee3bc413779fd5e | /Cleaning_Merging/Preprocessing.py | 6a64fba645740d82e5e462ec632c155028ad73d5 | [] | no_license | rajdua22/tennis_betting | f12eff365420a7103e286050383568e057419c62 | d09acee14576eb19e12fed2bd41e7f16a9eb4dbf | refs/heads/master | 2020-07-26T06:54:06.467898 | 2019-12-21T10:52:44 | 2019-12-21T10:52:44 | 208,569,545 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,610 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 10:52:54 2019
@author: rajdua
Description: Loads in dataset and completes basic data cleaning and preprocessing.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
data = pd.read_csv('Final Merged.csv', encoding = "ISO-8859-1", low_memory=False)
data['matches'] = 1
data['Date'] = pd.to_datetime(data['Date'])
def DSW(x):
    """Return 1 when the set loser took only 1-2 games (a dominant set win),
    else 0.

    NaN (missing set score) compares False on both sides and yields 0,
    matching the original (x > 0) & (x < 3) behaviour.
    """
    return int(0 < x < 3)
def CSW(x):
    """Return 1 when the set loser took 3-5 games (a close set win), else 0.

    NaN compares False on both sides and yields 0, matching the original
    (x > 2) & (x < 6) behaviour.
    """
    return int(2 < x < 6)
def Tie(x, y):
    """Return 1 when the set score (x, y) is exactly 7-6 (won on a tiebreak),
    else 0. NaN inputs compare unequal and yield 0."""
    return int(x == 7 and y == 6)
data['DSW_W1'] = data['L1'].apply(DSW)
data['DSW_W2'] = data['L2'].apply(DSW)
data['DSW_W3'] = data['L3'].apply(DSW)
data['DSW_W4'] = data['L4'].apply(DSW)
data['DSW_W5'] = data['L5'].apply(DSW)
data['DSW'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data['L1'].apply(CSW)
data['DSW_W2'] = data['L2'].apply(CSW)
data['DSW_W3'] = data['L3'].apply(CSW)
data['DSW_W4'] = data['L4'].apply(CSW)
data['DSW_W5'] = data['L5'].apply(CSW)
data['CSW'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data['W1'].apply(DSW)
data['DSW_W2'] = data['W2'].apply(DSW)
data['DSW_W3'] = data['W3'].apply(DSW)
data['DSW_W4'] = data['W4'].apply(DSW)
data['DSW_W5'] = data['W5'].apply(DSW)
data['DSL'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data['W1'].apply(CSW)
data['DSW_W2'] = data['W2'].apply(CSW)
data['DSW_W3'] = data['W3'].apply(CSW)
data['DSW_W4'] = data['W4'].apply(CSW)
data['DSW_W5'] = data['W5'].apply(CSW)
data['CSL'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data['L1'].apply(DSW)
data['DSW_W2'] = data['L2'].apply(DSW)
data['DSW_W3'] = data['L3'].apply(DSW)
data['DSW_W4'] = data['L4'].apply(DSW)
data['DSW_W5'] = data['L5'].apply(DSW)
data['DSW'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data['L1'].apply(CSW)
data['DSW_W2'] = data['L2'].apply(CSW)
data['DSW_W3'] = data['L3'].apply(CSW)
data['DSW_W4'] = data['L4'].apply(CSW)
data['DSW_W5'] = data['L5'].apply(CSW)
data['CSW'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data.apply(lambda row: Tie(row['W1'], row['L1']), axis = 1)
data['DSW_W2'] = data.apply(lambda row: Tie(row['W2'], row['L2']), axis = 1)
data['DSW_W3'] = data.apply(lambda row: Tie(row['W3'], row['L3']), axis = 1)
data['DSW_W4'] = data.apply(lambda row: Tie(row['W4'], row['L4']), axis = 1)
data['DSW_W5'] = data.apply(lambda row: Tie(row['W5'], row['L5']), axis = 1)
data['TieW'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['DSW_W1'] = data.apply(lambda row: Tie(row['L1'], row['W1']), axis = 1)
data['DSW_W2'] = data.apply(lambda row: Tie(row['L2'], row['W2']), axis = 1)
data['DSW_W3'] = data.apply(lambda row: Tie(row['L3'], row['W3']), axis = 1)
data['DSW_W4'] = data.apply(lambda row: Tie(row['L4'], row['W4']), axis = 1)
data['DSW_W5'] = data.apply(lambda row: Tie(row['L5'], row['W5']), axis = 1)
data['TieL'] = data.apply(lambda row: row.DSW_W1 + row.DSW_W2 + row.DSW_W3 + row.DSW_W4 + row.DSW_W5, axis=1)
data['SetsCompleted'] = data['DSW'] + data['DSL'] + data['CSW'] + data['CSL'] + data['TieW'] + data['TieL']
# '1' indicates quarterfinal or more important match
def stage(x):
    """Return 1 for quarterfinal-or-later rounds ('QF', 'SF', 'F'), else 0.

    Applied to the 'round' column to build the SOT (stage-of-tournament) flag.
    """
    # Set membership replaces the original chained bitwise-or comparisons
    # ((x == 'F') | (x == 'SF') | (x == 'QF')) and drops a stray semicolon.
    return 1 if x in ('F', 'SF', 'QF') else 0
data['SOT'] = data['round'].apply(stage)
data['Major'] = (data['Series'] == 'Grand Slam').astype(int)
data = data.drop(columns = ['Unnamed: 0', 'DSW_W1', 'DSW_W2', 'DSW_W3', 'DSW_W4', 'DSW_W5'])
data['games'] = data[['W1', 'L1', 'W2', 'L2', 'W3', 'L3', 'W4','L4', 'W5', 'L5']].sum(axis = 1)
data['oddsw'] = data[['CBW', 'GBW', 'IWW', 'SBW',
'B365W', 'B&WW', 'EXW', 'PSW', 'UBW', 'LBW', 'SJW']].mean(axis = 1)
data['oddsl'] = data[['CBL', 'GBL', 'IWL', 'SBL',
'B365L', 'B&WL', 'EXL', 'PSL', 'UBL', 'LBL', 'SJL']].mean(axis = 1)
data = data.dropna(subset = ['oddsw'])
data = data.dropna(subset = ['oddsl'])
data = data.reset_index(drop = True)
def underdog(x):
    """Return 1 when the match winner carried odds at least as long as the loser's.

    Equal odds count as an underdog win, matching the original branch order.
    """
    # The favourite won exactly when the loser's odds were the longer ones.
    favourite_won = x.oddsl > x.oddsw
    return 0 if favourite_won else 1
data['underdogWon'] = data.apply(underdog, axis = 1)
# Copp: head-to-head win percentage by ranking position.
# Keep only rows with numeric rankings for both players.
data= data[~data['WRank'].isnull()]
data= data[~data['LRank'].isnull()]
data= data[data['WRank'].str.isnumeric()]
data= data[data['LRank'].str.isnumeric()]
data['WRank'] = pd.to_numeric(data['WRank'])
data['LRank'] = pd.to_numeric(data['LRank'])
data = data.reset_index(drop = True)
# Added 6/16 - Get win percentage for all ranks agasint each other
# Build a sorted list of every ranking position seen on either side.
winners = data['WRank']
winners = pd.unique(winners)
winners = winners.tolist()
winners.sort()
losers = data['LRank']
losers = pd.unique(losers)
losers = losers.tolist()
losers.sort()
players = list (set(winners) | set(losers))
players.sort()
winners = data['WRank']
losers = data['LRank']
# Map each raw ranking to a dense 0..m-1 index into the results matrix.
old_dict = dict(enumerate(players))
new_dict = dict([(value, key) for key, value in old_dict.items()])
winners = winners.map(new_dict)
losers = losers.map(new_dict)
matches = pd.concat([winners, losers], axis=1)
m = len(players)
# results[a, b] counts how often rank-index a beat rank-index b.
results = np.zeros(shape=(m,m))
for index, row in matches.iterrows():
    results[row['WRank'], row['LRank']] += 1
n = len(matches)
percent = np.zeros(n)
totalM = np.zeros(n)
# For each match, estimate the winner's head-to-head win rate against the
# loser.  When fewer than 20 direct matches exist, widen to a neighborhood
# of nearby ranks until at least 20 matches are accumulated.
for index, row in matches.iterrows():
    currentM = 0
    wins = 0
    losses = 0
    wrank = row['WRank']
    print("wrank: ", wrank)
    lrank = row['LRank']
    print ("lrank: ", lrank)
    if (wrank < m) & (wrank >= 0):
        if (lrank < m) & (lrank >= 0):
            wins = results[wrank, lrank]
            print("wins: ", wins)
            losses = results[lrank, wrank]
            print("losses: ", losses)
            currentM += wins
            currentM += losses
            print("currentM: ", currentM)
    i = 1
    while currentM < 20:
        # NOTE(review): each pass re-sums the full (2i)-wide neighborhood,
        # so cells counted at smaller i are added again at larger i --
        # confirm this overlap is intended.
        twins = 0
        tlosses = 0
        for j in range(wrank - i, wrank + i):
            for k in range(lrank - i, lrank + i):
                if ((j < m) & (j >= 0)):
                    if ((k < m) & (k >= 0)):
                        twins += results[j, k]
                        tlosses += results[k, j]
        currentM += twins
        currentM += tlosses
        wins += twins
        losses += tlosses
        i+= 1
        print ("wins: ", wins)
        print ("losses ", losses)
        print(currentM)
    # index runs over the reset 0..n-1 range, so this guard is defensive.
    if index < n:
        totalM[index] = wins + losses
        if wins + losses == 0:
            percent[index] = np.nan
        elif losses == 0:
            percent[index] = 1
        elif wins == 0:
            percent[index] = 0
        elif (wins>0) & (losses > 0):
            percent[index] = wins / (wins + losses)
data['CoppW'] = percent
data['CoppL'] = 1 - percent
data['MatchesPlayed'] = totalM
# Create a 0/1 column for surface
data['Clay'] = data.Surface == 'Clay'
data['Clay'] *= 1
data['Hard'] = data.Surface == 'Hard'
data['Hard'] *= 1
data['Grass'] = data.Surface == 'Grass'
data['Grass'] *= 1
data['Carpet'] = data.Surface == 'Carpet'
data['Carpet'] *= 1
# Reciprocal rank features (rank 1 -> 1.0, rank 100 -> 0.01).
data['WInverseRank'] = data['WRank'].apply(lambda x: 1 /x)
data['LInverseRank'] = data['LRank'].apply(lambda x: 1 /x)
data.to_csv('Final Merged1.csv')
| [
"rajvirdu@usc.edu"
] | rajvirdu@usc.edu |
43ef6671cbd2943a73a2201439c31fdfc5c0ad9c | 54a745510b16111f5e5f610a07be49ea1e79fccf | /py1810/hello_mysql_01.py | 086070e7afa84b55e4f9c256485a983058c32dcc | [] | no_license | SonDog0/bigdata | 84a5b7c58ad9680cdc0e49ac6088f482e09118a5 | e6cd1e3bbb0bfec0c89a31b3fb4ef66d50c272be | refs/heads/master | 2020-04-22T02:24:16.469718 | 2019-03-13T08:59:26 | 2019-03-13T08:59:26 | 170,047,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | # 파이썬으로 MySQL, MariaDB 다루기
# To use a MySQL database from Python you need a MySQL driver module
# written against the Python DB API specification;
# pyMySQL is the module most commonly used.
import pymysql
# # create a mysql connection
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
#
# curs = conn.cursor()
#
# curs.execute('DROP TABLE items')
# curs.execute('''create table items( item_id INTEGER PRIMARY KEY AUTO_INCREMENT, name TEXT, price INTEGER)''' )
# # run a SQL query
# sql = 'select * from books'
# curs.execute(sql)
#
# # process the result set
# for rs in curs.fetchall():
#     print(rs[0], rs[1], rs[2], rs[3]) # array-based cursor
#
#
#
#
# # close the mysql connection
# conn.close()
# # create a mysql connection
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
# # create a dict cursor from the connection
# curs = conn.cursor(pymysql.cursors.DictCursor)
#
# # run a SQL query
# sql = 'select * from books'
# curs.execute(sql)
#
# # process the result set
# for rs in curs.fetchall():
#     print(rs['bno'], rs['bname'], rs['bpub'], rs['bprice']) # dict-based cursor
#
# # close the mysql connection
# conn.close()
# Store the multiples of 2, 3 and 5 between 1 and 100.
# Table name is "numbers".
# Fields are no, no2, no3, no5 -- NOTE(review): the CREATE below only
# defines no2/no3/no5; confirm whether a "no" column was intended.
# create a mysql connection
# NOTE(review): credentials are hard-coded in source -- move to config/env.
conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
# create a cursor from the connection
curs = conn.cursor(pymysql.cursors.DictCursor)
# SQL statements to run (insert uses %s placeholders for parameter binding)
create_sql = 'create table numbers( no2 int, no3 int, no5 int )'
drop_sql = 'drop table numbers'
sql = 'insert into numbers values(%s,%s,%s)'
# sql = 'select * from books'
curs.execute(drop_sql)
curs.execute(create_sql)
# multiples of 2, 3 and 5 from 1 to 100; a non-multiple stores 0
num1 = 0
num2 = 0
num3 = 0
for i in range (1,101):
    if i % 2 == 0:
        num1 = i
    else:
        num1 = 0
    if i % 3 == 0:
        num2 = i
    else:
        num2 = 0
    if i % 5 == 0:
        num3 = i
    else:
        num3 = 0
    curs.execute(sql, (num1, num2, num3))
# commit the changes to the server
conn.commit()
# process the result set
select_sql = 'select * from numbers'
curs.execute(select_sql)
for rs in curs.fetchall():
    print(rs['no2'], rs['no3'], rs['no5']) # dict-based cursor
# close the mysql connection
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
efc5d69a2a469d2882f61cce39f0d465c7cad7f4 | 38b843fd84b1c7c8b23b302538d27fdb77520ef7 | /testclient.py | 48efa3b5b1087835dbef38913cf55a58fc7feda9 | [
"WTFPL"
] | permissive | David-OC/IngredStore | 5ce1f81078dacdf4983f6863b25f35355fe4a42b | 584f6a13176529a6b89405d4ca18f95a4407d2b8 | refs/heads/master | 2021-01-10T18:01:30.394800 | 2016-02-19T11:01:55 | 2016-02-19T11:01:55 | 51,767,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import sys
from ingredstore import IngredStore
def main(argv):
    """Print every ingredient from the store named by the first CLI argument.

    argv[1] is forwarded to IngredStore unchecked -- assumes the caller
    supplied at least one argument (IndexError otherwise).
    """
    ingredinst = IngredStore(argv[1])
    ingredients = ingredinst.get_ingredients()
    for item in ingredients:
        print item  # Python 2 print statement; this module is Python 2 only
if __name__ == "__main__":
    main(sys.argv)
| [
"mrdavidoc@gmail.com"
] | mrdavidoc@gmail.com |
83f1e135d224d46ae1f5b6a914d4e8ae20c41a87 | 5d10705fda6f9c38cc28f33a052eb1981eef825d | /modeling/input/gen_grid.py | d2bc32b3e4d9b175041c5b63fcb143fe7a6f6c42 | [] | no_license | raryskin/division_of_labor | 11140b0a37c47512ce5404af0a4b13268bf20d47 | 30ed0b161782e83170438965ceb3e74a2bc949d9 | refs/heads/master | 2023-02-18T22:36:04.695745 | 2021-01-07T07:08:32 | 2021-01-07T07:08:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import csv
import numpy as np
chainNum = 1
with open('coarse-grid.csv', 'w') as csv_file :
writer = csv.writer(csv_file, delimiter=',')
for alpha in [x for x in np.linspace(1,10,num=9)] :
for perspectiveCost in np.linspace(0, 1, num=21) :
for uttCost in [0.0001, 0.001, 0.009, 0.01, 0.02, 0.03, 0.05, 0.075, 0.1] :
writer.writerow([round(perspectiveCost, 2), round(alpha,2), round(uttCost, 2), chainNum])
chainNum = chainNum + 1
chainNum = 40000
with open('fine-grid.csv', 'w') as csv_file :
writer = csv.writer(csv_file, delimiter=',')
for perspectiveCost in np.linspace(0, 0.5, num=21) :
writer.writerow([perspectiveCost, 2, 0.03, chainNum])
chainNum = chainNum + 1
| [
"hawkrobe@gmail.com"
] | hawkrobe@gmail.com |
89ed2675d9419d6d488d990f069ccd3e7cb482ae | 141b3c810ab8612cd6d9bb89f79d7ce9ac3ce716 | /parsetab.py | 7efe81f50dd3a07918d30dbe52b7bdfa16f40a35 | [] | no_license | TeddyDotNet/pyNase | 61c2cb47432e983f00f8e80f113606b2ede32453 | 42d5b2ed131c2b52a4b98c6160ef6126f607ba6b | refs/heads/master | 2020-05-18T16:39:03.747470 | 2012-12-05T09:28:13 | 2012-12-05T09:28:13 | 7,014,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,688 | py |
# parsetab.py
# This file is automatically generated. Do not edit.
_lr_method = 'SLR'
_lr_signature = '\xce\xfe\xae\xcd\xa2<`\xbc\xd1\x0f\x9c\xe9\xd9\xa8\xef\n'
_lr_action_items = {'INLINE_ELSE_SYMBOL':([36,72,32,28,70,35,69,12,48,71,84,38,55,83,37,73,16,68,58,39,29,50,67,57,0,21,49,44,34,51,],[-50,79,-50,-24,-26,-29,-50,-50,-47,-50,-50,-50,-21,-35,-50,-20,-50,-48,-50,-25,-31,-49,-30,-18,-50,-50,-50,-27,-28,-19,]),'PLUS_SYMBOL':([29,67,16,39,49,58,84,36,0,35,68,50,37,70,34,69,12,38,48,21,71,83,44,28,32,],[-31,-30,-50,-25,-50,-50,-50,53,-50,-29,-48,-49,-50,-26,-28,53,-50,53,-47,-50,-50,-35,-27,-24,-50,]),'LE_SYMBOL':([44,32,39,51,49,16,58,35,84,36,0,70,80,45,37,73,50,28,69,21,57,71,83,34,12,38,48,68,29,55,67,],[-27,-50,-25,-19,-50,-50,-50,-29,-50,-50,-50,-26,60,60,-50,-20,-49,-24,-50,-50,-18,-50,-35,-28,-50,-50,-47,-48,-31,-21,-30,]),'LT_SYMBOL':([68,12,44,34,21,57,32,45,80,51,71,83,16,48,29,35,69,50,37,0,38,55,49,58,67,39,84,28,70,73,36,],[-48,-50,-27,-28,-50,-18,-50,61,61,-19,-50,-35,-50,-47,-31,-29,-50,-49,-50,-50,-50,-21,-50,-50,-30,-25,-50,-24,-26,-20,-50,]),'INLINE_IF_SYMBOL':([52,42,77,54,79,31,62,65,40,59,22,76,61,66,82,43,30,74,60,41,27,63,53,64,],[-23,30,-39,30,30,30,30,-44,-32,-46,30,-40,-41,30,30,-33,30,30,-42,-34,30,-43,-22,-45,]),'DIVIDE_SYMBOL':([38,84,71,49,0,12,48,21,67,34,29,28,16,37,35,36,50,69,58,83,68,32,],[-50,-50,-50,-50,-50,-50,-47,-50,-30,-28,-31,43,-50,-50,-29,-50,-49,-50,43,-35,-48,-50,]),'COMMA_SYMBOL':([37,16,],[23,23,]),'ANY_DIGIT':([61,54,53,49,79,59,40,60,62,76,41,74,42,52,31,43,82,32,27,65,77,22,30,63,64,66,],[-41,32,-22,49,32,-46,-32,-42,32,-40,-34,32,32,-23,32,-33,32,49,32,-44,-39,32,32,-43,-45,32,]),'$':([49,28,71,12,16,58,84,36,18,0,20,37,19,8,38,26,21,69,10,32,],[-50,-50,-50,-50,-50,-50,-50,-50,-5,-50,-4,-50,-2,-1,-50,-3,-50,-50,0,-50,]),'MINUS_SYMBOL':([34,16,65,29,63,22,76,64,28,71,48,0,83,60,66,37,59,70,77,12,50,36,67,31,49,84,35,44,30,79,61,62,74,69,32,82,68,39,58,38,21,],[-28,-50,-44,-31,-43,27,-40,-45,-24,-50,-47,-50,-35,-42,27,-50,-46,-26,-39,-50,-49,52,-30,27,-50,-50,-29,-27,27,27,-41,27,27,52,-50,27,-48,-25,-50,52,-50,]),'CLOSE_PARAENTHESIS_SYMBOL':([39,84,69,67,49,51,0,12,83,38
,71,55,16,50,36,37,21,44,28,48,70,29,35,32,68,34,47,73,57,58,],[-25,-50,-50,-30,-50,-19,-50,-50,-35,-50,-50,-21,-50,-49,-50,-50,-50,-27,-24,-47,-26,-31,-29,-50,-48,-28,67,-20,-18,-50,]),'EQ_SYMBOL':([70,28,80,44,73,37,21,58,67,51,0,29,39,34,68,57,83,38,48,12,49,84,50,16,45,71,55,69,36,32,35,],[-26,-24,63,-27,-20,-50,-50,-50,-30,-19,-50,-31,-25,-28,-48,-18,-35,-50,-47,-50,-50,-50,-49,-50,63,-50,-21,-50,-50,-50,-29,]),'MODULO_SYMBOL':([38,37,48,35,29,49,16,84,69,32,83,71,34,68,36,21,12,28,58,67,0,50,],[-50,-50,-47,-29,-31,-50,-50,-50,-50,-50,-35,-50,-28,-48,-50,-50,-50,41,41,-30,-50,-49,]),'IDENTIFIER':([9,82,12,21,76,52,27,62,74,63,1,54,61,3,64,40,43,77,79,66,41,30,23,59,42,31,60,18,53,65,5,22,0,],[-16,35,14,14,-40,-23,35,35,35,-43,15,35,-41,16,-45,-32,-33,-39,35,35,-34,35,37,-46,35,35,-42,-5,-22,-44,17,35,14,]),'DELIMITER_SYMBOL':([71,17,24,21,35,11,44,25,32,50,38,33,73,28,29,36,51,2,58,68,48,37,49,0,56,55,39,6,69,57,7,84,4,67,18,15,12,70,16,13,20,83,34,],[-50,-11,-15,-50,-29,18,-27,-13,-50,-49,-50,-17,-20,-24,-31,-50,-19,-7,-50,-48,-47,-50,-50,-50,-14,-21,-25,-10,-50,-18,-9,-50,-8,-30,-5,-12,-50,-26,-50,-6,-10,-35,-28,]),'ASSIGN_SYMBOL':([14,],[22,]),'OPEN_PARAENTHESIS_SYMBOL':([27,52,41,76,42,77,30,66,54,53,61,31,59,79,60,43,22,82,74,62,40,64,63,65,],[31,-23,-34,-40,31,-39,31,31,31,-22,-41,31,-46,31,-42,-33,31,31,31,31,-32,-45,-43,-44,]),'TIMES_SYMBOL':([29,21,34,0,69,36,35,58,83,84,71,32,28,68,49,37,50,16,38,67,48,12,],[-31,-50,-28,-50,-50,-50,-29,40,-35,-50,-50,-50,40,-48,-50,-50,-49,-50,-50,-30,-47,-50,]),'INLINE_FI_SYMBOL':([39,36,50,68,37,55,69,70,83,0,12,58,44,71,28,29,16,73,57,48,35,32,51,84,34,21,38,49,67,81,],[-25,-50,-49,-48,-50,-21,-50,-26,-35,-50,-50,-50,-27,-50,-24,-31,-50,-20,-18,-47,-29,-50,-19,-50,-28,-50,-50,-50,-30,83,]),'WRITE_SYMBOL':([21,0,12,18,],[1,1,1,-5,]),'NE_SYMBOL':([68,32,12,36,55,57,37,16,51,38,84,48,50,45,80,21,70,73,35,44,39,0,71,28,49,69,34,29,58,67,83,],[-48,-50,-50,-50,-21,-18,-50,-50,-19,-50,-50,-47,-49,59,59,-50,-26,-20,-29,-27
,-25,-50,-50,-24,-50,-50,-28,-31,-50,-30,-35,]),'GT_SYMBOL':([39,68,45,49,38,0,67,51,57,44,34,50,32,69,29,35,21,37,73,28,80,71,36,55,84,12,48,16,58,70,83,],[-25,-48,64,-50,-50,-50,-30,-19,-18,-27,-28,-49,-50,-50,-31,-29,-50,-50,-20,-24,64,-50,-50,-21,-50,-50,-47,-50,-50,-26,-35,]),'GE_SYMBOL':([68,38,21,57,37,35,80,45,0,28,83,69,49,73,29,32,12,48,70,58,55,71,16,84,44,36,51,39,67,34,50,],[-48,-50,-50,-18,-50,-29,65,65,-50,-24,-35,-50,-50,-20,-31,-50,-50,-47,-26,-50,-21,-50,-50,-50,-27,-50,-19,-25,-30,-28,-49,]),'READ_SYMBOL':([12,21,0,18,],[5,5,5,-5,]),'INT_TYPE_SYMBOL':([0,18,12,21,],[9,-5,9,9,]),'OR_SYMBOL':([21,57,49,34,67,71,12,48,16,36,0,73,35,50,44,39,70,68,29,37,51,58,69,55,28,32,84,83,38,],[-50,-18,-50,-28,-30,76,-50,-47,-50,-50,-50,-20,-29,-49,-27,-25,-26,-48,-31,-50,-19,-50,-50,-21,-24,-50,76,-35,-50,]),'INLINE_THEN_SYMBOL':([36,57,34,37,68,70,78,46,0,69,73,39,50,84,35,16,85,55,48,12,75,51,32,44,71,58,28,67,29,38,49,83,21,],[-50,-18,-28,-50,-48,-26,-37,66,-50,-50,-20,-25,-49,-50,-29,-50,-36,-21,-47,-50,-38,-19,-50,-27,-50,-50,-24,-30,-31,-50,-50,-35,-50,]),'AND_SYMBOL':([69,39,32,84,35,0,73,44,12,50,55,51,48,16,83,57,70,36,37,38,34,29,49,58,67,28,71,68,21,],[-50,-25,-50,77,-29,-50,-20,-27,-50,-49,-21,-19,-47,-50,-35,-18,-26,-50,-50,-50,-28,-31,-50,-50,-30,-24,77,-48,-50,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_action[(_x,_k)] = _y
del _lr_action_items
_lr_goto_items = {'inlineIfStatement':([54,74,62,82,30,31,79,42,66,22,27,],[29,29,29,29,29,29,29,29,29,29,29,]),'typeName':([12,0,21,],[3,3,3,]),'boolExpr':([30,],[46,]),'partialDeclaration':([37,16,],[56,25,]),'partialIntTerm':([28,58,],[39,70,]),'addOp':([69,38,36,],[54,54,54,]),'partialStatementSequence':([21,12,],[26,19,]),'boolOp':([71,84,],[74,74,]),'write':([0,12,21,],[7,7,7,]),'program':([0,],[10,]),'statement':([21,0,12,],[21,12,21,]),'empty':([12,32,69,37,38,58,16,28,84,71,21,49,0,36,],[20,50,55,24,55,44,24,44,78,78,20,50,6,55,]),'multOp':([58,28,],[42,42,]),'partialIntExpr':([36,38,69,],[51,57,73,]),'intFactor':([54,62,30,31,82,74,27,42,66,22,79,],[28,28,28,28,28,28,28,58,28,28,28,]),'read':([12,0,21,],[4,4,4,]),'assignment':([12,0,21,],[2,2,2,]),'declaration':([12,21,0,],[13,13,13,]),'integer':([54,74,31,42,66,30,22,82,62,79,27,],[34,34,34,34,34,34,34,34,34,34,34,]),'digits':([32,49,],[48,68,]),'intExpr':([31,30,62,79,74,22,82,66,],[47,45,71,81,80,33,84,72,]),'relationOp':([45,80,],[62,82,]),'partialBoolExpr':([84,71,],[85,75,]),'intTerm':([31,66,30,54,79,27,74,62,22,82,],[36,36,36,69,36,38,36,36,36,36,]),'statementSequence':([0,],[8,]),'partialStatement':([12,21,0,],[11,11,11,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_goto[(_x,_k)] = _y
del _lr_goto_items
_lr_productions = [
("S'",1,None,None,None),
('program',1,'p_program','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',11),
('statementSequence',2,'p_statementSequence','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',14),
('partialStatementSequence',2,'p_partialStatementSequence','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',17),
('partialStatementSequence',1,'p_partialStatementSequence','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',18),
('statement',2,'p_statement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',21),
('partialStatement',1,'p_partialStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',24),
('partialStatement',1,'p_partialStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',25),
('partialStatement',1,'p_partialStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',26),
('partialStatement',1,'p_partialStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',27),
('partialStatement',1,'p_partialStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',28),
('read',2,'p_read','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',31),
('write',2,'p_write','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',34),
('declaration',3,'p_declaration','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',37),
('partialDeclaration',3,'p_partialDeclaration','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',40),
('partialDeclaration',1,'p_partialDeclaration','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',41),
('typeName',1,'p_typeName','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',44),
('assignment',3,'p_assignment','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',47),
('intExpr',3,'p_intExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',50),
('intExpr',2,'p_intExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',51),
('partialIntExpr',3,'p_partialIntExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',54),
('partialIntExpr',1,'p_partialIntExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',55),
('addOp',1,'p_addOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',58),
('addOp',1,'p_addOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',59),
('intTerm',1,'p_intTerm','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',62),
('intTerm',2,'p_intTerm','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',63),
('partialIntTerm',3,'p_partialIntTerm','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',66),
('partialIntTerm',1,'p_partialIntTerm','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',67),
('intFactor',1,'p_intFactor','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',70),
('intFactor',1,'p_intFactor','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',71),
('intFactor',3,'p_intFactor','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',72),
('intFactor',1,'p_intFactor','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',73),
('multOp',1,'p_multOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',76),
('multOp',1,'p_multOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',77),
('multOp',1,'p_multOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',78),
('inlineIfStatement',7,'p_inlineIfStatement','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',81),
('partialBoolExpr',5,'p_partialBoolExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',84),
('partialBoolExpr',1,'p_partialBoolExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',85),
('boolExpr',4,'p_boolExpr','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',88),
('boolOp',1,'p_boolOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',91),
('boolOp',1,'p_boolOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',92),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',95),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',96),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',97),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',98),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',99),
('relationOp',1,'p_relationOp','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',100),
('integer',2,'p_integer','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',103),
('digits',2,'p_digits','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',106),
('digits',1,'p_digits','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',107),
('empty',0,'p_empty','/Users/christopheruldack/Documents/FH/Compilerbau/pyNase2/naseParser.py',111),
]
| [
"christopher.uldack@googlemail.com"
] | christopher.uldack@googlemail.com |
cbbe0bb749a416272fe4eb037682e67caa5ed92f | 57c6f5fe614fc65799832241e5bcb8d11cf8fd4f | /DC/Random Forest/master/master.py | 845c7e24af4266f381abdb93b96bb74985d13f64 | [] | no_license | metonymic-smokey/Apathetic-ML | 0a900b5f6a561eefcce8de49b2fa934b402f640e | c62753bc542e939fee06054d5c795e0ab3b2e7be | refs/heads/master | 2020-06-10T20:19:07.881668 | 2019-08-15T13:02:41 | 2019-08-15T13:02:41 | 193,734,834 | 0 | 0 | null | 2019-06-25T15:28:08 | 2019-06-25T15:28:08 | null | UTF-8 | Python | false | false | 2,431 | py |
import flask
import requests
import subprocess
import time
import threading
from flask_cors import CORS
import os
app = flask.Flask(__name__)
CORS(app)
path_to_run = './' #directory here
py_name = 'RF(Master).py' #fileName here
args = ["python3", "{}{}".format(path_to_run, py_name)]
# Handle to the spawned RF master subprocess; None while not running.
lrm=None
#iplist=["http://127.0.0.1:3000","http://127.0.0.1:6000"]
# Worker endpoints worker0..worker2 on port 4000 -- presumably container
# hostnames on a shared network; confirm against the deployment config.
s = 'http://worker'
iplist = [s+str(i)+':4000' for i in range(0,3)]
sesh=requests.Session()
# "out" collects log lines that the status page below renders.
os.system("touch out")
os.system("mkdir -p /dev/core/files")
@app.route('/')
def hello():
    """Render the master status page: a header plus the log file contents,
    most recent lines first, auto-refreshing every 5 seconds."""
    # "tac" emits the "out" log in reverse so newest entries appear on top.
    tail = subprocess.Popen(["tac", "out"], stdout=subprocess.PIPE)
    log_bytes, _err = tail.communicate()
    page = "<html><meta http-equiv=\"refresh\" content=\"5\" ><h1>Master</h1>"
    page = page + "<p>" + str(log_bytes.decode('ascii')) + "</p></html>"
    return page
@app.route('/api/master/start', methods = ['GET'])
def start():
    # Spawn the RF master process, signal every worker to start, then kick
    # off training through the master's own model API.  Returns 409 if a
    # run is already in progress, 202 once everything has been dispatched.
    global lrm
    global sesh
    global iplist
    if lrm is not None: #if process is running
        return flask.Response(status=409) #code:conflict
    else: #process never run
        lrm=subprocess.Popen(args) #start lr(master) api
        time.sleep(2)  # give the subprocess time to come up -- TODO: poll instead of sleeping
        with open("out",'a') as standardout:
            print("Starting Tasks ",file=standardout)
        for ip in iplist:
            url = ip+'/api/worker/start'
            initw = threading.Thread(target=sesh.get, args=(url,))
            initw.start() #start lr(worker) api
        time.sleep(2)
        # assumes the spawned RF(Master).py serves on port 5000 -- confirm
        url='http://localhost:5000/api/master/rf/start'
        initmodel = threading.Thread(target=sesh.get, args=(url,))
        initmodel.start() #begin training
        return flask.Response(status=202) #code:accepted
@app.route('/api/master/stop', methods = ['GET'])
def stop():
    # Fan the stop signal out to every worker, terminate the local RF
    # master process, and reset state so /start can be called again.
    # Returns 200 on success, 403 if nothing is running.
    global lrm
    global sesh
    global iplist
    if lrm is not None: #process not completed
        for ip in iplist:
            url = ip+'/api/worker/stop'
            stopw = threading.Thread(target=sesh.get, args=(url,))
            stopw.start()
        lrm.terminate()
        lrm=None
        with open("out",'a') as standardout:
            print("Stopping the entire operation\n",file=standardout)
        return flask.Response(status=200) #code:ok
    else: #process never run
        return flask.Response(status=403) #code:forbidden
if __name__ == '__main__':
    # Bind to all interfaces so peer containers can reach this service.
    app.run(host='0.0.0.0', port=4000)
| [
"30438425+DarkAEther@users.noreply.github.com"
] | 30438425+DarkAEther@users.noreply.github.com |
c61e5282e95887ca44b19a34cba8218d17a61791 | 7e37f6e4fe873496fd769e47ac926bd8863e2524 | /python/common/fileIO/path.py | 84255eafd78af7ee2ba02ffffd2a5ffeea9f9557 | [
"BSD-3-Clause"
] | permissive | 0xb1dd1e/PipelineConstructionSet | 4b585881abfbc6c9209334282af8745bbfeb937b | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | refs/heads/master | 2021-01-18T00:53:32.987717 | 2014-03-11T22:44:39 | 2014-03-11T22:44:39 | 17,955,574 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33,185 | py | """ path.py - An object representing a path to a file or directory.
Authors:
Jason Orendorff <jason.orendorff\x40gmail\x2ecom>
Mikhail Gusarov <dottedmag@dottedmag.net>
Others - unfortunately attribution is lost
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison
# sent me a patch for this.
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib, errno
__version__ = '2.2.2'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
    """Warning category issued by path.walk() when errors='warn'."""
    pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
    # Adding a path and a string yields a path.
    def __add__(self, more):
        # Plain string concatenation (no separator inserted), re-wrapped
        # as a path.
        try:
            resultStr = _base.__add__(self, more)
        except TypeError: #Python bug
            # Some interpreter versions raise TypeError instead of
            # returning NotImplemented for unsupported operands.
            resultStr = NotImplemented
        if resultStr is NotImplemented:
            return resultStr
        return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
    def getcwd(cls):
        """ Return the current working directory as a path object. """
        return cls(_getcwd())
    getcwd = classmethod(getcwd)  # pre-decorator classmethod idiom (Python 2.2 compat)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
    def _get_namebase(self):
        # Filename without its last extension; backs the `namebase` property.
        base, ext = os.path.splitext(self.name)
        return base

    def _get_ext(self):
        # Final extension including the leading dot ('' if none); backs `ext`.
        f, ext = os.path.splitext(_base(self))
        return ext

    def _get_drive(self):
        # Drive specifier such as 'C:' (always '' on POSIX); backs `drive`.
        drive, r = os.path.splitdrive(self)
        return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
    if hasattr(os.path, 'splitunc'):
        # UNC helpers exist only where os.path provides splitunc
        # (Windows builds of older Pythons).
        def splitunc(self):
            # Return (unc_mount_point_as_path, rest_of_path).
            unc, rest = os.path.splitunc(self)
            return self.__class__(unc), rest

        def _get_uncshare(self):
            unc, r = os.path.splitunc(self)
            return self.__class__(unc)

        uncshare = property(
            _get_uncshare, None, None,
            """ The UNC mount point for this path.
            This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
    def splitall(self):
        r""" Return a list of the path components in this path.

        The first item in the list will be a path.  Its value will be
        either os.curdir, os.pardir, empty, or the root directory of
        this path (for example, '/' or 'C:\\').  The other items in
        the list will be strings.

        path.path.joinpath(*result) will yield the original path.
        """
        parts = []
        loc = self
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            if loc == prev:
                # splitpath() made no progress: we reached the root (or a
                # bare drive), so stop peeling components.
                break
            parts.append(child)
        parts.append(loc)
        parts.reverse()
        return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
    def relpathto(self, dest):
        """ Return a relative path from self to dest.

        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()
        # Compare case-insensitively on case-folding filesystems, but...
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()
        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)
    # --- Listing, searching, walking, and matching
    def listdir(self, pattern=None):
        """ D.listdir() -> List of items in this directory.
        Use D.files() or D.dirs() instead if you want a listing
        of just files or just subdirectories.
        The elements of the list are path objects.
        With the optional 'pattern' argument, this only lists
        items whose names match the given pattern.
        """
        names = os.listdir(self)
        if pattern is not None:
            # Filter by basename using shell-style wildcards (fnmatch).
            names = fnmatch.filter(names, pattern)
        # '/' is the join operator here, so each entry becomes a full path.
        return [self / child for child in names]
    def dirs(self, pattern=None):
        """ D.dirs() -> List of this directory's subdirectories.
        The elements of the list are path objects.
        This does not walk recursively into subdirectories
        (but see path.walkdirs).
        With the optional 'pattern' argument, this only lists
        directories whose names match the given pattern. For
        example, d.dirs('build-*').
        """
        # Reuse listdir() and keep only entries that are directories.
        return [p for p in self.listdir(pattern) if p.isdir()]
    def files(self, pattern=None):
        """ D.files() -> List of the files in this directory.
        The elements of the list are path objects.
        This does not walk into subdirectories (see path.walkfiles).
        With the optional 'pattern' argument, this only lists files
        whose names match the given pattern. For example,
        d.files('*.pyc').
        """
        # Reuse listdir() and keep only entries that are regular files.
        return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.
        The iterator yields path objects naming each child item of
        this directory and its descendants. This requires that
        D.isdir().
        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.
        The errors= keyword argument controls behavior when an
        error occurs. The default is 'strict', which causes an
        exception. The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            childList = self.listdir()
        except Exception:
            # Could not even list this directory: honor the errors policy.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in childList:
            # The pattern only filters what is *yielded*; traversal still
            # descends into non-matching directories.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise
            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.
        With the optional 'pattern' argument, this yields only
        directories whose names match the given pattern. For
        example, mydir.walkdirs('*test') yields only directories
        with names ending in 'test'.
        The errors= keyword argument controls behavior when an
        error occurs. The default is 'strict', which causes an
        exception. The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")
        try:
            dirs = self.dirs()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise
        for child in dirs:
            # Pre-order: yield the directory itself, then recurse into it.
            # A non-matching directory is skipped from the output but its
            # subtree is still searched.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if self.name matches the given pattern.
        pattern - A filename pattern with wildcards,
        for example '*.py'.
        """
        # Matches against the basename only, with shell-style wildcard rules.
        return fnmatch.fnmatch(self.name, pattern)
    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.
        pattern - a path relative to this directory, with wildcards.
        For example, path('/users').glob('*/bin/*') returns a list
        of all the files users have in their bin directories.
        """
        cls = self.__class__
        # _base is defined elsewhere in this module; presumably it unwraps
        # the path object to a plain string for glob.glob -- TODO confirm.
        return [cls(s) for s in glob.glob(_base(self / pattern))]
    # --- Reading or writing an entire file at once.
    def open(self, mode='r'):
        """ Open this file. Return a file object. """
        # Python 2 builtin file(); self is passed directly as the filename.
        return file(self, mode)
    def bytes(self):
        """ Open this file, read all bytes, return them as a string. """
        f = self.open('rb')
        try:
            return f.read()
        finally:
            # Always release the file handle, even if read() raises.
            f.close()
    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.
        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        # 'ab' appends, 'wb' truncates; binary mode in both cases.
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            f.write(bytes)
        finally:
            # Close even if the write fails part-way.
            f.close()
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.
        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
        the file. If present, the content of the file is
        decoded and returned as a unicode object; otherwise
        it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
        for the options. Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # Normalize every Unicode line ending (CRLF, CR+NEL, CR, NEL,
            # LINE SEPARATOR) to '\n' by hand, since 'U' mode is
            # unavailable above.
            return (t.replace(u'\r\n', u'\n')
                    .replace(u'\r\x85', u'\n')
                    .replace(u'\r', u'\n')
                    .replace(u'\x85', u'\n')
                    .replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
    def lines(self, encoding=None, errors='strict', retain=True):
        r""" Open this file, read all lines, return them in a list.
        Optional arguments:
        encoding - The Unicode encoding (or character set) of
        the file. The default is None, meaning the content
        of the file is read as 8-bit characters and returned
        as a list of (non-Unicode) str objects.
        errors - How to handle Unicode errors; see help(str.decode)
        for the options. Default is 'strict'
        retain - If true, retain newline characters; but all newline
        character combinations ('\r', '\n', '\r\n') are
        translated to '\n'. If false, newline characters are
        stripped off. Default is True.
        This uses 'U' mode in Python 2.3 and later.
        """
        if encoding is None and retain:
            # Fast path: plain 8-bit read in universal-newline text mode.
            f = self.open(_textmode)
            try:
                return f.readlines()
            finally:
                f.close()
        else:
            # Otherwise reuse text(), which already normalizes newlines,
            # and split on them here.
            return self.text(encoding, errors).splitlines(retain)
    def write_lines(self, lines, encoding=None, errors='strict',
                    linesep=os.linesep, append=False):
        r""" Write the given lines of text to this file.
        By default this overwrites any existing file at this path.
        This puts a platform-specific newline sequence on every line.
        See 'linesep' below.
        lines - A list of strings.
        encoding - A Unicode encoding to use. This applies only if
        'lines' contains any Unicode strings.
        errors - How to handle errors in Unicode encoding. This
        also applies only to Unicode strings.
        linesep - The desired line-ending. This line-ending is
        applied to every line. If a line already has any
        standard line ending ('\r', '\n', '\r\n', u'\x85',
        u'\r\x85', u'\u2028'), that will be stripped off and
        this will be used instead. The default is os.linesep,
        which is platform-dependent ('\r\n' on Windows, '\n' on
        Unix, etc.) Specify None to write the lines as-is,
        like file.writelines().
        Use the keyword argument append=True to append lines to the
        file. The default is to overwrite the file. Warning:
        When you use this with Unicode data, if the encoding of the
        existing data in the file is different from the encoding
        you specify with the encoding= parameter, the result is
        mixed-encoding data, which can really confuse someone trying
        to read the file later.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            for line in lines:
                isUnicode = isinstance(line, unicode)
                if linesep is not None:
                    # Strip off any existing line-end and add the
                    # specified linesep string.
                    if isUnicode:
                        # u'\x0d\x85' is CR followed by NEL -- a
                        # two-character Unicode line ending.
                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
                            line = line[:-2]
                        elif line[-1:] in (u'\r', u'\n',
                                           u'\x85', u'\u2028'):
                            line = line[:-1]
                    else:
                        if line[-2:] == '\r\n':
                            line = line[:-2]
                        elif line[-1:] in ('\r', '\n'):
                            line = line[:-1]
                    line += linesep
                if isUnicode:
                    if encoding is None:
                        encoding = sys.getdefaultencoding()
                    line = line.encode(encoding, errors)
                f.write(line)
        finally:
            f.close()
    def read_md5(self):
        """ Calculate the md5 hash for this file.
        This reads through the entire file.
        """
        # Convenience wrapper; returns the raw digest bytes (not hex).
        return self.read_hash('md5')
    def _hash(self, hash_name):
        # Internal helper: stream this file through hashlib.new(hash_name)
        # and return the (un-finalized) hash object.
        f = self.open('rb')
        try:
            m = hashlib.new(hash_name)
            while True:
                # 8 KB chunks keep memory bounded for large files.
                d = f.read(8192)
                if not d:
                    break
                m.update(d)
            return m
        finally:
            f.close()
    def read_hash(self, hash_name):
        """ Calculate given hash for this file.
        List of supported hashes can be obtained from hashlib package. This
        reads the entire file.
        """
        # Raw digest bytes; see read_hexhash() for the hex form.
        return self._hash(hash_name).digest()
    def read_hexhash(self, hash_name):
        """ Calculate given hash for this file, returning hexdigest.
        List of supported hashes can be obtained from hashlib package. This
        reads the entire file.
        """
        # Same as read_hash() but as a lowercase hex string.
        return self._hash(hash_name).hexdigest()
    # --- Methods for querying the filesystem.
    # These are thin delegations to os.path: the path instance is passed
    # as the (string) argument.  Guarded ones only exist on platforms
    # where os.path provides them.
    exists = os.path.exists
    isdir = os.path.isdir
    isfile = os.path.isfile
    islink = os.path.islink
    ismount = os.path.ismount
    if hasattr(os.path, 'samefile'):
        samefile = os.path.samefile
    getatime = os.path.getatime
    atime = property(
        getatime, None, None,
        """ Last access time of the file. """)
    getmtime = os.path.getmtime
    mtime = property(
        getmtime, None, None,
        """ Last-modified time of the file. """)
    if hasattr(os.path, 'getctime'):
        getctime = os.path.getctime
        ctime = property(
            getctime, None, None,
            """ Creation time of the file. """)
    getsize = os.path.getsize
    size = property(
        getsize, None, None,
        """ Size of the file, in bytes. """)
    if hasattr(os, 'access'):
        # Only defined where the platform provides os.access.
        def access(self, mode):
            """ Return true if current user has access to this path.
            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
            """
            return os.access(self, mode)
    def stat(self):
        """ Perform a stat() system call on this path. """
        # Follows symbolic links; see lstat() for the non-following variant.
        return os.stat(self)
    def lstat(self):
        """ Like path.stat(), but do not follow symbolic links. """
        return os.lstat(self)
    def get_owner(self):
        r""" Return the name of the owner of this file or directory.
        This follows symbolic links.
        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
        On Windows, a group can own a file or directory.
        """
        if os.name == 'nt':
            # win32security comes from the optional win32all package and is
            # presumably None when that import failed -- see module top.
            if win32security is None:
                raise Exception("path.owner requires win32all to be installed")
            desc = win32security.GetFileSecurity(
                self, win32security.OWNER_SECURITY_INFORMATION)
            sid = desc.GetSecurityDescriptorOwner()
            account, domain, typecode = win32security.LookupAccountSid(None, sid)
            return domain + u'\\' + account
        else:
            # POSIX path: map st_uid to a user name via the pwd database.
            if pwd is None:
                raise NotImplementedError("path.owner is not implemented on this platform.")
            st = self.stat()
            return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
    if hasattr(os, 'statvfs'):
        # POSIX-only filesystem statistics.
        def statvfs(self):
            """ Perform a statvfs() system call on this path. """
            return os.statvfs(self)
    if hasattr(os, 'pathconf'):
        def pathconf(self, name):
            """ Return the os.pathconf() configuration value 'name' for this path. """
            return os.pathconf(self, name)
    # --- Modifying operations on files and directories
    def utime(self, times):
        """ Set the access and modified times of this file. """
        # 'times' is passed straight to os.utime: an (atime, mtime) pair,
        # or None to stamp both with the current time.
        os.utime(self, times)
    def chmod(self, mode):
        """ Change the permission bits of this path (wraps os.chmod). """
        os.chmod(self, mode)
    if hasattr(os, 'chown'):
        def chown(self, uid, gid):
            """ Change owner and group ids of this path (POSIX; wraps os.chown). """
            os.chown(self, uid, gid)
    def rename(self, new):
        """ Rename this path to 'new' (wraps os.rename). """
        os.rename(self, new)
    def renames(self, new):
        """ Rename, creating and pruning intermediate directories (os.renames). """
        os.renames(self, new)
    # --- Create/delete operations on directories
    def mkdir(self, mode=0777):
        """ Create this directory (mode is a Python 2 octal literal, masked by umask). """
        os.mkdir(self, mode)
    def mkdir_p(self, mode=0777):
        """ Like mkdir(), but do not complain if the directory already exists. """
        try:
            self.mkdir(mode)
        except OSError, e:
            # Swallow only 'already exists'; re-raise every other failure.
            if e.errno != errno.EEXIST:
                raise
    def makedirs(self, mode=0777):
        """ Create this directory and any missing parents (wraps os.makedirs). """
        os.makedirs(self, mode)
    def makedirs_p(self, mode=0777):
        """ Like makedirs(), but an already-existing directory is not an error. """
        try:
            self.makedirs(mode)
        except OSError, e:
            # Only EEXIST is tolerated.
            if e.errno != errno.EEXIST:
                raise
    def rmdir(self):
        """ Remove this (empty) directory (wraps os.rmdir). """
        os.rmdir(self)
    def removedirs(self):
        """ Remove this directory and any empty parent directories (os.removedirs). """
        os.removedirs(self)
    # --- Modifying operations on files
    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        # O_CREAT makes open() create the file when missing (mode 0666,
        # masked by the process umask).
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
        os.close(fd)
        # utime(None) stamps both times with 'now'.
        os.utime(self, None)
    def remove(self):
        """ Delete this file (wraps os.remove). """
        os.remove(self)
    def remove_p(self):
        """ Delete this file; a missing file is not an error. """
        try:
            self.unlink()
        except OSError, e:
            # ENOENT (no such file) is tolerated; anything else propagates.
            if e.errno != errno.ENOENT:
                raise
    def unlink(self):
        """ Delete this file (wraps os.unlink). """
        os.unlink(self)
    def unlink_p(self):
        """ Alias for remove_p(): delete, ignoring a missing file. """
        self.remove_p()
    # --- Links
    if hasattr(os, 'link'):
        # Hard links: only on platforms that support them.
        def link(self, newpath):
            """ Create a hard link at 'newpath', pointing to this file. """
            os.link(self, newpath)
    if hasattr(os, 'symlink'):
        # Symbolic links: only on platforms that support them.
        def symlink(self, newlink):
            """ Create a symbolic link at 'newlink', pointing here. """
            os.symlink(self, newlink)
    if hasattr(os, 'readlink'):
        def readlink(self):
            """ Return the path to which this symbolic link points.
            The result may be an absolute or a relative path.
            """
            # Wrap the raw string result back into a path object.
            return self.__class__(os.readlink(self))
        def readlinkabs(self):
            """ Return the path to which this symbolic link points.
            The result is always an absolute path.
            """
            p = self.readlink()
            if p.isabs():
                return p
            else:
                # Relative targets are resolved against this link's directory.
                return (self.parent / p).abspath()
    # --- High-level functions from shutil
    # Assigned as methods: the path instance is passed as the first
    # (source) argument of each shutil function.
    copyfile = shutil.copyfile
    copymode = shutil.copymode
    copystat = shutil.copystat
    copy = shutil.copy
    copy2 = shutil.copy2
    copytree = shutil.copytree
    if hasattr(shutil, 'move'):
        move = shutil.move
    rmtree = shutil.rmtree
    # --- Special stuff from os
    if hasattr(os, 'chroot'):
        def chroot(self):
            """ Change the process root directory to this path (wraps os.chroot). """
            os.chroot(self)
    if hasattr(os, 'startfile'):
        def startfile(self):
            """ Open this file with its associated application (Windows only). """
            os.startfile(self)
| [
"Count.Zr0@gmail.com"
] | Count.Zr0@gmail.com |
922e0caa34050ba6f31b86911619306eaa5f4db9 | 593ca70126a48727716c72f28c7390b08801c954 | /PhotovaristorSensor.py | b4751d15783bc5dcc6b517eacac39a95f7d109f5 | [] | no_license | wenyan0622/RasberryPiTest | a57eca4249e39373a63fd2205745b8ee205c16ad | 56257f36132006211b92c4a1e7b068c5c4771e8b | refs/heads/master | 2020-03-27T03:30:20.302813 | 2018-08-24T16:41:52 | 2018-08-24T16:41:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import RPi.GPIO as GPIO
import time
# Program purpose: read the photoresistor's digital input signal; when light
# is detected turn the small LED on, otherwise turn it off.
# Pin assignments (BOARD numbering) for the sensor input and the LED output.
LightSensor_PIN=7
LED_PIN=12
# Initialisation
def init():
    """Select BOARD pin numbering and configure the sensor/LED pins."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(LightSensor_PIN, GPIO.IN)   # photoresistor -> input
    GPIO.setup(LED_PIN, GPIO.OUT)          # LED -> output
# Poll the sensor in a loop (99 passes)
def detect():
    """Sample the light sensor once a second, mirroring its state on the LED."""
    for count in range(1, 100):
        lit = GPIO.input(LightSensor_PIN) == True
        GPIO.output(LED_PIN, lit)
        if lit:
            print("light on ...", count)   # light detected -> LED on
        else:
            print("light off ...", count)  # no light -> LED off
        time.sleep(1)  # 1-second polling interval
try:
init()
detect()
except KeyboardInterrupt:
pass
#清理工作
GPIO.cleanup() | [
"wuyiliang@e-lead.cn"
] | wuyiliang@e-lead.cn |
6073cfcd12cdbdbc9d81c5279f878f6ed3e2491a | f1dad4036a93b0ae3fb48841c3f1ae1440210085 | /emarket/views.py | 39baf90e3f5d1892dbfa7337958aae37f41a76bf | [
"MIT"
] | permissive | MerlinEmris/ebazar | 2af740fe7d15ca111b5cf7a923464a26cc937206 | 664d093b5addc2612083a7f8e5d5d9441c536c8b | refs/heads/master | 2023-02-09T22:13:04.862064 | 2023-01-21T06:07:34 | 2023-01-21T06:07:34 | 322,202,655 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,486 | py | # from traceback import TracebackException
from django.contrib.auth.forms import UserCreationForm
# from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.postgres.search import SearchVector
from django.core import serializers
from django.http import JsonResponse
from django.views import View
# import os
# from django.contrib.sites.shortcuts import get_current_site
# from django.utils.encoding import force_bytes
# from django.utils.encoding import force_text
# from django.utils.http import urlsafe_base64_encode
# from django.utils.http import urlsafe_base64_decode
# from django.template.loader import render_to_string
from django.http import HttpResponse
import django_filters.rest_framework
from django.shortcuts import render, redirect
from .forms import ProfilePhotoForm, PhotoForm, SignUpForm, ProfileForm, ItemForm, SearchForm
from .models import User, Profile, Item, Category, Item_Image, Favorite_item
from ebazar import settings
from .serializers import ( CategorySerializer,
ItemSerializer,
UserSerializer,
Item_ImageSerializer,)
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets, status
# import django_filters.rest_framework
from rest_framework.generics import (
DestroyAPIView,
ListAPIView,
UpdateAPIView,
RetrieveAPIView,
CreateAPIView
)
from rest_framework.views import APIView
import shutil
import os
import datetime
import json
# print console logs
# NOTE(review): both timestamps below are evaluated once at import time, so
# every log line carries the server start time, not the event time -- confirm
# this is intended before relying on them.
log_prefix = '['+datetime.datetime.now().strftime("%d-%m-%y %H:%M:%S")+']'
log_end = '********'
log_date = datetime.datetime.now().strftime("%d-%m-%y_%H:%M")
# Landing view for url(r'^$').
def index(request):
    """Entry point for the site root: always send the visitor to 'home'.

    The original branched on ``request.user``, but both branches redirected
    to the same view, so the conditional was dead logic and is removed.
    """
    return redirect('home')
# create user with min information
def create_user(request):
    """Register a new user, log them in, and redirect to 'home'.

    GET renders an empty SignUpForm; POST validates the submission.  On an
    invalid POST the *bound* form is re-rendered so its errors are shown
    (the original rebuilt an identical bound form, which was redundant).
    Dead commented-out email-activation code was removed.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            print(log_prefix+'user '+form.cleaned_data['username']+'is created'+log_end)
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            # Re-authenticate so login() receives a backend-annotated user.
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            print(log_prefix + 'user ' + username + 'is logged in' + log_end)
            return redirect('home')
    else:
        form = SignUpForm()
    return render(request, 'registration/create_user.html', {'form': form})
@login_required
def edit_profile(request):
    """Create or update the logged-in user's Profile.

    ``exist`` is 1 when the user already had a profile, 0 otherwise; the
    template presumably uses it to distinguish "edit" from first-time
    creation -- confirm against emarket/profile.html.
    """
    exist = 0
    try:
        profile = request.user.profile
        exist = 1
    except Profile.DoesNotExist:
        # First visit: build an unsaved Profile bound to this user.
        profile = Profile(user=request.user)
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form.save()
            print(log_prefix + ' user ' + request.user.username + ' profile is changed ' + log_end)
            return redirect('home')
        else:
            # NOTE(review): this render omits 'exist' from the context,
            # unlike the GET branch below -- confirm the template tolerates it.
            return render(request, 'emarket/profile.html', {'form': form})
    else:
        form = ProfileForm(instance=profile)
        return render(request, 'emarket/profile.html', {'form': form,'exist':exist})
def profile_change_photo(request, prof_id):
    """Replace the profile picture of the profile owned by user ``prof_id``.

    GET shows an empty upload form.  POST validates the upload first, and
    only then deletes the old image, saves, logs, and redirects.  Fixes over
    the original: the old image was deleted *before* validation (destroying
    it even for rejected uploads), an invalid POST fell off the end of the
    function and returned None (a 500 in Django), and the success log line
    was unreachable because it sat after every return.
    """
    if request.method == 'POST':
        profile = Profile.objects.filter(user_id=prof_id)[0]
        form = ProfilePhotoForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            # Delete the old file only once the new upload is known-good.
            profile.img.delete(False)
            form.save()
            print(log_prefix + 'user ' + str(prof_id) + 'profile img is changed' + log_end)
            return redirect('profile')
        # Invalid upload: fall through and re-render the bound form.
    else:
        form = ProfilePhotoForm()
    return render(request, 'emarket/profile_add_image.html', {'form': form, })
def user(request, user_id):
    """Public page listing one user's items, paginated 9 per page."""
    items = Item.objects.filter(user_id=user_id)
    pics = Item_Image.objects.all()
    if items:
        paginator = Paginator(items, 9)
        page = request.GET.get('page')
        try:
            items = paginator.page(page)
        except PageNotAnInteger:
            # Missing or non-numeric ?page= -> first page.
            items = paginator.page(1)
        except EmptyPage:
            # ?page= beyond the end -> clamp to the last page.
            items = paginator.page(paginator.num_pages)
    return render(request, 'emarket/user.html', {'items': items, 'pics': pics, })
@login_required
def create_item(request):
    """Let the logged-in user create an Item, then continue to image upload."""
    if request.method != 'POST':
        # Plain visit: show an empty form.
        return render(request, 'emarket/item_create.html', {'form': ItemForm()})
    new_item = Item(user=request.user)
    bound_form = ItemForm(request.POST, instance=new_item)
    if not bound_form.is_valid():
        return render(request, 'emarket/item_create.html', {'form': bound_form})
    bound_form.save()
    print(log_prefix+'item:'+bound_form.cleaned_data['name']+' is created at '+log_date+log_end)
    return redirect('add_item_img', new_item.id)
@login_required
def edit_item(request, it_id):
    """Edit an existing Item; unknown ids bounce back to 'home'.

    Fixes over the original: ``filter(...)[0]`` raises ``IndexError`` (not
    ``Item.DoesNotExist``) when nothing matches, the missing-item branch
    forgot its ``return`` (so execution continued with ``item`` unbound),
    and an invalid POST rebuilt a pristine form, silently discarding the
    validation errors.
    """
    try:
        item = Item.objects.filter(id=it_id)[0]
    except (Item.DoesNotExist, IndexError):
        return redirect('home')
    if request.method == 'POST':
        form = ItemForm(request.POST, instance=item)
        if form.is_valid():
            form.save()
            print(log_prefix + ' item ' + str(it_id) + ' is changed ' + log_end)
            return redirect('show_item', it_id)
        # Re-render the *bound* form so validation errors are displayed.
        return render(request, 'emarket/item_edit.html', {'form': form})
    form = ItemForm(instance=item)
    return render(request, 'emarket/item_edit.html', {'form': form})
def show_item(request, item_id):
    """Render the detail page for one Item.

    ``exist`` is hard-coded to 1: the commented-out favourites lookup it
    belonged to was removed as dead code, along with the unused
    ``request.user`` binding.
    """
    exist = 1
    item = Item.objects.filter(id=item_id)[0]
    item_images = Item_Image.objects.filter()
    return render(request, 'emarket/item_detail.html', {'item': item,
                                                        'pics': item_images,
                                                        'exist': exist})
@login_required
def favorite_items(request, user_id):
    """List the Favorite_item rows belonging to the given user.

    Filters on the FK column directly.  The original passed a whole
    ``User.objects.filter(...)`` QuerySet as an *exact* lookup value, which
    Django only accepts while the subquery yields exactly one row.
    """
    fav_items = Favorite_item.objects.filter(user_id=user_id)
    item_images = Item_Image.objects.filter()
    return render(request, 'emarket/favorite_items.html', {'fav_items': fav_items,
                                                           'pics': item_images})
# @login_required
# def add_to_fav(request):
# return redirect('home')
def show_category(request, cat_id):
    """Show one category with its items, paginated 9 per page."""
    category = Category.objects.get(id=cat_id)
    items = Item.objects.filter(category=category)
    pics = Item_Image.objects.all()
    if items:
        paginator = Paginator(items, 9)
        requested_page = request.GET.get('page')
        try:
            items = paginator.page(requested_page)
        except PageNotAnInteger:
            # No or garbage page number: fall back to the first page.
            items = paginator.page(1)
        except EmptyPage:
            # Page number past the end: clamp to the last page.
            items = paginator.page(paginator.num_pages)
    return render(request, 'emarket/show_category.html', {'cat': category, 'items': items, 'pics': pics})
def home(request):
    """Landing page: every category plus the nine priciest items."""
    categories = Category.objects.all()
    top_items = Item.objects.order_by('-price')[0:9]
    images = Item_Image.objects.filter()
    context = {'cats': categories, 'items': top_items, 'pics': images, }
    return render(request, 'emarket/home.html', context)
def search(request, search_word=None):
    """Search items by name, optionally narrowed by location and/or seller.

    GET shows every item; POST filters by the submitted 'search' term plus
    the optional 'location'/'user' fields and sets the result message
    accordingly.  The messages are Turkmen ('all items' / 'results' /
    'nothing found').
    """
    message = 'Ähli goşlar:'
    pics = Item_Image.objects.all()
    items = Item.objects.all()
    form = SearchForm
    if request.method == 'POST':
        form = SearchForm(request.POST)
        search_word = request.POST.get('search')
        location = request.POST.get('location')
        user = request.POST.get('user')
        # Chained .filter() calls AND the conditions together.
        if location and user:
            items = Item.objects.filter(name__icontains=search_word).filter(user=user).filter(location=location)
        elif user:
            items = Item.objects.filter(name__icontains=search_word).filter(user=user)
        elif location:
            items = Item.objects.filter(name__icontains=search_word).filter(location=location)
        else:
            items = Item.objects.filter(name__icontains=search_word)
        if items:
            message = 'Netijeler:'
        else:
            message = 'Hiç zat ýok'
            items = None
    # Paginate whatever survived, 18 per page.
    if items:
        paginator = Paginator(items, 18)
        page = request.GET.get('page')
        try:
            items = paginator.page(page)
        except PageNotAnInteger:
            items = paginator.page(1)
        except EmptyPage:
            items = paginator.page(paginator.num_pages)
    return render(request, 'emarket/expo.html', {'items': items, 'pics': pics, 'ms': message, 's_word': search_word, 'form':form})
@login_required
def add_item_img(request, it_id):
    """Attach an uploaded image to item ``it_id``; otherwise show the form."""
    photos = Item_Image.objects.filter()
    if request.method != 'POST':
        return render(request, 'emarket/item_add_image.html', {'form': PhotoForm(), 'photos': photos})
    image_row = Item_Image(item_id=it_id)
    upload_form = PhotoForm(request.POST, request.FILES, instance=image_row)
    if not upload_form.is_valid():
        return render(request, 'emarket/item_add_image.html', {'form': upload_form, 'photos': photos})
    upload_form.save()
    print(log_prefix+'item_'+it_id+' added image'+str(upload_form.cleaned_data['img'])+log_end)
    return redirect('show_item', it_id)
@login_required
def delete_item(request, it_id):
    """Delete an Item and its media directory, then return to 'home'.

    ``shutil.rmtree`` is called with ``ignore_errors=True`` because an item
    that never had images uploaded has no ``media/items/item_<id>``
    directory, and the original crashed with FileNotFoundError in that case.
    """
    item = Item.objects.filter(id=it_id)
    if item:
        item.delete()
        item_dir = 'item_' + str(it_id)
        item_path = os.path.join(settings.MEDIA_ROOT, 'items', item_dir)
        shutil.rmtree(item_path, ignore_errors=True)
        print(log_prefix + item_dir + ' is deleted' + log_end)
    return redirect('home')
class UserCreate(APIView):
    """POST-only endpoint that registers a user and immediately logs them in."""
    def post(self, request, format='json'):
        """Validate the payload, create the user, authenticate, and return 201.

        Fixes over the original: the invalid-serializer and failed-save
        branches only printed and returned None (a 500 in Django); they now
        return proper error Responses.  The raw password is no longer
        printed to the console.
        """
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response({'detail': 'user create error'},
                            status=status.HTTP_400_BAD_REQUEST)
        username = serializer.data.get('username')
        raw_password = serializer.data.get('password')
        # Authenticate so login() receives a backend-annotated user object.
        user_log = authenticate(username=username, password=raw_password)
        login(request, user_log)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
# api for item
class ItemViewSet(ListAPIView):
    """Read-only list of all Items with filter, name search, and ordering."""
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    search_fields = ('name',)
    ordering_fields = '__all__'
class Item_ImageViewSet(ListAPIView):
    """Read-only list of every Item_Image, with django-filter support."""
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    queryset = Item_Image.objects.all()
    serializer_class = Item_ImageSerializer
class Item_ImageDetailViewSet(ListAPIView):
    """List only the images belonging to one item (the 'item' URL kwarg)."""
    queryset = Item_Image.objects.all()
    serializer_class = Item_ImageSerializer
    def get_queryset(self):
        # Narrows the class-level queryset per request.
        item = self.kwargs['item']
        return Item_Image.objects.filter(item=item)
class ItemCreateViewSet(CreateAPIView):
    """POST endpoint creating an Item via ItemSerializer."""
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
class ItemDetailViewSet(RetrieveAPIView):
    """GET endpoint retrieving a single Item by its primary key."""
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
class ItemUpdateViewSet(UpdateAPIView):
    """PUT/PATCH endpoint updating a single Item."""
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
class ItemDeleteViewSet(DestroyAPIView):
    """DELETE endpoint removing a single Item."""
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
# api for category
class CategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD for Category via the DRF router."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
| [
"merdanchariyarov@gmail.com"
] | merdanchariyarov@gmail.com |
639b97de6a8ed56bc002ebcf1f711245cbb5584e | 2793721e5cbfccfedac75556e34dba22999530d7 | /Dynamic_Programming/venv/bin/easy_install-3.7 | 4b809b9ca4fadb6a24070714033b737b0d49bbb0 | [] | no_license | iafjayoza/Python | 135e613d1d23c493b05a009843b40cbca6a1d318 | aaa05b0d655c8f0a47ced0100a844d99f852b2a4 | refs/heads/master | 2022-12-07T07:21:46.494885 | 2020-09-06T09:03:27 | 2020-09-06T09:03:27 | 282,707,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | 7 | #!/Users/jo049566/Desktop/Jay/Jay_Data/Study_Repo/Python/Projects/Dynamic_Programming/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim: resolves the 'easy_install-3.7' entry
# point from setuptools 40.8.0 and runs it, exiting with its return value.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' from argv[0] (Windows launchers).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"jaykumar.oza@cerner.com"
] | jaykumar.oza@cerner.com |
362b5e6042f67dfb5daeddd0a87ad33b5dceddd8 | 2774e57e315c426e3645c9b2d86d381dd5dc0c66 | /django/academy/api/view/views_course_fbv.py | ef1edf7d72504d8e08eb5b2aa993b1e66d36b2a2 | [] | no_license | aidoka22/Web-project | 27ee981c896fcc90eb8042f7e100582eeaa9a912 | 998439a05c4139cb7f507d77992c916f029c7554 | refs/heads/main | 2023-04-18T18:56:43.271926 | 2021-04-30T21:34:37 | 2021-04-30T21:34:37 | 350,932,105 | 0 | 1 | null | 2021-04-30T13:09:10 | 2021-03-24T03:17:43 | TypeScript | UTF-8 | Python | false | false | 1,703 | py | from rest_framework.decorators import api_view
from rest_framework.request import Request
from rest_framework.response import Response
from api.models import Course
from api.serializers import CourseSerializer2
@api_view(['GET', 'POST'])
def course_list(request):
    """List all courses (GET) or create one from the request body (POST).

    NOTE(review): an invalid POST returns the serializer errors with the
    default 200 status -- confirm whether clients rely on this before
    changing it to 400.
    """
    if request.method == 'GET':
        courses = Course.objects.all()
        serializer = CourseSerializer2(courses, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = CourseSerializer2(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors)
@api_view(['GET'])
def teacher_courses(request, teacher_id):
    """List all courses authored by the given teacher.

    Bug fix: the original used Course.objects.get(author=teacher_id), which
    returns a single object (and raises for zero or multiple matches) —
    incompatible with serialization via many=True. filter() returns the
    queryset the serializer expects, and an empty list is a valid response.
    """
    if request.method == 'GET':
        courses = Course.objects.filter(author=teacher_id)
        serializer = CourseSerializer2(courses, many=True)
        return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def course_detail(request, course_id):
    """Retrieve (GET), update (PUT), or delete (DELETE) a single course by id.

    Returns 400 with a message when the course does not exist, 204 after a
    successful delete, and 400 with the serializer errors on invalid PUT data.
    """
    try:
        course = Course.objects.get(id=course_id)
    except Course.DoesNotExist as e:
        return Response({'message': str(e)}, status=400)
    if request.method == 'GET':
        serializer = CourseSerializer2(course)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = CourseSerializer2(instance=course, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # Bug fix: validation errors previously went out with an implicit 200.
        return Response(serializer.errors, status=400)
    elif request.method == 'DELETE':
        course.delete()
        return Response({'message': 'deleted'}, status=204)
| [
"noreply@github.com"
] | noreply@github.com |
eb3d9991bac5d69b10d1a291e2e099785c5e1bdb | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/Backtracking/90_SubsetsII.py | f0018bb559069fbfa983759b7fcba413f3f6cb4b | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,397 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
c.. Solution o..
___ subsetsWithDup nums
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
__ n.. nums:
r_ []
nums.s.. )
nums_len = l..(nums)
# Keep the subsets without duplicate subsets
subsets = [[nums[0]]]
# Keep the previous subsets which contains previous nums.
pre_subset = [[nums[0]]]
___ i __ r..(1, nums_len
# Combine current num with the previous subsets,
# Then update the previous subsets
__ nums[i] __ nums[i-1]:
___ j __ r..(l..(pre_subset)):
one_set = pre_subset[j][:]
one_set.a.. nums[i])
subsets.a.. one_set)
pre_subset[j] = one_set
# Combine current num with all the subsets before.
# Then update the previous subsets
____
pre_subset # list
___ j __ r..(l..(subsets)):
one_set = subsets[j][:]
one_set.a.. nums[i])
subsets.a.. one_set)
pre_subset.a.. one_set)
pre_subset.a.. [nums[i]])
subsets.a.. [nums[i]])
subsets.a.. [])
r_ subsets
"""
[]
[1,2]
[1,2,2]
[1,2,2,3,3,4,5]
"""
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
8f47727e3686930965396e7beba377a896f9f729 | bd1052ddc8c2450c943d8336c5bee0c9d0b0aa67 | /redirect_service/redirector/interface.py | 92d7b11dcae0358a45fb0a3a492d0665c78616a6 | [] | no_license | iamandesai/ccet-ia-2 | 6561a2417963dbb14faea350c233cd6aa2f804ad | 8c9c19a89152f8fbbac8e1c300a6505a4fc8f3c7 | refs/heads/main | 2023-08-25T08:26:17.598941 | 2021-11-08T14:40:13 | 2021-11-08T14:40:13 | 425,865,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | import traceback
from time import time
from uuid import uuid4
from redirector import datastore, tools, fetcher
def get_redirect_url(urlid):
    """Look up the redirect target URL for a short-URL id.

    Returns None (after printing the error) if the lookup fails for any
    reason, e.g. an unknown id or a datastore outage — callers must handle
    the None case.
    """
    try:
        return datastore.get_redirect_url(urlid)
    except Exception as e:
        print(e)
        return None
def push_data_to_store(data):
    """Enrich a submitted tracking point with geo data and persist it.

    Geo attributes are resolved per supernet: first from the datastore cache,
    then (on a miss) from the external IP-details service, whose answer is
    cached for next time. Failures are logged and swallowed so a bad record
    never breaks the caller. NOTE(review): assumes `data` carries at least
    'ipaddress', 'urlid' and 'availableRam' keys — confirm with callers.
    """
    try:
        data['dataid'] = str(uuid4())
        # Collapse the client IP to its supernet, used as the geo-cache key.
        super_ip = tools.supernetter(data['ipaddress'])
        snet_values = datastore.get_snet_val(super_ip)
        if snet_values:
            # Cache hit: reuse the stored geo attributes.
            data['latitude'] = snet_values['latitude']
            data['longitude'] = snet_values['longitude']
            data['pincode'] = snet_values['zip']
            data['city'] = snet_values['city']
        else:
            # Cache miss: query the external service and cache its answer.
            details = fetcher.fetch_details_for_ip(super_ip)
            if details:
                datastore.put_snet_val(details)
                data['latitude'] = str(details['latitude'])
                data['longitude'] = str(details['longitude'])
                data['pincode'] = str(details['zip'])
                data['city'] = str(details['city'])
        data_new = tools.transform_tpoint(data)
        data_new["availableRamKey"] = tools.get_ram_gbs(data.get('availableRam'))
        datastore.increment_respective_counts(data['urlid'], data_new['brand'], data_new['os'], data_new['availableRamKey'])
        # Record submission time as a unix-epoch string.
        data_new["time"] = str(int(time()))
        datastore.put_submitted_data(data_new)
    except Exception as e:
        # Best-effort ingestion: log the stack trace and drop the record.
traceback.print_exc() | [
"noreply@github.com"
] | noreply@github.com |
c33e533a21cecc0d9b01083a41149fcd12251b68 | d421cc063c084b4a3cb9debb8add5abad9de4ff0 | /env/Scripts/django-admin.py | 6777f98ab8e56932a3ffad082501f51aeac10345 | [] | no_license | Sukhvsin2/Shopping-backend | 4b03a2c2e5b9962b9c906e3dbf8615b1fbbac595 | 65b01e62f2ebd72cbfe0a9bdd2f440a9ff255f07 | refs/heads/master | 2023-02-03T06:21:02.034946 | 2020-12-20T05:37:45 | 2020-12-20T05:37:45 | 316,291,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | #!c:\users\sukhi\desktop\web-development\vue-apps\ecomm-backend\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
# Backwards-compatibility shim installed into the virtualenv's bin directory.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 removed the warning class along with this script's purpose.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Still functional on Django 3.x, but nudge users toward `django-admin`.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"sukhvsingh2026@gmail.com"
] | sukhvsingh2026@gmail.com |
5427109878d3dc6439276ff55eb29fd7bb028859 | f43fe4fd349a45fdca005c7e210c19c180a64a36 | /run_citation_need_model.py | af8b826eb60647025f03406727eebef869b38399 | [] | no_license | ghassanmas/citation-needed-paper | 0f50704782bf97a3385019845d2343672d2003e9 | 4379272225442a4bd492c8683a8c479e146acbff | refs/heads/master | 2022-03-24T00:23:43.934555 | 2019-11-05T14:49:27 | 2019-11-05T14:49:27 | 219,768,657 | 0 | 0 | null | 2019-11-05T15:39:58 | 2019-11-05T14:44:01 | Python | UTF-8 | Python | false | false | 6,277 | py | import re
import argparse
import pandas as pd
import pickle
import numpy as np
import types
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix
from keras.utils import to_categorical
from keras import backend as K
K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=10, inter_op_parallelism_threads=10)))
'''
Set up the arguments and parse them.
'''
def get_arguments():
    """Build the command-line parser for the citation-need classifier and
    return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Use this script to determinee whether a statement needs a citation or not.')
    # Mandatory options: (short flag, long flag, help text).
    required_options = (
        ('-i', '--input', 'The input file from which we read the statements. Lines contains tab-separated values: the statement, the section header, and additionally the binary label corresponding to whether the sentence has a citation or not in the original text. This can be set to 0 if no evaluation is needed.'),
        ('-o', '--out_dir', 'The output directory where we store the results'),
        ('-m', '--model', 'The path to the model which we use for classifying the statements.'),
        ('-v', '--vocab', 'The path to the vocabulary of words we use to represent the statements.'),
        ('-s', '--sections', 'The path to the vocabulary of section with which we trained our model.'),
    )
    for short_flag, long_flag, help_text in required_options:
        arg_parser.add_argument(short_flag, long_flag, help=help_text, required=True)
    # Optional language flag with an English default.
    arg_parser.add_argument('-l', '--lang', help='The language that we are parsing now.',
                            required=False, default='en')
    return arg_parser.parse_args()
'''
Parse and construct the word representation for a sentence.
'''
def text_to_word_list(text):
    """Tokenise a statement into a cleaned list of lowercase words.

    Multi-sentence statements are truncated to their first sentence, then a
    fixed sequence of regex rewrites normalises contractions and punctuation
    before splitting on whitespace.
    """
    # A statement spanning several sentences is cut down to the first one.
    sentences = re.compile('\.\s+').split(str(text))
    if len(sentences) != 1:
        text = sentences[0]
    text = str(text).lower()
    # Ordered cleanup rules — order matters (e.g. contraction expansions must
    # run before the generic apostrophe removal).
    cleanup_rules = (
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in cleanup_rules:
        text = re.sub(pattern, replacement, text)
    return text.strip().split()
'''
Compute P/R/F1 from the confusion matrix.
'''
'''
Create the instances from our datasets
'''
def construct_instance_reasons(statement_path, section_dict_path, vocab_w2v_path, max_len=-1):
    """Turn the statements TSV into model-ready arrays (Python 2 code).

    Returns (X, sections, y, encoder, outstring): padded word-id sequences,
    per-row section ids, one-hot citation labels, the fitted LabelBinarizer,
    and the raw statement strings for reporting. Rows with non-boolean labels
    or parsing errors are skipped. NOTE(review): pad_sequences is called with
    maxlen=max_len, so callers should pass the real sequence length, not -1.
    """
    # Load the vocabulary
    vocab_w2v = pickle.load(open(vocab_w2v_path, 'rb'))
    # load the section dictionary.
    section_dict = pickle.load(open(section_dict_path, 'rb'))
    # Load the statements, the first column is the statement and the second is the label (True or False)
    statements = pd.read_csv(statement_path, sep='\t', index_col=None, error_bad_lines=False, warn_bad_lines=False)
    # construct the training data
    X = []
    sections = []
    y = []
    outstring=[]
    for index, row in statements.iterrows():
        try:
            statement_text = text_to_word_list(row['statement'])
            X_inst = []
            for word in statement_text:
                # truncate to max_len tokens; unknown words map to the UNK id
                if max_len != -1 and len(X_inst) >= max_len:
                    continue
                if word not in vocab_w2v:
                    X_inst.append(vocab_w2v['UNK'])
                else:
                    X_inst.append(vocab_w2v[word])
            # extract the section, and in case the section does not exist in the model, then assign UNK
            section = row['section'].strip().lower()
            sections.append(np.array([section_dict[section] if section in section_dict else 0]))
            label = row['citations']
            # some of the rows are corrupt, thus, we need to check if the labels are actually boolean.
            if type(label) != types.BooleanType:
                continue
            y.append(label)
            X.append(X_inst)
            outstring.append(str(row["statement"]))
            #entity_id revision_id timestamp entity_title section_id section prg_idx sentence_idx statement citations
        except Exception as e:
            print row
            print e.message
    # left-pad every sequence with UNK up to max_len
    X = pad_sequences(X, maxlen=max_len, value=vocab_w2v['UNK'], padding='pre')
    encoder = LabelBinarizer()
    y = encoder.fit_transform(y)
    y = to_categorical(y)
    return X, np.array(sections), y, encoder, outstring
if __name__ == '__main__':
    p = get_arguments()
    # load the model
    model = load_model(p.model)
    # load the data; the model's first input length dictates the padding length
    max_seq_length = model.input[0].shape[1].value
    X, sections, y, encoder, outstring = construct_instance_reasons(p.input, p.sections, p.vocab, max_seq_length)
    # classify the data
    pred = model.predict([X, sections])
    # store the predictions: printing out the sentence text, the prediction score, and original citation label.
    outstr = 'Text\tPrediction\tCitation\n'
    for idx, y_pred in enumerate(pred):
        outstr += outstring[idx]+'\t'+str(y_pred[0])+ '\t' + str(y[idx]) + '\n'
    # one TSV per language, written under the requested output directory
    fout = open(p.out_dir + '/' + p.lang + '_predictions_sections.tsv', 'wt')
    fout.write(outstr)
    fout.flush()
    fout.close()
| [
"noreply@github.com"
] | noreply@github.com |
dde826487405d70366d2287ed93ff3be356ad1b5 | 18a33002b18b1daf3851eb5bd0928bd3f11c1741 | /pyaccords/amazonEc2Interface.py | 23ca86a66cbe772e3f52bbbf3910e4a91facb215 | [
"Apache-2.0"
] | permissive | MarouenMechtri/accords-platform-1 | 3eec269b8df71f7ba9a5071753e873e9baaed632 | 4f950fffd9fbbf911840cc5ad0fe5b5a331edf42 | refs/heads/master | 2021-01-16T21:36:31.279852 | 2013-05-19T09:59:02 | 2013-05-19T09:59:02 | 23,386,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | ##############################################################################
#copyright 2012, Hamid MEDJAHED & Elyes ZEKRI (hmedjahed@prologue.fr) #
# Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Implementation of category CRUD functions
import sys
import pycompdev
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords/pysrc/"
sys.path.append(srcdirectory)
from amazonEc2Class import *
""" Note: amazonEc2 is a python class to interface the accords category :amazonEc2.
-Attributes of this category are members of this class.
-List of attributes:
- name
- flavor
- image
- original
- profile
- node
- price
- account
- number
- rootpass
- reference
- network
- access
- accessip
- floating
- floatingid
- publicaddr
- privateaddr
- firewall
- group
- zone
- hostname
- workload
- when
- state
"""
# CRUD handlers for the ACCORDS :amazonEc2 category. The platform invokes each
# with a populated amazonEc2 instance and expects the (possibly updated)
# instance back. They are intentionally pass-through stubs to be filled in.
def amazonEc2_create(amazonEc2):
    """Implement here your function"""
    return amazonEc2
def amazonEc2_retrieve(amazonEc2):
    """Implement here your function"""
    return amazonEc2
def amazonEc2_update(amazonEc2):
    """Implement here your function"""
    return amazonEc2
def amazonEc2_delete(amazonEc2):
    """Implement here your function"""
    return amazonEc2
| [
"hmedjahed@prologue.fr"
] | hmedjahed@prologue.fr |
3c9ce756eb733e12cf686c8c32401331d69dad3f | 5ee028ee2582a2d566c22a32097a1fcbed314fcc | /openwsn-fw-antiJamming/bsp/chips/si70x/SConscript | 8694311edc5303fbdb892109f6bbf7a70d61cfda | [] | permissive | ssciancalepore/BitTransfer | 70c5b271743ebe683d7a3a37d595dbab132f903e | b9d343b0219259f4870e9362b99c27f544014b89 | refs/heads/master | 2022-06-20T18:38:03.271254 | 2019-09-15T04:56:32 | 2019-09-15T04:56:32 | 199,583,953 | 1 | 1 | BSD-3-Clause | 2022-06-03T22:45:01 | 2019-07-30T05:53:29 | C | UTF-8 | Python | false | false | 116 | Import('env')
# Clone the parent SCons environment so local tweaks don't leak upward.
localEnv = env.Clone()
# Driver source for the SI70x temperature/humidity sensor chip.
source = ['si70x.c']
si70x = localEnv.Object(source=source)
Return('si70x') | [
"savio.sciancalepore@gmail.com"
] | savio.sciancalepore@gmail.com | |
be4f20317825ae5277ab1916c7c3aec239a7362c | 6aeb576b71583af427a6f39b2929d3e9a99d816b | /ml_engine/processing.py | 048fa6e18c0905689fb6d05c700a53a5498f8099 | [] | no_license | MoAdel1/SparkMachineLearningEngine | a594e3f555394f01d4df705e265851a168e1e7bf | 08e2ddffeda205619a7768b9e91223d0d2123875 | refs/heads/master | 2023-03-02T13:51:11.603201 | 2021-02-08T14:33:14 | 2021-02-08T14:33:14 | 336,610,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,336 | py | # Databricks notebook source
# processing functions
def categorical_indexing(df, column, indexer=None):
    """String-index a categorical column into '<column>_INDEXED'.

    If *indexer* is None a new StringIndexer is fitted on *df*; otherwise the
    supplied (already fitted) indexer is reused so train-time mappings can be
    applied to new data. Returns (indexed_df, indexer).
    """
    # string index
    if (indexer == None):
        stringIndexer = StringIndexer(inputCol=column, outputCol='{}_INDEXED'.format(column))
        indexer = stringIndexer.fit(df)
    indexed = indexer.transform(df)
    return indexed, indexer
def categorical_hotencoding(df, column, categories=None):
    """One-hot encode a categorical column; the source column is dropped.

    If *categories* is None the distinct values are collected from *df*
    (sorted for a deterministic column order); pass the returned list back in
    to encode new data with the same columns. Returns (encoded_df, categories).
    """
    if(categories == None):
        categories = df.select(column).distinct().rdd.flatMap(lambda x : x).collect()
        categories.sort()
    for category in categories:
        # 1/0 indicator column for this category value
        function = udf(lambda item: 1 if item == category else 0, IntegerType())
        # strip double quotes so the value forms a legal column name
        new_column_name = column+'_'+category.replace('"', '').upper()
        df = df.withColumn(new_column_name, function(col(column)))
    df = df.drop(column)
    return df, categories
def numerical_standardize(df, column, mean_value=None, sttdev_value=None):
    """Z-score standardize a numeric column into '<column>_STANDARD'; the raw
    column is dropped.

    Mean/stddev are computed from *df* when not supplied, so training-set
    statistics can be reused on new data. Returns (df, mean, stddev).
    """
    if(mean_value == None and sttdev_value == None):
        mean_value, sttdev_value = df.select(mean(column), stddev(column)).first()
    df = df.withColumn('{}_STANDARD'.format(column), (col(column) - mean_value) / sttdev_value)
    df = df.drop(column)
    return df, mean_value, sttdev_value
def classify_target(df, column):
    """Append a binary '<column>_CLASS' column: 1 where the value is > 0, else 0."""
    function = udf(lambda item: 1 if item > 0 else 0, IntegerType())
    df = df.withColumn('{}_CLASS'.format(column), function(col(column)))
    return df
def split_data_classification(df, train_percent, test_percent, validate_percent, random_state):
    """Stratified train/validate/test split of a dataframe for classification.

    Percentages are given as 0-100. Stratification uses the last column,
    assumed to hold the class label; features are all preceding columns.
    Returns (x_train, y_train, x_validate, y_validate, x_test, y_test) as
    numpy arrays; the validate pair is None when validate_percent is 0.
    """
    # normalize percentage
    train_percent = train_percent/100
    test_percent = test_percent/100
    validate_percent = validate_percent/100
    # convert data into numpy
    data = np.array(df.collect(), dtype=np.float64)
    # get number of records
    records = data.shape[0]
    validate_size = int(validate_percent * records)
    test_size = int(test_percent * records)
    # train absorbs the rounding remainder so the three sizes sum to `records`
    train_size = records - (validate_size + test_size)
    # split data
    data_train, data_remains = train_test_split(data, train_size=train_size, random_state=random_state, stratify=data[:, -1:])
    if validate_size == 0:
        data_test = data_remains
        data_validate = None
    else:
        data_validate, data_test = train_test_split(data_remains, train_size=validate_size, random_state=random_state, stratify=data_remains[:, -1:])
    # get x and y arrays (features = all but last column, target = last column)
    x_train = data_train[:, :-1]
    y_train = data_train[:, -1:]
    x_validate = data_validate[:, :-1] if data_validate is not None else None
    y_validate = data_validate[:, -1:] if data_validate is not None else None
    x_test = data_test[:, :-1]
    y_test = data_test[:, -1:]
    # function return
    return x_train, y_train, x_validate, y_validate, x_test, y_test
def split_data_regression(df, train_percent, test_percent, validate_percent, random_state):
    """Train/validate/test split for regression, stratified by a class column.

    Percentages are given as 0-100. The last column is assumed to hold a
    derived class label used only for stratification; the regression target is
    the second-to-last column and the features are everything before it.
    Returns (x_train, y_train, x_validate, y_validate, x_test, y_test); the
    validate pair is None when validate_percent is 0.
    """
    # normalize percentage
    train_percent = train_percent/100
    test_percent = test_percent/100
    validate_percent = validate_percent/100
    # convert data into numpy
    data = np.array(df.collect(), dtype=np.float64)
    # get number of records
    records = data.shape[0]
    validate_size = int(validate_percent * records)
    test_size = int(test_percent * records)
    # train absorbs the rounding remainder so the three sizes sum to `records`
    train_size = records - (validate_size + test_size)
    # split data
    data_train, data_remains = train_test_split(data, train_size=train_size, random_state=random_state, stratify=data[:, -1:])
    if validate_size == 0:
        data_test = data_remains
        data_validate = None
    else:
        data_validate, data_test = train_test_split(data_remains, train_size=validate_size, random_state=random_state, stratify=data_remains[:, -1:])
    # get x and y arrays (target = second-to-last column; last column is the
    # stratification class and is excluded from both x and y)
    x_train = data_train[:, :-2]
    y_train = data_train[:, -2:-1]
    x_validate = data_validate[:, :-2] if data_validate is not None else None
    y_validate = data_validate[:, -2:-1] if data_validate is not None else None
    x_test = data_test[:, :-2]
    y_test = data_test[:, -2:-1]
    # function return
    return x_train, y_train, x_validate, y_validate, x_test, y_test
# COMMAND ----------
# dbfs data functions
def save_datasets(dataset_name, x_train, y_train, x_validate, y_validate, x_test, y_test, data_columns):
    """Persist a train/validate/test split as a single .npz archive on DBFS.

    The archive is written to a local temp directory first, then moved under
    /dbfs/ml/tmp/hyperopt so every cluster node can read it. Returns the DBFS
    path of the archive (suitable for load_datasets).
    """
    # Save data to a local file first.
    data_filename = '{}.npz'.format(dataset_name)
    local_data_dir = tempfile.mkdtemp()
    local_data_path = os.path.join(local_data_dir, data_filename)
    np.savez(local_data_path,
             x_train=x_train,
             y_train=y_train,
             x_validate=x_validate,
             y_validate=y_validate,
             x_test=x_test,
             y_test=y_test,
             data_columns=data_columns)
    # Move it to DBFS, which is shared among cluster nodes.
    dbfs_tmp_dir = '/dbfs/ml/tmp/hyperopt'
    os.makedirs(dbfs_tmp_dir, exist_ok=True)
    dbfs_data_dir = tempfile.mkdtemp(dir=dbfs_tmp_dir)
    dbfs_data_path = os.path.join(dbfs_data_dir, data_filename)
    shutil.move(local_data_path, dbfs_data_path)
    return dbfs_data_path
def load_datasets(path):
    """Load the train/validate/test split previously written by save_datasets.

    Returns (x_train, y_train, x_validate, y_validate, x_test, y_test,
    data_columns) in that order. allow_pickle is required because the arrays
    may contain object dtypes (e.g. the column names).
    """
    archive = np.load(path, allow_pickle=True)
    split_keys = ('x_train', 'y_train', 'x_validate', 'y_validate',
                  'x_test', 'y_test', 'data_columns')
    return tuple(archive[key] for key in split_keys)
# COMMAND ----------
# general ML training functions
def generate_weights(vector, threshold, value_1, value_2):
    """Return a copy of *vector* with entries equal to *threshold* replaced by
    *value_1* and all other entries replaced by *value_2*.

    Bug fix: the original assigned value_1 first and then overwrote every
    element != threshold with value_2 — clobbering the just-written value_1
    entries whenever value_1 != threshold. Computing the mask up front makes
    the two assignments independent of each other.
    """
    output = np.copy(vector)
    mask = (output == threshold)
    output[mask] = value_1
    output[~mask] = value_2
    return output
def generate_class(vector, threshold, value_1, value_2):
    """Binarise *vector*: entries > *threshold* become *value_1*, the rest
    become *value_2*. Returns a new array; the input is not modified.

    Bug fix: the original's second assignment (<= threshold) could clobber
    freshly written value_1 entries whenever value_1 <= threshold. The mask is
    now computed once, before any assignment.
    """
    output = np.copy(vector)
    mask = (output > threshold)
    output[mask] = value_1
    output[~mask] = value_2
    return output
def save_results(y_validate, y_test, pred_validate, pred_test):
    """Dump target/prediction pairs for the validation and test sets to CSV
    files in the working directory and attach both as artifacts to the active
    MLflow run."""
    df_validate = pd.DataFrame({'Target': y_validate.reshape(-1), 'Predictions': pred_validate.reshape(-1)})
    df_test = pd.DataFrame({'Target': y_test.reshape(-1), 'Predictions': pred_test.reshape(-1)})
    df_validate.to_csv('validation_sample.csv', index=False)
    df_test.to_csv('test_sample.csv', index=False)
    mlflow.log_artifact('validation_sample.csv')
    mlflow.log_artifact('test_sample.csv')
def log_tags(params):
    """Record run-identifying metadata from *params* as MLflow tags.

    Expects keys: model_type, model_name, training_log_path, dataset_size,
    data_query.
    """
    mlflow.set_tag('model_type', params['model_type'])
    mlflow.set_tag('model_name', params['model_name'])
    mlflow.set_tag('training_log_path', params['training_log_path'])
    mlflow.set_tag('dataset_size', params['dataset_size'])
    mlflow.set_tag('data_query', params['data_query'])
def log_pre_processing(params):
    """Pickle the pre-processing state (params['pre_processing']) and attach
    it to the active MLflow run so inference can replay the exact same
    transformations."""
    with open('pre_processing.pickle', 'wb') as file:
        pickle.dump(params['pre_processing'], file)
    mlflow.log_artifact('pre_processing.pickle')
def metrics(pred, actual, suffix=None, monitoring=False):
    """Compute regression metrics plus per-class P/R/F1 on the sign of values.

    Predictions and targets are binarised with threshold 0 (value > 0 -> 1)
    for the classification metrics. Keys get ``_<suffix>`` appended when
    *suffix* is given. Unless *monitoring* is True, every metric is also
    logged to the active MLflow run. Returns the metrics dict.

    Bug fix: the 'mae' entry previously stored the RMSE value (the computed
    mae was never used); it now stores the actual MAE.
    """
    # Derive hard class labels (1 if value > 0 else 0) for P/R/F1.
    pred_class = generate_class(pred, 0, 1, 0)
    actual_class = generate_class(actual, 0, 1, 0)
    # Regression metrics.
    rmse = mean_squared_error(actual, pred, squared=False)
    mae = mean_absolute_error(actual, pred)
    r2 = r2_score(actual, pred)
    # Precision/recall/F1 for the positive (1) and negative (0) class.
    p_c_1, r_c_1, f_c_1, _ = precision_recall_fscore_support(actual_class, pred_class, average='binary', pos_label=1, warn_for = tuple())
    p_c_0, r_c_0, f_c_0, _ = precision_recall_fscore_support(actual_class, pred_class, average='binary', pos_label=0, warn_for = tuple())

    def _key(name):
        # Append the suffix (e.g. "validate"/"test") to a metric name when given.
        return name if suffix is None else '{}_{}'.format(name, suffix)

    output = {_key('rmse'): rmse,
              _key('mae'): mae,
              _key('r2'): r2,
              _key('p_c_1'): p_c_1,
              _key('r_c_1'): r_c_1,
              _key('f_c_1'): f_c_1,
              _key('p_c_0'): p_c_0,
              _key('r_c_0'): r_c_0,
              _key('f_c_0'): f_c_0}
    # log metrics to MLflow unless we are only monitoring
    if not monitoring:
        for key in output:
            mlflow.log_metric(key, output[key])
    return output
def confidence(v_metrics, t_metrics):
    """Compute validation-vs-test agreement (in %) for each tracked metric.

    100 means the test metric equals the validation metric; lower values mean
    a larger relative gap (a rough over-fitting indicator). Every value is
    also logged to the active MLflow run.

    Bug fix: the MAE-based confidence was stored under 'confidence_rmse',
    silently overwriting the RMSE-based one; it now gets its own
    'confidence_mae' key.
    """
    def _agreement(metric):
        # Relative agreement of the test metric with its validation baseline.
        test_val = t_metrics['{}_test'.format(metric)]
        validate_val = v_metrics['{}_validate'.format(metric)]
        return 100 - abs(100 * ((test_val - validate_val) / validate_val))

    output = dict()
    output['confidence_rmse'] = _agreement('rmse')
    output['confidence_mae'] = _agreement('mae')
    output['confidence_r2'] = _agreement('r2')
    output['confidence_p_c_1'] = _agreement('p_c_1')
    output['confidence_p_c_0'] = _agreement('p_c_0')
    output['confidence_r_c_1'] = _agreement('r_c_1')
    output['confidence_r_c_0'] = _agreement('r_c_0')
    output['confidence_f_c_1'] = _agreement('f_c_1')
    output['confidence_f_c_0'] = _agreement('f_c_0')
    # log metrics
    for key in output:
        mlflow.log_metric(key, output[key])
return output | [
"mohammedadel28100@gmail.com"
] | mohammedadel28100@gmail.com |
e0e29101fde9257d518a095feed189c4229f9551 | b164a8099abc0472ad0743c0fb7655e352be7580 | /search_engine_26/webbrowser.py | 01f0e6119d0c199a056b204add795dde6ff39292 | [] | no_license | KevinBean/practice | 411ab52af8a7f4227ca18e98849932f0e07c37de | 335f0937455e6e9b6a9836087b814bd95776f290 | refs/heads/master | 2021-01-20T18:21:01.275786 | 2017-02-05T14:36:51 | 2017-02-05T14:36:51 | 62,072,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from Tkinter import *
import webbrowser
# Target page opened when the button is clicked.
url = 'http://www.sampleurl.com'
# Minimal Tk window containing a single button.
root = Tk()
frame = Frame(root)
frame.pack()
def OpenUrl():
    """Open `url` in the system's default web browser."""
    webbrowser.open(url)
button = Button(frame, text="CLICK", command=OpenUrl)
button.pack()
root.mainloop() | [
"bianbin@bianbindeiMac.local"
] | bianbin@bianbindeiMac.local |
ac3891118e031178c270368eee07a0b88bb5c19c | f2e4c4bb2c287a87c9362aa29317b8ad5702e35f | /IECS-Attendance/seg.py | 9c983afcde2a7ca38fd464e986e20ccf1439db4b | [
"MIT"
] | permissive | magictomagic/IECS-Attendance | cc0b1a28844c5f50abad96f3a81fc332bc6e175b | 77f2c8e86b1d28220a53bf64b198f28faf982da9 | refs/heads/master | 2020-12-19T05:05:42.948798 | 2020-04-16T10:53:45 | 2020-04-16T10:53:45 | 235,629,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import face_recognition
# Load the photo from the VM shared folder as an RGB numpy array.
image = face_recognition.load_image_file("/mnt/hgfs/share/raw1.jpg")
# Detect faces; each entry is a (top, right, bottom, left) pixel box.
face_locations = face_recognition.face_locations(image)
| [
"noreply@github.com"
] | noreply@github.com |
f24083eb0c7654f23ecf8369b85752a9772562e2 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/targeting_dimension.py | 76a70911a6f15e25b79765af970fefb4bcd708ba | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.enums',
marshal='google.ads.googleads.v4',
manifest={
'TargetingDimensionEnum',
},
)
# NOTE: generated by the protobuf tooling (see file header) — do not edit the
# values by hand; regenerate from the .proto definition instead.
class TargetingDimensionEnum(proto.Message):
    r"""The dimensions that can be targeted. """
    class TargetingDimension(proto.Enum):
        r"""Enum describing possible targeting dimensions."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        KEYWORD = 2
        AUDIENCE = 3
        TOPIC = 4
        GENDER = 5
        AGE_RANGE = 6
        PLACEMENT = 7
        PARENTAL_STATUS = 8
        INCOME_RANGE = 9
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
260cf46cb8ae67305d4ab5c35b6c2811fb620ea8 | a573577081d7d33b4f8f52a7e5cfc5878a82161c | /backend/General/binary_search.py | b819a4ce0d5fc83f0fd3c5ef29ebbb0d91e94671 | [] | no_license | akshaygoregaonkar/Interview_prep | 38d320aa92e0bfba5e6d8c36ad8f93bff6a11e97 | abf6f2ffb969c7e1cd6c80cb79f41d4659de53ad | refs/heads/main | 2023-06-18T00:47:51.102679 | 2021-07-19T06:51:30 | 2021-07-19T06:51:30 | 387,366,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | my_list=[1,2,4,5,6,7,8]
def binary_search(arr, num):
    """Classic iterative binary search over an ascending sorted list.

    Returns "Found" if *num* is present in *arr*, else "Not found", matching
    the two equivalent commented-out drafts this consolidates into a single
    working implementation. Runs in O(log n).
    """
    first = 0
    last = len(arr) - 1
    while first <= last:
        mid = (first + last) // 2
        if arr[mid] == num:
            return "Found"
        if arr[mid] < num:
            # Target is in the upper half.
            first = mid + 1
        else:
            # Target is in the lower half.
            last = mid - 1
    return "Not found"
| [
"akshaygoregaonkar@gmail.com"
] | akshaygoregaonkar@gmail.com |
c35c308f5d7740ec9937b8e41207a492b2bf59e5 | 317fe92deb60efde442ddfd24a286f10e14c2156 | /Q8/Q8/Q8.py | 395effffddf45103d4a2cf7b3c20a8b8332d5dd0 | [] | no_license | mecopel1/Exercise-3 | 549602f89f6553c34084bdebfc3ee3978aa62344 | 1bb0c3893cc35efa64ae557bcf98d5533c6288a6 | refs/heads/master | 2020-04-24T06:41:28.881550 | 2019-02-21T00:59:51 | 2019-02-21T00:59:51 | 171,774,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | import arcpy
# Point arcpy at the exercise geodatabase that holds the feature classes.
arcpy.env.workspace=r'D:\Grad_Spring2019\610-Programming\Exercise3\Exercise 3.gdb'
# GetCount returns a Result object whose string form is the record count.
recordCount=arcpy.GetCount_management('CallsforService')
print(recordCount)
"noreply@github.com"
] | noreply@github.com |
abc6121c4cf765d98ae57c3b7c28d3ce6916dfdd | d07ccba788a664362f7dbd532b314b6b39d7811d | /bs4/tests/test_lxml.py | ea6f4ebb0c9e8d0059a3c0a30c4ca0df1db7d658 | [] | no_license | drshah96/Python-for-Everybody | c5b9ae41d9a0170856ecde1969d96ec9ce82921b | 42f3f9ff9b3f492240d2b76a8c63470e51d8edd2 | refs/heads/master | 2022-11-05T23:24:44.837335 | 2020-06-16T17:55:49 | 2020-06-16T17:55:49 | 267,389,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | """Tests to ensure that the lxml tree builder generates good trees."""
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError as e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulStoneSoup,
)
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        # Each smoke test runs against a fresh lxml HTML tree builder.
        return LXMLTreeBuilder()
    def test_out_of_range_entity(self):
        # Entities outside the valid Unicode range must be dropped, not crash.
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2, 3, 5, 0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())
    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        self.assertEqual("<b/>", str(soup.b))
        self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``XMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        return LXMLTreeBuilderForXML()
| [
"drshah96@gmail.com"
] | drshah96@gmail.com |
b1a2ca351fec444118b688bab22debb105cc3f40 | 05c955a8007b5845228968f3de1b92e4db3c8b1a | /bin/reserve_urn | 3da9863173d4631dcfe28189070d811c2b802243 | [
"MIT"
] | permissive | runelk/NB_URN_Client_Python | c9c36e5130c6a48db1c4f22cc6c254414179042f | 8b58309f9c48f3f5c10065cee02d9ebe2b20ee2c | refs/heads/master | 2022-07-31T05:59:47.367500 | 2014-06-26T12:43:03 | 2014-06-26T12:43:03 | 20,129,688 | 0 | 1 | MIT | 2022-07-08T19:12:53 | 2014-05-24T13:05:43 | Python | UTF-8 | Python | false | false | 1,008 | #!/usr/bin/env python
import os, sys
from optparse import OptionParser
import nb_urn_client
default_config = os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'config', 'config.yml'
)
parser = OptionParser()
parser.add_option("--urn", dest="urn", help="the URN to reserve")
parser.add_option("--username", dest="username", help="Username (if not provided by the config file)")
parser.add_option("--password", dest="password", help="Password (if not provided by the config file)")
parser.add_option("-c", "--config", dest="config", help="A YAML config file")
(options, args) = parser.parse_args()
if options.urn:
c = nb_urn_client.NbUrnClient(
username=options.username if options.username else None,
password=options.password if options.password else None,
config_file=options.config if options.config else default_config
)
c.login()
print c.reserve_urn(options.urn)
c.logout()
else:
sys.stderr.write("Usage: reserve_urn --urn URN\n")
| [
"rune.knudsen@uib.no"
] | rune.knudsen@uib.no | |
bb8bfb8faa740e6ec8025c6db5e79e0e5ff83ddd | 95b0f53429835929da4bca6d5b5480f96e7514ae | /chrome-screenshot/yahoo_screenshot.py | 4a03ce0640f2b944a37244862e42e16464ca2c14 | [] | no_license | tyogoutomo/web-scraping | 1b23c831f2ed9940ee99d30a8fe65ddaaf36e808 | ddcaa0cb8e08b60ec23d356e9915e15bbe01f36b | refs/heads/master | 2021-03-07T05:05:41.068849 | 2020-03-17T10:39:46 | 2020-03-17T10:39:46 | 246,247,818 | 0 | 0 | null | 2020-03-17T10:39:48 | 2020-03-10T08:35:56 | Python | UTF-8 | Python | false | false | 2,172 | py | import os, time, errno
from datetime import datetime
from optparse import OptionParser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
CHROME_PATH = '/usr/bin/google-chrome'
CHROMEDRIVER_PATH = '/usr/bin/chromedriver'
WINDOW_SIZE = "2560,1440"
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=%s" % WINDOW_SIZE)
chrome_options.binary_location = CHROME_PATH
def YahooCrawler(keyword, out_path):
driver = webdriver.Chrome(
executable_path=CHROMEDRIVER_PATH,
chrome_options=chrome_options
)
driver.get("https://images.search.yahoo.com/")
time.sleep(3)
first_search_bar = driver.find_element_by_class_name("yschsp")
first_search_bar.send_keys(keyword)
submit_button = driver.find_element_by_class_name("ygbt")
submit_button.click()
time.sleep(3)
scroll_iterator = 0
height = 0
for i in range(10):
next_height = driver.execute_script("return document.body.scrollHeight")
if next_height == height:
time.sleep(3)
# break
try:
driver.execute_script("window.scrollBy(0,512)")
time.sleep(3)
find_more = driver.find_element_by_name("more-res")
find_more.click()
print("find more clicked")
time.sleep(3)
except:
print("reached end of page")
# driver.close()
break
height = driver.execute_script("return document.body.scrollHeight")
save_path = os.path.join(out_path, keyword)
try:
os.makedirs(save_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
now = datetime.now()
timestamp = datetime.timestamp(now)
print('making Yahoo screenshots for', keyword, (i+1))
driver.save_screenshot(os.path.join(save_path, str(timestamp)+'.png'))
driver.execute_script("window.scrollBy(0,1280)")
time.sleep(5)
scroll_iterator += 1
driver.close() | [
"luqman.rahardjo@gmail.com"
] | luqman.rahardjo@gmail.com |
83c9ffb1329c04e16bd0ef763824c6cb54b19f7b | 3848175d566e8cbd21f66bdca57882706ed6d4ca | /src/7_3_mnist_deep_cnn.py | 72946f6e40a43033f7756e217a03658568372ebf | [] | no_license | kasha-seo/ML_forEveryone | a4b519b002843423dad4feecb9b1e02c3ba23e97 | 0fa42b497995079f2f60fe51f5d03274946ec034 | refs/heads/master | 2023-08-08T13:13:41.365363 | 2018-12-30T10:34:26 | 2018-12-30T10:34:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | import tensorflow as tf
import random
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777) # reproducibility
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# hyper parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# dropout (keep_prob) rate 0.7~0.5 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1]) # 이미지 입력으로 넣기 위해 reshape. -1은 N개의 값. img 28x28x1 (black/white)
Y = tf.placeholder(tf.float32, [None, 10])
# L1 ImgIn shape=(?, 28, 28, 1)
W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
# Conv -> (?, 28, 28, 32)
# Pool -> (?, 14, 14, 32)
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME') # VALID : not using padding
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
'''
Tensor("Conv2D:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("Relu:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("MaxPool:0", shape=(?, 14, 14, 32), dtype=float32)
Tensor("dropout/mul:0", shape=(?, 14, 14, 32), dtype=float32)
'''
# L2 ImgIn shape=(?, 14, 14, 32)
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
# Conv ->(?, 14, 14, 64)
# Pool ->(?, 7, 7, 64)
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
'''
Tensor("Conv2D_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("Relu_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("MaxPool_1:0", shape=(?, 7, 7, 64), dtype=float32)
Tensor("dropout_1/mul:0", shape=(?, 7, 7, 64), dtype=float32)
'''
# L3 ImgIn shape=(?, 7, 7, 64)
W3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
# Conv ->(?, 7, 7, 128)
# Pool ->(?, 4, 4, 128)
# Reshape ->(?, 4 * 4 * 128) # Flatten them for FC
L3 = tf.nn.conv2d(L2, W3, strides=[1, 1, 1, 1], padding='SAME')
L3 = tf.nn.relu(L3)
L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
1, 2, 2, 1], padding='SAME')
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
L3_flat = tf.reshape(L3, [-1, 128 * 4 * 4])
'''
Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)
Tensor("MaxPool_2:0", shape=(?, 4, 4, 128), dtype=float32)
Tensor("dropout_2/mul:0", shape=(?, 4, 4, 128), dtype=float32)
Tensor("Reshape_1:0", shape=(?, 2048), dtype=float32)
'''
# L4 FC 4x4x128 inputs -> 625 outputs
W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625],
initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([625]))
L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
'''
Tensor("Relu_3:0", shape=(?, 625), dtype=float32)
Tensor("dropout_3/mul:0", shape=(?, 625), dtype=float32)
'''
# L5 Final FC 625 inputs -> 10 outputs
W5 = tf.get_variable("W5", shape=[625, 10],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
logits = tf.matmul(L4, W5) + b5
'''
Tensor("add_1:0", shape=(?, 10), dtype=float32)
'''
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train my model
print('Learning started. It takes sometime.')
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
# if you have a OOM error, please refer to lab-11-X-mnist_deep_cnn_low_memory.py
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
tf.argmax(logits, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))
# plt.imshow(mnist.test.images[r:r + 1].
# reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show() | [
"suhjh14@naver.com"
] | suhjh14@naver.com |
adb2babffe1e8af59930020f6c17f6d45db5f76f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/KoubeiTradeOrderConsultRequest.py | 2defd325c725861c41724ed3832b3e090ad2407b | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,936 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiTradeOrderConsultModel import KoubeiTradeOrderConsultModel
class KoubeiTradeOrderConsultRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiTradeOrderConsultModel):
self._biz_content = value
else:
self._biz_content = KoubeiTradeOrderConsultModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.trade.order.consult'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1545c927376751fd9a43d184b304383c8362e632 | 24372e13ed3d8b6f11fb3681421ac95d571e6320 | /python-plotting-api-master/test/test_app.py | 02ae58a884523c96da72687b688850f45e009aea | [] | no_license | aokada228/pythonplotting | 9ab54eb617d9b317ab4199b4f63dc116f11c8e4a | 235527b4c80e3b0914bff15413d7e29d42043c73 | refs/heads/master | 2023-02-05T12:06:28.473671 | 2019-12-05T20:42:46 | 2019-12-05T20:42:46 | 226,185,986 | 0 | 0 | null | 2023-02-02T05:14:44 | 2019-12-05T20:45:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,065 | py | from python_plotting_api.app import app
test_client = app.test_client()
def test_root():
response = test_client.get('/')
assert 200 == response.status_code
def test_get_correlation_matrix():
response = test_client.get('/plots/breast_cancer_data/correlation_matrix')
assert 200 == response.status_code
assert 'image/png' == response.content_type
def test_get_pairplot_matrix():
cols = ['worst concave points', 'mean concavity',
'worst perimeter', 'worst radius',
'worst area']
query_string = ','.join(cols)
response = test_client.get(f'/plots/breast_cancer_data/pairplot/features/{query_string}')
assert 200 == response.status_code
assert 'image/png' == response.content_type
cols = ['worst concave points', 'mean concavity',
'worst perimeter', 'worst radius',
'worst area', 'wrong_feature']
query_string = ','.join(cols)
response = test_client.get(f'/plots/breast_cancer_data/pairplot/features/{query_string}')
assert 400 == response.status_code
| [
"azusaokada@bobcat-124-191.bates.edu"
] | azusaokada@bobcat-124-191.bates.edu |
6315e17d884b08aa11eab2a3d71e667e140f18bc | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc040/A/4812227.py | e74fe7e059ba85e08c3cc200758d2172a2a1467f | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | n,x=map(int,input().split());print(min(x-1,n-x)) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
5cf987b549ccdf421a9487686fc93e73f7fe1de1 | 729212d61f20666b5cef0aa65e17ddd3a326c58e | /legacy/servolib2.py | eb961278a68739a9f1d9966445df345528b5a767 | [] | no_license | anatolyilin/OpenCV-bot | 8dce147bf28fa0f4bd828770781d09b9a1dd0974 | 05351c7d1a733ddf667bbf023e56b41618e462dc | refs/heads/master | 2020-03-17T14:01:34.671407 | 2018-06-06T20:27:51 | 2018-06-06T20:27:51 | 133,654,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | import RPi.GPIO as GPIO
import pickle
pwm = 0
debug_info = 0
#zero_pos = 1.2 oude waarde
zero_pos = 0
zero_pos_man = 0
rico = 0
left_val = 0
right_val = 0
def init_pickle(file ='servo.conf', req_debug_info = 0):
global debug_info, zero_pos , rico, left_val, right_val, zero_pos_man , pwm
debug_info = req_debug_info
# [ rico , zero_pos, left_val, right_val , manual zero , pin ]
with open(file, 'rb') as fp:
listRead = pickle.load(fp)
if listRead[4] != -1:
zero_pos_man = listRead[4]
else:
zero_pos_man = listRead[1]
zero_pos= listRead[1]
if debug_info == 1:
print "Pickle data" + str(listRead)
pin = listRead[5]
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.OUT)
pwm = GPIO.PWM(pin, 50)
pwm.start(zero_pos)
rico = listRead[0]
left_val =listRead[2]
right_val = listRead[3]
def init_servo(pin=8,req_debug_info = 0, req_zero_pos = 7.5, left_val_req = 2, right_val_req=12.5):
GPIO.setmode(GPIO.BOARD)
global debug_info, zero_pos, rico, left_val, right_val, zero_pos, pwm
debug_info = req_debug_info
left_val = left_val_req
right_val = right_val_req
zero_pos = req_zero_pos
rico = (right_val-left_val)/180
GPIO.setup(pin, GPIO.OUT)
pwm =GPIO.PWM(pin, 50)
pwm.start(zero_pos)
if debug_info == 1:
print "Servo initialled on pin %d \n start postion is set to %f (default: 7.5) \n Call debug(False) to disable" %(pin, zero_pos )
def moveto(posDeg, req_debug_info=0):
pos = -rico*posDeg + zero_pos
pwm.ChangeDutyCycle(pos)
if debug_info == 1 or req_debug_info == 1:
print "Servo set on %f pos, based on start position %f (default: 7.5)" % (pos, zero_pos)
def movetoZero():
moveto(zero_pos_man)
def debug(value = False):
debug_info == 0
if value:
debug_info == 1
def cleanup():
GPIO.cleanup() | [
"user43671@user43671s-MacBook-Pro.local"
] | user43671@user43671s-MacBook-Pro.local |
99006543ae64f269e68e80e36cfcd49436d904c2 | 7a571842e05ce6bdb75a7f247a5113235d4d24f3 | /users/migrations/0004_auto_20200424_1647.py | 200309a89122cc65a210e07821fb8351bc7c89f3 | [] | no_license | bneeland/meal-hippo | 7cb9ad75e32913f809d32070facf0f8318e270e9 | f9ace432b163b2e921a838620696629a0d1be9b2 | refs/heads/master | 2023-01-03T21:02:44.831870 | 2020-10-21T15:39:48 | 2020-10-21T15:39:48 | 254,940,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Generated by Django 3.0.4 on 2020-04-24 22:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0003_customuser_instructions'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='address',
),
migrations.RemoveField(
model_name='customuser',
name='instructions',
),
migrations.RemoveField(
model_name='customuser',
name='phone',
),
]
| [
"brian.neeland@protonmail.com"
] | brian.neeland@protonmail.com |
b2801badf5bd6284bd289b522b327b3edbb347b5 | 6131b2738a7c087dfa6907c624453576f6f0e393 | /银行转账pmysql版本/Bank_Transfer.py | 34c606ac76f8f664750def22983d82d05855ec09 | [] | no_license | heheddff/myPythonProcess | 60ef240130cd02906dc500eedb397a9662c02e5a | 885a25dd2a9cd43801306d9e70b9ce89daec4406 | refs/heads/master | 2020-04-08T19:09:18.192738 | 2019-08-06T02:52:54 | 2019-08-06T02:52:54 | 159,642,468 | 4 | 5 | null | null | null | null | GB18030 | Python | false | false | 2,049 | py | # coding=gbk
import pymysql
class Money():
def __init__(self,sid,tid,mon):
self.conn = pymysql.connect(
host="127.0.0.1",
port=3306,
user='root',
passwd='****',
db='test'
)
self.cursor = self.conn.cursor();
self.table = "money"
self.sid = sid
self.tid = tid
self.mon = mon
def checkuser(self,userid):
try:
sql = "select userid from "+self.table+" where userid=%s"
self.cursor.execute(sql,(userid,))
res = self.cursor.fetchone()
if res is None:
raise Exception("账号{}不存在".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def reducemoney(self,userid,money):
try:
sql = "update "+self.table+" set money=money-%s where userid=%s"
self.cursor.execute(sql,(money,userid))
if self.cursor.rowcount != 1:
raise Exception("账号{}转账失败".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def addmoney(self,userid,money):
try:
sql = "update "+self.table+" set money=money+%s where userid=%s"
self.cursor.execute(sql,(money,userid,))
if self.cursor.rowcount != 1:
raise Exception("账号{}收账失败".format(userid))
finally:
pass
#self.cursor.close()
#self.conn.close()
def checkmoney(self,userid,money):
try:
sql = "select userid from "+self.table+" where userid=%s and money>%s"
self.cursor.execute(sql,(userid,money))
res = self.cursor.fetchone()
if res is None:
raise Exception("账号{}余额小于{}".format(userid,money))
finally:
pass
#self.cursor.close()
#self.conn.close()
def run(self):
try:
self.checkuser(self.sid)
self.checkuser(self.tid)
self.checkmoney(self.sid,self.mon)
self.reducemoney(self.sid,self.mon)
self.addmoney(self.tid,self.mon)
self.conn.commit()
except Exception as e:
self.conn.rollback()
raise e
finally:
#pass
self.cursor.close()
self.conn.close()
try:
m = Money(11,13,100)
m.run()
except Exception as e:
#pass
print(e)
else:
print("转账成功")
| [
"qq2003qq@126.com"
] | qq2003qq@126.com |
f819b4533712c554f55d0b889a5f81e78c9c0c59 | 8bf5e4cb5828dda1dd78063958b097b3a59989ab | /PyHtmlDebuger/wsgi.py | 20a7b2d4c4ccf3953475ec885c60ca45b8b60731 | [] | no_license | WingGao/PyHtmlDebugger | 51bb0f894d470a5777ac6b13a99d685e52fb19ab | 36187bd55a0a28b6be9f17ff01a2219b0fd30a9e | refs/heads/master | 2021-05-04T10:21:57.163472 | 2017-04-25T09:03:38 | 2017-04-25T09:03:38 | 48,358,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for PyHtmlDebuger project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PyHtmlDebuger.settings")
application = get_wsgi_application()
| [
"wing.gao@live.com"
] | wing.gao@live.com |
d74fcb05f9f029d6ad582d79695f8433a2079244 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_test/_internal/import_analysis.py | 9cc5376feb59d3d3b5b855832763ded054f1feb9 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 14,066 | py | """Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
import re
from . import types as t
from .io import (
read_binary_file,
)
from .util import (
display,
ApplicationError,
is_subdir,
)
from .data import (
data_context,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
"""Return a dictionary of module_utils names mapped to sets of python file paths.
:type compile_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = set([import_name])
results = set([import_name])
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = get_import_path(import_name)
if import_path not in imports_by_target_path:
import_path = get_import_path(import_name, package=True)
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path in imports_by_target_path:
if module_util in imports_by_target_path[target_path]:
for module_util_import in sorted(module_util_imports):
if module_util_import not in imports_by_target_path[target_path]:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
imports_by_target_path[target_path].add(module_util_import)
imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
package_path = get_import_path(module_util, package=True)
if os.path.exists(package_path) and not os.path.getsize(package_path):
continue # ignore empty __init__.py files
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
else:
prefix = 'ansible.module_utils'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
if path == base_path:
name = prefix
else:
name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
# Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
# See: https://www.python.org/dev/peps/pep-0263
# Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
code = read_binary_file(path)
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
def get_import_path(name, package=False): # type: (str, bool) -> str
"""Return a path from an import name."""
if package:
filename = os.path.join(name.replace('.', '/'), '__init__.py')
else:
filename = '%s.py' % name.replace('.', '/')
if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
path = os.path.join('lib', filename)
elif data_context().content.collection and (
name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
path = '/'.join(filename.split('/')[3:])
else:
raise Exception('Unexpected import name: %s' % name)
return path
def path_to_module(path): # type: (str) -> str
"""Convert the given path to a module name."""
module = os.path.splitext(path)[0].replace(os.path.sep, '.')
if module.endswith('.__init__'):
module = module[:-9]
return module
def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
"""Convert a relative import to an absolute import."""
if level <= 0:
absolute_name = name
elif not module:
display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
absolute_name = 'relative.nomodule'
else:
parts = module.split('.')
if level >= len(parts):
display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
absolute_name = 'relative.abovelevel'
else:
absolute_name = '.'.join(parts[:-level] + [name])
return absolute_name
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
"""
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
# Various parts of the Ansible source tree execute within diffent modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
# During analyis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^hacking/build_library/build_ansible/', 'build_ansible/'),
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node):
"""
:type node: ast.Import
"""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node):
"""
:type node: ast.ImportFrom
"""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
    def add_import(self, name, line_number):
        """Record an import of *name* if it (or one of its parent packages) is a known module_util.
        :type name: str
        :type line_number: int
        """
        import_name = name
        # Walk up the dotted path (a.b.c -> a.b -> a) while it still looks like a
        # module_util name; this handles imports of attributes or submodules of a
        # module_utils package.
        while self.is_module_util_name(name):
            if name in self.module_utils:
                if name not in self.imports:
                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
                    self.imports.add(name)
                return  # duplicate imports are ignored
            name = '.'.join(name.split('.')[:-1])
        # No known module_util matched any prefix of the imported name.
        if is_subdir(self.path, data_context().content.test_path):
            return  # invalid imports in tests are ignored
        # Treat this error as a warning so tests can be executed as best as possible.
        # This error should be detected by unit or integration tests.
        display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
    @staticmethod
    def is_module_util_name(name):  # type: (str) -> bool
        """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
        # ansible-core content: anything under the ansible.module_utils namespace.
        if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
            return True
        # collection content: only module_utils belonging to the collection under test.
        if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
            return True
        return False
| [
"sifang@cisco.com"
] | sifang@cisco.com |
636c7ba4934f2730f0e4c252572cf5d8c46d814b | 6016100d6707eb8c3a4c53df81b40a0e235e04fd | /whois.py | f5898a69cdda0807e69111fbea760a3b8f6959b9 | [] | no_license | Ulate11/whois-assignment | 86d10cbec1cf472b93f60b9ae264c1cb0068e6e0 | 8537bc231d73d064485a5cea2ef342a2e111d2b7 | refs/heads/main | 2023-08-21T18:49:58.267983 | 2021-09-15T16:22:19 | 2021-09-15T16:22:19 | 406,841,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,125 | py | import bz2, json, pickle, yaml
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from whoisapi import *
# file to store the info provided by whoisxmlapi.
SAVED_DATA_FILE = '/app/yesterday.pic'
# list of domains to verify.
DOMAINS_LIST_FILE = '/app/domains.yml'
# the following is sensitive data, please clean values when publishing the code on public sities like github.
#api key for whoisxmlapi
apiKey = 'your whoisxmlapi key'
# email credentials for sending emails.
smtpPort = 587
smtpServer = 'smtp.gmail.com'
senderMail = 'your email'
senderPw = 'your password'
recipients = ["<mail1>", "<mail2>"]
client = None
# old data loaded from file
yesterdayData = {}
# current data provided by whoisxmlapi
currentData = {}
# domains with updated information, these will be send by e-mail.
updatedData = {}
def loadSettings():
    """ loads the runtime configuration from /app/appSettings.yaml into the
    module-level settings globals, replacing the placeholder defaults above.
    note: raises KeyError if any expected key is missing from the yaml file
    (assumes the yaml top level is a mapping).
    """
    global senderMail, senderPw, smtpServer, smtpPort, recipients, apiKey
    y = loadYaml("/app/appSettings.yaml")
    senderMail = y['senderMail']
    senderPw = y['senderPassword']
    recipients = y['recipients']
    smtpServer = y['smtpServer']
    smtpPort = y['smtpPort']
    apiKey = y['whoisApiKey']
def setWhoisClient():
    """ creates the whoisxmlapi client with the configured apiKey and stores it
    in the module-level 'client' variable.
    note: call loadSettings() first so the real key is used instead of the
    placeholder default.
    """
    global client
    client = Client(api_key = apiKey)
def loadPickle(path:str):
    """ loads a compressed pickle file
    @param path: the string path to the file to be loaded
    @return: the python object from the file.
    """
    # Use a context manager so the file handle is always closed; the original
    # opened the file and never closed it (resource leak).
    with bz2.BZ2File(path, 'rb') as b:
        return pickle.load(b)
def savePickle(d, path:str):
    """ saves the provided data in the specified path, pickled and compressed in bz2 format.
    @param d: the python object to be saved.
    @param path: the path to the file in the disk. This file will be replaced if exist.
    """
    # Use a context manager so the file is closed even when pickling raises;
    # the original only closed the handle on the success path.
    with bz2.BZ2File(path, 'wb') as f:
        # protocol 4 kept for compatibility with previously written snapshots.
        pickle.dump(d, f, 4)
def getDomainInfo(domain:str):
    """ asks whoisxmlapi for the information of the specified domain.
    note: set the module-level 'client' variable (via setWhoisClient) before
    using this function, otherwise this fails on the None placeholder.
    @param domain: a str with the domain name to look up.
    @return: whois record object as returned by the whoisxmlapi client.
    """
    return client.data(domain)
def loadYaml(path:str):
    """ loads a yaml file from the specified path.
    @param path: path to the yaml file.
    @return: the loaded yaml content (typically a dict) in memory.
    """
    with open(path) as f:
        return yaml.full_load(f)
def setEmail(d, customField, w):
    """ this function checks if the field 'email' exists in the specified object, and checks if the field is not blank. If previous conditions are true, add the value to the provided dictionary in the specified customField.
    @param d: the dictionary to add the field, if this field exist.
    @param customField: the name of the field as the value will be added to the dictionary.
    @param w: the object to be verified (may be None or lack the attribute).
    """
    try:
        email = w.email
    except Exception:
        # Contact objects may be None or may fail when reading 'email'; treat
        # any failure as "no email available". Narrowed from a bare 'except:'
        # which also swallowed KeyboardInterrupt/SystemExit.
        return
    # blank/empty values are not recorded.
    if email: d[customField] = email
def processDomain(domain:str):
    """ this function obtains the domain information, then takes the needed values used in this script.
    note: requires the module-level whois 'client' to be initialized (setWhoisClient).
    @param domain: the string domain used to obtain the info from whoisxmlapi
    @return: dictionary with the needed information for the assignment.
    """
    print ("processing: ", domain)
    w = getDomainInfo(domain)
    data = {
        'whoisCreatedDate': w.created_date,
        'whoisUpdatedDate': w.updated_date,
        'whoisExpiresDate': w.expires_date,
        'domainName': w.domain_name
    }
    # sometimes registrant is not present.
    if w.registrant:
        try:
            data['registrantName'] = w.registrant.name
        except Exception:
            # best-effort: registrant objects may fail when reading 'name'.
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
    # look for available emails.
    emails = {}
    contactEmail = w.contact_email
    if contactEmail:
        emails['contactEmail'] = contactEmail
    # collect every contact email address that is present and non-blank.
    setEmail(emails, 'registrant', w.registrant)
    setEmail(emails, 'administrativeContact', w.administrative_contact)
    setEmail(emails, 'technicalContact', w.technical_contact)
    setEmail(emails, 'billingContact', w.billing_contact)
    setEmail(emails, 'zoneContact', w.zone_contact)
    data['emails'] = emails
    return data
def processYmlDomains(path:str):
    """ this function obtains the information from whoisxmlapi, for each domain present in the specified path file.
    the info will be stored in the global dictionary currentData.
    note: the yaml file must contain a top-level 'domains' list.
    @param path: the yaml file with the domains list.
    """
    d = loadYaml(path)
    # each result is keyed by the domain string itself.
    for k in d['domains']:
        currentData[k] = processDomain(k)
def checkUpdatedInfo(oldInfo, newInfo):
    """ compares yesterday's record with today's record for one domain.
    @param oldInfo: old information, typically the stored in SAVED_DATA_FILE
    @param newInfo: the new information, typically the provided by whoisxmlapi.
    @return: True if the new information is updated, False otherwise.
    """
    # 'emails' is compared separately below and 'registrantName' is not part of
    # the change detection, so both are excluded from the field-by-field pass.
    ignored_fields = ('emails', 'registrantName')
    fields_changed = any(
        oldInfo[field] != newInfo[field]
        for field in newInfo
        if field not in ignored_fields
    )
    if fields_changed:
        return True
    # Comparing the email mappings as whole dicts covers every case the update
    # check cares about: an added address, a removed address, or a changed one.
    return oldInfo['emails'] != newInfo['emails']
def runProcess():
    """ the main process of the script: fetch current whois data, diff it
    against yesterday's snapshot, e-mail any changes, and persist today's data.
    """
    loadSettings()
    global yesterdayData
    # try to load the file with the old info, the first time the script is executed, this file doesn't exist.
    try:
        yesterdayData = loadPickle(SAVED_DATA_FILE)
    except Exception:
        # narrowed from a bare 'except:'; a missing/corrupt snapshot just means
        # we start from an empty baseline.
        print('unable to load the saved data from yesterday')
    setWhoisClient()
    processYmlDomains(DOMAINS_LIST_FILE)
    # check if any domains information have been updated.
    for k in yesterdayData:
        if (k in currentData) and checkUpdatedInfo(yesterdayData[k], currentData[k]):
            updatedData[k] = currentData[k]
    if updatedData:
        # BUG FIX: the original passed the undefined name RECIPIENTS (NameError
        # at runtime); the module-level setting is called 'recipients'.
        sendMail("domains were updated!", 'See the info in the attached json', recipients, updatedData)
    savePickle(currentData, SAVED_DATA_FILE)
    print ("Success!")
def sendMail(title, body, recipients, data = None, dataName = 'data.json'):
    """ sends an e-mail with the provided info.
    note: the 'recipients' parameter shadows the module-level setting of the
    same name; callers pass the list explicitly.
    @param title: email Subject.
    @param body: email body.
    @param recipients: recipients email addresses.
    @param data: python object to be send in the e-mail as json format.
    @param dataName: the json file name to be attached in the email. It will contain the json data.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = title
    msg['From'] = senderMail
    msg['To'] = ', '.join(recipients)
    msg.attach(MIMEText(body, 'plain'))
    if (data):
        # default=str lets values that json cannot serialize directly (e.g. the
        # datetimes in the whois data) fall back to their string representation.
        attachment = MIMEText(json.dumps(data, default=str))
        attachment.add_header('Content-Disposition', 'attachment', filename=dataName)
        msg.attach(attachment)
    context = ssl.create_default_context()
    with smtplib.SMTP(smtpServer, smtpPort) as server:
        # upgrade the connection to TLS before authenticating.
        server.starttls(context=context)
        server.login(senderMail, senderPw)
        server.sendmail(senderMail, recipients, msg.as_string())
if __name__ == "__main__":
runProcess() | [
"noreply@github.com"
] | noreply@github.com |
f7cc63ed9481a1157dda69a78efd84a75f2aceb1 | 86102c0700bc03f4f971e3f0ca6662a6675d0f2b | /scripts/generator.py | 73b8f3f7d0f069431a1ba92a0fc14754f6fc0751 | [
"Apache-2.0"
] | permissive | ChrisStedman/graduate_coding | 5b83455cb10e9dcfefe5a0a38e88c386fa2156cd | 206c5e30e11424f25f2ba73579081e3a64a4f4e2 | refs/heads/main | 2023-05-06T06:50:47.199930 | 2021-05-30T11:12:54 | 2021-05-30T11:12:54 | 371,291,739 | 0 | 0 | Apache-2.0 | 2021-05-27T07:55:09 | 2021-05-27T07:55:08 | null | UTF-8 | Python | false | false | 435 | py | from faker import Faker
import random
import json
fake = Faker(['en-AU'])
# Build a list of fake member records to use as test data.
members = []
for member_id in range(15000):  # renamed from 'id' to avoid shadowing the builtin
    members.append({
        'id': member_id,
        'firstName': fake.first_name(),
        'lastName': fake.last_name(),
        'address': fake.address(),
        'active': bool(random.getrandbits(1))
    })
# Context manager closes the file even if serialization fails; json.dump
# writes the same output as write(json.dumps(...)).
with open('members.json', 'w') as json_file:
    json.dump(members, json_file)
print(fake.address()) | [
"dean.baker@pexa.com.au"
] | dean.baker@pexa.com.au |
512159f46f584c7ac972924a15e750af7c80168a | a51d5e44d7b8f73e55b244e79fdbb67d7dfa72b6 | /00_simple-ml-model.py | 2bdfd08d4d6f42529955535e986ea9c7fa46bf4e | [
"MIT"
] | permissive | mtraina/ml-basis | 3b73ab704726bbb2db849b2f32e538d5740039e2 | 5b6b8a7fad5314fab0b41667a3e6759046b33792 | refs/heads/master | 2020-03-23T15:30:57.718905 | 2018-07-20T20:44:29 | 2018-07-20T20:44:29 | 141,753,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
example took from https://towardsdatascience.com/simple-machine-learning-model-in-python-in-5-lines-of-code-fe03d72e78c6
'''
'''
prepare the data
'''
from random import randint
TRAIN_SET_LIMIT = 10000
TRAIN_SET_COUNT = 4
TRAIN_INPUT = list()
TRAIN_OUTPUT = list()
# Draw TRAIN_SET_COUNT random samples of the target function op = a + 2*b + 3*c;
# the regression below should recover the coefficients (1, 2, 3).
for _ in range(TRAIN_SET_COUNT):
    sample = [randint(0, TRAIN_SET_LIMIT) for _ in range(3)]
    TRAIN_INPUT.append(sample)
    TRAIN_OUTPUT.append(sample[0] + 2 * sample[1] + 3 * sample[2])
'''
create and train the model
'''
from sklearn.linear_model import LinearRegression
predictor = LinearRegression()
predictor.fit(X=TRAIN_INPUT, y=TRAIN_OUTPUT)
'''
test the model
'''
X_TEST = [[10,20,30]]
outcome = predictor.predict(X=X_TEST)
coefficients = predictor.coef_
print('Outcome: {}\nCoefficients:{}'.format(outcome, coefficients)) | [
"matteo.traina.mail@gmail.com"
] | matteo.traina.mail@gmail.com |
d47d1f916f00f4dd257ea19a842125ad1ca74602 | c5c20cac7ad8b77c8e850e96527eafeaf88ca29f | /tstapp/urls.py | a45267800827e9f166a03780bb2039b1f4535d90 | [] | no_license | Muhsin345/TSTREP | 2ddfad1076d9f380d1e77c78fc41df55b92a2af9 | 01ec7177162cb1bd536366c8f3b1fa7d6f0afbc2 | refs/heads/main | 2023-08-15T02:37:25.647986 | 2021-09-15T17:08:59 | 2021-09-15T17:08:59 | 399,522,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from django.urls import path
from . import views
urlpatterns=[
path('Login1/',views.fnLogin),
path('Details/',views.fnDetails),
path('Userhome/',views.fnUser),
path('fbhome/',views.fnFB),
path('prdcts/',views.fnprdcts),
path('bootstrap/',views.fnnav),
path('gridnav/',views.fngrid),
path('facebook/',views.facebook),
path('sample/',views.navsmpl)
] | [
"mzmuhzin123@gmail.com"
] | mzmuhzin123@gmail.com |
d1fb7632fd9a98a58d7346669aaea4a73dda39be | 4106082b62bfad34c0da62514052ba42811d60d9 | /underground/final_statistics/2.5_sort_data.py | bc621b8d6937c5269f0a98262dfe4850fe5a496b | [] | no_license | dcrozz/python | 6bee83ac2b386a5a1d14dab6a53b76be210ff7a0 | 9a3684276a0badf3ca633bf5f21e8c785d65f078 | refs/heads/master | 2020-12-24T16:35:04.409976 | 2017-01-18T13:49:50 | 2017-01-18T13:49:50 | 35,948,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | if __name__ == '__main__':
    from glob import glob
    # process every .txt file in the current working directory.
    file_names = glob('*.txt')
    for file_name in file_names:
        with open(file_name) as data:
            lines = data.readlines()
        # split each comma-separated line into its fields.
        sort_list = []
        for line in lines:
            cur_line = line.strip().split(',')
            sort_list.append(cur_line)
        # sort records numerically by their last column.
        sort_list.sort(key = lambda l : int(l[-1]))
        # rewrite the file in place with the sorted records.
        with open(file_name,'w') as out:
            for item in sort_list:
                out.write(','.join(item) + '\n')
| [
"ukeyim@gmail.com"
] | ukeyim@gmail.com |
cd7c58f94edaf9c24fad73a64af8e44d5887e94d | 90312ba1088363f12408b9869d89e31d6ad658e5 | /mifare_classic/src/python/mifare_classic.py | c96368023c6e19ad3e9a43933d96a426cec021f3 | [
"ISC"
] | permissive | Tosyk/formats-kaitai-io.github.io | c3e9d0df4deae557f5ac4d36290c7052be4c16bb | 1faec646734b93815d39bc638ead4bc9a37eca3e | refs/heads/master | 2023-07-29T07:22:10.818349 | 2021-09-12T11:41:45 | 2021-09-12T11:41:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,148 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class MifareClassic(KaitaiStruct):
"""You can get a dump for testing by the link: https://github.com/zhovner/mfdread/raw/master/dump.mfd
.. seealso::
Source - https://github.com/nfc-tools/libnfc
https://www.nxp.com/docs/en/data-sheet/MF1S70YYX_V1.pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self._raw_sectors = []
self.sectors = []
i = 0
while not self._io.is_eof():
self._raw_sectors.append(self._io.read_bytes((((4 if i >= 32 else 1) * 4) * 16)))
_io__raw_sectors = KaitaiStream(BytesIO(self._raw_sectors[-1]))
self.sectors.append(MifareClassic.Sector(i == 0, _io__raw_sectors, self, self._root))
i += 1
class Key(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.key = self._io.read_bytes(6)
class Sector(KaitaiStruct):
def __init__(self, has_manufacturer, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.has_manufacturer = has_manufacturer
self._read()
def _read(self):
if self.has_manufacturer:
self.manufacturer = MifareClassic.Manufacturer(self._io, self, self._root)
self._raw_data_filler = self._io.read_bytes(((self._io.size() - self._io.pos()) - 16))
_io__raw_data_filler = KaitaiStream(BytesIO(self._raw_data_filler))
self.data_filler = MifareClassic.Sector.Filler(_io__raw_data_filler, self, self._root)
self.trailer = MifareClassic.Trailer(self._io, self, self._root)
class Values(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.values = []
i = 0
while not self._io.is_eof():
self.values.append(MifareClassic.Sector.Values.ValueBlock(self._io, self, self._root))
i += 1
class ValueBlock(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.valuez = [None] * (3)
for i in range(3):
self.valuez[i] = self._io.read_u4le()
self.addrz = [None] * (4)
for i in range(4):
self.addrz[i] = self._io.read_u1()
@property
def addr(self):
if hasattr(self, '_m_addr'):
return self._m_addr if hasattr(self, '_m_addr') else None
if self.valid:
self._m_addr = self.addrz[0]
return self._m_addr if hasattr(self, '_m_addr') else None
@property
def addr_valid(self):
if hasattr(self, '_m_addr_valid'):
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
self._m_addr_valid = ((self.addrz[0] == ~(self.addrz[1])) and (self.addrz[0] == self.addrz[2]) and (self.addrz[1] == self.addrz[3]))
return self._m_addr_valid if hasattr(self, '_m_addr_valid') else None
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = ((self.value_valid) and (self.addr_valid))
return self._m_valid if hasattr(self, '_m_valid') else None
@property
def value_valid(self):
if hasattr(self, '_m_value_valid'):
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
self._m_value_valid = ((self.valuez[0] == ~(self.valuez[1])) and (self.valuez[0] == self.valuez[2]))
return self._m_value_valid if hasattr(self, '_m_value_valid') else None
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
if self.valid:
self._m_value = self.valuez[0]
return self._m_value if hasattr(self, '_m_value') else None
class Filler(KaitaiStruct):
"""only to create _io."""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.data = self._io.read_bytes(self._io.size())
@property
def block_size(self):
if hasattr(self, '_m_block_size'):
return self._m_block_size if hasattr(self, '_m_block_size') else None
self._m_block_size = 16
return self._m_block_size if hasattr(self, '_m_block_size') else None
@property
def data(self):
if hasattr(self, '_m_data'):
return self._m_data if hasattr(self, '_m_data') else None
self._m_data = self.data_filler.data
return self._m_data if hasattr(self, '_m_data') else None
@property
def blocks(self):
if hasattr(self, '_m_blocks'):
return self._m_blocks if hasattr(self, '_m_blocks') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._m_blocks = []
i = 0
while not io.is_eof():
self._m_blocks.append(io.read_bytes(self.block_size))
i += 1
io.seek(_pos)
return self._m_blocks if hasattr(self, '_m_blocks') else None
@property
def values(self):
if hasattr(self, '_m_values'):
return self._m_values if hasattr(self, '_m_values') else None
io = self.data_filler._io
_pos = io.pos()
io.seek(0)
self._m_values = MifareClassic.Sector.Values(io, self, self._root)
io.seek(_pos)
return self._m_values if hasattr(self, '_m_values') else None
class Manufacturer(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.nuid = self._io.read_u4le()
self.bcc = self._io.read_u1()
self.sak = self._io.read_u1()
self.atqa = self._io.read_u2le()
self.manufacturer = self._io.read_bytes(8)
class Trailer(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.key_a = MifareClassic.Key(self._io, self, self._root)
self._raw_access_bits = self._io.read_bytes(3)
_io__raw_access_bits = KaitaiStream(BytesIO(self._raw_access_bits))
self.access_bits = MifareClassic.Trailer.AccessConditions(_io__raw_access_bits, self, self._root)
self.user_byte = self._io.read_u1()
self.key_b = MifareClassic.Key(self._io, self, self._root)
class AccessConditions(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.raw_chunks = [None] * (self._parent.ac_count_of_chunks)
for i in range(self._parent.ac_count_of_chunks):
self.raw_chunks[i] = self._io.read_bits_int_be(4)
class TrailerAc(KaitaiStruct):
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._read()
def _read(self):
pass
@property
def can_read_key_b(self):
"""key A is required."""
if hasattr(self, '_m_can_read_key_b'):
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
self._m_can_read_key_b = self.ac.inv_shift_val <= 2
return self._m_can_read_key_b if hasattr(self, '_m_can_read_key_b') else None
@property
def can_write_keys(self):
if hasattr(self, '_m_can_write_keys'):
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
self._m_can_write_keys = ((((self.ac.inv_shift_val + 1) % 3) != 0) and (self.ac.inv_shift_val < 6))
return self._m_can_write_keys if hasattr(self, '_m_can_write_keys') else None
@property
def can_write_access_bits(self):
if hasattr(self, '_m_can_write_access_bits'):
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
self._m_can_write_access_bits = self.ac.bits[2].b
return self._m_can_write_access_bits if hasattr(self, '_m_can_write_access_bits') else None
@property
def key_b_controls_write(self):
if hasattr(self, '_m_key_b_controls_write'):
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
self._m_key_b_controls_write = not (self.can_read_key_b)
return self._m_key_b_controls_write if hasattr(self, '_m_key_b_controls_write') else None
class ChunkBitRemap(KaitaiStruct):
def __init__(self, bit_no, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bit_no = bit_no
self._read()
def _read(self):
pass
@property
def shift_value(self):
if hasattr(self, '_m_shift_value'):
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
self._m_shift_value = (-1 if self.bit_no == 1 else 1)
return self._m_shift_value if hasattr(self, '_m_shift_value') else None
@property
def chunk_no(self):
if hasattr(self, '_m_chunk_no'):
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
self._m_chunk_no = (((self.inv_chunk_no + self.shift_value) + self._parent._parent.ac_count_of_chunks) % self._parent._parent.ac_count_of_chunks)
return self._m_chunk_no if hasattr(self, '_m_chunk_no') else None
@property
def inv_chunk_no(self):
if hasattr(self, '_m_inv_chunk_no'):
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
self._m_inv_chunk_no = (self.bit_no + self.shift_value)
return self._m_inv_chunk_no if hasattr(self, '_m_inv_chunk_no') else None
class DataAc(KaitaiStruct):
def __init__(self, ac, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.ac = ac
self._read()
def _read(self):
pass
@property
def read_key_a_required(self):
if hasattr(self, '_m_read_key_a_required'):
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
self._m_read_key_a_required = self.ac.val <= 4
return self._m_read_key_a_required if hasattr(self, '_m_read_key_a_required') else None
@property
def write_key_b_required(self):
if hasattr(self, '_m_write_key_b_required'):
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
self._m_write_key_b_required = (( ((not (self.read_key_a_required)) or (self.read_key_b_required)) ) and (not (self.ac.bits[0].b)))
return self._m_write_key_b_required if hasattr(self, '_m_write_key_b_required') else None
@property
def write_key_a_required(self):
if hasattr(self, '_m_write_key_a_required'):
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
self._m_write_key_a_required = self.ac.val == 0
return self._m_write_key_a_required if hasattr(self, '_m_write_key_a_required') else None
@property
def read_key_b_required(self):
if hasattr(self, '_m_read_key_b_required'):
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
self._m_read_key_b_required = self.ac.val <= 6
return self._m_read_key_b_required if hasattr(self, '_m_read_key_b_required') else None
@property
def decrement_available(self):
if hasattr(self, '_m_decrement_available'):
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
self._m_decrement_available = (( ((self.ac.bits[1].b) or (not (self.ac.bits[0].b))) ) and (not (self.ac.bits[2].b)))
return self._m_decrement_available if hasattr(self, '_m_decrement_available') else None
@property
def increment_available(self):
if hasattr(self, '_m_increment_available'):
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
self._m_increment_available = (( ((not (self.ac.bits[0].b)) and (not (self.read_key_a_required)) and (not (self.read_key_b_required))) ) or ( ((not (self.ac.bits[0].b)) and (self.read_key_a_required) and (self.read_key_b_required)) ))
return self._m_increment_available if hasattr(self, '_m_increment_available') else None
class Ac(KaitaiStruct):
def __init__(self, index, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.index = index
self._read()
def _read(self):
pass
class AcBit(KaitaiStruct):
def __init__(self, i, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.i = i
self.chunk = chunk
self._read()
def _read(self):
pass
@property
def n(self):
if hasattr(self, '_m_n'):
return self._m_n if hasattr(self, '_m_n') else None
self._m_n = ((self.chunk >> self.i) & 1)
return self._m_n if hasattr(self, '_m_n') else None
@property
def b(self):
if hasattr(self, '_m_b'):
return self._m_b if hasattr(self, '_m_b') else None
self._m_b = self.n == 1
return self._m_b if hasattr(self, '_m_b') else None
@property
def bits(self):
if hasattr(self, '_m_bits'):
return self._m_bits if hasattr(self, '_m_bits') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_bits = [None] * (self._parent._parent.ac_bits)
for i in range(self._parent._parent.ac_bits):
self._m_bits[i] = MifareClassic.Trailer.AccessConditions.Ac.AcBit(self.index, self._parent.chunks[i].chunk, self._io, self, self._root)
self._io.seek(_pos)
return self._m_bits if hasattr(self, '_m_bits') else None
@property
def val(self):
"""c3 c2 c1."""
if hasattr(self, '_m_val'):
return self._m_val if hasattr(self, '_m_val') else None
self._m_val = (((self.bits[2].n << 2) | (self.bits[1].n << 1)) | self.bits[0].n)
return self._m_val if hasattr(self, '_m_val') else None
@property
def inv_shift_val(self):
if hasattr(self, '_m_inv_shift_val'):
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
self._m_inv_shift_val = (((self.bits[0].n << 2) | (self.bits[1].n << 1)) | self.bits[2].n)
return self._m_inv_shift_val if hasattr(self, '_m_inv_shift_val') else None
class ValidChunk(KaitaiStruct):
def __init__(self, inv_chunk, chunk, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.inv_chunk = inv_chunk
self.chunk = chunk
self._read()
def _read(self):
pass
@property
def valid(self):
if hasattr(self, '_m_valid'):
return self._m_valid if hasattr(self, '_m_valid') else None
self._m_valid = (self.inv_chunk ^ self.chunk) == 15
return self._m_valid if hasattr(self, '_m_valid') else None
@property
def data_acs(self):
if hasattr(self, '_m_data_acs'):
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_data_acs = [None] * ((self._parent.acs_in_sector - 1))
for i in range((self._parent.acs_in_sector - 1)):
self._m_data_acs[i] = MifareClassic.Trailer.AccessConditions.DataAc(self.acs_raw[i], self._io, self, self._root)
self._io.seek(_pos)
return self._m_data_acs if hasattr(self, '_m_data_acs') else None
@property
def remaps(self):
if hasattr(self, '_m_remaps'):
return self._m_remaps if hasattr(self, '_m_remaps') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_remaps = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
self._m_remaps[i] = MifareClassic.Trailer.AccessConditions.ChunkBitRemap(i, self._io, self, self._root)
self._io.seek(_pos)
return self._m_remaps if hasattr(self, '_m_remaps') else None
@property
def acs_raw(self):
if hasattr(self, '_m_acs_raw'):
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_acs_raw = [None] * (self._parent.acs_in_sector)
for i in range(self._parent.acs_in_sector):
self._m_acs_raw[i] = MifareClassic.Trailer.AccessConditions.Ac(i, self._io, self, self._root)
self._io.seek(_pos)
return self._m_acs_raw if hasattr(self, '_m_acs_raw') else None
@property
def trailer_ac(self):
if hasattr(self, '_m_trailer_ac'):
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_trailer_ac = MifareClassic.Trailer.AccessConditions.TrailerAc(self.acs_raw[(self._parent.acs_in_sector - 1)], self._io, self, self._root)
self._io.seek(_pos)
return self._m_trailer_ac if hasattr(self, '_m_trailer_ac') else None
@property
def chunks(self):
if hasattr(self, '_m_chunks'):
return self._m_chunks if hasattr(self, '_m_chunks') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_chunks = [None] * (self._parent.ac_bits)
for i in range(self._parent.ac_bits):
self._m_chunks[i] = MifareClassic.Trailer.AccessConditions.ValidChunk(self.raw_chunks[self.remaps[i].inv_chunk_no], self.raw_chunks[self.remaps[i].chunk_no], self._io, self, self._root)
self._io.seek(_pos)
return self._m_chunks if hasattr(self, '_m_chunks') else None
@property
def ac_bits(self):
if hasattr(self, '_m_ac_bits'):
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
self._m_ac_bits = 3
return self._m_ac_bits if hasattr(self, '_m_ac_bits') else None
@property
def acs_in_sector(self):
if hasattr(self, '_m_acs_in_sector'):
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
self._m_acs_in_sector = 4
return self._m_acs_in_sector if hasattr(self, '_m_acs_in_sector') else None
@property
def ac_count_of_chunks(self):
if hasattr(self, '_m_ac_count_of_chunks'):
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
self._m_ac_count_of_chunks = (self.ac_bits * 2)
return self._m_ac_count_of_chunks if hasattr(self, '_m_ac_count_of_chunks') else None
| [
"kaitai-bot@kaitai.io"
] | kaitai-bot@kaitai.io |
1fbcd7bd37afad6462b14d788d8271722fd51e88 | 5168ad378c0b6c1c6e6f8d066df6af4f522a431a | /docs/source/conf.py | efa7d5df358a36ced14be0c93e3094d62ad788cf | [
"MIT"
] | permissive | luizvbo/s2cholar | 4f6d36faa2f83fc6ee57871b95d99c08dc7c1bf7 | 7f2be800168ef792230f7759bc7131c5f59e2103 | refs/heads/main | 2023-08-13T10:29:07.901668 | 2021-09-30T18:33:52 | 2021-09-30T18:33:52 | 405,347,748 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 's2cholar'
copyright = '2021, Luiz Otavio Vilas Boas Oliveira'
author = 'Luiz Otavio Vilas Boas Oliveira'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'myst_parser'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"luiz.vbo@gmail.com"
] | luiz.vbo@gmail.com |
6e3f3b1486dc70ef90cb88af554179fd8f6dc4d5 | 59090da2fe4e848b986c704b1ecf06ebe2d730b1 | /conferences/items.py | 459b144b8d8631dcf42753824838e783ac88a031 | [
"MIT"
] | permissive | manuphatak/conferences | 75449d2b16d546d4c66e9363369331239c74c9bd | 67e8880fe7049c003650d83e090b95cc09b45da5 | refs/heads/master | 2021-06-05T04:03:18.071859 | 2015-12-25T01:13:18 | 2015-12-25T01:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ConferencesItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"bionikspoon@gmail.com"
] | bionikspoon@gmail.com |
160a8c1425366e30469d3987b9b3cc9387782023 | b598ad130979c415e9bdbc75b6791cef3e1d4f8c | /cosmetics.py | 8e30552a26acd405fd35d373d2e7d6a6b81a74d4 | [] | no_license | djoverton/whats-in-your-cosmetics | 58ab7a77997b149d2319e6016eee2008b4a8bad9 | 9d6f1952ffbe934ee23c158822733b45e02f8993 | refs/heads/master | 2021-05-31T11:24:56.003869 | 2016-06-10T13:16:48 | 2016-06-10T13:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | """
Data source: http://www.healthdata.gov/dataset/chemicals-cosmetics
"""
import json
import operator
import matplotlib.pyplot as plt
import numpy as np
datafile = open("chemicals.json")
data = json.load(datafile)
#Count the number of times each chemical appears
chemcounts = {}
for item in data["data"]:
if chemcounts.has_key(item[14]):
chemcounts[item[14]] += 1
else:
chemcounts[item[14]] = 1
#Sort by frequency
sortedcounts = sorted(chemcounts.items(), key=operator.itemgetter(1), reverse=True)
#Plot 10 most frequently occurring chemicals
figure = plt.figure()
width = .35
ind = np.arange(10)
xs = [i[0] for i in sortedcounts[:10]]
ys = [i[1] for i in sortedcounts[:10]]
plt.bar(ind, ys, width=width)
plt.xticks(ind + width / 2, xs, rotation=90)
for i in range(len(xs)):
print str(xs[i]) + ": " + str(ys[i])
plt.show()
"""
Top 10 potentially harmful chemicals found in cosmetics according to the California Safe Cosmetics Program (CSCP) in the California Department of Public Health.
http://www.healthdata.gov/dataset/chemicals-cosmetics
Titanium dioxide: 63864
Retinol/retinyl esters, when in daily dosages in excess of 10,000 IU, or 3,000 retinol equivalents: 2153
Butylated hydroxyanisole: 1832
Cocamide diethanolamine: 1391
Retinyl palmitate: 1042
"Trade Secret": 727
Vitamin A palmitate: 715
Mica: 512
Silica, crystalline (airborne particles of respirable size): 482
Carbon black: 474
"Worst" product categories, according to number of instances of harmful chemicals reported:
Makeup Products (non-permanent): 49459
Nail Products: 9408
Skin Care Products: 5977
Sun-Related Products: 4449
Bath Products: 2324
Hair Coloring Products: 1616
Hair Care Products (non-coloring): 1302
Tattoos and Permanent Makeup: 714
Personal Care Products: 640
Fragrances: 460
"Worst" companies, according to number of instances of harmful chemicals reported:
NYX: 3227
bareMinerals: 2412
Sally Hansen: 1774
Sephora: 1771
Victoria's Secret Beauty: 1721
CoverGirl: 1645
NARS: 1537
No7: 1472
CLARINS PARIS: 1401
Rimmel - London: 1362
""" | [
"djoverton@gmail.com"
] | djoverton@gmail.com |
d7a555b59ad57304e08e0a28c5a8d9d871648a2e | 09946a669abcaf8f9b0f22b56b4b036b38f435ef | /setup.py | c28459796b933829bb6c88ae551de23c13c11f03 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | vmishra-uu/pyCEDLAR | 676a7f03f760bdf4bf95239ceedaf77dd376ad0d | 9544eb39f45c63a6a9d63741019002cdf6c95024 | refs/heads/master | 2022-11-09T20:28:13.717403 | 2020-06-19T09:40:11 | 2020-06-19T09:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyCEDLAR",
version="1.0.0",
author="Zsolt Elter",
description="pyCEDLAR: Package to estimate Cumulative Effective dose and Lifetime Attributable Risk",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ezsolti/pyCEDLAR",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"numpy",
"scipy"
]
)
| [
"zsolt@phy-draupnir.physics.uu.se"
] | zsolt@phy-draupnir.physics.uu.se |
37dcfb2bd2200cc648ab737e317d319edfd9d269 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/EnthoughtBase/enthought/logger/agent/attachments.py | 4d8f00f577f6d82f2c2ec0b1a7f4b5a14dd94aef | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | """ Attach relevant project files.
FIXME: there are no public project plugins for Envisage 3, yet. In any case,
this stuff should not be hard-coded, but extensible via extension points. The
code remains here because we can reuse the zip utility code in that extensible
rewrite.
"""
import logging
import os.path
from email import Encoders
from email.MIMEBase import MIMEBase
from enthought.traits.api import Any, HasTraits
logger = logging.getLogger(__name__)
class Attachments(HasTraits):
application = Any()
message = Any()
def __init__(self, message, **traits):
traits = traits.copy()
traits['message'] = message
super(Attachments, self).__init__(**traits)
# FIXME: all of the package_*() methods refer to deprecated project plugins.
def package_workspace(self):
if self.application is None:
pass
workspace = self.application.get_service('enthought.envisage.project.IWorkspace')
if workspace is not None:
dir = workspace.path
self._attach_directory(dir)
return
def package_single_project(self):
if self.application is None:
pass
single_project = self.application.get_service('enthought.envisage.single_project.ModelService')
if single_project is not None:
dir = single_project.location
self._attach_directory(dir)
def package_any_relevant_files(self):
self.package_workspace()
self.package_single_project()
return
def _attach_directory(self, dir):
relpath = os.path.basename(dir)
import zipfile
from cStringIO import StringIO
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
msg = MIMEBase(maintype, subtype)
file_object = StringIO()
zip = zipfile.ZipFile(file_object, 'w')
_append_to_zip_archive(zip, dir, relpath)
zip.close()
msg.set_payload(file_object.getvalue())
Encoders.encode_base64(msg) # Encode the payload using Base64
msg.add_header('Content-Disposition', 'attachment', filename='project.zip')
self.message.attach(msg)
file_object.close()
def _append_to_zip_archive(zip, dir, relpath):
""" Add all files in and below directory dir into zip archive"""
for filename in os.listdir(dir):
path = os.path.join(dir, filename)
if os.path.isfile(path):
name = os.path.join(relpath, filename)
zip.write(path, name)
logger.debug('adding %s to error report' % path)
else:
if filename != ".svn": # skip svn files if any
subdir = os.path.join(dir, filename)
_append_to_zip_archive(zip, subdir, os.path.join(relpath, filename))
return
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
f6045942d8ce16ba7028a713122f43871d0a5830 | 71c3c9c7732c7662b2af9d533ababd21d1ef430b | /src/sVM.py | 5a1b5c87aa48b71266a76cb3068090082a917067 | [] | no_license | mirzaelahi/MachineLearningGenericInterface | 272c30606447655300509314961e634d20ed7b5f | bf09da8a595617a422a5666ac702d7b804662260 | refs/heads/master | 2021-08-20T03:26:30.298136 | 2017-11-28T04:22:02 | 2017-11-28T04:22:02 | 110,456,752 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,657 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 09:05:26 2017
@author: Mirza Elahi
"""
from predictor import predictor, scoring, algo
from sklearn import svm
import logging
import numpy as np
class sVM( predictor ):
def __init__(self, loggingLevel = logging.INFO, enableLoggingTime = False):
# kNN class constructor
super(sVM, self).__init__(loggingLevel, enableLoggingTime)
self.kernel = 'linear'
self.C=1
self.gamma='auto'
self.max_iter = 50
# sweeping for best method with cross validation
self.kernelSweep = ['linear', 'poly', 'rbf']
self.CSweep = [1, 100, 1000]
self.gammaSweep = ['auto', 10, 100]
def toString(self):
""" Print parameters of current model
"""
pStr = "Current model:\n\tSVM model with \n\t\tkernel = %s\n\t\tC = %d\n\t\tgamma = %s\n" \
% (self.kernel, self.C, str(self.gamma))
return pStr
def getModel(self, kernel=None, C=None, gamma=None):
""" Temporary model generation
"""
if kernel is not None:
self.kernel = kernel
if C is not None:
self.C = C
if gamma is not None:
self.gamma = gamma
pModel = svm.SVC(kernel=self.kernel, C=self.C, gamma=self.gamma,
max_iter=self.max_iter)
return pModel
def loadModel(self, kernel=None, C=None, gamma=None):
""" load internal model
"""
if kernel is not None:
self.kernel = kernel
if C is not None:
self.C = C
if gamma is not None:
self.gamma = gamma
self.model = []
self.model = self.getModel(kernel=self.kernel, C=self.C,
gamma=self.gamma)
def makeSweepingList(self, kernelSweep=None, CSweep=None, gammaSweep=None):
""" making a list with all combinations of sweeping parameters
"""
if kernelSweep is not None:
self.kernelSweep = kernelSweep
if CSweep is not None:
self.CSweep = CSweep
if gammaSweep is not None:
self.gammaSweep = gammaSweep
self.sweepingList = [[i, j, k] for i in self.kernelSweep \
for j in self.CSweep for k in self.gammaSweep]
return self.sweepingList
def loadParametersFromList(self, params=['linear', 1, 'auto']):
""" override model parameters for the object from params
"""
self.kernel = params[0]
self.C = params[1]
self.gamma = params[2]
def doubleCrossValidate(self, pfeatures, pClass, nFoldOuter=5,
nFoldInner=4, fileName=None, pModel=None,
scoring=scoring.ACCURACY,
isStratified=False):
"""function for cross validation
"""
# if model is given, override with internal model
if pModel is not None:
self.model = pModel
bestParamList=[]
ValScoreList=[]
ValScoreStdList = []
TestScoreList = []
TestConfList = []
self.makeSweepingList(self.kernelSweep, self.CSweep, self.gammaSweep)
# indexes for train and test
pKF = self.getKFold(pfeatures, nFold=nFoldOuter,
isStratified=isStratified)
foldNo = 1
print( 'Double cross validation with fold %d started ...\n' %(nFoldOuter) )
OuterInnerFoldData = []
# folds loop
for train_index, test_index in pKF.split( pfeatures, pClass ):
pFeatureTrain = pfeatures[train_index]
pFeatureTest = pfeatures[test_index]
pClassTrain= pClass[train_index]
pClassTest= pClass[test_index]
bestScoreMean = -1E5
eachInnerFoldData = []
# param sweeping list loop
for params in self.sweepingList:
# loading parameters from sweeping list
self.loadParametersFromList( params=params )
# loading new model with definite parameters
self.loadModel()
score, \
accuracy, \
conf, \
mccs = self.mySingleCrossValidate( pFeatureTrain, pClassTrain,
scoring=scoring,
nFold=nFoldInner,
isStratified=isStratified)
scoreMean = score.mean()
scoreStd = score.std()
#print params
print scoreMean
if scoreMean > bestScoreMean:
bestScoreMean = scoreMean
bestScoreStd = scoreStd
bestParams = params
#bestModel = self.model
self.saveModel(fileName='best_svm')
eachInnerFoldData.append( [score, accuracy, mccs, conf] )
OuterInnerFoldData.append(eachInnerFoldData)
# loading best model through inner cross validation
# model in 'best_svm'
self.loadSavedModel(fileName='best_svm')
self.trainModel( pFeatureTrain , pClassTrain)
#print(self.model)
# test model
classPred = self.testModel(pFeatureTest)
#metrices
testScore, testaccuracy, avgPrecScore, matConf, matCohenKappa, \
strClassificationReport, mcc = self.getMetrics( classTest = pClassTest,
classPred = classPred,
scoring=scoring,
boolPrint = False)
printstr1 = "Best model for fold #%d is kernel=%s, C=%d, gamma=%s with \n\t" \
% ( foldNo, bestParams[0], bestParams[1], str(bestParams[2]) )
printstr2 = "Val. Score %0.5f\n\t" % ( bestScoreMean )
printstr3 = "Test Score. %0.5f\n" % ( testScore )
print printstr1 + printstr2 + printstr3
ValScoreList.append(bestScoreMean)
ValScoreStdList.append(bestScoreStd)
TestScoreList.append(testScore)
TestConfList.append(matConf)
bestParamList.append(bestParams)
foldNo += 1
if fileName is not None:
# OuterInnerFoldData
# [OuterFoldNo][ParamListIndex][Score, Accu, MCC, Conf][InnerFoldNo]
self.saveDoubleCrossValidData( fileName=fileName,
ValScoreList = ValScoreList,
ValScoreStdList = ValScoreStdList,
TestScoreList = TestScoreList,
TestConfList = TestConfList,
bestParamList = bestParamList,
OuterInnerFoldData= OuterInnerFoldData,
sweepingList = self.sweepingList,
OuterFoldNo = nFoldOuter,
InnerFoldNo = nFoldInner,
scoring = scoring,
algorithm = algo.SVM )
return np.array(ValScoreList), np.array(ValScoreStdList), \
np.array(TestScoreList), bestParamList, OuterInnerFoldData | [
"me5vp@virginia.edu"
] | me5vp@virginia.edu |
149409ce45b8fbf6a81e10f26e291fbce528b5b2 | 76e0e98a3b582763a83291350291579c5906821d | /examples/settings.py | 626265cc852db4f78c09c5297a3d10d951634c41 | [
"MIT"
] | permissive | sehmon/python-client | 2a114c6cab90eeb9e5891329561105abb1ca8aea | ccc641982d65b1ab217985e027a4a3e0f5557d75 | refs/heads/master | 2021-05-28T04:23:31.414595 | 2014-09-25T06:46:06 | 2014-09-25T06:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | API_KEY = 'Your API key here'
| [
"pkernoobie@gmail.com"
] | pkernoobie@gmail.com |
991d807c70d59f473ad69db316ee9c131b22f5ab | bea1eea53462b00d948e17d4f916986d9b217df9 | /python/leetcode/p295.py | 79d1ff60e94d4d030867955028e3fcb6d83397e0 | [] | no_license | aloklal99/naukari | 18a64f5573e7e4929d57258ec208c82a5988f467 | 4ed2d3d7a05890e1d39621465e57bc429ccde19b | refs/heads/master | 2020-03-29T17:59:08.867062 | 2019-03-17T06:30:03 | 2019-03-17T06:30:03 | 150,189,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,916 | py | import operator
import collections
class Heap:
def __init__(self):
self._store = [None] # index 0 is not used
def __len__(self):
return len(self._store) - 1 # 0th element isn't used
def _isCorrectlyOrdered(self, parent, child):
raise Exception("Child class must override this method!")
def _pickSwappableChild(self, childrenTs):
raise Exception("Child class must override this method!")
def peekRoot(self):
return self._store[1]
def _swap(self, idx1, idx2):
self._store[idx1], self._store[idx2] = self._store[idx2], self._store[idx1]
def _getParent(self, child):
return int(child/2)
def _bubbleUp(self, child):
parent = self._getParent(child)
# last child is at @2 so we want to stop looking for parent when child is at 1 (head)
while child > 1 and (not self._isCorrectlyOrdered(parent, child)):
self._swap(parent, child)
child = parent
parent = self._getParent(child)
def _percolateDown(self, parent):
print(f"_percolateDown(parent={parent})")
done = False
while not done:
children = self._getSwappableChild(parent)
child = self._pickSwappableChild(children)
print(f"_percolateDown \t child: {child}")
if child:
self._swap(parent, child)
parent = child
else:
done = True
def add(self, val):
self._store.append(val)
self._bubbleUp(len(self._store) - 1)
def removeTop(self):
if len(self._store) == 2:
return self._store.pop()
else:
top = self._store[1]
last = self._store.pop()
self._store[1] = last
self._percolateDown(1)
return top
def _getChildren(self, parent):
return [2*parent+i for i in [0, 1] if 2*parent+i < len(self._store)]
def _getSwappableChild(self, parent):
return (child for child in self._getChildren(parent) if not self._isCorrectlyOrdered(parent, child))
class MinHeap(Heap):
def _isCorrectlyOrdered(self, parent, child):
return False if self._store[parent] > self._store[child] else True
def _pickSwappableChild(self, children):
return min(children, key=lambda x: self._store[x], default=None)
class MaxHeap(Heap):
def _isCorrectlyOrdered(self, parent, child):
return False if self._store[parent] < self._store[child] else True
def _pickSwappableChild(self, children):
return max(children, key=lambda x: self._store[x], default=None)
class MedianFinder0:
def __init__(self):
"""
initialize your data structure here.
"""
self.rightHeap = MinHeap()
self.leftHeap = MaxHeap()
def addNum(self, num):
"""
:type num: int
:rtype: void
"""
if len(self.leftHeap) and len(self.rightHeap):
if num >= self.rightHeap.peekRoot():
if len(self.rightHeap) <= len(self.leftHeap):
self.rightHeap.add(num)
else: # right heap is already larger
self.leftHeap.add(self.rightHeap.removeTop())
self.rightHeap.add(num)
else:
if len(self.leftHeap) <= len(self.rightHeap):
self.leftHeap.add(num)
else: # left heap is already larger
if num > self.leftHeap.peekRoot():
self.rightHeap.add(num)
else:
self.rightHeap.add(self.leftHeap.removeTop())
self.leftHeap.add(num)
elif len(self.leftHeap):
if num > self.leftHeap.peekRoot():
self.rightHeap.add(num)
else:
self.rightHeap.add(self.leftHeap.removeTop())
self.leftHeap.add(num)
elif len(self.rightHeap) > 0:
if num <= self.rightHeap.peekRoot():
self.leftHeap.add(num)
else:
self.leftHeap.add(self.rightHeap.removeTop())
self.rightHeap.add(num)
else: # both heaps are empty!
self.leftHeap.add(num)
def findMedian(self):
"""
:rtype: float
"""
leftL = len(self.leftHeap)
rightL = len(self.rightHeap)
if leftL == rightL:
if leftL:
return (self.rightHeap.peekRoot() + self.leftHeap.peekRoot())/2
else: # both left and right heaps are empty!
return None
elif leftL > rightL:
return self.leftHeap.peekRoot()
else:
return self.rightHeap.peekRoot()
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| [
"allal@ebay.com"
] | allal@ebay.com |
ecbe1ae9755a38424f26c42c80934e9910dfb4db | ad8a4a257d3a261a7f7a67a69418406bfb2cc7ab | /common-scrapers/common_src/scrapers/among_us_scraper.py | 0d28fcfeeaed46d9cb3b366d8869f07a9a673506 | [
"MIT"
] | permissive | mrPaintMan/blog-scraper | b832b72b5e0d35391055d9f2358b754629085475 | 9b1ff3d398bd23d799d86c9a62ec76a6950555cc | refs/heads/develop | 2022-02-04T13:04:12.209678 | 2021-02-15T19:27:12 | 2021-02-15T19:27:12 | 222,109,337 | 0 | 1 | MIT | 2020-12-29T08:25:58 | 2019-11-16T14:14:51 | Python | UTF-8 | Python | false | false | 1,440 | py | from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, now, remove_dups
SOURCE_CODE = "among_us"
WEBSITE = "https://innersloth.itch.io/among-us/devlog"
ALT_IMAGE = 'https://img.itch.zone/aW1nLzE3MzAzNTQucG5n/original/6ZlfCk.png'
FILENAME = "../resources/data/among_us.txt"
def get_source():
name = "Among Us"
description = 'The booming murder multiplayer game everyone is talking about!'
profile_image = 'https://img.itch.zone/aW1hZ2UyL3VzZXIvMTg5NzU5LzE3MzAzNTcucG5n/original/7quYQx.png'
return Source(SOURCE_CODE, name, description, profile_image, ALT_IMAGE, None)
def scrape():
soup = make_soup(WEBSITE)
data = []
for post in soup.find("ul", {"class": "blog_post_list_widget"}):
date = post.find("abbr").get("title").replace("-", "").replace(" ", "").replace(":", "")[0:-2]
title = post.find("a", {"class": "title"}).text.strip()
link = post.find("a", {"class": "title"}).get("href")
alt_image = ALT_IMAGE
image_element = post.find("img", {"class": "post_image"})
image = image_element.get("src").replace(" ", "%20") if image_element else ALT_IMAGE
data.append(Post(None, date, title, link, image, alt_image, SOURCE_CODE, None))
if len(data) % 25 == 0:
print(now() + f"Processed {len(data)} posts")
return remove_dups(data)
| [
"fille.palmqvist@icloud.com"
] | fille.palmqvist@icloud.com |
2e89a9bd74c09e3531412e11b310be4b94ae18d1 | 2a39fe8bd203531c9bcdb470d19b80beac665eae | /model_cluster.py | 288cf9409b0694d16f6334c5ea877ffeafd2e726 | [] | no_license | davidharvey1986/lenstoolTools | 7bf11af1a38700503a731c6fe7e83fdc92bf58c1 | 85bcf729603d34341f5f41c57c4e233b08055baa | refs/heads/master | 2021-09-08T14:29:52.695461 | 2018-03-10T13:54:50 | 2018-03-10T13:54:50 | 124,657,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,715 | py | '''
This script has 2 functions:
1. model_cluster( ra, dec, cluster, \
halos=None, \
best_file=None)
This models the input cluster and returns a structure
from simulate_project with shear, chi, etc.
'''
import numpy as np
import ipdb as pdb
import astro_tools as at
import idlsave as idlsave
import lensing as l
import copy as copy
import glob as glob
import os
def model_cluster( ra, dec, cluster, \
halos=None, \
best_file=None):
'''
Model the NFW signal of the cluster using the
input from halos
'''
if best_file is None:
dataDir = '/Users/DavidHarvey/Documents/Work/Trails/data/rerun/'+cluster
best_file = dataDir+'/best.par'
runmode, potentials = l.lenstool.read_best( filename=best_file)
space = l.simulations.templates.space()
space.lens[0].del_profile('isothermal')
space.source[0].ell_disp = 0.
space.source[0].ra = ra
space.source[0].dec = dec
space.telescope.nGalaxies = len(dec)
space.lens[0].redshift = potentials[0]['z_lens']['float']
space.source[0].redshift = 1.0
space.lens[0].ra = potentials[0]['ra']['float']
space.lens[0].dec = potentials[0]['dec']['float']
if halos is not None:
space.lens[0].ra = halos['halos'][0]['gal']['ra'][0]
space.lens[0].dec = halos['halos'][0]['gal']['dec'][0]
space.lens[0].profiles['nfw'].args['mass'] = \
potentials[0]['m200']['str'].astype(np.double)
space.lens[0].profiles['nfw'].args['conc'] = \
potentials[0]['concentration']['float']
space.lens[0].profiles['nfw'].args['ellipticity'] = \
potentials[0]['ellipticite']['float']
space.lens[0].profiles['nfw'].args['potential_angle'] = \
potentials[0]['angle_pos']['float']
scale_radius = l.profiles.nfw.scale_radius(space.lens[0].profiles['nfw'].args['mass'], \
space.lens[0].profiles['nfw'].args['conc'],\
potentials[0]['z_lens']['float'])
space.lens[0].profiles['nfw'].args['scale_radius'] = scale_radius
for iHalo in range(1,len(potentials)):
space.add_lens()
space.lens[iHalo].redshift = potentials[0]['z_lens']['float']
space.source[iHalo].redshift = 1.0
space.lens[iHalo].ra = potentials[iHalo]['ra']['float']
space.lens[iHalo].dec = potentials[iHalo]['dec']['float']
if halos is not None:
space.lens[iHalo].ra = halos['halos'][iHalo]['gal']['ra'][0]
space.lens[iHalo].dec = halos['halos'][iHalo]['gal']['dec'][0]
space.lens[iHalo].profiles['nfw'].args['mass'] = \
potentials[iHalo]['m200']['str'].astype(np.double)
space.lens[iHalo].profiles['nfw'].args['conc'] = \
potentials[iHalo]['concentration']['float']
space.lens[iHalo].profiles['nfw'].args['ellipticity'] = \
potentials[iHalo]['ellipticite']['float']
space.lens[iHalo].profiles['nfw'].args['potential_angle'] = \
potentials[iHalo]['angle_pos']['float']
scale_radius = l.profiles.nfw.scale_radius(space.lens[iHalo].profiles['nfw'].args['mass'], \
space.lens[iHalo].profiles['nfw'].args['conc'],\
potentials[iHalo]['z_lens']['float'])
space.lens[iHalo].profiles['nfw'].args['scale_radius'] = scale_radius
space.reload(positions=False)
space.weak_lensing()
return space
| [
"davidharvey1986@googlemail.com"
] | davidharvey1986@googlemail.com |
d0a1b1145ebbc788688570ca058dc333dd0e511f | 9005ceee1cba387ad2e199941bb3944f307a8701 | /2-2_lists.py | 964f5c1e99968a7fa0bc19c263356a45df7b9940 | [] | no_license | ckoder999/Diving_into_Python | d46b9ca93075c77fd5efd1b6e6c76417659f050d | abc452d6dfaa76d3515e21b71f666171d8a5f440 | refs/heads/master | 2020-12-09T20:27:58.681763 | 2020-02-05T13:22:24 | 2020-02-05T13:22:24 | 233,410,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | import random
numbers = []
for _ in range(10):
numbers.append(random.randint(1, 20))
print(numbers)
print(sorted(numbers, reverse=True))
numbers.sort(reverse=False)
#print(list(reversed(numbers)))
num2 = numbers.copy()
print(num2)
print(numbers.count(10))
| [
"eugene@iMac-Eugene.local"
] | eugene@iMac-Eugene.local |
cd1062321fd6e56a634e4e079cbae30019bcd44c | 4619167ad190cce846812c957e279825153b243f | /singleton.py | 2300b5f16db388d854417496fa0b91b51b69126c | [] | no_license | dimasert/test-designpatterns | bbdc3e006d609f50794b8b3f7bf2491ef4554d33 | 8bc9e6ec93437c06d0685ad58c523535c790e5d2 | refs/heads/master | 2022-11-22T23:51:00.437605 | 2020-07-22T03:30:33 | 2020-07-22T03:30:33 | 281,531,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | class configValues:
__instance = None
@staticmethod
def getInstance():
if configValues.__instance == None:
configValues()
return configValues.__instance
def __init__(self):
""" Virtually private constructor """
if configValues.__instance != None:
raise Exception ("this class is a singleton")
else:
configValues.__instance = self
s = configValues.getInstance()
print(s)
s = configValues.getInstance()
print(s)
| [
"“rivas7676@gmail.com”"
] | “rivas7676@gmail.com” |
c05b1a1ed7b32f4d144a5a7a569da7b418594598 | 45b5860cf499ae9648a6710c31027ba2b8c459a6 | /Lab-10_Locally-Weighted-Regression-Algorithm/Non_parametric_Regression_Algorithm.py | 6056736ba7b94aede1b86eda388b4154d0f452ab | [] | no_license | Swathivcs19/ML_Lab | 21a5fbe46d11384b91129f495045e781bded4195 | be194f2423f0ca4a85213bddab0077f15a1f5ad1 | refs/heads/main | 2023-05-25T21:59:39.942537 | 2021-06-08T05:50:15 | 2021-06-08T05:50:15 | 349,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def kernel(point, xmat, k):
m, n = np.shape(xmat)
weights = np.mat(np.eye((m)))
for j in range(m):
diff = point - X[j]
weights[j, j] = np.exp(diff * diff.T / (-2.0 * k ** 2))
return weights
def localWeight(point, xmat, ymat, k):
wei = kernel(point, xmat, k)
W = (X.T * (wei * X)).I * (X.T * (wei * ymat.T))
return W
def localWeightRegression(xmat, ymat, k):
m, n = np.shape(xmat)
ypred = np.zeros(m)
for i in range(m):
ypred[i] = xmat[i] * localWeight(xmat[i], xmat, ymat, k)
return ypred
# load data points
data = pd.read_csv('10-dataset.csv')
bill = np.array(data.total_bill)
tip = np.array(data.tip)
# preparing and add 1 in bill
mbill = np.mat(bill)
mtip = np.mat(tip)
m = np.shape(mbill)[1]
one = np.mat(np.ones(m))
X = np.hstack((one.T, mbill.T))
# set k here
ypred = localWeightRegression(X, mtip, 0.5)
SortIndex = X[:, 1].argsort(0)
xsort = X[SortIndex][:, 0]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(bill, tip, color='green')
ax.plot(xsort[:, 1], ypred[SortIndex], color='red', linewidth=5)
plt.xlabel('Total bill')
plt.ylabel('Tip')
plt.show(); | [
"noreply@github.com"
] | noreply@github.com |
a2932f506e12855456644b556531aa00a32eef5a | d4fa73218fed1e79bf267719731cf1b41c8ef4a0 | /remember/migrations/0001_initial.py | dbdbc829bc21b146ba7588bb7c3332988509aa2d | [
"MIT"
] | permissive | omkar-dsd/todoer | fe3ed86be8a47b608e6c3151e22b7994e0cb0596 | 7686fb186f6df2ab50ee9bc11ee03008db8b7aa6 | refs/heads/master | 2021-07-11T14:57:15.183887 | 2017-10-15T18:03:14 | 2017-10-15T18:03:14 | 107,035,648 | 0 | 0 | null | 2017-10-15T17:57:35 | 2017-10-15T17:57:35 | null | UTF-8 | Python | false | false | 772 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-10 05:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('img', models.CharField(max_length=256)),
('desc', models.TextField(max_length=512)),
('link', models.URLField()),
('tag', models.CharField(max_length=20)),
],
),
]
| [
"vaibhavshelke017@gmail.com"
] | vaibhavshelke017@gmail.com |
faf9a8602b87e159207c8d6a814d7066c220e804 | 9e3b8a52dcb0df0a12d740bcc346615a85f4944f | /dropLastColumn.py | 7365eec5db507030dc5a60a8a70690728a5c3ce2 | [] | no_license | AikawaKai/BioinformaticsProject | e66f6bdd0cef2253e0402d91197ab3bce8067a77 | c86ec9c83b8ba0c99e0045221edcebaaa0d4082a | refs/heads/master | 2021-07-12T21:06:07.681876 | 2017-10-17T14:10:14 | 2017-10-17T14:10:14 | 103,046,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import sys
import csv
from utility.loadDataSet import transpose
if __name__ == '__main__':
filename = sys.argv[1]
with open(filename, 'r') as file_o:
file_r = csv.reader(file_o, delimiter="\t")
rows = [row for row in file_r]
rows = transpose(rows)
rows = rows[:-1]
rows = transpose(rows)
with open(filename+"2.csv", 'w') as file_w:
writer_csv = csv.writer(file_w, delimiter='\t')
for row in rows:
writer_csv.writerow(row)
| [
"marco.odore@gmail.com"
] | marco.odore@gmail.com |
aceb5cec8b4b7951496428cdb03d1db7f1233ec2 | 460249054f5fdd9bc39280fdcfd7841731bbc7be | /Software/Webcam_Foto/foto.py | bb91be83d27a3ebc898bde296ccc1d6ea9da8a3a | [] | no_license | ingridmiranda/apvoice | 6dc9b85b74a7875cdfe885dc9f630c4d2ba2483e | ae2dadb082e69fc778757cfda73383cbcebaa6aa | refs/heads/master | 2021-08-26T07:06:34.277569 | 2017-11-22T02:23:01 | 2017-11-22T02:23:01 | 110,068,738 | 0 | 0 | null | 2017-11-09T04:49:59 | 2017-11-09T04:49:58 | null | UTF-8 | Python | false | false | 809 | py | #! /usr/bin/python
#!-*- coding: utf-8 -*-
import RPi.GPIO as gpio
import time
import pygame, sys
import pygame.camera
import time
WEBCAM_DIR = "/home/pi/projeto/foto_webcam"
pygame.init()
pygame.camera.init()
cam = pygame.camera.Camera("/dev/video0", (640,480))
gpio.setmode(gpio.BCM)
gpio.setup(17, gpio.IN, pull_up_down = gpio.PUD_DOWN)
while True:
if(gpio.input(17) == 0):
("Botão desligado")
else:
print("Botão pressionado")
cam.start()
image = cam.get_image()
cam.stop
timestamp = time.strftime("%d-%m-%Y_%H-%M-%S", time.localtime())
filename = "%s/%s.jpg" % (WEBCAM_DIR, timestamp)
# salvando a imagem
pygame.image.save(image, filename)
print "Salvo"
time.sleep(1)
gpio.cleanup()
exit()
| [
"ingridmsousa@hotmail.com"
] | ingridmsousa@hotmail.com |
0472ec122864f3198a5e0a270e905eca815c99b1 | 6a14702c187139b9189f0d7d5468c98ce03b6560 | /django/hello/hello/urls.py | 389a3e04af2801379125a1a96a505a28145912f1 | [] | no_license | boboivo/python-learning | 902b736fee24facbd773ccac6e6af42ad4663e5e | 67a4267d12b8fab94e955d76cacd29af697288d3 | refs/heads/master | 2023-04-28T09:51:10.850980 | 2019-09-29T10:27:33 | 2019-09-29T10:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | """hello URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from first import views
urlpatterns = [
path('admin/', admin.site.urls),
path('articles/<int:year>/', views.year_archive)
]
| [
"innerpeace.zhai@gmail.com"
] | innerpeace.zhai@gmail.com |
a4becb7bb74d1bc89c50607b9bb58cfd03ce77ee | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/littlecodersh_EasierLife/EasierLife-master/Programs/PCMusicViaWechat/run.py | 711a41774b068b3a371af0624be25cf996578762 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 785 | py | #coding=utf8
import os
import itchat
from NetEaseMusicApi import interact_select_song
HELP_MSG = u'''\
欢迎使用微信网易云音乐
帮助: 显示帮助
关闭: 关闭歌曲
歌名: 按照引导播放音乐\
'''
with open('stop.mp3', 'w') as f: pass
def close_music():
os.startfile('stop.mp3')
@itchat.msg_register(itchat.content.TEXT)
def music_player(msg):
if msg['ToUserName'] != 'filehelper': return
if msg['Text'] == u'关闭':
close_music()
itchat.send(u'音乐已关闭', 'filehelper')
if msg['Text'] == u'帮助':
itchat.send(HELP_MSG, 'filehelper')
else:
itchat.send(interact_select_song(msg['Text']), 'filehelper')
itchat.auto_login(True, enableCmdQR=True)
itchat.send(HELP_MSG, 'filehelper')
itchat.run()
| [
"659338505@qq.com"
] | 659338505@qq.com |
218f1ac9b9f52195bde99c8a5159a28cca52e4ec | e1ec811a380d624b3c3c95c88f6369248862ca52 | /matplotlib/Zadanie3.py | 27dd6c27a81b54bc4e4a63f74585f7cbcc70198b | [] | no_license | michalj11121/Wd-155280 | ac53e138de089d9a53fc287582052ccd9ed224a2 | 7eee6bf2334c39ddf0eb93a555df40f1c241ea1a | refs/heads/master | 2022-08-21T16:27:27.506633 | 2020-05-31T17:43:01 | 2020-05-31T17:43:01 | 245,125,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
x=np.arange(0,30.1,0.1)
s=np.sin(x)
c=np.cos(x)
plt.plot(x,s,'-r',label='sin(x)')
plt.plot(x,c,'--b',label='cos(x)')
plt.title("sin(x) i cos(x) dla x[0,30] z krokiem 0.1")
plt.xlabel('x')
plt.ylabel('sin(x) i cos(x)')
plt.xticks(np.arange(0,31))
plt.legend()
plt.show() | [
"noreply@github.com"
] | noreply@github.com |
31a22036f099c73ba1c28df51244f2704b311551 | 891902687207fb335b65dbb8d31d6e20301764f9 | /pe007.py | 3521b25495844f22a773b26856805b717f686ada | [] | no_license | maecchi/PE | 93bd050eaca2733aa37db6ca493b820fe3d7a351 | 3d9092635807f0036719b65adb16f1c0926c2321 | refs/heads/master | 2020-05-04T16:38:36.476355 | 2012-06-10T05:26:10 | 2012-06-10T05:26:10 | 1,746,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe007.py - Project Euler
#
prime_array = []
sum_of_array = len(prime_array)
number = 1
while (len(prime_array) < 10001) :
is_prime = True
number += 1
if sum_of_array == 0:
if number != 1:
prime_array.append(number)
else:
for i in prime_array :
if not number % i:
is_prime = False
break
if is_prime:
prime_array.append(number)
sum_of_array = len(prime_array)
print prime_array[len(prime_array)-1]
| [
"aos81922710@yahoo.co.jp"
] | aos81922710@yahoo.co.jp |
de025b7b8d83683bd44b1e5b4b977f3007113196 | 2a48fb1c369a97ff82974030289613e9ccabdcd7 | /ml/rl/types.py | 0fca511ab279c28aa71bf2520a63f458fde9a3ca | [
"BSD-3-Clause"
] | permissive | Tsolmon17/Horizon | 9f5a192529f424fb8f1f47a4dddca97963c94aa2 | c13522660be6a63b793424db52a1824b0765b22d | refs/heads/master | 2020-06-23T18:40:44.078097 | 2019-07-24T22:23:39 | 2019-07-24T22:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,089 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Type, TypeVar, Union, cast
import numpy as np
import torch
@dataclass
class BaseDataClass:
def _replace(self, **kwargs):
return cast(type(self), dataclasses.replace(self, **kwargs))
def pin_memory(self):
pinned_memory = {}
for field in dataclasses.fields(self):
f = getattr(self, field.name)
if isinstance(f, (torch.Tensor, BaseDataClass)):
pinned_memory[field.name] = f.pin_memory()
return self._replace(**pinned_memory)
def cuda(self):
cuda_tensor = {}
for field in dataclasses.fields(self):
f = getattr(self, field.name)
if isinstance(f, torch.Tensor):
cuda_tensor[field.name] = f.cuda(non_blocking=True)
elif isinstance(f, BaseDataClass):
cuda_tensor[field.name] = f.cuda()
return self._replace(**cuda_tensor)
@dataclass
class ValuePresence(BaseDataClass):
value: torch.Tensor
presence: Optional[torch.ByteTensor]
@dataclass
class IdFeatureConfig(BaseDataClass):
"""
This describes how to map raw features to model features
"""
feature_id: int # integer feature ID
id_mapping_name: str # key to ModelPreprocessingConfig.id_mapping_config
@dataclass
class IdFeatureBase(BaseDataClass):
"""
User should subclass this class and define each ID feature as a field w/ torch.Tensor
as the type of the field.
"""
@classmethod
# TODO: This should be marked as abstractmethod but mypi doesn't like it.
# See https://github.com/python/mypy/issues/5374
# @abc.abstractmethod
def get_feature_config(cls) -> Dict[str, IdFeatureConfig]:
"""
Returns mapping from feature name, which must be a field in this dataclass, to
feature config.
"""
raise NotImplementedError
T = TypeVar("T", bound="SequenceFeatureBase")
@dataclass
class FloatFeatureInfo(BaseDataClass):
name: str
feature_id: int
@dataclass
class SequenceFeatureBase(BaseDataClass):
id_features: Optional[IdFeatureBase]
float_features: Optional[ValuePresence]
@classmethod
# TODO: This should be marked as abstractmethod but mypi doesn't like it.
# See https://github.com/python/mypy/issues/5374
# @abc.abstractmethod
def get_max_length(cls) -> int:
"""
Subclass should return the max-length of this sequence. If the raw data is
longer, feature extractor will truncate the front. If the raw data is shorter,
feature extractor will fill the front with zero.
"""
raise NotImplementedError
@classmethod
def get_float_feature_infos(cls) -> List[FloatFeatureInfo]:
"""
Override this if the sequence has float features associated to it.
Float features should be stored as ID-score-list, where the ID part corresponds
to primary entity ID of the sequence. E.g., if this is a sequence of previously
watched videos, then the key should be video ID.
"""
return []
@classmethod
def prototype(cls: Type[T]) -> T:
float_feature_infos = cls.get_float_feature_infos()
float_features = (
torch.rand(1, cls.get_max_length(), len(float_feature_infos))
if float_feature_infos
else None
)
fields = dataclasses.fields(cls)
id_features = None
for field in fields:
if field.name != "id_features" or not isinstance(field.type, type):
continue
id_feature_fields = dataclasses.fields(field.type)
id_features = field.type( # noqa
**{
f.name: torch.randint(1, (1, cls.get_max_length()))
for f in id_feature_fields
}
)
break
return cls(id_features=id_features, float_features=float_features)
U = TypeVar("U", bound="SequenceFeatures")
@dataclass
class SequenceFeatures(BaseDataClass):
"""
A stub-class for sequence features in the model. All fileds should be subclass of
SequenceFeatureBase above.
"""
@classmethod
def prototype(cls: Type[U]) -> U:
fields = dataclasses.fields(cls)
return cls(**{f.name: f.type.prototype() for f in fields}) # type: ignore
@dataclass
class IdMapping(BaseDataClass):
ids: List[int]
@dataclass
class ModelFeatureConfig(BaseDataClass):
float_feature_infos: List[FloatFeatureInfo]
id_mapping_config: Dict[str, IdMapping]
sequence_features_type: Optional[Type[SequenceFeatures]]
@dataclass
class FeatureVector(BaseDataClass):
float_features: ValuePresence
# sequence_features should ideally be Mapping[str, IdListFeature]; however,
# that doesn't work well with ONNX.
# User is expected to dynamically define the type of id_list_features based
# on the actual features used in the model.
sequence_features: Optional[SequenceFeatureBase] = None
# Experimental: sticking this here instead of putting it in float_features
# because a lot of places derive the shape of float_features from
# normalization parameters.
time_since_first: Optional[torch.Tensor] = None
@dataclass
class ActorOutput(BaseDataClass):
action: torch.Tensor
log_prob: Optional[torch.Tensor] = None
@dataclass
class PreprocessedFeatureVector(BaseDataClass):
float_features: torch.Tensor
# Experimental: sticking this here instead of putting it in float_features
# because a lot of places derive the shape of float_features from
# normalization parameters.
time_since_first: Optional[torch.Tensor] = None
@dataclass
class PreprocessedState(BaseDataClass):
"""
This class makes it easier to plug modules into predictor
"""
state: PreprocessedFeatureVector
@classmethod
def from_tensor(cls, state: torch.Tensor):
assert isinstance(state, torch.Tensor)
return cls(state=PreprocessedFeatureVector(float_features=state))
def __init__(self, state):
super().__init__()
if isinstance(state, torch.Tensor):
raise ValueError("Use from_tensor()")
self.state = state
@dataclass
class PreprocessedStateAction(BaseDataClass):
state: PreprocessedFeatureVector
action: PreprocessedFeatureVector
@classmethod
def from_tensors(cls, state: torch.Tensor, action: torch.Tensor):
assert isinstance(state, torch.Tensor)
assert isinstance(action, torch.Tensor)
return cls(
state=PreprocessedFeatureVector(float_features=state),
action=PreprocessedFeatureVector(float_features=action),
)
def __init__(self, state, action):
super().__init__()
if isinstance(state, torch.Tensor) or isinstance(action, torch.Tensor):
raise ValueError(f"Use from_tensors() {type(state)} {type(action)}")
self.state = state
self.action = action
@dataclass
class RawStateAction(BaseDataClass):
state: FeatureVector
action: FeatureVector
@dataclass
class CommonInput(BaseDataClass):
"""
Base class for all inputs, both raw and preprocessed
"""
reward: torch.Tensor
time_diff: torch.Tensor
step: Optional[torch.Tensor]
not_terminal: torch.Tensor
@dataclass
class PreprocessedBaseInput(CommonInput):
state: PreprocessedFeatureVector
next_state: PreprocessedFeatureVector
@dataclass
class PreprocessedDiscreteDqnInput(PreprocessedBaseInput):
action: torch.Tensor
next_action: torch.Tensor
possible_actions_mask: torch.Tensor
possible_next_actions_mask: torch.Tensor
@dataclass
class PreprocessedParametricDqnInput(PreprocessedBaseInput):
action: PreprocessedFeatureVector
next_action: PreprocessedFeatureVector
possible_actions: PreprocessedFeatureVector
possible_actions_mask: torch.ByteTensor
possible_next_actions: PreprocessedFeatureVector
possible_next_actions_mask: torch.ByteTensor
tiled_next_state: PreprocessedFeatureVector
@dataclass
class PreprocessedPolicyNetworkInput(PreprocessedBaseInput):
action: PreprocessedFeatureVector
next_action: PreprocessedFeatureVector
@dataclass
class PreprocessedMemoryNetworkInput(PreprocessedBaseInput):
action: Union[torch.Tensor, torch.Tensor]
@dataclass
class RawBaseInput(CommonInput):
state: FeatureVector
next_state: FeatureVector
@dataclass
class RawDiscreteDqnInput(RawBaseInput):
action: torch.ByteTensor
next_action: torch.ByteTensor
possible_actions_mask: torch.ByteTensor
possible_next_actions_mask: torch.ByteTensor
def preprocess(
self, state: PreprocessedFeatureVector, next_state: PreprocessedFeatureVector
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
return PreprocessedDiscreteDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal.float(),
state,
next_state,
self.action.float(),
self.next_action.float(),
self.possible_actions_mask.float(),
self.possible_next_actions_mask.float(),
)
def preprocess_tensors(self, state: torch.Tensor, next_state: torch.Tensor):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
return PreprocessedDiscreteDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal.float(),
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
self.action.float(),
self.next_action.float(),
self.possible_actions_mask.float(),
self.possible_next_actions_mask.float(),
)
@dataclass
class RawParametricDqnInput(RawBaseInput):
action: FeatureVector
next_action: FeatureVector
possible_actions: FeatureVector
possible_actions_mask: torch.ByteTensor
possible_next_actions: FeatureVector
possible_next_actions_mask: torch.ByteTensor
tiled_next_state: FeatureVector
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: PreprocessedFeatureVector,
next_action: PreprocessedFeatureVector,
possible_actions: PreprocessedFeatureVector,
possible_next_actions: PreprocessedFeatureVector,
tiled_next_state: PreprocessedFeatureVector,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
assert isinstance(action, PreprocessedFeatureVector)
assert isinstance(next_action, PreprocessedFeatureVector)
assert isinstance(possible_actions, PreprocessedFeatureVector)
assert isinstance(possible_next_actions, PreprocessedFeatureVector)
assert isinstance(tiled_next_state, PreprocessedFeatureVector)
return PreprocessedParametricDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
next_action,
possible_actions,
self.possible_actions_mask,
possible_next_actions,
self.possible_next_actions_mask,
tiled_next_state,
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: torch.Tensor,
next_action: torch.Tensor,
possible_actions: torch.Tensor,
possible_next_actions: torch.Tensor,
tiled_next_state: torch.Tensor,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
assert isinstance(action, torch.Tensor)
assert isinstance(next_action, torch.Tensor)
assert isinstance(possible_actions, torch.Tensor)
assert isinstance(possible_next_actions, torch.Tensor)
assert isinstance(tiled_next_state, torch.Tensor)
return PreprocessedParametricDqnInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
PreprocessedFeatureVector(float_features=action),
PreprocessedFeatureVector(float_features=next_action),
PreprocessedFeatureVector(float_features=possible_actions),
self.possible_actions_mask,
PreprocessedFeatureVector(float_features=possible_next_actions),
self.possible_next_actions_mask,
PreprocessedFeatureVector(float_features=tiled_next_state),
)
@dataclass
class RawPolicyNetworkInput(RawBaseInput):
action: FeatureVector
next_action: FeatureVector
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: PreprocessedFeatureVector,
next_action: PreprocessedFeatureVector,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
assert isinstance(action, PreprocessedFeatureVector)
assert isinstance(next_action, PreprocessedFeatureVector)
return PreprocessedPolicyNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
next_action,
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: torch.Tensor,
next_action: torch.Tensor,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
assert isinstance(action, torch.Tensor)
assert isinstance(next_action, torch.Tensor)
return PreprocessedPolicyNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
PreprocessedFeatureVector(float_features=action),
PreprocessedFeatureVector(float_features=next_action),
)
@dataclass
class RawMemoryNetworkInput(RawBaseInput):
action: Union[FeatureVector, torch.ByteTensor]
def preprocess(
self,
state: PreprocessedFeatureVector,
next_state: PreprocessedFeatureVector,
action: Optional[torch.Tensor] = None,
):
assert isinstance(state, PreprocessedFeatureVector)
assert isinstance(next_state, PreprocessedFeatureVector)
if action is not None:
assert isinstance(action, torch.Tensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
action,
)
else:
assert isinstance(self.action, torch.ByteTensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
state,
next_state,
self.action.float(),
)
def preprocess_tensors(
self,
state: torch.Tensor,
next_state: torch.Tensor,
action: Optional[torch.Tensor] = None,
):
assert isinstance(state, torch.Tensor)
assert isinstance(next_state, torch.Tensor)
if action is not None:
assert isinstance(action, torch.Tensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
action,
)
else:
assert isinstance(self.action, torch.ByteTensor)
return PreprocessedMemoryNetworkInput(
self.reward,
self.time_diff,
self.step,
self.not_terminal,
PreprocessedFeatureVector(float_features=state),
PreprocessedFeatureVector(float_features=next_state),
self.action.float(),
)
@dataclass
class ExtraData(BaseDataClass):
mdp_id: Optional[
np.ndarray
] = None # Need to use a numpy array because torch doesn't support strings
sequence_number: Optional[torch.Tensor] = None
action_probability: Optional[torch.Tensor] = None
max_num_actions: Optional[int] = None
metrics: Optional[torch.Tensor] = None
@dataclass
class PreprocessedTrainingBatch(BaseDataClass):
training_input: Union[
PreprocessedBaseInput,
PreprocessedDiscreteDqnInput,
PreprocessedParametricDqnInput,
PreprocessedMemoryNetworkInput,
PreprocessedPolicyNetworkInput,
]
extras: Any
def batch_size(self):
return self.training_input.state.float_features.size()[0]
@dataclass
class RawTrainingBatch(BaseDataClass):
training_input: Union[
RawBaseInput, RawDiscreteDqnInput, RawParametricDqnInput, RawPolicyNetworkInput
]
extras: Any
def batch_size(self):
return self.training_input.state.float_features.value.size()[0]
def preprocess(
self,
training_input: Union[
PreprocessedBaseInput,
PreprocessedDiscreteDqnInput,
PreprocessedParametricDqnInput,
PreprocessedMemoryNetworkInput,
PreprocessedPolicyNetworkInput,
],
) -> PreprocessedTrainingBatch:
return PreprocessedTrainingBatch(
training_input=training_input, extras=self.extras
)
@dataclass
class SingleQValue(BaseDataClass):
q_value: torch.Tensor
@dataclass
class AllActionQValues(BaseDataClass):
q_values: torch.Tensor
@dataclass
class MemoryNetworkOutput(BaseDataClass):
mus: torch.Tensor
sigmas: torch.Tensor
logpi: torch.Tensor
reward: torch.Tensor
not_terminal: torch.Tensor
last_step_lstm_hidden: torch.Tensor
last_step_lstm_cell: torch.Tensor
all_steps_lstm_hidden: torch.Tensor
@dataclass
class DqnPolicyActionSet(BaseDataClass):
greedy: int
softmax: int
@dataclass
class SacPolicyActionSet:
greedy: torch.Tensor
greedy_propensity: float
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
5f5c955a40731900ee78a9e0db443561f0717f58 | 69f8b6a0474dc867d7a8e739d0ea0e358a5603b2 | /qq/models.py | 22d3020fe92514755cdbe70a885788417a1d4399 | [] | no_license | tuchuanchuan/qq-spider | 3da29c6295872e3e64156ce84bf36ed7898d6a5b | e4fe01ba4b34c55bd0ea5403c93758d3b89b2770 | refs/heads/master | 2021-05-14T05:52:02.577402 | 2018-01-04T10:44:55 | 2018-01-04T10:44:55 | 116,230,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | # coding: utf-8
import pymysql
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine("mysql+pymysql://root:root@192.168.0.13:3306/albums_from_web?charset=utf8")
class TencentTable(Base):
__tablename__ = 'tencent_albums'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
url = Column(String(100), nullable=False, unique=True)
name = Column(String(1000), nullable=False)
artist_name = Column(String(1000), nullable=False)
release_company = Column(String(100))
release_date = Column(String(10), default='', )
company_id = Column(Integer)
class TencentCompanyTable(Base):
__tablename__ = 'tencent_companies'
id = Column(Integer, primary_key=True)
company = Column(String(100), nullable=False)
company_id = Column(Integer, nullable=True)
album_total = Column(Integer, default=0)
mv_total = Column(Integer, default=0)
song_total = Column(Integer, default=0)
singer_total = Column(Integer, default=0)
# class TencentUrl(Base):
# __tablename__ = 'tencent_url'
# id = Column(Integer, primary_key=True)
# url = Column(String(100), nullable=False, unique=True)
# artist_mid = Column(String(50), nullable=False)
class TencentArtist(Base):
__tablename__ = 'tencent_artists'
id = Column(Integer, primary_key=True)
artist = Column(String(100), nullable=False)
artist_id = Column(Integer, nullable=False)
artist_mid = Column(String(50), nullable=False)
page = Column(Integer, nullable=False)
class TencentGetAlbum(Base):
__tablename__ = 'tencent_get_album'
id = Column(Integer, primary_key=True)
album_mid = Column(String(100), nullable=False)
page = Column(Integer, nullable=False)
release_date = Column(String, default='')
Base.metadata.create_all(engine)
| [
"tuchuanchuan@kanjian.com"
] | tuchuanchuan@kanjian.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.